diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..9c83142145c4befa8d9aab39a10039e3cd251672
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,92 @@
+# =============================================================================
+# GitPilot - Hugging Face Spaces Dockerfile
+# =============================================================================
+# Follows the official HF Docker Spaces pattern:
+# https://huggingface.co/docs/hub/spaces-sdks-docker
+#
+# Architecture:
+# React UI (Vite build) -> FastAPI backend -> OllaBridge Cloud / any LLM
+# =============================================================================
+
+# -- Stage 1: Build React frontend -------------------------------------------
+FROM node:20-slim AS frontend-builder
+
+WORKDIR /build
+
+COPY frontend/package.json frontend/package-lock.json ./
+RUN npm ci --include=dev
+
+COPY frontend/ ./
+RUN npm run build
+
+# -- Stage 2: Python runtime -------------------------------------------------
+FROM python:3.12-slim
+
+# System deps needed at runtime
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ git curl ca-certificates \
+ && rm -rf /var/lib/apt/lists/*
+
+# HF Spaces runs containers as UID 1000 — create user early (official pattern)
+RUN useradd -m -u 1000 user
+
+USER user
+
+ENV HOME=/home/user \
+ PATH=/home/user/.local/bin:$PATH \
+ PYTHONUNBUFFERED=1 \
+ GITPILOT_PROVIDER=ollabridge \
+ OLLABRIDGE_BASE_URL=https://ruslanmv-ollabridge.hf.space \
+ GITPILOT_OLLABRIDGE_MODEL=qwen2.5:1.5b \
+ CORS_ORIGINS="*" \
+ GITPILOT_CONFIG_DIR=/tmp/gitpilot
+
+WORKDIR $HOME/app
+
+# ── Install Python dependencies BEFORE copying source code ──────────
+# This ensures pip install layers are cached even when code changes.
+COPY --chown=user pyproject.toml README.md ./
+
+# Step 1: lightweight deps (cached layer)
+RUN pip install --no-cache-dir --upgrade pip && \
+ pip install --no-cache-dir \
+ "fastapi>=0.111.0" \
+ "uvicorn[standard]>=0.30.0" \
+ "httpx>=0.27.0" \
+ "python-dotenv>=1.1.0,<1.2.0" \
+ "typer>=0.12.0,<0.24.0" \
+ "pydantic>=2.7.0,<2.12.0" \
+ "rich>=13.0.0" \
+ "pyjwt[crypto]>=2.8.0"
+
+# Step 2: heavy ML/agent deps (separate layer for better caching)
+RUN pip install --no-cache-dir \
+ "litellm" \
+ "crewai[anthropic]>=0.76.9" \
+ "crewai-tools>=0.13.4" \
+ "anthropic>=0.39.0" \
+ "ibm-watsonx-ai>=1.1.0" \
+ "langchain-ibm>=0.3.0"
+
+# ── Now copy source code (cache-busting only affects layers below) ──
+COPY --chown=user gitpilot ./gitpilot
+
+# Copy built frontend into gitpilot/web/
+COPY --chown=user --from=frontend-builder /build/dist/ ./gitpilot/web/
+
+# Step 3: editable install of gitpilot itself (deps already satisfied)
+RUN pip install --no-cache-dir --no-deps -e .
+
+EXPOSE 7860
+
+# NOTE: Do NOT add a Docker HEALTHCHECK here.
+# HF Spaces has its own HTTP probe on app_port (7860) and ignores the
+# Docker HEALTHCHECK directive.
+
+# Direct CMD — no shell script, fewer failure points.
+CMD ["python", "-m", "uvicorn", "gitpilot.api:app", \
+ "--host", "0.0.0.0", \
+ "--port", "7860", \
+ "--workers", "2", \
+ "--limit-concurrency", "10", \
+ "--timeout-keep-alive", "120"]
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..5de6067d4f4f2254ae1c5014e1c868aac1865212
--- /dev/null
+++ b/README.md
@@ -0,0 +1,80 @@
+---
+title: GitPilot
+emoji: "\U0001F916"
+colorFrom: blue
+colorTo: indigo
+sdk: docker
+app_port: 7860
+startup_duration_timeout: 5m
+pinned: true
+license: apache-2.0
+short_description: Enterprise AI Coding Assistant for GitHub Repositories
+---
+
+# GitPilot — Hugging Face Spaces
+
+**Enterprise-grade AI coding assistant** for GitHub repositories with multi-LLM support, visual workflow insights, and intelligent code analysis.
+
+## What This Does
+
+This Space runs the full GitPilot stack:
+1. **React Frontend** — Professional dark-theme UI with chat, file browser, and workflow visualization
+2. **FastAPI Backend** — 80+ API endpoints for repository management, AI chat, planning, and execution
+3. **Multi-Agent AI** — CrewAI orchestration with 7 switchable agent topologies
+
+## LLM Providers
+
+GitPilot connects to your favorite LLM provider. Configure in **Admin / LLM Settings**:
+
+| Provider | Default | API Key Required |
+|---|---|---|
+| **OllaBridge Cloud** (default) | `qwen2.5:1.5b` | No |
+| OpenAI | `gpt-4o-mini` | Yes |
+| Anthropic Claude | `claude-sonnet-4-5` | Yes |
+| Ollama (local) | `llama3` | No |
+| Custom endpoint | Any model | Optional |
+
+## Quick Start
+
+1. Open the Space UI
+2. Enter your **GitHub Token** (Settings -> GitHub)
+3. Select a repository from the sidebar
+4. Start chatting with your AI coding assistant
+
+## API Endpoints
+
+| Endpoint | Description |
+|---|---|
+| `GET /api/health` | Health check |
+| `POST /api/chat/message` | Chat with AI assistant |
+| `POST /api/chat/plan` | Generate implementation plan |
+| `GET /api/repos` | List repositories |
+| `GET /api/settings` | View settings |
+| `GET /docs` | Interactive API docs (Swagger) |
+
+## Connect to OllaBridge Cloud
+
+By default, GitPilot connects to [OllaBridge Cloud](https://huggingface.co/spaces/ruslanmv/ollabridge) for LLM inference. This provides free access to open-source models without needing API keys.
+
+To use your own OllaBridge instance:
+1. Go to **Admin / LLM Settings**
+2. Select **OllaBridge** provider
+3. Enter your OllaBridge URL and model
+
+## Environment Variables
+
+Configure via HF Spaces secrets:
+
+| Variable | Description | Default |
+|---|---|---|
+| `GITPILOT_PROVIDER` | LLM provider | `ollabridge` |
+| `OLLABRIDGE_BASE_URL` | OllaBridge Cloud URL | `https://ruslanmv-ollabridge.hf.space` |
+| `GITHUB_TOKEN` | GitHub personal access token | - |
+| `OPENAI_API_KEY` | OpenAI API key (if using OpenAI) | - |
+| `ANTHROPIC_API_KEY` | Anthropic API key (if using Claude) | - |
+
+## Links
+
+- [GitPilot Repository](https://github.com/ruslanmv/gitpilot)
+- [OllaBridge Cloud](https://huggingface.co/spaces/ruslanmv/ollabridge)
+- [Documentation](https://github.com/ruslanmv/gitpilot#readme)
diff --git a/REPO_README.md b/REPO_README.md
new file mode 100644
index 0000000000000000000000000000000000000000..22772bf137d9933194bacab335e80098041cd8a0
--- /dev/null
+++ b/REPO_README.md
@@ -0,0 +1,402 @@
+
+
+
+
+# GitPilot
+
+### The first open-source multi-agent AI coding assistant.
+
+Multiple specialized agents — including Explorer, Planner, Coder, and Reviewer — collaborate seamlessly on every task. By default, GitPilot requests confirmation before executing high-impact actions. Switch to Auto or Plan mode at any time.
+
+[](https://pypi.org/project/gitcopilot/)
+[](https://www.python.org/)
+[](LICENSE)
+[](https://marketplace.visualstudio.com/)
+[](#contributing)
+
+[**Get Started**](#get-started) · [VS Code](#vs-code-extension) · [Web App](#web-app) · [How It Works](#how-it-works) · [Providers](#supported-ai-providers)
+
+
+
+---
+
+
+
+
+
+
+
+
+## Why GitPilot?
+
+Most AI coding tools are a **single model behind a chat box**. GitPilot is fundamentally different: it deploys a **team of four specialized AI agents** that collaborate on every task — just like a real engineering team.
+
+| Agent | Role | What it does |
+|---|---|---|
+| **Explorer** | Context | Reads your full repo, git log, test suite, and dependencies so the plan starts with real knowledge — not guesses |
+| **Planner** | Strategy | Drafts a safe, step-by-step plan with diffs and surfaces risks before any file is touched |
+| **Coder** | Execution | Writes code, runs your tests, and self-corrects on failure — iterating until the suite passes |
+| **Reviewer** | Quality | Validates the output, re-runs the suite, and drafts a commit message and PR summary |
+
+**You control how the agent runs.** Three execution modes — selectable per session from the VS Code compose bar or backend API:
+
+| Mode | Default? | Behavior |
+|---|---|---|
+| **Ask** | Yes | Prompts you before each dangerous action (write, edit, run, commit). You see the diff and click Allow / Deny. |
+| **Auto** | | Executes all tools automatically. Fastest for experienced users who trust the plan. |
+| **Plan** | | Read-only. Generates and displays the plan but blocks all file writes and commands. |
+
+Diffs are shown before they're applied. Tests run before anything is committed. No surprises.
+
+### What else sets GitPilot apart
+
+- 🧭 **Works where you work** — VS Code, web app, and CLI share one login, one history, and one set of approvals.
+- 🧠 **Any LLM, zero lock-in** — OpenAI, Anthropic Claude, IBM Watsonx, Ollama (local & free) or OllaBridge. Switch in settings, no code change.
+- 🔐 **Private by default** — run the entire stack locally with Ollama. No telemetry, no data leaves your machine.
+- 🏢 **Enterprise-ready, Apache 2.0 open source** — 854 passing tests, Docker & Hugging Face deployment recipes, audit the code yourself.
+- 🌍 **Runs anywhere** — laptop, private cloud, air-gapped environments, or managed hosting. Your repo, your rules.
+
+---
+
+## What is GitPilot?
+
+GitPilot is an AI assistant that helps you ship better code, faster — without giving up control. It understands your project, plans changes you can read before they happen, writes the code, runs your tests, and drafts the commit message and pull request for you.
+
+**Works with any language. Runs on any LLM.** Start free and local with Ollama, or bring your own OpenAI, Claude, or Watsonx key.
+
+```
+You: "Add input validation to the login form"
+
+GitPilot:
+ 1. Reading src/auth/login.ts...
+ 2. Planning 3 changes...
+ 3. Editing login.ts → [Apply Patch] [Revert]
+ 4. Running npm test... 3 passed
+ 5. Done — files written to your workspace.
+```
+
+---
+
+## Get Started
+
+### Option 1: VS Code Extension (recommended)
+
+Install the extension, configure your LLM, and start chatting:
+
+```
+1. Open VS Code
+2. Install "GitPilot Workspace" from Extensions
+3. Click the GitPilot icon in the sidebar
+4. Choose your AI provider (OpenAI, Claude, Ollama...)
+5. Start asking questions about your code
+```
+
+### Option 2: Web App
+
+Run the full web interface with Docker:
+
+```bash
+git clone https://github.com/ruslanmv/gitpilot.git
+cd gitpilot
+docker compose up
+```
+
+Open [http://localhost:3000](http://localhost:3000) in your browser.
+
+### Live Demo on Hugging Face
+
+Experience the application in action through our hosted demo environment:
+
+[](https://huggingface.co/spaces/ruslanmv/gitpilot)
+
+🔗 **Access the live demo:**
+[https://huggingface.co/spaces/ruslanmv/gitpilot](https://huggingface.co/spaces/ruslanmv/gitpilot)
+
+### Option 3: Python CLI (fastest)
+
+```bash
+pip install gitcopilot
+gitpilot serve
+```
+
+Open [http://localhost:8000](http://localhost:8000) and you're done.
+
+> **Heads up:** the PyPI package is published as **`gitcopilot`** (the name `gitpilot` was already taken) but the command you run is `gitpilot`. Python **3.11** or **3.12** required.
+
+---
+
+## VS Code Extension
+
+The sidebar panel gives you everything in one place:
+
+| Feature | What it does |
+|---|---|
+| **Chat** | Ask questions, request changes, review code |
+| **Execution Modes** | Bottom bar: `Auto` / `Ask` / `Plan` — controls agent permissions per session |
+| **Plan View** | See the step-by-step plan before changes are made |
+| **Plan Approval** | "Approve & Execute" / "Dismiss" bar — execution waits for your OK |
+| **Tool Approvals** | Per-action Allow / Allow for session / Deny cards (Ask mode) |
+| **Diff Preview** | Review proposed edits in VS Code's native diff viewer |
+| **Apply / Revert** | One click to apply changes, one click to undo |
+| **Quick Actions** | Explain, Review, Fix, Generate Tests, Security Scan |
+| **Smart Commit** | AI-generated commit messages |
+| **Code Lens** | Inline "Explain / Review" hints on functions |
+| **Settings Tab** | Branded settings page (General, Provider, Agent, Editor) |
+| **New Chat** | One click to clear chat and start a fresh session |
+
+### Execution modes
+
+The compose bar includes a mode selector that controls how the multi-agent pipeline runs:
+
+```
+[ Auto | Ask | Plan ] [ Send ] [ New Chat ]
+```
+
+| Mode | VS Code setting | Backend value | What happens |
+|---|---|---|---|
+| **Ask** (default) | `gitpilot.permissionMode: "normal"` | `"normal"` | Each dangerous tool (write, edit, run, commit) shows an approval card |
+| **Auto** | `gitpilot.permissionMode: "auto"` | `"auto"` | Tools execute automatically — no approval prompts |
+| **Plan** | `gitpilot.permissionMode: "plan"` | `"plan"` | Plan is generated and displayed, all writes/commands blocked |
+
+Mode changes are persisted to VS Code settings and synced to the backend via `PUT /api/permissions/mode`.
+
+### How approvals work
+
+```
+You send a request
+ → Explorer reads repo context
+ → Planner drafts step-by-step plan
+ → Plan appears in sidebar (Approve & Execute / Dismiss)
+ → You click Approve
+ → Coder begins execution
+ → Dangerous tool requested (e.g. write_file)
+ → Ask mode: approval card shown (Allow / Allow for session / Deny)
+ → Auto mode: executes immediately
+ → Plan mode: blocked
+ → Tests run, Reviewer validates
+ → Done — Apply Patch or Revert
+```
+
+> **Note:** Simple questions (e.g. "explain this code") may return a direct answer without generating a multi-step plan. This is expected — the planner activates for tasks that require file changes or multi-step execution.
+
+### Code generation and Apply Patch
+
+When you ask GitPilot to create or edit files, the response includes structured `edits` — not just text. The **Apply Patch** button writes them directly to your workspace.
+
+```
+You: "Create a Flask app with app.py, requirements.txt, and README.md"
+
+GitPilot:
+ → LLM generates 3 files with content
+ → Backend extracts structured edits (path + content)
+ → VS Code shows [Apply Patch] [Revert]
+ → You click Apply Patch
+ → 3 files written to disk
+ → Project context refreshes automatically
+ → First file opens in the editor
+```
+
+How it works under the hood:
+- The LLM is instructed to output code blocks with the filename on the fence line (` ```python hello.py`)
+- The backend parses these blocks into `ProposedEdit` objects with file path, kind, and content
+- All paths are sanitized (rejects `../` traversal, absolute paths, drive letters)
+- The extension stores edits in `activeTask.edits` and shows Apply / Revert
+- `PatchApplier` writes files via `vscode.workspace.fs.writeFile`
+- After apply, project context refreshes and the first file opens
+
+> **Note:** For folder-only sessions (no GitHub remote), code generation uses the LLM directly with structured output instructions. For GitHub-connected sessions, the full CrewAI multi-agent pipeline (Explorer → Planner → Coder → Reviewer) handles planning and execution.
+
+### Supported AI Providers
+
+| Provider | Setup | Free? |
+|---|---|---|
+| **Ollama** | Install Ollama, run `ollama pull llama3` | Yes |
+| **OllaBridge** | Works out of the box (cloud Ollama) | Yes |
+| **OpenAI** | Add your API key in settings | Paid |
+| **Claude** | Add your Anthropic API key | Paid |
+| **Watsonx** | Add IBM credentials | Paid |
+
+---
+
+## Web App
+
+The web interface includes:
+
+- Chat with real-time responses
+- GitHub integration (connect your repos)
+- File tree browser
+- Diff viewer with line-by-line changes
+- Pull request creation
+- Session history with checkpoints
+- Multi-repo support
+
+
+
+### Example: File Deletion
+
+
+### Example: Content Generation
+
+
+### Example: File Creation
+
+
+### Example multiple operations
+
+
+### Example of multiagent topologies
+
+
+---
+
+## How It Works
+
+
+
+
+
+
+
+
+GitPilot uses a multi-agent system powered by CrewAI:
+
+1. **Explorer** reads your repo structure, git log, and key files
+2. **Planner** creates a safe step-by-step plan with diffs
+3. **Coder** writes code and runs tests, self-correcting on failure
+4. **Reviewer** validates the output and summarizes what changed
+
+In **Ask** mode (default), you approve every change before it's applied. In **Auto** mode, tools execute without prompts. In **Plan** mode, only the plan is generated — no files are touched.
+
+---
+
+## Project Structure
+
+```
+gitpilot/
+ gitpilot/ Python backend (FastAPI)
+ frontend/ React web app
+ extensions/vscode/ VS Code extension
+ docs/ Documentation and assets
+ tests/ Test suite
+```
+
+---
+
+## Configuration
+
+GitPilot works with environment variables or the settings UI.
+
+**Minimal setup** (Ollama, free, local):
+
+```bash
+# .env
+GITPILOT_PROVIDER=ollama
+OLLAMA_BASE_URL=http://localhost:11434
+GITPILOT_OLLAMA_MODEL=llama3
+```
+
+**Cloud setup** (OpenAI):
+
+```bash
+# .env
+GITPILOT_PROVIDER=openai
+OPENAI_API_KEY=sk-...
+GITPILOT_OPENAI_MODEL=gpt-4o-mini
+```
+
+**Cloud setup** (Claude):
+
+```bash
+# .env
+GITPILOT_PROVIDER=claude
+ANTHROPIC_API_KEY=sk-ant-...
+GITPILOT_CLAUDE_MODEL=claude-sonnet-4-5
+```
+
+All settings can also be changed from the VS Code extension or web UI without editing files.
+
+---
+
+## API
+
+GitPilot exposes a REST + WebSocket API:
+
+| Endpoint | What it does |
+|---|---|
+| `GET /api/status` | Server health check |
+| `POST /api/chat/send` | Send a message, get a response |
+| `POST /api/v2/chat/stream` | Stream agent events (SSE) — accepts `permission_mode` |
+| `WS /ws/v2/sessions/{id}` | Real-time WebSocket streaming |
+| `POST /api/chat/plan` | Generate an execution plan |
+| `POST /api/chat/execute` | Execute a plan |
+| `GET /api/repos` | List connected repositories |
+| `GET /api/sessions` | List chat sessions |
+| `GET /api/permissions` | Current permission policy |
+| `PUT /api/permissions/mode` | Set execution mode: `normal` / `auto` / `plan` |
+| `POST /api/v2/approval/respond` | Approve or deny a tool execution request |
+
+Full API docs at `http://localhost:8000/docs` (Swagger UI).
+
+---
+
+## Deployment
+
+### Hugging Face Spaces
+
+GitPilot runs on Hugging Face Spaces with OllaBridge (free):
+
+```
+Runtime: Docker
+Port: 7860
+Provider: OllaBridge (cloud Ollama)
+```
+
+### Docker Compose
+
+```bash
+docker compose up -d
+# Backend: http://localhost:8000
+# Frontend: http://localhost:3000
+```
+
+### Vercel
+
+The frontend deploys to Vercel. Set `VITE_BACKEND_URL` to your backend.
+
+---
+
+## Contributing
+
+```bash
+# Backend
+cd gitpilot
+pip install -e ".[dev]"
+pytest
+
+# Frontend
+cd frontend
+npm install
+npm run dev
+
+# VS Code Extension
+cd extensions/vscode
+npm install
+make compile
+# Press F5 in VS Code to launch debug host
+```
+
+---
+
+## License
+
+Apache License 2.0. See [LICENSE](LICENSE).
+
+---
+
+
+
+**GitPilot** is made by [Ruslan Magana Vsevolodovna](https://github.com/ruslanmv)
+
+[Star on GitHub](https://github.com/ruslanmv/gitpilot) • [Report a Bug](https://github.com/ruslanmv/gitpilot/issues) • [Request a Feature](https://github.com/ruslanmv/gitpilot/issues)
+
+
diff --git a/frontend/.dockerignore b/frontend/.dockerignore
new file mode 100644
index 0000000000000000000000000000000000000000..9da1acab57b3a83f0649dc5deb28b33600fe4ad3
--- /dev/null
+++ b/frontend/.dockerignore
@@ -0,0 +1,39 @@
+# Node (NOTE: Docker only honors .dockerignore at the build-context root — confirm a root-level copy exists, since the root Dockerfile runs COPY frontend/)
+node_modules/
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+.pnpm-debug.log*
+
+# Build
+dist/
+build/
+
+# Environment
+.env
+.env.local
+.env.development
+.env.test
+.env.production.local
+
+# IDE
+.vscode/
+.idea/
+*.swp
+*.swo
+*~
+
+# OS
+.DS_Store
+Thumbs.db
+
+# Git
+.git
+.gitignore
+
+# Testing
+coverage/
+.nyc_output/
+
+# Misc
+*.log
diff --git a/frontend/App.jsx b/frontend/App.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..9847bb709bcdd064fc8ec75cd068c0f56e08a169
--- /dev/null
+++ b/frontend/App.jsx
@@ -0,0 +1,1173 @@
+import React, { useCallback, useEffect, useMemo, useRef, useState } from "react";
+import StartupScreen from "./components/StartupScreen.jsx";
+import LoginPage from "./components/LoginPage.jsx";
+import RepoSelector from "./components/RepoSelector.jsx";
+import ProjectContextPanel from "./components/ProjectContextPanel.jsx";
+import ChatPanel from "./components/ChatPanel.jsx";
+import LlmSettings from "./components/LlmSettings.jsx";
+import FlowViewer from "./components/FlowViewer.jsx";
+import Footer from "./components/Footer.jsx";
+import ProjectSettingsModal from "./components/ProjectSettingsModal.jsx";
+import SessionSidebar from "./components/SessionSidebar.jsx";
+import ContextBar from "./components/ContextBar.jsx";
+import AddRepoModal from "./components/AddRepoModal.jsx";
+import UserMenu from "./components/UserMenu.jsx";
+import AboutModal from "./components/AboutModal.jsx";
+import {
+ WorkspaceModesTab,
+ SecurityTab,
+ IntegrationsTab,
+ SkillsTab,
+ SessionsTab,
+ AdvancedTab,
+} from "./components/AdminTabs";
+import { apiUrl, safeFetchJSON, fetchStatus } from "./utils/api.js";
+import { initApp } from "./utils/appInit.js";
+
+function makeRepoKey(repo) {
+ if (!repo) return null;
+ return repo.full_name || `${repo.owner}/${repo.name}`;
+}
+
+function uniq(arr) {
+ return Array.from(new Set((arr || []).filter(Boolean)));
+}
+
+function getProviderLabel(status) {
+ if (!status) return "Checking...";
+ return (
+ status?.provider?.name ||
+ status?.provider_name ||
+ status?.provider?.provider ||
+ "Checking..."
+ );
+}
+
+function getBackendVersion(status) {
+ if (!status) return "Checking...";
+ return status?.version || status?.app_version || "Checking...";
+}
+
+export default function App() {
+ const frontendVersion = __APP_VERSION__ || "unknown";
+
+ // ---- Multi-repo context state ----
+ const [contextRepos, setContextRepos] = useState([]);
+ // Each entry: { repoKey: "owner/repo", repo: {...}, branch: "main" }
+ const [activeRepoKey, setActiveRepoKey] = useState(null);
+ const [addRepoOpen, setAddRepoOpen] = useState(false);
+
+ const [activePage, setActivePage] = useState("workspace");
+ const [isAuthenticated, setIsAuthenticated] = useState(false);
+ const [isLoading, setIsLoading] = useState(true);
+ const [userInfo, setUserInfo] = useState(null);
+
+ // Startup / enterprise loader state
+ const [startupPhase, setStartupPhase] = useState("booting");
+ const [startupStatusMessage, setStartupStatusMessage] = useState("Starting application...");
+ const [startupDetailMessage, setStartupDetailMessage] = useState(
+ "Initializing authentication, provider, and workspace context."
+ );
+ const [startupStatusSnapshot, setStartupStatusSnapshot] = useState(null);
+
+ // Repo + Session State Machine
+ const [repoStateByKey, setRepoStateByKey] = useState({});
+ const [toast, setToast] = useState(null);
+ const [settingsOpen, setSettingsOpen] = useState(false);
+ const [aboutOpen, setAboutOpen] = useState(false);
+ const [adminTab, setAdminTab] = useState("overview");
+ const [adminStatus, setAdminStatus] = useState(null);
+
+ // Fetch admin status when overview tab is active
+ useEffect(() => {
+ if (activePage === "admin" && adminTab === "overview") {
+ fetchStatus()
+ .then((data) => setAdminStatus(data))
+ .catch(() => setAdminStatus(null));
+ }
+ }, [activePage, adminTab]);
+
+ // Claude-Code-on-Web: Session sidebar + Environment state
+ const [activeSessionId, setActiveSessionId] = useState(null);
+ const [activeEnvId, setActiveEnvId] = useState("default");
+ const [sessionRefreshNonce, setSessionRefreshNonce] = useState(0);
+
+ // Sidebar collapse state (persisted in localStorage)
+ const [sidebarCollapsed, setSidebarCollapsed] = useState(() => {
+ try {
+ return localStorage.getItem("gitpilot_sidebar_collapsed") === "true";
+ } catch {
+ return false;
+ }
+ });
+
+ const toggleSidebar = useCallback(() => {
+ setSidebarCollapsed((prev) => {
+ const next = !prev;
+ try {
+ localStorage.setItem("gitpilot_sidebar_collapsed", String(next));
+ } catch {}
+ return next;
+ });
+ }, []);
+
+ // Keyboard shortcut: Cmd/Ctrl + B to toggle sidebar
+ useEffect(() => {
+ const handler = (e) => {
+ if ((e.metaKey || e.ctrlKey) && e.key === "b") {
+ e.preventDefault();
+ toggleSidebar();
+ }
+ };
+ window.addEventListener("keydown", handler);
+ return () => window.removeEventListener("keydown", handler);
+ }, [toggleSidebar]);
+
+ // ---- Derived `repo` — keeps all downstream consumers unchanged ----
+ const repo = useMemo(() => {
+ const entry = contextRepos.find((r) => r.repoKey === activeRepoKey);
+ return entry?.repo || null;
+ }, [contextRepos, activeRepoKey]);
+
+ const repoKey = activeRepoKey;
+
+ // Convenient selectors
+ const currentRepoState = repoKey ? repoStateByKey[repoKey] : null;
+
+ const defaultBranch = currentRepoState?.defaultBranch || repo?.default_branch || "main";
+ const currentBranch = currentRepoState?.currentBranch || defaultBranch;
+ const sessionBranches = currentRepoState?.sessionBranches || [];
+ const lastExecution = currentRepoState?.lastExecution || null;
+ const pulseNonce = currentRepoState?.pulseNonce || 0;
+ const chatByBranch = currentRepoState?.chatByBranch || {};
+
+ // ---------------------------------------------------------------------------
+ // Multi-repo context management
+ // ---------------------------------------------------------------------------
+ const addRepoToContext = useCallback((r) => {
+ const key = makeRepoKey(r);
+ if (!key) return;
+
+ setContextRepos((prev) => {
+ if (prev.some((e) => e.repoKey === key)) {
+ setActiveRepoKey(key);
+ return prev;
+ }
+ const entry = { repoKey: key, repo: r, branch: r.default_branch || "main" };
+ return [...prev, entry];
+ });
+
+ setActiveRepoKey(key);
+ setAddRepoOpen(false);
+ }, []);
+
+ const removeRepoFromContext = useCallback((key) => {
+ setContextRepos((prev) => {
+ const next = prev.filter((e) => e.repoKey !== key);
+ setActiveRepoKey((curActive) => {
+ if (curActive === key) {
+ return next.length > 0 ? next[0].repoKey : null;
+ }
+ return curActive;
+ });
+ return next;
+ });
+ }, []);
+
+ const clearAllContext = useCallback(() => {
+ setContextRepos([]);
+ setActiveRepoKey(null);
+ }, []);
+
+ const handleContextBranchChange = useCallback((targetRepoKey, newBranch) => {
+ setContextRepos((prev) =>
+ prev.map((e) =>
+ e.repoKey === targetRepoKey ? { ...e, branch: newBranch } : e
+ )
+ );
+
+ setRepoStateByKey((prev) => {
+ const cur = prev[targetRepoKey];
+ if (!cur) return prev;
+ return {
+ ...prev,
+ [targetRepoKey]: { ...cur, currentBranch: newBranch },
+ };
+ });
+ }, []);
+
+ // Init / reconcile repo state when active repo changes
+ useEffect(() => {
+ if (!repoKey || !repo) return;
+
+ setRepoStateByKey((prev) => {
+ const existing = prev[repoKey];
+ const d = repo.default_branch || "main";
+
+ if (!existing) {
+ return {
+ ...prev,
+ [repoKey]: {
+ defaultBranch: d,
+ currentBranch: d,
+ sessionBranches: [],
+ lastExecution: null,
+ pulseNonce: 0,
+ chatByBranch: {
+ [d]: { messages: [], plan: null },
+ },
+ },
+ };
+ }
+
+ const next = { ...existing };
+ next.defaultBranch = d;
+
+ if (!next.chatByBranch?.[d]) {
+ next.chatByBranch = {
+ ...(next.chatByBranch || {}),
+ [d]: { messages: [], plan: null },
+ };
+ }
+
+ if (!next.currentBranch) next.currentBranch = d;
+
+ return { ...prev, [repoKey]: next };
+ });
+ }, [repoKey, repo?.id, repo?.default_branch]);
+
+ const showToast = (title, message) => {
+ setToast({ title, message });
+ window.setTimeout(() => setToast(null), 5000);
+ };
+
+ // ---------------------------------------------------------------------------
+ // Session management — every chat is backed by a Session (Claude Code parity)
+ // ---------------------------------------------------------------------------
+
+ const _creatingSessionRef = useRef(false);
+
+ const [chatBySession, setChatBySession] = useState({});
+
+ const ensureSession = useCallback(
+ async (sessionName, seedMessages) => {
+ if (activeSessionId) return activeSessionId;
+ if (!repo) return null;
+ if (_creatingSessionRef.current) return null;
+ _creatingSessionRef.current = true;
+
+ try {
+ const token = localStorage.getItem("github_token");
+ const headers = {
+ "Content-Type": "application/json",
+ ...(token ? { Authorization: `Bearer ${token}` } : {}),
+ };
+
+ const res = await fetch("/api/sessions", {
+ method: "POST",
+ headers,
+ body: JSON.stringify({
+ repo_full_name: repoKey,
+ branch: currentBranch,
+ name: sessionName || undefined,
+ repos: contextRepos.map((e) => ({
+ full_name: e.repoKey,
+ branch: e.branch,
+ mode: e.repoKey === activeRepoKey ? "write" : "read",
+ })),
+ active_repo: activeRepoKey,
+ }),
+ });
+
+ if (!res.ok) return null;
+ const data = await res.json();
+ const newId = data.session_id;
+
+ if (seedMessages && seedMessages.length > 0) {
+ setChatBySession((prev) => ({
+ ...prev,
+ [newId]: { messages: seedMessages, plan: null },
+ }));
+ }
+
+ setActiveSessionId(newId);
+ setSessionRefreshNonce((n) => n + 1);
+ return newId;
+ } catch (err) {
+ console.warn("Failed to create session:", err);
+ return null;
+ } finally {
+ _creatingSessionRef.current = false;
+ }
+ },
+ [activeSessionId, repo, repoKey, currentBranch, contextRepos, activeRepoKey]
+ );
+
+ const handleNewSession = async () => {
+ setActiveSessionId(null);
+
+ try {
+ const token = localStorage.getItem("github_token");
+ const headers = {
+ "Content-Type": "application/json",
+ ...(token ? { Authorization: `Bearer ${token}` } : {}),
+ };
+
+ const res = await fetch("/api/sessions", {
+ method: "POST",
+ headers,
+ body: JSON.stringify({
+ repo_full_name: repoKey,
+ branch: currentBranch,
+ repos: contextRepos.map((e) => ({
+ full_name: e.repoKey,
+ branch: e.branch,
+ mode: e.repoKey === activeRepoKey ? "write" : "read",
+ })),
+ active_repo: activeRepoKey,
+ }),
+ });
+
+ if (!res.ok) return;
+ const data = await res.json();
+ setActiveSessionId(data.session_id);
+ setSessionRefreshNonce((n) => n + 1);
+ showToast("Session Created", "New session started.");
+ } catch (err) {
+ console.warn("Failed to create session:", err);
+ }
+ };
+
+ /**
+ * Convert a backend Message object to the frontend chat UI shape.
+ * Backend: { role: "user|assistant|system", content: "...", timestamp, metadata }
+ * Frontend: { from: "user|ai", role: "user|assistant|system", content, answer, ... }
+ */
+ const normalizeBackendMessage = (m) => {
+ const role = m.role || "assistant";
+ const content = m.content || "";
+ if (role === "user") {
+ return { from: "user", role: "user", content, text: content };
+ }
+ if (role === "system") {
+ return { from: "ai", role: "system", content };
+ }
+ // assistant
+ return {
+ from: "ai",
+ role: "assistant",
+ content,
+ answer: content,
+ // Preserve any structured metadata the backend stored (plan, diff, etc.)
+ ...(m.metadata && typeof m.metadata === "object" ? m.metadata : {}),
+ };
+ };
+
+ /**
+ * Fetch persisted messages for a session from the backend.
+ * Returns an array of normalized frontend messages (ready for ChatPanel),
+ * or an empty array on failure.
+ */
+ const fetchSessionMessages = useCallback(async (sessionId) => {
+ if (!sessionId) return [];
+ try {
+ const token = localStorage.getItem("github_token");
+ const headers = { "Content-Type": "application/json" };
+ if (token) headers["Authorization"] = `Bearer ${token}`;
+
+ const res = await fetch(apiUrl(`/api/sessions/${sessionId}/messages`), {
+ headers,
+ });
+ if (!res.ok) {
+ console.warn(`[fetchSessionMessages] ${res.status} for ${sessionId}`);
+ return [];
+ }
+ const data = await res.json();
+ const backendMessages = Array.isArray(data.messages) ? data.messages : [];
+ return backendMessages.map(normalizeBackendMessage);
+ } catch (err) {
+ console.warn(`[fetchSessionMessages] Failed to fetch ${sessionId}:`, err);
+ return [];
+ }
+ }, []);
+
+ /**
+ * Handle click on a session in the sidebar.
+ *
+ * Critical ordering: we must hydrate chatBySession BEFORE setting
+ * activeSessionId, because ChatPanel's session-sync useEffect reads
+ * sessionChatState only when sessionId changes (it does NOT depend on
+ * chatBySession to avoid prop/state loops). If we set activeSessionId
+ * first, ChatPanel would see an empty messages array, then our async
+ * hydration would complete but ChatPanel wouldn't re-sync.
+ */
const handleSelectSession = useCallback(async (session) => {
  // 1. Fetch persisted messages first
  const messages = await fetchSessionMessages(session.id);

  // 2. Seed the chat cache (ChatPanel will read this via sessionChatState)
  setChatBySession((prev) => ({
    ...prev,
    [session.id]: {
      // Keep any previously cached plan; only messages are re-hydrated.
      ...(prev[session.id] || { plan: null }),
      messages,
    },
  }));

  // 3. NOW activate the session — ChatPanel's sync effect will read
  //    the hydrated messages from chatBySession[session.id]
  setActiveSessionId(session.id);
  // Align the repo view with the branch the session was created on.
  if (session.branch && session.branch !== currentBranch) {
    handleBranchChange(session.branch);
  }
  // NOTE(review): handleBranchChange is intentionally omitted from the deps
  // array (declared later in the component) — hence the disable below.
  // eslint-disable-next-line react-hooks/exhaustive-deps
}, [fetchSessionMessages, currentBranch]);
+
/**
 * Remove a deleted session from local state.
 *
 * The cached chat for the deleted id is ALWAYS evicted — even when the
 * deleted session is not the active one — so stale messages cannot linger
 * in memory (the original only evicted when deleting the active session,
 * leaking the cache entry for inactive deletions). If the deleted session
 * was active, we also deactivate it and reset the branch-scoped chat for
 * the current repo so the workspace shows a clean slate.
 */
const handleDeleteSession = useCallback(
  (deletedId) => {
    // Unconditional cache eviction (fixes stale-cache leak for
    // non-active session deletions). No-op if the id was never cached.
    setChatBySession((prev) => {
      if (!(deletedId in prev)) return prev;
      const next = { ...prev };
      delete next[deletedId];
      return next;
    });

    if (deletedId !== activeSessionId) return;

    setActiveSessionId(null);

    if (repoKey) {
      setRepoStateByKey((prev) => {
        const cur = prev[repoKey];
        if (!cur) return prev;
        const branchKey = cur.currentBranch || cur.defaultBranch || defaultBranch;
        // Reset the chat bucket for the branch we fall back to.
        return {
          ...prev,
          [repoKey]: {
            ...cur,
            chatByBranch: {
              ...(cur.chatByBranch || {}),
              [branchKey]: { messages: [], plan: null },
            },
          },
        };
      });
    }
  },
  [activeSessionId, repoKey, defaultBranch]
);
+
+ // ---------------------------------------------------------------------------
+ // Chat persistence helpers
+ // ---------------------------------------------------------------------------
/**
 * Merge `patch` into the chat state stored for the currently-viewed branch
 * of the active repo. No-ops when no repo is selected or the repo has no
 * state entry yet. The update is immutable end to end so React re-renders.
 */
const updateChatForCurrentBranch = (patch) => {
  if (!repoKey) return;

  setRepoStateByKey((prev) => {
    const repoState = prev[repoKey];
    if (!repoState) return prev;

    // Resolve which branch bucket the patch targets.
    const branchKey =
      repoState.currentBranch || repoState.defaultBranch || defaultBranch;

    const chatBuckets = repoState.chatByBranch || {};
    const existingChat = chatBuckets[branchKey] || { messages: [], plan: null };
    const mergedChat = { ...existingChat, ...patch };

    return {
      ...prev,
      [repoKey]: {
        ...repoState,
        chatByBranch: { ...chatBuckets, [branchKey]: mergedChat },
      },
    };
  });
};
+
// Chat state for the branch currently being viewed (branch-scoped storage).
const currentChatState = useMemo(() => {
  const b = currentBranch || defaultBranch;
  return chatByBranch[b] || { messages: [], plan: null };
}, [chatByBranch, currentBranch, defaultBranch]);

// Chat state ChatPanel should actually render: session-scoped when a
// session is active, otherwise the branch-scoped state above.
const sessionChatState = useMemo(() => {
  if (!activeSessionId) {
    return currentChatState;
  }
  return chatBySession[activeSessionId] || { messages: [], plan: null };
}, [activeSessionId, chatBySession, currentChatState]);
+
/**
 * Apply `patch` to whichever chat store is currently authoritative:
 * the per-session cache when a session is active, otherwise the
 * branch-scoped store for the current repo/branch.
 */
const updateSessionChat = (patch) => {
  // No active session → delegate to branch-scoped storage.
  if (!activeSessionId) {
    updateChatForCurrentBranch(patch);
    return;
  }

  setChatBySession((prev) => {
    const existing = prev[activeSessionId] || { messages: [], plan: null };
    return {
      ...prev,
      [activeSessionId]: { ...existing, ...patch },
    };
  });
};
+
+ // ---------------------------------------------------------------------------
+ // Branch change (manual — for active repo)
+ // ---------------------------------------------------------------------------
// Manually switch the active repo to another branch. Switching back to the
// repo's default branch intentionally clears that branch's chat (treated as
// starting a fresh session).
const handleBranchChange = (nextBranch) => {
  if (!repoKey) return;
  if (!nextBranch || nextBranch === currentBranch) return;

  setRepoStateByKey((prev) => {
    const cur = prev[repoKey];
    if (!cur) return prev;

    const nextState = { ...cur, currentBranch: nextBranch };

    // Returning to the default branch starts a clean conversation.
    if (nextBranch === cur.defaultBranch) {
      nextState.chatByBranch = {
        ...nextState.chatByBranch,
        [nextBranch]: { messages: [], plan: null },
      };
    }

    return { ...prev, [repoKey]: nextState };
  });

  // Keep the context-repos list in sync with the branch switch.
  setContextRepos((prev) =>
    prev.map((e) =>
      e.repoKey === repoKey ? { ...e, branch: nextBranch } : e
    )
  );

  if (nextBranch === defaultBranch) {
    showToast("New Session", `Switched to ${defaultBranch}. Chat cleared.`);
  } else {
    showToast("Context Switched", `Now viewing ${nextBranch}.`);
  }
};
+
+ // ---------------------------------------------------------------------------
+ // Execution complete
+ // ---------------------------------------------------------------------------
/**
 * Record the result of an agent execution and route the chat transcript to
 * the right branch bucket.
 *
 * Modes:
 * - "hard-switch": a new session branch was created; the repo switches to it
 *   and the branch inherits (or appends to) the relevant chat history.
 * - "sticky": commits were pushed to an existing branch; the transcript is
 *   appended to that branch's chat and the current branch stays put.
 */
const handleExecutionComplete = ({
  branch,
  mode,
  commit_url,
  completionMsg,
  sourceBranch,
}) => {
  if (!repoKey || !branch) return;

  setRepoStateByKey((prev) => {
    // Seed a default repo state if this is the first execution for the repo.
    const cur =
      prev[repoKey] || {
        defaultBranch,
        currentBranch: defaultBranch,
        sessionBranches: [],
        lastExecution: null,
        pulseNonce: 0,
        chatByBranch: { [defaultBranch]: { messages: [], plan: null } },
      };

    const next = { ...cur };
    next.lastExecution = { mode, branch, ts: Date.now() };

    if (!next.chatByBranch) next.chatByBranch = {};

    // Branch whose chat history seeds a brand-new session branch.
    const prevBranchKey =
      sourceBranch || cur.currentBranch || cur.defaultBranch || defaultBranch;

    // System message announcing the outcome, linked to the commit.
    const successSystemMsg = {
      role: "system",
      isSuccess: true,
      link: commit_url,
      content:
        mode === "hard-switch"
          ? `🌱 **Session Started:** Created branch \`${branch}\`.`
          : `✅ **Update Published:** Commits pushed to \`${branch}\`.`,
    };

    // Keep the completion message only if it carries displayable payload.
    const normalizedCompletion =
      completionMsg &&
      (completionMsg.answer || completionMsg.content || completionMsg.executionLog)
        ? {
            from: completionMsg.from || "ai",
            role: completionMsg.role || "assistant",
            answer: completionMsg.answer,
            content: completionMsg.content,
            executionLog: completionMsg.executionLog,
          }
        : null;

    if (mode === "hard-switch") {
      next.sessionBranches = uniq([...(next.sessionBranches || []), branch]);
      next.currentBranch = branch;
      // Bump the nonce so UI listeners can animate/refresh on the switch.
      next.pulseNonce = (next.pulseNonce || 0) + 1;

      const existingTargetChat = next.chatByBranch[branch];
      const isExistingSession =
        existingTargetChat && (existingTargetChat.messages || []).length > 0;

      if (isExistingSession) {
        // Re-entering a known session branch: append to its own history.
        const appended = [
          ...(existingTargetChat.messages || []),
          ...(normalizedCompletion ? [normalizedCompletion] : []),
          successSystemMsg,
        ];

        next.chatByBranch[branch] = {
          ...existingTargetChat,
          messages: appended,
          plan: null,
        };
      } else {
        // Fresh session branch: carry over the source branch's transcript.
        const prevChat =
          (cur.chatByBranch && cur.chatByBranch[prevBranchKey]) || {
            messages: [],
            plan: null,
          };

        next.chatByBranch[branch] = {
          messages: [
            ...(prevChat.messages || []),
            ...(normalizedCompletion ? [normalizedCompletion] : []),
            successSystemMsg,
          ],
          plan: null,
        };
      }

      // Ensure the default branch always has a chat bucket to fall back to.
      if (!next.chatByBranch[next.defaultBranch]) {
        next.chatByBranch[next.defaultBranch] = { messages: [], plan: null };
      }
    } else if (mode === "sticky") {
      next.currentBranch = cur.currentBranch || branch;

      const targetChat = next.chatByBranch[branch] || { messages: [], plan: null };

      next.chatByBranch[branch] = {
        messages: [
          ...(targetChat.messages || []),
          ...(normalizedCompletion ? [normalizedCompletion] : []),
          successSystemMsg,
        ],
        plan: null,
      };
    }

    return { ...prev, [repoKey]: next };
  });

  if (mode === "hard-switch") {
    showToast("Context Switched", `Active on ${branch}.`);
  } else {
    showToast("Changes Committed", `Updated ${branch}.`);
  }
};
+
+ // ---------------------------------------------------------------------------
+ // Auth & startup render
+ // ---------------------------------------------------------------------------
// Run the startup/auth sequence exactly once on mount.
useEffect(() => {
  checkAuthentication();
}, []);
+
/**
 * Startup sequence: boot → backend init → (optional) stored-token validation
 * → ready/fallback. Each step updates the startup splash copy via
 * setStartupPhase / setStartupStatusMessage / setStartupDetailMessage.
 */
const checkAuthentication = async () => {
  setStartupPhase("booting");
  setStartupStatusMessage("Starting application...");
  setStartupDetailMessage(
    "Initializing authentication, provider, and workspace context."
  );

  try {
    setStartupPhase("checking-backend");
    setStartupStatusMessage("Connecting to backend...");
    setStartupDetailMessage(
      "Waiting for the server to be ready. This may take a few seconds on first start."
    );

    // Single-source-of-truth init: combines /api/status + /api/auth/status
    // in one request. Runs exactly once per page load (StrictMode-safe).
    const initResult = await initApp();
    const status = initResult.status;
    if (status) {
      setStartupStatusSnapshot(status);
      setAdminStatus(status);
    }

    const token = localStorage.getItem("github_token");
    const user = localStorage.getItem("github_user");

    if (token && user) {
      setStartupPhase("validating-auth");
      setStartupStatusMessage("Validating authentication...");
      setStartupDetailMessage(
        "Restoring your GitHub session and confirming access."
      );

      try {
        const data = await safeFetchJSON(apiUrl("/api/auth/validate"), {
          method: "POST",
          headers: { "Content-Type": "application/json" },
          body: JSON.stringify({ access_token: token }),
          timeout: 20000, // 20s — first-load GitHub API validation can be slow
        });

        if (data.authenticated) {
          setStartupPhase("restoring-session");
          setStartupStatusMessage("Restoring workspace...");
          setStartupDetailMessage(
            "Loading user profile, reconnecting provider state, and preparing the workspace."
          );

          setIsAuthenticated(true);
          setUserInfo(JSON.parse(user));
          setIsLoading(false);
          return;
        }
      } catch (err) {
        // Validation failure falls through to the token cleanup below.
        console.error(err);
      }

      // Stored token is invalid/expired — purge it so the login page shows.
      localStorage.removeItem("github_token");
      localStorage.removeItem("github_user");
    }

    setStartupPhase("ready");
    setStartupStatusMessage("Preparing sign-in...");
    setStartupDetailMessage(
      "GitPilot is ready. Please authenticate to continue."
    );

    setIsAuthenticated(false);
    setIsLoading(false);
  } catch (err) {
    // Backend unreachable or init failed — continue unauthenticated so the
    // UI still renders instead of hanging on the splash screen.
    console.error(err);
    setStartupPhase("fallback");
    setStartupStatusMessage("Starting application...");
    setStartupDetailMessage(
      "Continuing with basic startup while backend status is still loading."
    );
    setIsAuthenticated(false);
    setIsLoading(false);
  }
};
+
// Called by the login flow after a successful sign-in; `session.user` is
// the authenticated GitHub user object.
const handleAuthenticated = (session) => {
  setIsAuthenticated(true);
  setUserInfo(session.user);
};
+
/**
 * Sign the user out: drop persisted GitHub credentials (so a refresh cannot
 * restore the session), clear auth state, and wipe the workspace context.
 */
const handleLogout = () => {
  for (const key of ["github_token", "github_user"]) {
    localStorage.removeItem(key);
  }
  setIsAuthenticated(false);
  setUserInfo(null);
  clearAllContext();
};
+
+ if (isLoading) {
+ return (
+
+ );
+ }
+
+ if (!isAuthenticated) {
+ return (
+
+ );
+ }
+
+ const hasContext = contextRepos.length > 0;
+
+ return (
+
+
+
+
+
+ {activePage === "admin" && (
+
+
+ {["overview", "providers", "workspace-modes", "integrations", "sessions", "skills", "security", "advanced"].map((tab) => (
+ setAdminTab(tab)}
+ style={{
+ padding: "8px 16px",
+ borderRadius: "6px",
+ border: adminTab === tab ? "1px solid #3B82F6" : "1px solid #333",
+ background: adminTab === tab ? "#1e3a5f" : "#1a1b26",
+ color: adminTab === tab ? "#93c5fd" : "#a0a0b0",
+ cursor: "pointer",
+ fontSize: "13px",
+ textTransform: "capitalize",
+ }}
+ >
+ {tab.replace("-", " ")}
+
+ ))}
+
+
+ {adminTab === "overview" && (
+
+
+
Server
+
+ {adminStatus?.server_ready ? "Connected" : "Checking..."}
+
+
127.0.0.1:8000
+
+
+
+
Provider
+
+ {adminStatus?.provider?.name || "Loading..."}
+
+
+ {adminStatus?.provider?.configured
+ ? `${adminStatus.provider.model || "Ready"}`
+ : "Not configured"}
+
+
+
+
+
Workspace Modes
+
+ Folder: {adminStatus?.workspace?.folder_mode_available ? "Yes" : "—"}
+
+
+ Local Git: {adminStatus?.workspace?.local_git_available ? "Yes" : "—"}
+
+
+ GitHub: {adminStatus?.workspace?.github_mode_available ? "Yes" : "Optional"}
+
+
+
+
+
GitHub
+
+ {adminStatus?.github?.connected ? "Connected" : "Optional"}
+
+
+ {adminStatus?.github?.username || "Not linked"}
+
+
+
+
+
+
+
Get Started
+
setAdminTab("providers")}
+ style={{
+ padding: "6px 12px",
+ background: "#3B82F6",
+ color: "#fff",
+ border: "none",
+ borderRadius: "4px",
+ cursor: "pointer",
+ fontSize: "12px",
+ marginRight: "4px",
+ }}
+ >
+ Configure Provider
+
+
+
+ )}
+
+ {adminTab === "providers" && (
+
+
AI Providers
+
+
+ )}
+
+ {adminTab === "workspace-modes" && (
+
{
+ setActiveSessionId(result.session_id);
+ setSessionRefreshNonce((n) => n + 1);
+ setActivePage("workspace");
+ }}
+ />
+ )}
+
+ {adminTab === "integrations" && (
+
+ )}
+
+ {adminTab === "security" && (
+
+ )}
+
+ {adminTab === "sessions" && (
+ {
+ handleSelectSession(s);
+ setActivePage("workspace");
+ }}
+ />
+ )}
+
+ {adminTab === "skills" && }
+
+ {adminTab === "advanced" && (
+ setSettingsOpen(true)}
+ />
+ )}
+
+ )}
+
+ {activePage === "flow" && }
+
+ {activePage === "workspace" &&
+ (repo ? (
+
+
setAddRepoOpen(true)}
+ onBranchChange={handleContextBranchChange}
+ />
+
+
+
+ setSettingsOpen(true)}
+ />
+
+
+
+
+ GitPilot chat
+
+
+
+
+
+
+ ) : (
+
+
🤖
+
Select a repository
+
Select a repo to begin agentic workflow.
+
+ ))}
+
+
+
+
+
+ {repo && (
+
setSettingsOpen(false)}
+ activeEnvId={activeEnvId}
+ onEnvChange={setActiveEnvId}
+ />
+ )}
+
+ setAddRepoOpen(false)}
+ excludeKeys={contextRepos.map((e) => e.repoKey)}
+ />
+
+ setAboutOpen(false)}
+ />
+
+ {toast && (
+
+
{toast.title}
+
{toast.message}
+
+ )}
+
+
+
+ );
+}
\ No newline at end of file
diff --git a/frontend/components/AboutModal.jsx b/frontend/components/AboutModal.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..e80dd1a98dd3d6caa062310469b6135ec24800dc
--- /dev/null
+++ b/frontend/components/AboutModal.jsx
@@ -0,0 +1,488 @@
+// frontend/components/AboutModal.jsx
+import React, { useEffect, useCallback, useState } from "react";
+import { apiUrl, safeFetchJSON } from "../utils/api.js";
+
+/**
+ * AboutModal — "About GitPilot" dialog shown from the user menu.
+ *
+ * Enterprise design goals:
+ * - Prominent brand mark matching docs/logo.svg (orange ring + GP monogram)
+ * - Clear identity: name, tagline, version (frontend + backend)
+ * - Credits the creator (Ruslan Magana Vsevolodovna) as a link to GitHub
+ * - Open-source positioning: Apache 2.0 license + GitHub repo link
+ * - Action row: View on GitHub, Report Issue, Documentation
+ * - Accessible: role="dialog", aria-modal, aria-labelledby, Escape to close,
+ * focus trap via initial focus on close button
+ * - Brand palette: #D95C3D accent, #1C1C1F card, #27272A border, #EDEDED text
+ */
+
// Build-time version constant (__APP_VERSION__, presumably injected by the
// bundler's define/replace step — TODO confirm); falls back to a hardcoded
// string when the global is not defined (e.g. tests, non-bundled contexts).
const FRONTEND_VERSION =
  typeof __APP_VERSION__ !== "undefined" ? __APP_VERSION__ : "0.1.5";
+
+export default function AboutModal({ isOpen, onClose }) {
+ const [backendVersion, setBackendVersion] = useState(null);
+
+ // Fetch backend version when opened
useEffect(() => {
  if (!isOpen) return;
  // `cancelled` guards against setState after the modal closes/unmounts
  // while the fetch is still in flight.
  let cancelled = false;
  (async () => {
    try {
      const data = await safeFetchJSON(apiUrl("/api/ping"), { timeout: 4000 });
      if (!cancelled) {
        setBackendVersion(data?.version || null);
      }
    } catch {
      // Backend unreachable — show no backend version rather than erroring.
      if (!cancelled) setBackendVersion(null);
    }
  })();
  return () => {
    cancelled = true;
  };
}, [isOpen]);
+
+ // Escape to close
useEffect(() => {
  if (!isOpen) return;
  const handleKey = (e) => {
    if (e.key === "Escape") onClose?.();
  };
  document.addEventListener("keydown", handleKey);
  // Detach the listener whenever the modal closes or unmounts.
  return () => document.removeEventListener("keydown", handleKey);
}, [isOpen, onClose]);
+
+ // Lock body scroll while open
useEffect(() => {
  if (!isOpen) return;
  // Remember the previous overflow value so cleanup restores exactly what
  // was set before the modal opened (not just "").
  const prev = document.body.style.overflow;
  document.body.style.overflow = "hidden";
  return () => {
    document.body.style.overflow = prev;
  };
}, [isOpen]);
+
// Close only when the dimmed backdrop itself is clicked — clicks that
// bubble up from modal content have target !== currentTarget.
const handleBackdropClick = useCallback(
  (e) => {
    if (e.target === e.currentTarget) onClose?.();
  },
  [onClose]
);
+
+ if (!isOpen) return null;
+
+ return (
+
+
+ {/* Close button */}
+
{
+ e.currentTarget.style.background = "#27272A";
+ e.currentTarget.style.color = "#EDEDED";
+ }}
+ onMouseLeave={(e) => {
+ e.currentTarget.style.background = "transparent";
+ e.currentTarget.style.color = "#A1A1AA";
+ }}
+ >
+
+
+
+
+
+ {/* Hero: brand mark + name */}
+
+
+
+
+ GitPilot
+
+
+ Enterprise Workspace Copilot
+
+
+
+
+ Open Source · Apache 2.0
+
+
+
+ {/* Body */}
+
+
+ An agentic AI coding companion for your repositories. Ask, plan,
+ code, and ship — with multi-LLM support, security scanning, and
+ VS Code integration.
+
+
+ {/* Meta table */}
+
+
+
+
+
+ (e.currentTarget.style.textDecoration = "underline")
+ }
+ onMouseLeave={(e) =>
+ (e.currentTarget.style.textDecoration = "none")
+ }
+ >
+ Ruslan Magana Vsevolodovna
+
+ }
+ isLast
+ />
+
+
+
+ {/* Action row */}
+
+
}
+ label="GitHub"
+ />
+
}
+ label="Docs"
+ />
+
}
+ label="Report"
+ />
+
+
+ {/* Footer */}
+
+ © {new Date().getFullYear()} GitPilot · Made with care for
+ developers everywhere
+
+
+
+
+
+ );
+}
+
+// ── Brand mark (mirrors docs/logo.svg) ──────────────────────────────
+function BrandMark() {
+ return (
+
+ {/* Outer subtle ring */}
+
+ {/* Active arc (top-right, uses conic gradient for smooth arc) */}
+
+ {/* Soft core glow */}
+
+ {/* GP monogram */}
+
+ GP
+
+
+ );
+}
+
+// ── Meta row ────────────────────────────────────────────────────────
+function MetaRow({ label, value, isLast = false }) {
+ return (
+
+ {label}
+
+ {value}
+
+
+ );
+}
+
+// ── Action button ───────────────────────────────────────────────────
+function ActionButton({ href, icon, label }) {
+ return (
+ {
+ e.currentTarget.style.borderColor = "#D95C3D";
+ e.currentTarget.style.background = "rgba(217, 92, 61, 0.08)";
+ }}
+ onMouseLeave={(e) => {
+ e.currentTarget.style.borderColor = "#27272A";
+ e.currentTarget.style.background = "#131316";
+ }}
+ >
+
+ {icon}
+
+ {label}
+
+ );
+}
+
+// ── Icons ───────────────────────────────────────────────────────────
+function GitHubIcon() {
+ return (
+
+
+
+ );
+}
+
+function DocsIcon() {
+ return (
+
+
+
+
+ );
+}
+
+function BugIcon() {
+ return (
+
+
+
+
+ );
+}
diff --git a/frontend/components/AddRepoModal.jsx b/frontend/components/AddRepoModal.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..7832877ed985bac5ec81f1b4c43978e525dd8bd2
--- /dev/null
+++ b/frontend/components/AddRepoModal.jsx
@@ -0,0 +1,256 @@
+import React, { useCallback, useEffect, useState } from "react";
+import { createPortal } from "react-dom";
+import { authFetch } from "../utils/api.js";
+
+/**
+ * AddRepoModal — lightweight portal modal for adding repos to context.
+ *
+ * Embeds a minimal repo search/list (not the full RepoSelector) to keep
+ * the modal focused. Filters out repos already in context.
+ */
+export default function AddRepoModal({ isOpen, onSelect, onClose, excludeKeys = [] }) {
+ const [query, setQuery] = useState("");
+ const [repos, setRepos] = useState([]);
+ const [loading, setLoading] = useState(false);
+
// Query the backend repo list (up to 50), optionally filtered by search
// text. Failures are non-fatal: the previous list is kept, warning logged.
const fetchRepos = useCallback(
  async (searchQuery) => {
    setLoading(true);
    try {
      const params = new URLSearchParams({ per_page: "50" });
      if (searchQuery) params.set("query", searchQuery);
      const res = await authFetch(`/api/repos?${params}`);
      if (!res.ok) return;
      const data = await res.json();
      setRepos(data.repositories || []);
    } catch (err) {
      console.warn("AddRepoModal: fetch failed:", err);
    } finally {
      // Always clear the spinner, even on early return or error.
      setLoading(false);
    }
  },
  []
);
+
// Reset the search box and load the unfiltered list each time the modal
// opens.
useEffect(() => {
  if (isOpen) {
    setQuery("");
    fetchRepos("");
  }
}, [isOpen, fetchRepos]);

// Debounced search
useEffect(() => {
  if (!isOpen) return;
  // 300ms debounce: a pending fetch is cancelled whenever the query changes.
  const t = setTimeout(() => fetchRepos(query), 300);
  return () => clearTimeout(t);
}, [query, isOpen, fetchRepos]);
+
// Hide repos that are already part of the workspace context.
const excludeSet = new Set(excludeKeys);
const filtered = repos.filter((r) => {
  const key = r.full_name || `${r.owner}/${r.name}`;
  return !excludeSet.has(key);
});

if (!isOpen) return null;
+
+ return createPortal(
+ {
+ if (e.target === e.currentTarget) onClose();
+ }}
+ >
+
e.stopPropagation()}>
+
+ Add Repository
+
+ ×
+
+
+
+
+ setQuery(e.target.value)}
+ style={styles.searchInput}
+ autoFocus
+ onKeyDown={(e) => {
+ if (e.key === "Escape") onClose();
+ }}
+ />
+
+
+
+ {loading && filtered.length === 0 && (
+
Loading...
+ )}
+ {!loading && filtered.length === 0 && (
+
+ {excludeKeys.length > 0 && repos.length > 0
+ ? "All matching repos are already in context"
+ : "No repositories found"}
+
+ )}
+ {filtered.map((r) => {
+ const key = r.full_name || `${r.owner}/${r.name}`;
+ return (
+
onSelect(r)}
+ >
+
+ {r.name}
+ {r.owner}
+
+
+ {r.private && Private }
+ {r.default_branch || "main"}
+
+
+ );
+ })}
+ {loading && filtered.length > 0 && (
+
Updating...
+ )}
+
+
+
,
+ document.body
+ );
+}
+
// Inline style tokens for AddRepoModal (dark theme, matches app palette).
const styles = {
  // Full-screen dimmed backdrop; high z-index to sit above app chrome.
  overlay: {
    position: "fixed",
    top: 0,
    left: 0,
    right: 0,
    bottom: 0,
    backgroundColor: "rgba(0, 0, 0, 0.6)",
    zIndex: 10000,
    display: "flex",
    alignItems: "center",
    justifyContent: "center",
  },
  // Modal shell: fixed width, capped height; scrolling happens in `list`.
  modal: {
    width: 440,
    maxHeight: "70vh",
    backgroundColor: "#131316",
    border: "1px solid #27272A",
    borderRadius: 12,
    display: "flex",
    flexDirection: "column",
    overflow: "hidden",
    boxShadow: "0 12px 40px rgba(0,0,0,0.5)",
  },
  header: {
    display: "flex",
    justifyContent: "space-between",
    alignItems: "center",
    padding: "12px 14px",
    borderBottom: "1px solid #27272A",
    backgroundColor: "#18181B",
  },
  headerTitle: {
    fontSize: 14,
    fontWeight: 600,
    color: "#E4E4E7",
  },
  closeBtn: {
    width: 26,
    height: 26,
    borderRadius: 6,
    border: "1px solid #3F3F46",
    background: "transparent",
    color: "#A1A1AA",
    fontSize: 16,
    cursor: "pointer",
    display: "flex",
    alignItems: "center",
    justifyContent: "center",
  },
  searchBox: {
    padding: "10px 12px",
    borderBottom: "1px solid #27272A",
  },
  searchInput: {
    width: "100%",
    padding: "8px 10px",
    borderRadius: 6,
    border: "1px solid #3F3F46",
    background: "#18181B",
    color: "#E4E4E7",
    fontSize: 13,
    outline: "none",
    fontFamily: "monospace",
    boxSizing: "border-box",
  },
  // Scrollable repo-list region.
  list: {
    flex: 1,
    overflowY: "auto",
    maxHeight: 360,
  },
  // Centered "Loading..." / empty-state row.
  statusRow: {
    padding: "16px 12px",
    textAlign: "center",
    fontSize: 12,
    color: "#71717A",
  },
  repoRow: {
    display: "flex",
    alignItems: "center",
    justifyContent: "space-between",
    width: "100%",
    padding: "10px 14px",
    border: "none",
    borderBottom: "1px solid rgba(39, 39, 42, 0.5)",
    background: "transparent",
    color: "#E4E4E7",
    cursor: "pointer",
    textAlign: "left",
    transition: "background-color 0.1s",
  },
  repoInfo: {
    display: "flex",
    flexDirection: "column",
    gap: 2,
    minWidth: 0,
  },
  repoName: {
    fontSize: 13,
    fontWeight: 600,
    fontFamily: "monospace",
    overflow: "hidden",
    textOverflow: "ellipsis",
    whiteSpace: "nowrap",
  },
  repoOwner: {
    fontSize: 11,
    color: "#71717A",
  },
  repoMeta: {
    display: "flex",
    alignItems: "center",
    gap: 8,
    flexShrink: 0,
  },
  privateBadge: {
    fontSize: 9,
    padding: "1px 5px",
    borderRadius: 8,
    backgroundColor: "rgba(239, 68, 68, 0.12)",
    color: "#F87171",
    fontWeight: 600,
    textTransform: "uppercase",
  },
  branchHint: {
    fontSize: 10,
    color: "#52525B",
    fontFamily: "monospace",
  },
};
diff --git a/frontend/components/AdminTabs/AdvancedTab.jsx b/frontend/components/AdminTabs/AdvancedTab.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..3d0085dd2f5c794d98d18323d48c388e9855df32
--- /dev/null
+++ b/frontend/components/AdminTabs/AdvancedTab.jsx
@@ -0,0 +1,360 @@
+// frontend/components/AdminTabs/AdvancedTab.jsx
+import React, { useEffect, useState, useCallback } from "react";
+import { apiUrl, safeFetchJSON } from "../../utils/api.js";
+
+/**
+ * Advanced tab — inline toggles for:
+ * - Lite Mode (via /api/settings/topology — sets topology to "lite_mode")
+ * - Permission Mode (normal | auto | plan via /api/permissions/mode)
+ * - Link to full Settings modal for power users
+ *
+ * Best practices applied:
+ * - Optimistic UI with rollback on error
+ * - Each setting has its own loading indicator (no global lock)
+ * - Descriptions explain what each mode does
+ * - ARIA-labeled toggle switches for accessibility
+ */
+
// Agent permission policies offered in the radio list of the Advanced tab.
// `value` is the identifier sent to PUT /api/permissions/mode.
const PERMISSION_MODES = [
  {
    value: "normal",
    label: "Normal",
    description:
      "Ask before writing files or running commands (recommended).",
  },
  {
    value: "auto",
    label: "Auto",
    description:
      "Approve all tool calls automatically. Use only when you trust the agent.",
  },
  {
    value: "plan",
    label: "Plan Only",
    description:
      "Read-only mode. Agent cannot write files or run commands.",
  },
];
+
+function ToggleSwitch({ checked, onChange, disabled, ariaLabel }) {
+ return (
+ !disabled && onChange(!checked)}
+ disabled={disabled}
+ style={{
+ position: "relative",
+ width: "44px",
+ height: "24px",
+ borderRadius: "12px",
+ background: checked ? "#3B82F6" : "#374151",
+ border: "none",
+ cursor: disabled ? "not-allowed" : "pointer",
+ transition: "background 150ms ease",
+ padding: 0,
+ opacity: disabled ? 0.5 : 1,
+ }}
+ >
+
+
+ );
+}
+
+export default function AdvancedTab({ showToast, onOpenFullSettings }) {
+ const [liteMode, setLiteMode] = useState(false);
+ const [permissionMode, setPermissionMode] = useState("normal");
+ const [loading, setLoading] = useState(true);
+ const [updatingLite, setUpdatingLite] = useState(false);
+ const [updatingPerm, setUpdatingPerm] = useState(false);
+ const [error, setError] = useState(null);
+
+ // Initial fetch: topology preference + permission mode
useEffect(() => {
  let cancelled = false;
  (async () => {
    try {
      // Run both requests in parallel; each degrades to a safe default on
      // failure so one broken endpoint doesn't block the whole tab.
      const [topo, perms] = await Promise.all([
        safeFetchJSON(apiUrl("/api/settings/topology"), { timeout: 5000 })
          .catch(() => ({ topology: null })),
        safeFetchJSON(apiUrl("/api/permissions"), { timeout: 5000 })
          .catch(() => ({ mode: "normal" })),
      ]);
      if (cancelled) return;
      setLiteMode(topo?.topology === "lite_mode");
      // Accept both response shapes: top-level `mode` or nested `policy.mode`.
      setPermissionMode(perms?.mode || perms?.policy?.mode || "normal");
    } catch (err) {
      if (!cancelled) setError(err?.message || "Failed to load settings");
    } finally {
      if (!cancelled) setLoading(false);
    }
  })();
  return () => {
    cancelled = true;
  };
}, []);
+
// Toggle Lite Mode via POST /api/settings/topology, updating the switch
// optimistically and rolling back if the request fails.
const handleLiteToggle = useCallback(async (next) => {
  setUpdatingLite(true);
  setError(null);
  const previous = liteMode;
  setLiteMode(next); // optimistic
  try {
    await safeFetchJSON(apiUrl("/api/settings/topology"), {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      // null clears the override and restores the default (multi-agent) path.
      body: JSON.stringify({ topology: next ? "lite_mode" : null }),
      timeout: 5000,
    });
    showToast?.(
      "Lite Mode " + (next ? "enabled" : "disabled"),
      next
        ? "Single-agent path — better for small local models."
        : "Multi-agent path — uses full CrewAI orchestration."
    );
  } catch (err) {
    setLiteMode(previous); // rollback
    setError(err?.message || "Failed to update lite mode");
  } finally {
    setUpdatingLite(false);
  }
}, [liteMode, showToast]);
+
// Persist a new permission mode via PUT /api/permissions/mode with an
// optimistic update and rollback on failure.
const handlePermissionChange = useCallback(async (next) => {
  setUpdatingPerm(true);
  setError(null);
  const previous = permissionMode;
  setPermissionMode(next); // optimistic
  try {
    const res = await fetch(apiUrl("/api/permissions/mode"), {
      method: "PUT",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ mode: next }),
    });
    if (!res.ok) {
      // Prefer the backend's `detail` message; fall back to the status code.
      const body = await res.json().catch(() => ({}));
      throw new Error(body.detail || `HTTP ${res.status}`);
    }
    showToast?.(
      "Permission mode updated",
      `Set to ${next}.`
    );
  } catch (err) {
    setPermissionMode(previous); // rollback
    setError(err?.message || "Failed to update permission mode");
  } finally {
    setUpdatingPerm(false);
  }
}, [permissionMode, showToast]);
+
+ if (loading) {
+ return (
+
+
Advanced
+
+ Loading advanced settings...
+
+
+ );
+ }
+
+ return (
+
+
Advanced
+
+ Fine-tune GitPilot's agent behavior and safety settings.
+
+
+ {error && (
+
+ {error}
+
+ )}
+
+ {/* Lite Mode toggle */}
+
+
+
+
Lite Mode
+
+ Use a simplified single-agent prompt instead of the multi-agent
+ CrewAI pipeline. Recommended for small local models
+ (qwen2.5:1.5b, deepseek-r1, phi3:mini) that struggle with the
+ ReAct format.
+
+
+
+
+
+
+ {/* Permission Mode selector */}
+
+
Permission Mode
+
+ Controls when the agent needs your approval before writing files or
+ running commands.
+
+
+
+ {PERMISSION_MODES.map((mode) => {
+ const selected = permissionMode === mode.value;
+ return (
+
+ handlePermissionChange(mode.value)}
+ disabled={updatingPerm}
+ style={{ marginTop: "2px", cursor: "inherit" }}
+ />
+
+
+ {mode.label}
+
+
+ {mode.description}
+
+
+
+ );
+ })}
+
+
+
+ {/* Link to full settings modal */}
+
+
+
+
+ Full Settings
+
+
+ Server URL, telemetry, debug logs, environment variables, and more.
+
+
+
+ Open Settings Modal
+
+
+
+
+ );
+}
diff --git a/frontend/components/AdminTabs/IntegrationsTab.jsx b/frontend/components/AdminTabs/IntegrationsTab.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..e6c4d252324fdb7ce1fc329f05eb5821b4af4078
--- /dev/null
+++ b/frontend/components/AdminTabs/IntegrationsTab.jsx
@@ -0,0 +1,238 @@
+// frontend/components/AdminTabs/IntegrationsTab.jsx
+import React, { useEffect, useState } from "react";
+import { apiUrl, safeFetchJSON } from "../../utils/api.js";
+
+/**
+ * Integrations tab — shows connection status for GitHub (and future
+ * third-party integrations) with Connect/Disconnect actions.
+ *
+ * Best practices applied:
+ * - Fetch current status on mount via /api/auth/status
+ * - Show connected user info if already authenticated
+ * - "Connect GitHub" button opens /api/auth/url in the same window
+ * (OAuth flow will redirect back with ?code=...)
+ * - Disconnect clears localStorage token and re-fetches status
+ * - Handles both Web OAuth and Device Flow modes
+ */
+
+export default function IntegrationsTab({ userInfo, onDisconnect, showToast }) {
+ const [authStatus, setAuthStatus] = useState(null);
+ const [loading, setLoading] = useState(true);
+ const [connecting, setConnecting] = useState(false);
+ const [error, setError] = useState(null);
+
// Check GitHub auth status once on mount (auth mode, oauth/pat config).
// `cancelled` prevents setState after unmount.
useEffect(() => {
  let cancelled = false;
  (async () => {
    try {
      const data = await safeFetchJSON(apiUrl("/api/auth/status"), { timeout: 5000 });
      if (!cancelled) setAuthStatus(data);
    } catch (err) {
      if (!cancelled) setError(err?.message || "Failed to check auth status");
    } finally {
      if (!cancelled) setLoading(false);
    }
  })();
  return () => {
    cancelled = true;
  };
}, []);
+
// Start the GitHub connection flow. Web OAuth performs a full-page redirect
// to GitHub's authorize URL; device-flow setups are pointed back to the
// login page instead.
const handleConnect = async () => {
  setConnecting(true);
  setError(null);
  try {
    if (authStatus?.mode === "web") {
      // Web OAuth flow — redirect to GitHub authorization URL
      const { authorization_url, state } = await safeFetchJSON(
        apiUrl("/api/auth/url"),
        { timeout: 5000 }
      );
      if (state) {
        // Persist the CSRF `state` so the OAuth callback can verify it.
        sessionStorage.setItem("gitpilot_oauth_state", state);
      }
      // Full page redirect (OAuth providers don't support iframes)
      window.location.href = authorization_url;
      // Note: `connecting` is intentionally left true — the page navigates away.
    } else {
      // Device flow — the LoginPage already handles this.
      showToast?.(
        "Device flow",
        "GitHub device flow is configured. Sign out and sign in again to reconnect."
      );
    }
  } catch (err) {
    setError(err?.message || "Failed to start OAuth flow");
    setConnecting(false);
  }
};
+
/**
 * Disconnect GitHub after user confirmation: remove the persisted token and
 * user, notify the parent (which signs the user out), and show a toast.
 */
const handleDisconnect = () => {
  const confirmed = window.confirm("Disconnect GitHub? You will be signed out.");
  if (!confirmed) return;
  for (const key of ["github_token", "github_user"]) {
    localStorage.removeItem(key);
  }
  onDisconnect?.();
  showToast?.("Disconnected", "GitHub token removed.");
};
+
// GitHub counts as connected when a user object with a login is present.
const isConnected = !!(userInfo && userInfo.login);
+
+ return (
+
+
Integrations
+
+ Connect third-party services to unlock additional GitPilot features.
+
+
+ {/* GitHub integration card */}
+
+
+
+
GitHub
+
+ Pull requests, issues, and remote repository workflows.
+
+
+
+ {loading ? "CHECKING..." : isConnected ? "CONNECTED" : "NOT CONNECTED"}
+
+
+
+ {isConnected && userInfo && (
+
+ {userInfo.avatar_url && (
+
+ )}
+
+
+ {userInfo.name || userInfo.login}
+
+
@{userInfo.login}
+
+
+ )}
+
+ {error && (
+
+ {error}
+
+ )}
+
+
+ {isConnected ? (
+
+ Disconnect
+
+ ) : (
+
+ {connecting ? "Connecting..." : "Connect GitHub"}
+
+ )}
+
+
+ {authStatus && !isConnected && (
+
+ Auth mode: {authStatus.mode || "unknown"}
+ {authStatus.oauth_configured && " (Web OAuth)"}
+ {authStatus.pat_configured && " (Personal Access Token)"}
+
+ )}
+
+
+ {/* Placeholder for future integrations */}
+
+
+ More integrations coming soon (GitLab, Bitbucket, Jira, Slack)
+
+
+
+ );
+}
diff --git a/frontend/components/AdminTabs/SecurityTab.jsx b/frontend/components/AdminTabs/SecurityTab.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..f39161c491b77076489b0fa0163abf5b8d4fe33f
--- /dev/null
+++ b/frontend/components/AdminTabs/SecurityTab.jsx
@@ -0,0 +1,341 @@
+// frontend/components/AdminTabs/SecurityTab.jsx
+import React, { useState } from "react";
+import { scanWorkspace } from "../../utils/api.js";
+
+/**
+ * Security tab — runs a workspace scan via /api/security/scan-workspace
+ * and renders findings grouped by severity.
+ *
+ * Best practices applied:
+ * - Custom path input (defaults to ".")
+ * - Loading spinner while scanning
+ * - Error state with retry
+ * - Empty state ("No findings") with green checkmark
+ * - Findings grouped by severity (critical → info)
+ * - Each finding shows file, line, CWE, recommendation
+ * - Color-coded severity badges
+ */
+
+// Severity levels in display order, most severe first — drives the order of
+// the grouped-findings sections rendered by SecurityTab.
+const SEVERITY_ORDER = ["critical", "high", "medium", "low", "info"];
+
+// Badge palette per severity: dark background, light text, accent border.
+// Unknown severities fall back to the "info" entry (see SeverityBadge).
+const SEVERITY_COLORS = {
+ critical: { bg: "#7f1d1d", text: "#fecaca", border: "#991b1b" },
+ high: { bg: "#9a3412", text: "#fed7aa", border: "#c2410c" },
+ medium: { bg: "#78350f", text: "#fde68a", border: "#a16207" },
+ low: { bg: "#164e63", text: "#a5f3fc", border: "#0e7490" },
+ info: { bg: "#1e3a5f", text: "#93c5fd", border: "#3B82F6" },
+};
+
+function SeverityBadge({ severity }) {
+ const c = SEVERITY_COLORS[severity] || SEVERITY_COLORS.info;
+ return (
+
+ {severity}
+
+ );
+}
+
+export default function SecurityTab({ showToast }) {
+ const [path, setPath] = useState(".");
+ const [scanning, setScanning] = useState(false);
+ const [result, setResult] = useState(null);
+ const [error, setError] = useState(null);
+
+ const handleScan = async () => {
+ setScanning(true);
+ setError(null);
+ setResult(null);
+ try {
+ const data = await scanWorkspace(path.trim() || ".");
+ setResult(data);
+ const findingsCount = data.findings?.length || 0;
+ showToast?.(
+ "Scan complete",
+ findingsCount === 0
+ ? "No security findings."
+ : `Found ${findingsCount} issue${findingsCount !== 1 ? "s" : ""}.`
+ );
+ } catch (err) {
+ setError(err?.message || "Scan failed");
+ } finally {
+ setScanning(false);
+ }
+ };
+
+ // Group findings by severity
+ const grouped = React.useMemo(() => {
+ const out = {};
+ if (result?.findings) {
+ for (const f of result.findings) {
+ const sev = f.severity || "info";
+ if (!out[sev]) out[sev] = [];
+ out[sev].push(f);
+ }
+ }
+ return out;
+ }, [result]);
+
+ const totalFindings = result?.findings?.length || 0;
+
+ return (
+
+
Security Scanning
+
+ Scan your workspace for vulnerabilities, secrets, and insecure patterns (OWASP Top 10).
+
+
+ {/* Scan controls */}
+
+
+
+ Path to scan (relative or absolute)
+
+ setPath(e.target.value)}
+ disabled={scanning}
+ placeholder="."
+ style={{
+ width: "100%",
+ padding: "8px 10px",
+ background: "#0d0e15",
+ border: "1px solid #2a2b36",
+ borderRadius: "4px",
+ color: "#fff",
+ fontSize: "12px",
+ fontFamily: "monospace",
+ }}
+ />
+
+
+ {scanning ? "Scanning..." : "Scan Workspace"}
+
+
+
+ {/* Error state */}
+ {error && (
+
+ Scan failed:
+ {error}
+
+ )}
+
+ {/* Results summary */}
+ {result && (
+
+
+
+
Files Scanned
+
+ {result.files_scanned ?? 0}
+
+
+
+
Total Findings
+
+ {totalFindings}
+
+
+
+
Duration
+
+ {result.scan_duration_ms ?? 0}ms
+
+
+
+
+ )}
+
+ {/* Empty state — no findings */}
+ {result && totalFindings === 0 && (
+
+
✓
+
+ No security issues found
+
+
+ Your workspace passed all {result.files_scanned ?? 0} file checks.
+
+
+ )}
+
+ {/* Findings grouped by severity */}
+ {totalFindings > 0 &&
+ SEVERITY_ORDER.filter((sev) => grouped[sev]?.length > 0).map((sev) => (
+
+
+
+
+ {grouped[sev].length} {sev} issue{grouped[sev].length !== 1 ? "s" : ""}
+
+
+
+ {grouped[sev].map((f, idx) => (
+
+
+
{f.title}
+ {f.cwe_id && (
+
+ {f.cwe_id}
+
+ )}
+
+
+ {f.file_path}:{f.line_number}
+
+ {f.snippet && (
+
+ {f.snippet}
+
+ )}
+ {f.recommendation && (
+
+ Fix:
+ {f.recommendation}
+
+ )}
+
+ ))}
+
+
+ ))}
+
+ );
+}
diff --git a/frontend/components/AdminTabs/SessionsTab.jsx b/frontend/components/AdminTabs/SessionsTab.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..f55e9ef70ededefa2e89b63a7b9fff01ca00c5f6
--- /dev/null
+++ b/frontend/components/AdminTabs/SessionsTab.jsx
@@ -0,0 +1,362 @@
+// frontend/components/AdminTabs/SessionsTab.jsx
+import React, { useEffect, useMemo, useState, useCallback } from "react";
+import { apiUrl, safeFetchJSON } from "../../utils/api.js";
+
+/**
+ * Sessions tab — admin-level table view of all saved sessions with
+ * search, sort, and delete actions.
+ *
+ * Best practices applied:
+ * - Fetch all sessions on mount
+ * - Client-side search (useMemo for filtered list)
+ * - Confirmation dialog before delete
+ * - Row hover effect
+ * - Empty / loading / error states
+ * - Relative timestamps ("2 hours ago")
+ * - Click row to open in workspace view
+ */
+
// Format an ISO timestamp as a short relative-time label:
// "just now" (< 1 min), "Nm ago", "Nh ago", "Nd ago", then the locale
// date string for anything older than ~30 days.
//
// Returns "—" for missing OR unparseable input. The original wrapped the
// body in try/catch, but `new Date(bad)` never throws — it produces an
// Invalid Date whose getTime() is NaN, every NaN comparison is false, and
// the function fell through to toLocaleDateString(), rendering the literal
// string "Invalid Date" in the table. Guard on NaN explicitly instead.
function formatRelativeTime(iso) {
  if (!iso) return "—";
  const ms = new Date(iso).getTime();
  if (Number.isNaN(ms)) return "—"; // unparseable timestamp
  const diff = Date.now() - ms;
  // Slightly-future timestamps (clock skew) land here too, as before.
  if (diff < 60_000) return "just now";
  if (diff < 3_600_000) return `${Math.floor(diff / 60_000)}m ago`;
  if (diff < 86_400_000) return `${Math.floor(diff / 3_600_000)}h ago`;
  if (diff < 2_592_000_000) return `${Math.floor(diff / 86_400_000)}d ago`;
  return new Date(iso).toLocaleDateString();
}
+
+export default function SessionsTab({ onSelectSession, showToast }) {
+ const [sessions, setSessions] = useState([]);
+ const [loading, setLoading] = useState(true);
+ const [error, setError] = useState(null);
+ const [query, setQuery] = useState("");
+ const [deletingId, setDeletingId] = useState(null);
+
+ const fetchSessions = useCallback(async () => {
+ setError(null);
+ try {
+ const data = await safeFetchJSON(apiUrl("/api/sessions"), { timeout: 10000 });
+ setSessions(Array.isArray(data.sessions) ? data.sessions : []);
+ } catch (err) {
+ setError(err?.message || "Failed to load sessions");
+ } finally {
+ setLoading(false);
+ }
+ }, []);
+
+ useEffect(() => {
+ fetchSessions();
+ }, [fetchSessions]);
+
+ const handleDelete = async (session) => {
+ if (
+ !window.confirm(
+ `Delete session "${session.name || session.id?.slice(0, 8)}"? This cannot be undone.`
+ )
+ ) {
+ return;
+ }
+
+ setDeletingId(session.id);
+ try {
+ const res = await fetch(apiUrl(`/api/sessions/${session.id}`), {
+ method: "DELETE",
+ });
+ if (!res.ok) {
+ throw new Error(`Delete failed (${res.status})`);
+ }
+ showToast?.("Session deleted", session.name || session.id);
+ // Optimistic removal
+ setSessions((prev) => prev.filter((s) => s.id !== session.id));
+ } catch (err) {
+ setError(err?.message || "Failed to delete session");
+ } finally {
+ setDeletingId(null);
+ }
+ };
+
+ const filtered = useMemo(() => {
+ if (!query.trim()) return sessions;
+ const q = query.toLowerCase();
+ return sessions.filter((s) => {
+ return (
+ (s.name || "").toLowerCase().includes(q) ||
+ (s.repo || "").toLowerCase().includes(q) ||
+ (s.branch || "").toLowerCase().includes(q) ||
+ (s.id || "").toLowerCase().includes(q)
+ );
+ });
+ }, [sessions, query]);
+
+ return (
+
+
+
+
Sessions
+
+ All saved chat sessions ({sessions.length} total
+ {query ? `, ${filtered.length} matching` : ""}).
+
+
+
+ setQuery(e.target.value)}
+ placeholder="Search sessions..."
+ style={{
+ padding: "6px 10px",
+ background: "#0d0e15",
+ border: "1px solid #2a2b36",
+ borderRadius: "4px",
+ color: "#fff",
+ fontSize: "12px",
+ width: "220px",
+ }}
+ />
+
+ Refresh
+
+
+
+
+ {/* Loading state */}
+ {loading && (
+
+ Loading sessions...
+
+ )}
+
+ {/* Error state */}
+ {error && !loading && (
+
+ Error:
+ {error}
+
+ )}
+
+ {/* Empty state */}
+ {!loading && !error && sessions.length === 0 && (
+
+
💬
+
+ No sessions yet
+
+
+ Start chatting with GitPilot to create your first session.
+
+
+ )}
+
+ {/* Table */}
+ {!loading && filtered.length > 0 && (
+
+
+
+
+ Name
+ Repository
+ Branch
+ Messages
+ Status
+ Updated
+ Actions
+
+
+
+ {filtered.map((s) => (
+
+ (e.currentTarget.style.background = "#22232e")
+ }
+ onMouseLeave={(e) =>
+ (e.currentTarget.style.background = "transparent")
+ }
+ onClick={() => onSelectSession?.(s)}
+ >
+
+
+ {s.name || (unnamed) }
+
+
+ {s.id?.slice(0, 12)}
+
+
+
+ {s.repo || — }
+
+
+ {s.branch || — }
+
+ {s.message_count ?? 0}
+
+
+ {s.status || "unknown"}
+
+
+
+ {formatRelativeTime(s.updated_at)}
+
+
+ {
+ e.stopPropagation();
+ handleDelete(s);
+ }}
+ disabled={deletingId === s.id}
+ style={{
+ padding: "4px 10px",
+ background: "transparent",
+ color: "#f87171",
+ border: "1px solid #991b1b",
+ borderRadius: "4px",
+ cursor: deletingId === s.id ? "not-allowed" : "pointer",
+ fontSize: "11px",
+ }}
+ >
+ {deletingId === s.id ? "..." : "Delete"}
+
+
+
+ ))}
+
+
+
+ )}
+
+ {/* No matches for search */}
+ {!loading && sessions.length > 0 && filtered.length === 0 && (
+
+ No sessions match "{query}"
+
+ )}
+
+ );
+}
+
+// Shared inline style for the sessions table's header cells (<th>):
+// small uppercase labels, left-aligned, slightly dimmed.
+const thStyle = {
+ padding: "10px 12px",
+ textAlign: "left",
+ fontSize: "11px",
+ fontWeight: 600,
+ textTransform: "uppercase",
+ letterSpacing: "0.5px",
+ opacity: 0.7,
+};
+
+// Shared inline style for the sessions table's data cells (<td>).
+const tdStyle = {
+ padding: "10px 12px",
+ verticalAlign: "middle",
+};
diff --git a/frontend/components/AdminTabs/SkillsTab.jsx b/frontend/components/AdminTabs/SkillsTab.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..6068f4af61f6d2933f546997260ecc97a65823f3
--- /dev/null
+++ b/frontend/components/AdminTabs/SkillsTab.jsx
@@ -0,0 +1,266 @@
+// frontend/components/AdminTabs/SkillsTab.jsx
+import React, { useEffect, useState, useCallback } from "react";
+import { apiUrl, safeFetchJSON } from "../../utils/api.js";
+
+/**
+ * Skills tab — lists all loaded skills from /api/skills and allows
+ * reloading them from disk via /api/skills/reload.
+ *
+ * Best practices applied:
+ * - Fetch on mount
+ * - Explicit reload button (skills are loaded from .md files on disk)
+ * - Loading / empty / error states
+ * - Auto-trigger indicator badge
+ * - Required tools list per skill
+ * - Source file path for debugging
+ */
+
+export default function SkillsTab({ showToast }) {
+ const [skills, setSkills] = useState([]);
+ const [loading, setLoading] = useState(true);
+ const [reloading, setReloading] = useState(false);
+ const [error, setError] = useState(null);
+
+ const fetchSkills = useCallback(async () => {
+ setError(null);
+ try {
+ const data = await safeFetchJSON(apiUrl("/api/skills"), { timeout: 10000 });
+ setSkills(Array.isArray(data.skills) ? data.skills : []);
+ } catch (err) {
+ setError(err?.message || "Failed to load skills");
+ } finally {
+ setLoading(false);
+ }
+ }, []);
+
+ useEffect(() => {
+ fetchSkills();
+ }, [fetchSkills]);
+
+ const handleReload = async () => {
+ setReloading(true);
+ setError(null);
+ try {
+ const data = await safeFetchJSON(apiUrl("/api/skills/reload"), {
+ method: "POST",
+ timeout: 10000,
+ });
+ showToast?.(
+ "Skills reloaded",
+ `${data.count ?? 0} skill${data.count !== 1 ? "s" : ""} loaded from disk.`
+ );
+ await fetchSkills();
+ } catch (err) {
+ setError(err?.message || "Failed to reload skills");
+ } finally {
+ setReloading(false);
+ }
+ };
+
+ return (
+
+
+
+
Skills
+
+ Reusable prompt templates loaded from{" "}
+ .gitpilot/skills/*.md files.
+
+
+
+ {reloading ? "Reloading..." : "Reload Skills"}
+
+
+
+ {/* Loading state */}
+ {loading && (
+
+ Loading skills...
+
+ )}
+
+ {/* Error state */}
+ {error && !loading && (
+
+ Error:
+ {error}
+
+ )}
+
+ {/* Empty state */}
+ {!loading && !error && skills.length === 0 && (
+
+
📚
+
+ No skills loaded
+
+
+ Create a .gitpilot/skills/my-skill.md file with YAML
+ frontmatter to add custom skills.
+
+
+ )}
+
+ {/* Skills grid */}
+ {!loading && skills.length > 0 && (
+
+ {skills.map((skill) => (
+
+
+
+ {skill.name}
+
+ {skill.auto_trigger && (
+
+ Auto
+
+ )}
+
+
+
+ {skill.description || "No description"}
+
+
+ {Array.isArray(skill.required_tools) && skill.required_tools.length > 0 && (
+
+ {skill.required_tools.map((t) => (
+
+ {t}
+
+ ))}
+
+ )}
+
+ {skill.source && (
+
+ {skill.source}
+
+ )}
+
+ ))}
+
+ )}
+
+ );
+}
diff --git a/frontend/components/AdminTabs/WorkspaceModesTab.jsx b/frontend/components/AdminTabs/WorkspaceModesTab.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..124373c175c7c6543493487627d48467405b4252
--- /dev/null
+++ b/frontend/components/AdminTabs/WorkspaceModesTab.jsx
@@ -0,0 +1,254 @@
+// frontend/components/AdminTabs/WorkspaceModesTab.jsx
+import React, { useState } from "react";
+import { startSession } from "../../utils/api.js";
+
+/**
+ * Workspace Modes tab — allows the user to start a session in one of
+ * three modes (folder, local_git, github). Calls POST /api/session/start.
+ *
+ * Best practices applied:
+ * - Loading state while the request is in flight
+ * - Per-mode error state (not a global error)
+ * - Disabled card during submission to prevent double-click
+ * - ARIA role="button" + aria-disabled for accessibility
+ * - Toast notification on success
+ * - Success callback so App.jsx can set activeSessionId and switch to workspace view
+ */
+
+// Static descriptors for the three workspace modes rendered as selectable
+// cards. Each entry supplies display copy (title/description/requires/
+// enables), the input prompt shown when the card is active (promptLabel /
+// promptPlaceholder), and buildPayload, which maps the user's single input
+// value onto the request body for startSession (POST /api/session/start).
+const MODES = [
+ {
+ id: "folder",
+ title: "Folder Mode",
+ description: "Work with any local folder. No Git required.",
+ requires: "A local folder path",
+ enables: "Chat, explain, review",
+ promptKey: "folder_path",
+ promptLabel: "Folder path (absolute)",
+ promptPlaceholder: "/home/you/myproject",
+ buildPayload: (value) => ({ mode: "folder", folder_path: value }),
+ },
+ {
+ id: "local_git",
+ title: "Local Git Mode",
+ description: "Full repo + branch context for AI assistance.",
+ requires: "A local Git repository",
+ enables: "All local features (branches, diff, commit)",
+ promptKey: "repo_root",
+ promptLabel: "Repository root (absolute path)",
+ promptPlaceholder: "/home/you/my-git-repo",
+ buildPayload: (value) => ({ mode: "local_git", repo_root: value }),
+ },
+ {
+ id: "github",
+ title: "GitHub Mode",
+ description: "PRs, issues, remote workflows via GitHub API.",
+ requires: "GitHub token (already signed in)",
+ enables: "Full platform features",
+ promptKey: "repo_full_name",
+ promptLabel: "Repository (owner/repo)",
+ promptPlaceholder: "octocat/hello-world",
+ buildPayload: (value) => ({ mode: "github", repo_full_name: value }),
+ },
+];
+
+export default function WorkspaceModesTab({ onSessionStarted, showToast }) {
+ const [activeModeId, setActiveModeId] = useState(null);
+ const [inputValue, setInputValue] = useState("");
+ const [submittingId, setSubmittingId] = useState(null);
+ const [errorByMode, setErrorByMode] = useState({});
+
+ const handleCardClick = (mode) => {
+ if (submittingId) return;
+ setActiveModeId(mode.id);
+ setInputValue("");
+ setErrorByMode((prev) => ({ ...prev, [mode.id]: null }));
+ };
+
+ const handleStart = async (mode) => {
+ const trimmed = inputValue.trim();
+ if (!trimmed) {
+ setErrorByMode((prev) => ({
+ ...prev,
+ [mode.id]: `${mode.promptLabel} is required`,
+ }));
+ return;
+ }
+
+ setSubmittingId(mode.id);
+ setErrorByMode((prev) => ({ ...prev, [mode.id]: null }));
+
+ try {
+ const payload = mode.buildPayload(trimmed);
+ const result = await startSession(payload);
+
+ showToast?.(
+ `${mode.title} started`,
+ `Session ${result.session_id?.slice(0, 8) || ""} is now active.`
+ );
+
+ onSessionStarted?.(result);
+ setActiveModeId(null);
+ setInputValue("");
+ } catch (err) {
+ setErrorByMode((prev) => ({
+ ...prev,
+ [mode.id]: err?.message || "Failed to start session",
+ }));
+ } finally {
+ setSubmittingId(null);
+ }
+ };
+
+ const handleCancel = () => {
+ if (submittingId) return;
+ setActiveModeId(null);
+ setInputValue("");
+ };
+
+ return (
+
+
Workspace Modes
+
+ Choose how you want GitPilot to interact with your code. You can switch modes at any time.
+
+
+
+ {MODES.map((mode) => {
+ const isActive = activeModeId === mode.id;
+ const isSubmitting = submittingId === mode.id;
+ const error = errorByMode[mode.id];
+
+ return (
+
!isActive && handleCardClick(mode)}
+ onKeyDown={(e) => {
+ if ((e.key === "Enter" || e.key === " ") && !isActive) {
+ e.preventDefault();
+ handleCardClick(mode);
+ }
+ }}
+ style={{
+ background: isActive ? "#1e3a5f" : "#1a1b26",
+ borderRadius: "8px",
+ padding: "20px",
+ border: isActive ? "1px solid #3B82F6" : "1px solid #2a2b36",
+ cursor: submittingId && !isSubmitting ? "not-allowed" : "pointer",
+ opacity: submittingId && !isSubmitting ? 0.5 : 1,
+ transition: "all 150ms ease",
+ }}
+ >
+
+ {mode.title}
+
+
+ {mode.description}
+
+
+ Requires:
+ {mode.requires}
+
+
+ Enables:
+ {mode.enables}
+
+
+ {isActive && (
+
e.stopPropagation()} style={{ marginTop: "12px" }}>
+
+ {mode.promptLabel}
+
+
setInputValue(e.target.value)}
+ onKeyDown={(e) => {
+ if (e.key === "Enter") {
+ e.preventDefault();
+ handleStart(mode);
+ } else if (e.key === "Escape") {
+ handleCancel();
+ }
+ }}
+ placeholder={mode.promptPlaceholder}
+ disabled={isSubmitting}
+ autoFocus
+ style={{
+ width: "100%",
+ padding: "6px 8px",
+ background: "#0d0e15",
+ border: "1px solid #2a2b36",
+ borderRadius: "4px",
+ color: "#fff",
+ fontSize: "12px",
+ fontFamily: "monospace",
+ }}
+ />
+ {error && (
+
+ {error}
+
+ )}
+
+ handleStart(mode)}
+ disabled={isSubmitting || !inputValue.trim()}
+ style={{
+ padding: "6px 12px",
+ background: isSubmitting ? "#555" : "#3B82F6",
+ color: "#fff",
+ border: "none",
+ borderRadius: "4px",
+ cursor: isSubmitting || !inputValue.trim() ? "not-allowed" : "pointer",
+ fontSize: "12px",
+ fontWeight: 600,
+ }}
+ >
+ {isSubmitting ? "Starting..." : "Start Session"}
+
+
+ Cancel
+
+
+
+ )}
+
+ );
+ })}
+
+
+ );
+}
diff --git a/frontend/components/AdminTabs/index.js b/frontend/components/AdminTabs/index.js
new file mode 100644
index 0000000000000000000000000000000000000000..c4e7d4c80b6b7f6a178f000ba54fe69d41eae60c
--- /dev/null
+++ b/frontend/components/AdminTabs/index.js
@@ -0,0 +1,8 @@
+// frontend/components/AdminTabs/index.js
+// Barrel export — all admin tab components in one place
+export { default as WorkspaceModesTab } from "./WorkspaceModesTab.jsx";
+export { default as SecurityTab } from "./SecurityTab.jsx";
+export { default as IntegrationsTab } from "./IntegrationsTab.jsx";
+export { default as SkillsTab } from "./SkillsTab.jsx";
+export { default as SessionsTab } from "./SessionsTab.jsx";
+export { default as AdvancedTab } from "./AdvancedTab.jsx";
diff --git a/frontend/components/AssistantMessage.jsx b/frontend/components/AssistantMessage.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..cb24b5c971ed8c1505a1f6e1beeb6b20918b46e0
--- /dev/null
+++ b/frontend/components/AssistantMessage.jsx
@@ -0,0 +1,121 @@
+import React from "react";
+import PlanView from "./PlanView.jsx";
+
+export default function AssistantMessage({ answer, plan, executionLog }) {
+ const styles = {
+ container: {
+ marginBottom: "20px",
+ padding: "20px",
+ backgroundColor: "#18181B", // Zinc-900
+ borderRadius: "12px",
+ border: "1px solid #27272A", // Zinc-800
+ color: "#F4F4F5", // Zinc-100
+ fontFamily: '-apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif',
+ boxShadow: "0 4px 6px -1px rgba(0, 0, 0, 0.1), 0 2px 4px -1px rgba(0, 0, 0, 0.06)",
+ },
+ section: {
+ marginBottom: "20px",
+ },
+ lastSection: {
+ marginBottom: "0",
+ },
+ header: {
+ display: "flex",
+ alignItems: "center",
+ marginBottom: "12px",
+ paddingBottom: "8px",
+ borderBottom: "1px solid #3F3F46", // Zinc-700
+ },
+ title: {
+ fontSize: "12px",
+ fontWeight: "600",
+ textTransform: "uppercase",
+ letterSpacing: "0.05em",
+ color: "#A1A1AA", // Zinc-400
+ margin: 0,
+ },
+ content: {
+ fontSize: "14px",
+ lineHeight: "1.6",
+ whiteSpace: "pre-wrap",
+ },
+ executionList: {
+ listStyle: "none",
+ padding: 0,
+ margin: 0,
+ display: "flex",
+ flexDirection: "column",
+ gap: "8px",
+ },
+ executionStep: {
+ display: "flex",
+ flexDirection: "column",
+ gap: "4px",
+ padding: "10px",
+ backgroundColor: "#09090B", // Zinc-950
+ borderRadius: "6px",
+ border: "1px solid #27272A",
+ fontSize: "13px",
+ },
+ stepNumber: {
+ fontSize: "11px",
+ fontWeight: "600",
+ color: "#10B981", // Emerald-500
+ textTransform: "uppercase",
+ },
+ stepSummary: {
+ color: "#D4D4D8", // Zinc-300
+ fontFamily: "ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, monospace",
+ },
+ };
+
+ // Only show Action Plan section when there are actual file actions.
+ // For Lite Mode Q&A responses (all steps have 0 files), the plan
+ // just duplicates the answer — hiding it avoids showing the same text 3x.
+ const hasFileActions = plan?.steps?.some(s => s.files?.length > 0);
+
+ return (
+
+ {/* Answer section */}
+
+
+ {/* Action Plan section — only when there are file changes */}
+ {plan && hasFileActions && (
+
+ )}
+
+ {/* Execution Log section (shown after execution) */}
+ {executionLog && (
+
+
+
+
+ {executionLog.steps.map((s) => (
+
+ Step {s.step_number}
+ {s.summary}
+
+ ))}
+
+
+
+ )}
+
+ );
+}
\ No newline at end of file
diff --git a/frontend/components/BranchPicker.jsx b/frontend/components/BranchPicker.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..e04e04d77dbda1a8532b26c9c2d3fc92fb7d2616
--- /dev/null
+++ b/frontend/components/BranchPicker.jsx
@@ -0,0 +1,398 @@
+import React, { useCallback, useEffect, useRef, useState } from "react";
+import { createPortal } from "react-dom";
+
+/**
+ * BranchPicker — Claude-Code-on-Web parity branch selector.
+ *
+ * Fetches branches from the new /api/repos/{owner}/{repo}/branches endpoint.
+ * Shows search, default branch badge, AI session branch highlighting.
+ *
+ * Fixes applied:
+ * - Dropdown portaled to document.body (avoids overflow:hidden clipping)
+ * - Branches cached per repo (no "No branches found" flash)
+ * - Shows "Loading..." only on first fetch, keeps stale data otherwise
+ */
+
+// Simple per-repo branch cache so reopening the dropdown is instant
+const branchCache = {};
+
+/**
+ * Props:
+ * repo, currentBranch, defaultBranch, sessionBranches, onBranchChange
+ * — standard branch-picker props
+ *
+ * externalAnchorRef (optional) — a React ref pointing to an external DOM
+ * element to anchor the dropdown to. When provided:
+ * - BranchPicker skips rendering its own trigger button
+ * - the dropdown opens immediately on mount
+ * - closing the dropdown calls onClose()
+ *
+ * onClose (optional) — called when the dropdown is dismissed (outside
+ * click or Escape). Only meaningful with externalAnchorRef.
+ */
+export default function BranchPicker({
+ repo,
+ currentBranch,
+ defaultBranch,
+ sessionBranches = [],
+ onBranchChange,
+ externalAnchorRef,
+ onClose,
+}) {
+ const isExternalMode = !!externalAnchorRef;
+ const [open, setOpen] = useState(isExternalMode);
+ const [query, setQuery] = useState("");
+ const [branches, setBranches] = useState([]);
+ const [loading, setLoading] = useState(false);
+ const [error, setError] = useState(null);
+ const triggerRef = useRef(null);
+ const dropdownRef = useRef(null);
+ const inputRef = useRef(null);
+
+ const branch = currentBranch || defaultBranch || "main";
+ const isAiSession = sessionBranches.includes(branch) && branch !== defaultBranch;
+
+ // The element used for dropdown positioning
+ const anchorRef = isExternalMode ? externalAnchorRef : triggerRef;
+
+ const cacheKey = repo ? `${repo.owner}/${repo.name}` : null;
+
+ // Seed from cache on mount / repo change
+ useEffect(() => {
+ if (cacheKey && branchCache[cacheKey]) {
+ setBranches(branchCache[cacheKey]);
+ }
+ }, [cacheKey]);
+
+ // Fetch branches from GitHub via backend
+ const fetchBranches = useCallback(async (searchQuery) => {
+ if (!repo) return;
+ setLoading(true);
+ setError(null);
+ try {
+ const token = localStorage.getItem("github_token");
+ const headers = token ? { Authorization: `Bearer ${token}` } : {};
+ const params = new URLSearchParams({ per_page: "100" });
+ if (searchQuery) params.set("query", searchQuery);
+
+ const res = await fetch(
+ `/api/repos/${repo.owner}/${repo.name}/branches?${params}`,
+ { headers, cache: "no-cache" }
+ );
+ if (!res.ok) {
+ const errData = await res.json().catch(() => ({}));
+ const detail = errData.detail || `HTTP ${res.status}`;
+ console.warn("BranchPicker: fetch failed:", detail);
+ setError(detail);
+ return;
+ }
+ const data = await res.json();
+ const fetched = data.branches || [];
+ setBranches(fetched);
+
+ // Only cache the unfiltered result
+ if (!searchQuery && cacheKey) {
+ branchCache[cacheKey] = fetched;
+ }
+ } catch (err) {
+ console.warn("Failed to fetch branches:", err);
+ } finally {
+ setLoading(false);
+ }
+ }, [repo, cacheKey]);
+
+ // Fetch + focus when opened
+ useEffect(() => {
+ if (open) {
+ fetchBranches(query);
+ setTimeout(() => inputRef.current?.focus(), 50);
+ }
+ }, [open]); // eslint-disable-line react-hooks/exhaustive-deps
+
+ // Debounced search
+ useEffect(() => {
+ if (!open) return;
+ const t = setTimeout(() => fetchBranches(query), 300);
+ return () => clearTimeout(t);
+ }, [query, open, fetchBranches]);
+
+ // Close on outside click
+ useEffect(() => {
+ if (!open) return;
+ const handler = (e) => {
+ const inAnchor = anchorRef.current && anchorRef.current.contains(e.target);
+ const inDropdown = dropdownRef.current && dropdownRef.current.contains(e.target);
+ if (!inAnchor && !inDropdown) {
+ handleClose();
+ }
+ };
+ document.addEventListener("mousedown", handler);
+ return () => document.removeEventListener("mousedown", handler);
+ }, [open]); // eslint-disable-line react-hooks/exhaustive-deps
+
+ const handleClose = useCallback(() => {
+ setOpen(false);
+ setQuery("");
+ onClose?.();
+ }, [onClose]);
+
+ const handleSelect = (branchName) => {
+ handleClose();
+ if (branchName !== branch) {
+ onBranchChange?.(branchName);
+ }
+ };
+
+ // Merge API branches with session branches (AI branches might not show in GitHub API)
+ const allBranches = [...branches];
+ for (const sb of sessionBranches) {
+ if (!allBranches.find((b) => b.name === sb)) {
+ allBranches.push({ name: sb, is_default: false, protected: false });
+ }
+ }
+
+ // Calculate portal position from anchor element
+ const getDropdownPosition = () => {
+ if (!anchorRef.current) return { top: 0, left: 0 };
+ const rect = anchorRef.current.getBoundingClientRect();
+ return {
+ top: rect.bottom + 4,
+ left: rect.left,
+ };
+ };
+
+ const pos = open ? getDropdownPosition() : { top: 0, left: 0 };
+
+ return (
+
+ {/* Trigger button — hidden when using external anchor */}
+ {!isExternalMode && (
+
setOpen((v) => !v)}
+ >
+
+
+
+
+
+
+ {branch}
+
+
+
+
+ )}
+
+ {/* Dropdown — portaled to document.body to escape overflow:hidden */}
+ {open && createPortal(
+
+ {/* Search input */}
+
+ setQuery(e.target.value)}
+ style={styles.searchInput}
+ onKeyDown={(e) => {
+ if (e.key === "Escape") {
+ handleClose();
+ }
+ }}
+ />
+
+
+ {/* Branch list */}
+
+ {loading && allBranches.length === 0 && (
+
Loading...
+ )}
+
+ {!loading && error && (
+
{error}
+ )}
+
+ {!loading && !error && allBranches.length === 0 && (
+
No branches found
+ )}
+
+ {allBranches.map((b) => {
+ const isDefault = b.is_default || b.name === defaultBranch;
+ const isAi = sessionBranches.includes(b.name);
+ const isCurrent = b.name === branch;
+
+ return (
+
handleSelect(b.name)}
+ >
+
+ ✓
+
+
+ {b.name}
+
+ {isDefault && (
+
default
+ )}
+ {isAi && !isDefault && (
+
AI
+ )}
+ {b.protected && (
+
+
+
+
+
+ )}
+
+ );
+ })}
+
+ {/* Subtle loading indicator when refreshing with cached data visible */}
+ {loading && allBranches.length > 0 && (
+
Updating...
+ )}
+
+
,
+ document.body
+ )}
+
+ );
+}
+
+// Inline style map for BranchPicker. The dropdown is portaled to
+// document.body, so it uses position:fixed + a high zIndex to escape any
+// overflow:hidden ancestor (see the createPortal call in the component).
+const styles = {
+ container: {
+ position: "relative",
+ },
+ // Trigger button shown when BranchPicker renders its own anchor
+ // (i.e. not in externalAnchorRef mode).
+ trigger: {
+ display: "flex",
+ alignItems: "center",
+ gap: 6,
+ padding: "4px 8px",
+ borderRadius: 4,
+ border: "1px solid #3F3F46",
+ background: "transparent",
+ fontSize: 13,
+ cursor: "pointer",
+ fontFamily: "monospace",
+ maxWidth: 200,
+ },
+ // Ellipsize long branch names inside the trigger.
+ branchName: {
+ whiteSpace: "nowrap",
+ overflow: "hidden",
+ textOverflow: "ellipsis",
+ maxWidth: 140,
+ },
+ dropdown: {
+ position: "fixed",
+ width: 280,
+ backgroundColor: "#1F1F23",
+ border: "1px solid #27272A",
+ borderRadius: 8,
+ boxShadow: "0 8px 24px rgba(0,0,0,0.6)",
+ zIndex: 9999,
+ overflow: "hidden",
+ },
+ searchBox: {
+ padding: "8px 10px",
+ borderBottom: "1px solid #27272A",
+ },
+ searchInput: {
+ width: "100%",
+ padding: "6px 8px",
+ borderRadius: 4,
+ border: "1px solid #3F3F46",
+ background: "#131316",
+ color: "#E4E4E7",
+ fontSize: 12,
+ outline: "none",
+ fontFamily: "monospace",
+ boxSizing: "border-box",
+ },
+ // Scroll container for the branch rows.
+ branchList: {
+ maxHeight: 260,
+ overflowY: "auto",
+ },
+ branchRow: {
+ display: "flex",
+ alignItems: "center",
+ gap: 6,
+ padding: "7px 10px",
+ cursor: "pointer",
+ transition: "background-color 0.1s",
+ borderBottom: "1px solid rgba(39, 39, 42, 0.5)",
+ },
+ loadingRow: {
+ padding: "12px 10px",
+ textAlign: "center",
+ fontSize: 12,
+ color: "#71717A",
+ },
+ errorRow: {
+ padding: "12px 10px",
+ textAlign: "center",
+ fontSize: 11,
+ color: "#F59E0B",
+ },
+ // Green "default" pill shown next to the repo's default branch.
+ defaultBadge: {
+ fontSize: 9,
+ padding: "1px 5px",
+ borderRadius: 8,
+ backgroundColor: "rgba(16, 185, 129, 0.15)",
+ color: "#10B981",
+ fontWeight: 600,
+ textTransform: "uppercase",
+ letterSpacing: "0.04em",
+ flexShrink: 0,
+ },
+ // Blue "AI" pill for branches created by AI sessions.
+ aiBadge: {
+ fontSize: 9,
+ padding: "1px 5px",
+ borderRadius: 8,
+ backgroundColor: "rgba(59, 130, 246, 0.15)",
+ color: "#60a5fa",
+ fontWeight: 700,
+ flexShrink: 0,
+ },
+ protectedBadge: {
+ color: "#F59E0B",
+ flexShrink: 0,
+ display: "flex",
+ alignItems: "center",
+ },
+};
diff --git a/frontend/components/ChatPanel.jsx b/frontend/components/ChatPanel.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..60b889a077ab7bc755533396ad1d6ea16f5c233a
--- /dev/null
+++ b/frontend/components/ChatPanel.jsx
@@ -0,0 +1,745 @@
+// frontend/components/ChatPanel.jsx
+import React, { useEffect, useRef, useState } from "react";
+import AssistantMessage from "./AssistantMessage.jsx";
+import DiffStats from "./DiffStats.jsx";
+import DiffViewer from "./DiffViewer.jsx";
+import CreatePRButton from "./CreatePRButton.jsx";
+import StreamingMessage from "./StreamingMessage.jsx";
+import { SessionWebSocket } from "../utils/ws.js";
+
+// Helper to get headers (inline safety if utility is missing)
+const getHeaders = () => ({
+ "Content-Type": "application/json",
+ Authorization: `Bearer ${localStorage.getItem("github_token") || ""}`,
+});
+
+export default function ChatPanel({
+ repo,
+ defaultBranch = "main",
+ currentBranch, // do NOT default here; parent must pass the real one
+ onExecutionComplete,
+ sessionChatState,
+ onSessionChatStateChange,
+ sessionId,
+ onEnsureSession,
+ canChat = true, // readiness gate: false disables composer and shows blocker
+ chatBlocker = null, // { message: string, cta?: string, onCta?: () => void }
+}) {
+ // Initialize state from props or defaults
+ const [messages, setMessages] = useState(sessionChatState?.messages || []);
+ const [goal, setGoal] = useState("");
+ const [plan, setPlan] = useState(sessionChatState?.plan || null);
+
+ const [loadingPlan, setLoadingPlan] = useState(false);
+ const [executing, setExecuting] = useState(false);
+ const [status, setStatus] = useState("");
+
+ // Claude-Code-on-Web: WebSocket streaming + diff + PR
+ const [wsConnected, setWsConnected] = useState(false);
+ const [streamingEvents, setStreamingEvents] = useState([]);
+ const [diffData, setDiffData] = useState(null);
+ const [showDiffViewer, setShowDiffViewer] = useState(false);
+ const wsRef = useRef(null);
+
+ // Ref mirrors streamingEvents so WS callbacks avoid stale closures
+ const streamingEventsRef = useRef([]);
+ useEffect(() => { streamingEventsRef.current = streamingEvents; }, [streamingEvents]);
+
+ // Skip the session-sync useEffect reset when we just created a session
+ // (the parent already seeded the messages into chatBySession)
+ const skipNextSyncRef = useRef(false);
+
+ const messagesEndRef = useRef(null);
+ const prevMsgCountRef = useRef((sessionChatState?.messages || []).length);
+
+ // ---------------------------------------------------------------------------
+ // WebSocket connection management
+ // ---------------------------------------------------------------------------
+ // Open (and tear down) the per-session WebSocket. Re-runs whenever
+ // sessionId changes; the previous socket is closed first so at most one
+ // connection exists per ChatPanel instance.
+ useEffect(() => {
+ // Clean up previous connection
+ if (wsRef.current) {
+ wsRef.current.close();
+ wsRef.current = null;
+ setWsConnected(false);
+ }
+
+ if (!sessionId) return;
+
+ // Wait for backend to be reachable before opening WebSocket.
+ // Without this, the WS connects immediately on session creation
+ // and fails repeatedly with "closed before established" when the
+ // backend is still starting up (common on WSL cold start).
+ let cancelled = false;
+ const backendUrl = import.meta.env.VITE_BACKEND_URL || '';
+ const pingUrl = backendUrl ? `${backendUrl}/api/ping` : '/api/ping';
+ // Up to 10 probes, each with a 2s fetch timeout plus a 1.5s pause —
+ // worst case roughly 35s before giving up and never connecting.
+ const waitForBackend = async () => {
+ for (let i = 0; i < 10 && !cancelled; i++) {
+ try {
+ const res = await fetch(pingUrl, { method: 'GET', signal: AbortSignal.timeout(2000) });
+ if (res.ok) return true;
+ } catch { /* retry */ }
+ await new Promise(r => setTimeout(r, 1500));
+ }
+ return false;
+ };
+
+ waitForBackend().then((ok) => {
+ if (cancelled || !ok) return;
+ connectWs();
+ });
+
+ // Wire the SessionWebSocket callbacks into component state.
+ // NOTE(review): message `type` values here must match what the backend
+ // emits — confirm against the server-side WS protocol.
+ function connectWs() {
+ const ws = new SessionWebSocket(sessionId, {
+ onConnect: () => setWsConnected(true),
+ onDisconnect: () => setWsConnected(false),
+ onMessage: (data) => {
+ if (data.type === "agent_message") {
+ setStreamingEvents((prev) => [...prev, data]);
+ } else if (data.type === "tool_use" || data.type === "tool_result") {
+ setStreamingEvents((prev) => [...prev, data]);
+ } else if (data.type === "diff_update") {
+ setDiffData(data.stats || data);
+ } else if (data.type === "session_restored") {
+ // Session loaded
+ }
+ },
+ onStatusChange: (newStatus) => {
+ if (newStatus === "waiting") {
+ // Always clear loading state when agent finishes
+ setLoadingPlan(false);
+
+ // Consolidate streaming events into a chat message (use ref to
+ // avoid stale closure — streamingEvents state would be stale here)
+ const events = streamingEventsRef.current;
+ if (events.length > 0) {
+ const textParts = events
+ .filter((e) => e.type === "agent_message")
+ .map((e) => e.content);
+ if (textParts.length > 0) {
+ const consolidated = {
+ from: "ai",
+ role: "assistant",
+ answer: textParts.join(""),
+ content: textParts.join(""),
+ };
+ setMessages((prev) => [...prev, consolidated]);
+ }
+ setStreamingEvents([]);
+ }
+ }
+ },
+ onError: (err) => {
+ console.warn("[ws] Error:", err);
+ setLoadingPlan(false);
+ },
+ });
+
+ ws.connect();
+ wsRef.current = ws;
+ } // end connectWs
+
+ // Cleanup: flag the backend-wait loop as cancelled and close any
+ // socket that was opened, so a stale session cannot keep streaming.
+ return () => {
+ cancelled = true;
+ if (wsRef.current) wsRef.current.close();
+ };
+ }, [sessionId]); // eslint-disable-line react-hooks/exhaustive-deps
+
+ // ---------------------------------------------------------------------------
+ // 1) SESSION SYNC: Restore chat when branch, repo, OR session changes
+ // IMPORTANT: Do NOT depend on sessionChatState here (prevents prop/state loop)
+ // ---------------------------------------------------------------------------
+ // Restore chat state from the parent when the branch, repo, or session
+ // changes. sessionChatState is deliberately NOT a dependency: reacting to
+ // it would create a prop->state->prop feedback loop with the persistence
+ // effect below.
+ useEffect(() => {
+ // When send() just created a session, the parent seeded the messages
+ // into chatBySession already. Skip the reset so we don't wipe
+ // the optimistic user message that was already rendered.
+ if (skipNextSyncRef.current) {
+ skipNextSyncRef.current = false;
+ return;
+ }
+
+ const nextMessages = sessionChatState?.messages || [];
+ const nextPlan = sessionChatState?.plan || null;
+
+ setMessages(nextMessages);
+ setPlan(nextPlan);
+
+ // Reset transient UI state on branch/repo/session switch
+ setGoal("");
+ setStatus("");
+ setLoadingPlan(false);
+ setExecuting(false);
+ setStreamingEvents([]);
+ setDiffData(null);
+
+ // Update msg count tracker so auto-scroll doesn't "jump" on switch
+ prevMsgCountRef.current = nextMessages.length;
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [currentBranch, repo?.full_name, sessionId]);
+
+ // ---------------------------------------------------------------------------
+ // 2) PERSISTENCE: Save chat to Parent (no loop now because sync only on branch)
+ // ---------------------------------------------------------------------------
+ // Mirror local chat state up to the parent so it survives tab/session
+ // switches. Guarded so an empty initial render never clobbers a parent
+ // state that already holds messages.
+ useEffect(() => {
+ if (typeof onSessionChatStateChange === "function") {
+ // Avoid wiping parent state on mount
+ if (messages.length > 0 || plan) {
+ onSessionChatStateChange({ messages, plan });
+ }
+ }
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [messages, plan]);
+
+ // ---------------------------------------------------------------------------
+ // 3) AUTO-SCROLL: Only scroll when a message is appended (reduces flicker)
+ // ---------------------------------------------------------------------------
+ // Scroll to the newest message only when the combined count of chat
+ // messages + streaming events grows; shrinking counts (session switch)
+ // just resync the tracker without scrolling, which avoids flicker.
+ useEffect(() => {
+ const curCount = messages.length + streamingEvents.length;
+ const prevCount = prevMsgCountRef.current;
+
+ // Only scroll when new messages are added
+ if (curCount > prevCount) {
+ prevMsgCountRef.current = curCount;
+ // rAF defers the scroll until after the new message has been painted
+ requestAnimationFrame(() => {
+ messagesEndRef.current?.scrollIntoView({ behavior: "smooth" });
+ });
+ } else {
+ prevMsgCountRef.current = curCount;
+ }
+ }, [messages.length, streamingEvents.length]);
+
+ // ---------------------------------------------------------------------------
+ // HANDLERS
+ // ---------------------------------------------------------------------------
+ // ---------------------------------------------------------------------------
+ // Persist a message to the backend session (fire-and-forget)
+ // ---------------------------------------------------------------------------
+ const persistMessage = (sid, role, content) => {
+ if (!sid) return;
+ fetch(`/api/sessions/${sid}/message`, {
+ method: "POST",
+ headers: getHeaders(),
+ body: JSON.stringify({ role, content }),
+ }).catch(() => {}); // best-effort
+ };
+
+ const send = async () => {
+ if (!repo || !goal.trim()) return;
+
+ const text = goal.trim();
+
+ // Clear input immediately (Claude Code behavior)
+ setGoal("");
+ // Reset textarea height
+ const ta = document.querySelector(".chat-input");
+ if (ta) ta.style.height = "40px";
+
+ // Optimistic update (user bubble appears immediately)
+ const userMsg = { from: "user", role: "user", text, content: text };
+ setMessages((prev) => [...prev, userMsg]);
+
+ setLoadingPlan(true);
+ setStatus("");
+ setPlan(null);
+ setStreamingEvents([]);
+
+ // ------- Implicit session creation (Claude Code parity) -------
+ // Every chat must be backed by a session. If none exists yet,
+ // create one on-demand before sending the plan request.
+ let sid = sessionId;
+ if (!sid && typeof onEnsureSession === "function") {
+ // Derive a short title from the first message
+ const sessionName = text.length > 60 ? text.slice(0, 57) + "..." : text;
+
+ // Tell the sync useEffect to skip the reset that would otherwise
+ // wipe the optimistic user message when activeSessionId changes.
+ skipNextSyncRef.current = true;
+
+ sid = await onEnsureSession(sessionName, [userMsg]);
+ if (!sid) {
+ // Session creation failed — continue without session
+ skipNextSyncRef.current = false;
+ }
+ }
+
+ // Persist user message to backend session
+ persistMessage(sid, "user", text);
+
+ // Always use HTTP for plan generation (the original reliable flow).
+ // WebSocket is only used for real-time streaming feedback display.
+ const effectiveBranch = currentBranch || defaultBranch || "HEAD";
+
+ try {
+ // Timeout after 5 minutes (CrewAI agent can be slow with small models)
+ const planController = new AbortController();
+ const planTimer = setTimeout(() => planController.abort(), 300000);
+
+ let res;
+ try {
+ res = await fetch("/api/chat/plan", {
+ method: "POST",
+ headers: getHeaders(),
+ body: JSON.stringify({
+ repo_owner: repo.owner,
+ repo_name: repo.name,
+ goal: text,
+ branch_name: effectiveBranch,
+ }),
+ signal: planController.signal,
+ });
+ } catch (fetchErr) {
+ if (fetchErr.name === "AbortError") {
+ throw new Error("Request timed out after 5 minutes. The LLM may be too slow. Try a faster model.");
+ }
+ throw fetchErr;
+ } finally {
+ clearTimeout(planTimer);
+ }
+
+ let data;
+ try {
+ data = await res.json();
+ } catch {
+ throw new Error(`Server error (${res.status}). The LLM may have returned an invalid response. Try a different model or enable Lite Mode in Settings.`);
+ }
+ if (!res.ok) {
+ const detail = data?.detail || data?.error || data?.message || "";
+ // Friendly message for common LLM failures
+ if (detail.includes("None or empty") || detail.includes("Invalid response from LLM")) {
+ throw new Error(
+ "The LLM returned an empty response. This often happens with small models (deepseek, qwen 0.5b). " +
+ "Try a larger model (llama3, qwen2.5:7b) or enable Lite Mode in Settings."
+ );
+ }
+ throw new Error(detail || "Failed to generate plan");
+ }
+
+ setPlan(data);
+
+ // Extract summary from nested plan structure or top-level
+ const summary =
+ data.plan?.summary || data.summary || data.message ||
+ "Here is the proposed plan for your request.";
+
+ // Assistant response (Answer + Action Plan)
+ setMessages((prev) => [
+ ...prev,
+ {
+ from: "ai",
+ role: "assistant",
+ answer: summary,
+ content: summary,
+ plan: data,
+ },
+ ]);
+
+ // Persist assistant response to backend session
+ persistMessage(sid, "assistant", summary);
+ } catch (err) {
+ const msg = String(err?.message || err);
+ console.error(err);
+ setStatus(msg);
+ setMessages((prev) => [
+ ...prev,
+ { from: "ai", role: "system", content: `Error: ${msg}` },
+ ]);
+ } finally {
+ setLoadingPlan(false);
+ }
+ };
+
+ const execute = async () => {
+ if (!repo || !plan) return;
+
+ setExecuting(true);
+ setStatus("");
+
+ try {
+ // Guard: currentBranch might be missing if parent didn't pass it yet
+ const safeCurrent = currentBranch || defaultBranch || "HEAD";
+ const safeDefault = defaultBranch || "main";
+
+ // Sticky vs Hard Switch:
+ // - If on default branch -> undefined (backend creates new branch)
+ // - If already on AI branch -> currentBranch (backend updates existing)
+ const branch_name = safeCurrent === safeDefault ? undefined : safeCurrent;
+
+ const res = await fetch("/api/chat/execute", {
+ method: "POST",
+ headers: getHeaders(),
+ body: JSON.stringify({
+ repo_owner: repo.owner,
+ repo_name: repo.name,
+ plan,
+ branch_name,
+ }),
+ });
+
+ const data = await res.json();
+ if (!res.ok) throw new Error(data.detail || "Execution failed");
+
+ setStatus(data.message || "Execution completed.");
+
+ const completionMsg = {
+ from: "ai",
+ role: "assistant",
+ answer: data.message || "Execution completed.",
+ content: data.message || "Execution completed.",
+ executionLog: data.executionLog,
+ };
+
+ // Show completion immediately (keeps old "Execution Log" section)
+ setMessages((prev) => [...prev, completionMsg]);
+
+ // Clear active plan UI
+ setPlan(null);
+
+ // Pass completionMsg upward for seeding branch history
+ if (typeof onExecutionComplete === "function") {
+ onExecutionComplete({
+ branch: data.branch || data.branch_name,
+ mode: data.mode,
+ commit_url: data.commit_url || data.html_url,
+ message: data.message,
+ completionMsg,
+ sourceBranch: safeCurrent,
+ });
+ }
+ } catch (err) {
+ console.error(err);
+ setStatus(String(err?.message || err));
+ } finally {
+ setExecuting(false);
+ }
+ };
+
+ // ---------------------------------------------------------------------------
+ // RENDER
+ // ---------------------------------------------------------------------------
+ const isOnSessionBranch = currentBranch && currentBranch !== defaultBranch;
+
+ return (
+
+
+
+
+ {messages.map((m, idx) => {
+ // Success message (App.jsx injected)
+ if (m.isSuccess) {
+ return (
+
+ );
+ }
+
+ // User message
+ if (m.from === "user" || m.role === "user") {
+ return (
+
+ {m.text || m.content}
+
+ );
+ }
+
+ // Assistant message (Answer / Plan / Execution Log)
+ return (
+
+
+ {/* Diff stats indicator (Claude-Code-on-Web parity) */}
+ {m.diff && (
+
{
+ setDiffData(m.diff);
+ setShowDiffViewer(true);
+ }} />
+ )}
+
+ );
+ })}
+
+ {/* Streaming events (real-time agent output) */}
+ {streamingEvents.length > 0 && (
+
+
+
+ )}
+
+ {loadingPlan && streamingEvents.length === 0 && (
+
+ Thinking...
+
+ )}
+
+ {!messages.length && !plan && !loadingPlan && streamingEvents.length === 0 && (
+
+
💬
+
Tell GitPilot what you want to do with this repository.
+
+ It will propose a safe step-by-step plan before any execution.
+
+
+ )}
+
+
+
+
+ {/* Diff stats bar (when agent has made changes) */}
+ {diffData && (
+
+ setShowDiffViewer(true)} />
+
+ )}
+
+
+ {/* Readiness blocker banner */}
+ {!canChat && chatBlocker && (
+
+ {chatBlocker.message || "Chat is not ready yet."}
+ {chatBlocker.cta && chatBlocker.onCta && (
+
+ {chatBlocker.cta}
+
+ )}
+
+ )}
+ {status && (
+
+ {status}
+
+ )}
+
+
+
+
+ {/* WebSocket connection indicator */}
+ {sessionId && (
+
+
+
+ {wsConnected ? "Live" : "Connecting..."}
+
+
+ )}
+
+
+ {/* Diff Viewer overlay */}
+ {showDiffViewer && (
+
setShowDiffViewer(false)}
+ />
+ )}
+
+ );
+}
diff --git a/frontend/components/ContextBar.jsx b/frontend/components/ContextBar.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..be13192a04dbea56e257044078db9e1398a44db8
--- /dev/null
+++ b/frontend/components/ContextBar.jsx
@@ -0,0 +1,156 @@
+import React, { useCallback, useRef, useState } from "react";
+import BranchPicker from "./BranchPicker.jsx";
+
+/**
+ * ContextBar — horizontal repo chip bar for multi-repo workspace context.
+ *
+ * Uses CSS classes for hover-reveal X (Claude-style: subtle by default,
+ * visible on chip hover, red on X hover). Each chip owns its own remove
+ * button — removing one repo never affects the others.
+ */
+export default function ContextBar({
+ contextRepos,
+ activeRepoKey,
+ repoStateByKey,
+ onActivate,
+ onRemove,
+ onAdd,
+ onBranchChange,
+ mode, // workspace mode: "github", "local-git", "folder" (optional)
+}) {
+ if (!contextRepos || contextRepos.length === 0) return null;
+
+ return (
+
+ {/* Workspace mode indicator */}
+ {mode && (
+
+ {mode === "github" ? "GH" : mode === "local-git" ? "Git" : "Dir"}
+
+ )}
+
+ {contextRepos.map((entry) => {
+ const isActive = entry.repoKey === activeRepoKey;
+ return (
+ onActivate(entry.repoKey)}
+ onRemove={() => onRemove(entry.repoKey)}
+ onBranchChange={(newBranch) =>
+ onBranchChange(entry.repoKey, newBranch)
+ }
+ />
+ );
+ })}
+
+
+
+
+
+
+
+
+
+
+ {contextRepos.length} {contextRepos.length === 1 ? "repo" : "repos"}
+
+
+ );
+}
+
+function RepoChip({ entry, isActive, repoState, onActivate, onRemove, onBranchChange }) {
+ const [branchOpen, setBranchOpen] = useState(false);
+ const [hovered, setHovered] = useState(false);
+ const branchBtnRef = useRef(null);
+ const repo = entry.repo;
+ const branch = repoState?.currentBranch || entry.branch || repo?.default_branch || "main";
+ const defaultBranch = repoState?.defaultBranch || repo?.default_branch || "main";
+ const sessionBranches = repoState?.sessionBranches || [];
+ const displayName = repo?.name || entry.repoKey?.split("/")[1] || entry.repoKey;
+
+ const handleChipClick = useCallback(
+ (e) => {
+ if (e.target.closest("[data-chip-action]")) return;
+ onActivate();
+ },
+ [onActivate]
+ );
+
+ return (
+ setHovered(true)}
+ onMouseLeave={() => setHovered(false)}
+ title={isActive ? `Active (write): ${entry.repoKey}` : `Click to activate ${entry.repoKey}`}
+ >
+ {/* Active indicator bar */}
+ {isActive &&
}
+
+ {/* Repo name */}
+
{displayName}
+
+ {/* Separator dot */}
+
+
+ {/* Branch name — single click opens GitHub branch list */}
+
{
+ e.stopPropagation();
+ setBranchOpen((v) => !v);
+ }}
+ >
+ {branch}
+
+
+ {/* Write badge for active repo */}
+ {isActive &&
write }
+
+ {/* Remove button: hidden by default, revealed on hover */}
+
{
+ e.stopPropagation();
+ onRemove();
+ }}
+ title={`Remove ${displayName} from context`}
+ >
+
+
+
+
+
+
+ {/* BranchPicker in external-anchor mode: dropdown opens immediately,
+ positioned from the branch button, fetches all branches from GitHub */}
+ {branchOpen && (
+
{
+ onBranchChange(newBranch);
+ setBranchOpen(false);
+ }}
+ onClose={() => setBranchOpen(false)}
+ />
+ )}
+
+ );
+}
diff --git a/frontend/components/CreatePRButton.jsx b/frontend/components/CreatePRButton.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..559eb9c277ceed34cb8a455eea1347189f52cf56
--- /dev/null
+++ b/frontend/components/CreatePRButton.jsx
@@ -0,0 +1,159 @@
+import React, { useState } from "react";
+
+/**
+ * CreatePRButton — Claude-Code-on-Web parity PR creation action.
+ *
+ * When clicked, pushes session changes to a new branch and opens a PR.
+ * Shows loading state and links to the created PR on GitHub.
+ */
+export default function CreatePRButton({
+ repo,
+ sessionId,
+ branch,
+ defaultBranch,
+ disabled,
+ onPRCreated,
+}) {
+ const [creating, setCreating] = useState(false);
+ const [prUrl, setPrUrl] = useState(null);
+ const [error, setError] = useState(null);
+
+ const handleCreate = async () => {
+ if (!repo || !branch || branch === defaultBranch) return;
+
+ setCreating(true);
+ setError(null);
+
+ try {
+ const token = localStorage.getItem("github_token");
+ const headers = {
+ "Content-Type": "application/json",
+ ...(token ? { Authorization: `Bearer ${token}` } : {}),
+ };
+
+ const owner = repo.full_name?.split("/")[0] || repo.owner;
+ const name = repo.full_name?.split("/")[1] || repo.name;
+
+ const res = await fetch(`/api/repos/${owner}/${name}/pulls`, {
+ method: "POST",
+ headers,
+ body: JSON.stringify({
+ title: `[GitPilot] Changes from session ${sessionId ? sessionId.slice(0, 8) : branch}`,
+ head: branch,
+ base: defaultBranch || "main",
+ body: [
+ "## Summary",
+ "",
+ `Changes created by GitPilot AI assistant on branch \`${branch}\`.`,
+ "",
+ sessionId ? `Session ID: \`${sessionId}\`` : "",
+ "",
+ "---",
+ "*This PR was generated by [GitPilot](https://github.com/ruslanmv/gitpilot).*",
+ ]
+ .filter(Boolean)
+ .join("\n"),
+ }),
+ });
+
+ const data = await res.json();
+ if (!res.ok) throw new Error(data.detail || "Failed to create PR");
+
+ const url = data.html_url || data.url;
+ setPrUrl(url);
+ onPRCreated?.({ pr_url: url, pr_number: data.number, branch });
+ } catch (err) {
+ setError(err.message);
+ } finally {
+ setCreating(false);
+ }
+ };
+
+ if (prUrl) {
+ return (
+
+
+
+
+
+
+
+ View PR on GitHub →
+
+ );
+ }
+
+ return (
+
+
+
+
+
+
+
+
+ {creating ? "Creating PR..." : "Create PR"}
+
+ {error && (
+
{error}
+ )}
+
+ );
+}
+
+// Inline style map for CreatePRButton (dark theme, green accent).
+const styles = {
+ // The "Create PR" action button.
+ btn: {
+ display: "flex",
+ alignItems: "center",
+ gap: 6,
+ height: 38,
+ padding: "0 14px",
+ borderRadius: 8,
+ border: "1px solid rgba(16, 185, 129, 0.3)",
+ background: "rgba(16, 185, 129, 0.08)",
+ color: "#10B981",
+ fontSize: 13,
+ fontWeight: 600,
+ cursor: "pointer",
+ whiteSpace: "nowrap",
+ transition: "background-color 0.15s",
+ },
+ // Anchor shown after the PR exists ("View PR on GitHub").
+ prLink: {
+ display: "flex",
+ alignItems: "center",
+ gap: 6,
+ height: 38,
+ padding: "0 14px",
+ borderRadius: 8,
+ background: "rgba(16, 185, 129, 0.10)",
+ color: "#10B981",
+ fontSize: 13,
+ fontWeight: 600,
+ textDecoration: "none",
+ whiteSpace: "nowrap",
+ },
+ // Small red error line rendered under the button.
+ error: {
+ fontSize: 11,
+ color: "#EF4444",
+ marginTop: 4,
+ },
+};
diff --git a/frontend/components/DiffStats.jsx b/frontend/components/DiffStats.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..2460e6ef496ccc5e95b2159c698afda556a99734
--- /dev/null
+++ b/frontend/components/DiffStats.jsx
@@ -0,0 +1,59 @@
+import React from "react";
+
+/**
+ * DiffStats — Claude-Code-on-Web parity inline diff indicator.
+ *
+ * Clickable "+N -N in M files" badge that appears in agent messages.
+ * Clicking opens the DiffViewer overlay.
+ */
+export default function DiffStats({ diff, onClick }) {
+ if (!diff || (!diff.additions && !diff.deletions && !diff.files_changed)) {
+ return null;
+ }
+
+ return (
+
+
+
+
+
+ +{diff.additions || 0}
+ -{diff.deletions || 0}
+
+ in {diff.files_changed || (diff.files || []).length} file{(diff.files_changed || (diff.files || []).length) !== 1 ? "s" : ""}
+
+
+
+
+
+ );
+}
+
+// Inline style map for the DiffStats badge (dark theme, monospace).
+const styles = {
+ // Clickable pill wrapping the whole "+N -N in M files" indicator.
+ container: {
+ display: "inline-flex",
+ alignItems: "center",
+ gap: 6,
+ padding: "5px 10px",
+ borderRadius: 6,
+ border: "1px solid #27272A",
+ backgroundColor: "rgba(24, 24, 27, 0.8)",
+ cursor: "pointer",
+ fontSize: 12,
+ fontFamily: "monospace",
+ color: "#A1A1AA",
+ transition: "border-color 0.15s, background-color 0.15s",
+ marginTop: 8,
+ },
+ // Green "+N" additions count.
+ additions: {
+ color: "#10B981",
+ fontWeight: 600,
+ },
+ // Red "-N" deletions count.
+ deletions: {
+ color: "#EF4444",
+ fontWeight: 600,
+ },
+ // Muted "in M files" suffix.
+ files: {
+ color: "#71717A",
+ },
+};
diff --git a/frontend/components/DiffViewer.jsx b/frontend/components/DiffViewer.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..b1a05fbc9d3f4c52b40ab4cd075d42db99522663
--- /dev/null
+++ b/frontend/components/DiffViewer.jsx
@@ -0,0 +1,263 @@
+import React, { useState } from "react";
+
+/**
+ * DiffViewer — Claude-Code-on-Web parity diff overlay.
+ *
+ * Shows a file list on the left and unified diff on the right.
+ * Green = additions, red = deletions. Additive component.
+ */
+export default function DiffViewer({ diff, onClose }) {
+ const [selectedFile, setSelectedFile] = useState(0);
+
+ if (!diff || !diff.files || diff.files.length === 0) {
+ return (
+
+
+
+ Diff Viewer
+
+ ×
+
+
+
No changes to display.
+
+
+ );
+ }
+
+ const files = diff.files || [];
+ const currentFile = files[selectedFile] || files[0];
+
+ return (
+
+
+ {/* Header */}
+
+
+ Diff Viewer
+
+ +{diff.additions || 0}
+ {" "}
+ -{diff.deletions || 0}
+ {" in "}
+ {diff.files_changed || files.length} files
+
+
+
+ ×
+
+
+
+ {/* Body */}
+
+ {/* File list */}
+
+ {files.map((f, idx) => (
+
setSelectedFile(idx)}
+ >
+ {f.path}
+
+ +{f.additions || 0}
+ {" "}
+ -{f.deletions || 0}
+
+
+ ))}
+
+
+ {/* Diff content */}
+
+
{currentFile.path}
+
+ {(currentFile.hunks || []).map((hunk, hi) => (
+
+
{hunk.header || `@@ hunk ${hi + 1} @@`}
+ {(hunk.lines || []).map((line, li) => {
+ let bg = "transparent";
+ let color = "#D4D4D8";
+ if (line.startsWith("+")) {
+ bg = "rgba(16, 185, 129, 0.10)";
+ color = "#6EE7B7";
+ } else if (line.startsWith("-")) {
+ bg = "rgba(239, 68, 68, 0.10)";
+ color = "#FCA5A5";
+ }
+ return (
+
+ {line}
+
+ );
+ })}
+
+ ))}
+
+ {(!currentFile.hunks || currentFile.hunks.length === 0) && (
+
+ Diff content will appear here when the agent modifies files.
+
+ )}
+
+
+
+
+
+ );
+}
+
+const styles = {
+ overlay: {
+ position: "fixed",
+ top: 0,
+ left: 0,
+ right: 0,
+ bottom: 0,
+ backgroundColor: "rgba(0, 0, 0, 0.7)",
+ zIndex: 200,
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "center",
+ },
+ panel: {
+ width: "90vw",
+ maxWidth: 1100,
+ height: "80vh",
+ backgroundColor: "#131316",
+ border: "1px solid #27272A",
+ borderRadius: 12,
+ display: "flex",
+ flexDirection: "column",
+ overflow: "hidden",
+ },
+ header: {
+ display: "flex",
+ justifyContent: "space-between",
+ alignItems: "center",
+ padding: "12px 16px",
+ borderBottom: "1px solid #27272A",
+ backgroundColor: "#18181B",
+ },
+ headerLeft: {
+ display: "flex",
+ alignItems: "center",
+ gap: 12,
+ },
+ headerTitle: {
+ fontSize: 14,
+ fontWeight: 600,
+ color: "#E4E4E7",
+ },
+ statBadge: {
+ fontSize: 12,
+ color: "#A1A1AA",
+ },
+ closeBtn: {
+ width: 28,
+ height: 28,
+ borderRadius: 6,
+ border: "1px solid #3F3F46",
+ background: "transparent",
+ color: "#A1A1AA",
+ fontSize: 18,
+ cursor: "pointer",
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "center",
+ },
+ body: {
+ flex: 1,
+ display: "flex",
+ overflow: "hidden",
+ },
+ fileList: {
+ width: 240,
+ borderRight: "1px solid #27272A",
+ overflowY: "auto",
+ flexShrink: 0,
+ },
+ fileItem: {
+ padding: "8px 10px",
+ cursor: "pointer",
+ borderBottom: "1px solid rgba(39, 39, 42, 0.5)",
+ transition: "background-color 0.1s",
+ },
+ fileName: {
+ display: "block",
+ fontSize: 12,
+ fontFamily: "monospace",
+ color: "#E4E4E7",
+ whiteSpace: "nowrap",
+ overflow: "hidden",
+ textOverflow: "ellipsis",
+ },
+ fileStats: {
+ display: "block",
+ fontSize: 10,
+ marginTop: 2,
+ },
+ diffContent: {
+ flex: 1,
+ overflow: "auto",
+ display: "flex",
+ flexDirection: "column",
+ },
+ diffPath: {
+ padding: "8px 12px",
+ fontSize: 12,
+ fontFamily: "monospace",
+ color: "#A1A1AA",
+ borderBottom: "1px solid #27272A",
+ backgroundColor: "#18181B",
+ position: "sticky",
+ top: 0,
+ zIndex: 1,
+ },
+ diffCode: {
+ padding: "4px 0",
+ fontFamily: "monospace",
+ fontSize: 12,
+ lineHeight: 1.6,
+ },
+ hunkHeader: {
+ padding: "4px 12px",
+ color: "#6B7280",
+ backgroundColor: "rgba(59, 130, 246, 0.05)",
+ fontSize: 11,
+ fontStyle: "italic",
+ },
+ diffLine: {
+ padding: "0 12px",
+ whiteSpace: "pre",
+ },
+ diffPlaceholder: {
+ padding: 20,
+ textAlign: "center",
+ color: "#52525B",
+ fontSize: 13,
+ },
+ emptyState: {
+ flex: 1,
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "center",
+ color: "#52525B",
+ fontSize: 14,
+ },
+};
diff --git a/frontend/components/EnvironmentEditor.jsx b/frontend/components/EnvironmentEditor.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..eb0740eebff0280f3155f478ac7a2f437f251681
--- /dev/null
+++ b/frontend/components/EnvironmentEditor.jsx
@@ -0,0 +1,278 @@
+import React, { useState } from "react";
+import { createPortal } from "react-dom";
+
+/**
+ * EnvironmentEditor — Claude-Code-on-Web parity environment config modal.
+ *
+ * Allows setting name, network access level, and environment variables.
+ */
+export default function EnvironmentEditor({ environment, onSave, onDelete, onClose }) {
+ const [name, setName] = useState(environment?.name || "");
+ const [networkAccess, setNetworkAccess] = useState(environment?.network_access || "limited");
+ const [envVarsText, setEnvVarsText] = useState(
+ environment?.env_vars
+ ? Object.entries(environment.env_vars)
+ .map(([k, v]) => `${k}=${v}`)
+ .join("\n")
+ : ""
+ );
+
+ const handleSave = () => {
+ const envVars = {};
+ envVarsText
+ .split("\n")
+ .map((line) => line.trim())
+ .filter((line) => line && line.includes("="))
+ .forEach((line) => {
+ const idx = line.indexOf("=");
+ const key = line.slice(0, idx).trim();
+ const val = line.slice(idx + 1).trim();
+ if (key) envVars[key] = val;
+ });
+
+ onSave({
+ id: environment?.id || null,
+ name: name.trim() || "Default",
+ network_access: networkAccess,
+ env_vars: envVars,
+ });
+ };
+
+ return createPortal(
+ { if (e.target === e.currentTarget) onClose(); }}>
+
e.stopPropagation()}>
+
+
+ {environment?.id ? "Edit Environment" : "New Environment"}
+
+
+ ×
+
+
+
+
+ {/* Name */}
+
Environment Name
+
setName(e.target.value)}
+ placeholder="e.g. Development, Staging, Production"
+ style={styles.input}
+ />
+
+ {/* Network Access */}
+
Network Access
+
+ {[
+ { value: "limited", label: "Limited", desc: "Allowlisted domains only (package managers, APIs)" },
+ { value: "full", label: "Full", desc: "Unrestricted internet access" },
+ { value: "none", label: "None", desc: "Air-gapped — no external network" },
+ ].map((opt) => (
+
+ setNetworkAccess(e.target.value)}
+ style={{ display: "none" }}
+ />
+
+
+ {opt.label}
+
+
+ {opt.desc}
+
+
+
+ ))}
+
+
+ {/* Environment Variables */}
+
Environment Variables
+
+
+
+ {onDelete && (
+
+ Delete
+
+ )}
+
+
+ Cancel
+
+
+ Save
+
+
+
+
,
+ document.body
+ );
+}
+
+const styles = {
+ overlay: {
+ position: "fixed",
+ top: 0,
+ left: 0,
+ right: 0,
+ bottom: 0,
+ backgroundColor: "rgba(0, 0, 0, 0.6)",
+ zIndex: 10000,
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "center",
+ },
+ modal: {
+ width: 480,
+ maxHeight: "80vh",
+ backgroundColor: "#131316",
+ border: "1px solid #27272A",
+ borderRadius: 12,
+ display: "flex",
+ flexDirection: "column",
+ overflow: "hidden",
+ },
+ header: {
+ display: "flex",
+ justifyContent: "space-between",
+ alignItems: "center",
+ padding: "14px 16px",
+ borderBottom: "1px solid #27272A",
+ backgroundColor: "#18181B",
+ },
+ headerTitle: {
+ fontSize: 14,
+ fontWeight: 600,
+ color: "#E4E4E7",
+ },
+ closeBtn: {
+ width: 26,
+ height: 26,
+ borderRadius: 6,
+ border: "1px solid #3F3F46",
+ background: "transparent",
+ color: "#A1A1AA",
+ fontSize: 16,
+ cursor: "pointer",
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "center",
+ },
+ body: {
+ padding: "16px",
+ overflowY: "auto",
+ flex: 1,
+ },
+ label: {
+ display: "block",
+ fontSize: 12,
+ fontWeight: 600,
+ color: "#A1A1AA",
+ marginBottom: 6,
+ marginTop: 14,
+ },
+ input: {
+ width: "100%",
+ padding: "8px 10px",
+ borderRadius: 6,
+ border: "1px solid #3F3F46",
+ background: "#18181B",
+ color: "#E4E4E7",
+ fontSize: 13,
+ outline: "none",
+ boxSizing: "border-box",
+ },
+ radioGroup: {
+ display: "flex",
+ flexDirection: "column",
+ gap: 6,
+ },
+ radioItem: {
+ display: "flex",
+ alignItems: "flex-start",
+ gap: 10,
+ padding: "8px 10px",
+ borderRadius: 6,
+ border: "1px solid #27272A",
+ cursor: "pointer",
+ transition: "border-color 0.15s, background-color 0.15s",
+ },
+ textarea: {
+ width: "100%",
+ padding: "8px 10px",
+ borderRadius: 6,
+ border: "1px solid #3F3F46",
+ background: "#18181B",
+ color: "#E4E4E7",
+ fontSize: 12,
+ fontFamily: "monospace",
+ outline: "none",
+ resize: "vertical",
+ boxSizing: "border-box",
+ },
+ footer: {
+ display: "flex",
+ alignItems: "center",
+ gap: 8,
+ padding: "12px 16px",
+ borderTop: "1px solid #27272A",
+ },
+ cancelBtn: {
+ padding: "6px 14px",
+ borderRadius: 6,
+ border: "1px solid #3F3F46",
+ background: "transparent",
+ color: "#A1A1AA",
+ fontSize: 12,
+ cursor: "pointer",
+ },
+ saveBtn: {
+ padding: "6px 14px",
+ borderRadius: 6,
+ border: "none",
+ background: "#3B82F6",
+ color: "#fff",
+ fontSize: 12,
+ fontWeight: 600,
+ cursor: "pointer",
+ },
+ deleteBtn: {
+ padding: "6px 14px",
+ borderRadius: 6,
+ border: "1px solid rgba(239, 68, 68, 0.3)",
+ background: "transparent",
+ color: "#EF4444",
+ fontSize: 12,
+ cursor: "pointer",
+ },
+};
diff --git a/frontend/components/EnvironmentSelector.jsx b/frontend/components/EnvironmentSelector.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..c54e475a0c5715cc34ee1523aab9a2dc53bbe889
--- /dev/null
+++ b/frontend/components/EnvironmentSelector.jsx
@@ -0,0 +1,199 @@
+import React, { useEffect, useState } from "react";
+import EnvironmentEditor from "./EnvironmentEditor.jsx";
+
+/**
+ * EnvironmentSelector — Claude-Code-on-Web parity environment dropdown.
+ *
+ * Shows current environment name + gear icon. Gear opens the editor modal.
+ * Fetches environments from /api/environments.
+ */
+export default function EnvironmentSelector({ activeEnvId, onEnvChange }) {
+ const [envs, setEnvs] = useState([]);
+ const [editorOpen, setEditorOpen] = useState(false);
+ const [editingEnv, setEditingEnv] = useState(null);
+
+ const fetchEnvs = async () => {
+ try {
+ const res = await fetch("/api/environments", { cache: "no-cache" });
+ if (!res.ok) return;
+ const data = await res.json();
+ setEnvs(data.environments || []);
+ } catch (err) {
+ console.warn("Failed to fetch environments:", err);
+ }
+ };
+
+ useEffect(() => {
+ fetchEnvs();
+ }, []);
+
+ const activeEnv =
+ envs.find((e) => e.id === activeEnvId) || envs[0] || { name: "Default", id: "default" };
+
+ const handleSave = async (config) => {
+ try {
+ const method = config.id ? "PUT" : "POST";
+ const url = config.id ? `/api/environments/${config.id}` : "/api/environments";
+ await fetch(url, {
+ method,
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify(config),
+ });
+ await fetchEnvs();
+ setEditorOpen(false);
+ setEditingEnv(null);
+ } catch (err) {
+ console.warn("Failed to save environment:", err);
+ }
+ };
+
+ const handleDelete = async (envId) => {
+ try {
+ await fetch(`/api/environments/${envId}`, { method: "DELETE" });
+ await fetchEnvs();
+ if (activeEnvId === envId) {
+ onEnvChange?.(null);
+ }
+ } catch (err) {
+ console.warn("Failed to delete environment:", err);
+ }
+ };
+
+ return (
+
+
ENVIRONMENT
+
+
+ {/* Env selector */}
+ onEnvChange?.(e.target.value)}
+ style={styles.select}
+ >
+ {envs.map((env) => (
+
+ {env.name}
+
+ ))}
+
+
+ {/* Network badge */}
+
+ {activeEnv.network_access || "limited"}
+
+
+
+ {/* Gear icon */}
+
{
+ setEditingEnv(activeEnv);
+ setEditorOpen(true);
+ }}
+ title="Configure environment"
+ >
+
+
+
+
+
+
+ {/* Add new */}
+
{
+ setEditingEnv(null);
+ setEditorOpen(true);
+ }}
+ title="Add environment"
+ >
+ +
+
+
+
+ {/* Editor modal */}
+ {editorOpen && (
+
handleDelete(editingEnv.id) : null}
+ onClose={() => {
+ setEditorOpen(false);
+ setEditingEnv(null);
+ }}
+ />
+ )}
+
+ );
+}
+
+// Inline styles for the EnvironmentSelector dropdown (dark zinc palette).
+const styles = {
+ // Outer wrapper for the whole selector section.
+ container: {
+ padding: "10px 14px",
+ },
+ // Uppercase "ENVIRONMENT" section heading.
+ label: {
+ fontSize: 10,
+ fontWeight: 700,
+ letterSpacing: "0.08em",
+ color: "#71717A",
+ textTransform: "uppercase",
+ marginBottom: 6,
+ },
+ // Horizontal row holding the env card and the gear / add buttons.
+ row: {
+ display: "flex",
+ alignItems: "center",
+ gap: 6,
+ },
+ // Card containing the environment <select> and the network badge.
+ envCard: {
+ flex: 1,
+ display: "flex",
+ alignItems: "center",
+ gap: 8,
+ padding: "4px 8px",
+ borderRadius: 6,
+ border: "1px solid #27272A",
+ backgroundColor: "#18181B",
+ minWidth: 0,
+ },
+ // Borderless native select that fills the card.
+ select: {
+ flex: 1,
+ background: "transparent",
+ border: "none",
+ color: "#E4E4E7",
+ fontSize: 12,
+ fontWeight: 500,
+ outline: "none",
+ cursor: "pointer",
+ minWidth: 0,
+ },
+ // Tiny uppercase badge showing the environment's network_access value.
+ networkBadge: {
+ fontSize: 9,
+ fontWeight: 600,
+ textTransform: "uppercase",
+ letterSpacing: "0.04em",
+ flexShrink: 0,
+ },
+ // Square icon button used for both the gear (edit) and "+" (add) actions.
+ gearBtn: {
+ width: 28,
+ height: 28,
+ borderRadius: 6,
+ border: "1px solid #27272A",
+ background: "transparent",
+ color: "#71717A",
+ cursor: "pointer",
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "center",
+ fontSize: 14,
+ flexShrink: 0,
+ },
+};
diff --git a/frontend/components/FileTree.jsx b/frontend/components/FileTree.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..74352c434ba2a7a26ec6c61c63b32be21504c04a
--- /dev/null
+++ b/frontend/components/FileTree.jsx
@@ -0,0 +1,307 @@
+import React, { useState, useEffect } from "react";
+
+/**
+ * Simple recursive file tree viewer with refresh support
+ * Fetches tree data directly using the API.
+ */
+export default function FileTree({ repo, refreshTrigger, branch }) {
+ const [tree, setTree] = useState([]);
+ const [loading, setLoading] = useState(false);
+ const [isSwitchingBranch, setIsSwitchingBranch] = useState(false);
+ const [error, setError] = useState(null);
+ const [localRefresh, setLocalRefresh] = useState(0);
+
+ useEffect(() => {
+ if (!repo) return;
+
+ // Determine if this is a branch switch (we already have data)
+ const hasExistingData = tree.length > 0;
+ if (hasExistingData) {
+ setIsSwitchingBranch(true);
+ } else {
+ setLoading(true);
+ }
+ setError(null);
+
+ // Construct headers manually
+ let headers = {};
+ try {
+ const token = localStorage.getItem("github_token");
+ if (token) {
+ headers = { Authorization: `Bearer ${token}` };
+ }
+ } catch (e) {
+ console.warn("Unable to read github_token", e);
+ }
+
+ // Add cache busting + selected branch ref
+ const refParam = branch ? `&ref=${encodeURIComponent(branch)}` : "";
+ const cacheBuster = `?_t=${Date.now()}${refParam}`;
+
+ let cancelled = false;
+
+ fetch(`/api/repos/${repo.owner}/${repo.name}/tree${cacheBuster}`, { headers })
+ .then(async (res) => {
+ if (!res.ok) {
+ const errData = await res.json().catch(() => ({}));
+ throw new Error(errData.detail || "Failed to load files");
+ }
+ return res.json();
+ })
+ .then((data) => {
+ if (cancelled) return;
+ if (data.files && Array.isArray(data.files)) {
+ setTree(buildTree(data.files));
+ setError(null);
+ } else {
+ setError("No files found in repository");
+ }
+ })
+ .catch((err) => {
+ if (cancelled) return;
+ setError(err.message);
+ console.error("FileTree error:", err);
+ })
+ .finally(() => {
+ if (cancelled) return;
+ setIsSwitchingBranch(false);
+ setLoading(false);
+ });
+
+ return () => { cancelled = true; };
+ }, [repo, branch, refreshTrigger, localRefresh]); // eslint-disable-line react-hooks/exhaustive-deps
+
+ const handleRefresh = () => {
+ setLocalRefresh(prev => prev + 1);
+ };
+
+ // Theme matching parent component
+ const theme = {
+ border: "#27272A",
+ textPrimary: "#EDEDED",
+ textSecondary: "#A1A1AA",
+ accent: "#D95C3D",
+ warningText: "#F59E0B",
+ warningBg: "rgba(245, 158, 11, 0.1)",
+ warningBorder: "rgba(245, 158, 11, 0.2)",
+ };
+
+ const styles = {
+ header: {
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "space-between",
+ padding: "8px 20px 8px 10px",
+ marginBottom: "8px",
+ borderBottom: `1px solid ${theme.border}`,
+ },
+ headerTitle: {
+ fontSize: "12px",
+ fontWeight: "600",
+ color: theme.textSecondary,
+ textTransform: "uppercase",
+ letterSpacing: "0.5px",
+ },
+ refreshButton: {
+ backgroundColor: "transparent",
+ border: `1px solid ${theme.border}`,
+ color: theme.textSecondary,
+ padding: "4px 8px",
+ borderRadius: "4px",
+ fontSize: "11px",
+ cursor: loading ? "not-allowed" : "pointer",
+ display: "flex",
+ alignItems: "center",
+ gap: "4px",
+ transition: "all 0.2s",
+ opacity: loading ? 0.5 : 1,
+ },
+ switchingBar: {
+ padding: "6px 20px",
+ fontSize: "11px",
+ color: theme.textSecondary,
+ backgroundColor: "rgba(59, 130, 246, 0.06)",
+ borderBottom: `1px solid ${theme.border}`,
+ },
+ loadingText: {
+ padding: "0 20px",
+ color: theme.textSecondary,
+ fontSize: "13px",
+ },
+ errorBox: {
+ padding: "12px 20px",
+ color: theme.warningText,
+ fontSize: "12px",
+ backgroundColor: theme.warningBg,
+ border: `1px solid ${theme.warningBorder}`,
+ borderRadius: "6px",
+ margin: "0 10px",
+ },
+ emptyText: {
+ padding: "0 20px",
+ color: theme.textSecondary,
+ fontSize: "13px",
+ },
+ treeContainer: {
+ fontSize: "13px",
+ color: theme.textSecondary,
+ padding: "0 10px 20px 10px",
+ },
+ };
+
+ return (
+
+ {/* Header with Refresh Button */}
+
+
Files
+
{
+ if (!loading) {
+ e.currentTarget.style.backgroundColor = "rgba(255, 255, 255, 0.05)";
+ }
+ }}
+ onMouseOut={(e) => {
+ e.currentTarget.style.backgroundColor = "transparent";
+ }}
+ >
+
+
+
+ {loading ? "..." : "Refresh"}
+
+
+
+ {/* Branch switch indicator (shown above existing tree, doesn't clear it) */}
+ {isSwitchingBranch && (
+
Loading branch...
+ )}
+
+ {/* Content */}
+ {loading && tree.length === 0 && (
+
Loading files...
+ )}
+
+ {!loading && !isSwitchingBranch && error && (
+
{error}
+ )}
+
+ {!loading && !isSwitchingBranch && !error && tree.length === 0 && (
+
No files found
+ )}
+
+ {tree.length > 0 && (
+
+ {tree.map((node) => (
+
+ ))}
+
+ )}
+
+ );
+}
+
+// Recursive Node Component
+function TreeNode({ node, level }) {
+ const [expanded, setExpanded] = useState(false);
+ const isFolder = node.children && node.children.length > 0;
+
+ const icon = isFolder ? (expanded ? "📂" : "📁") : "📄";
+
+ return (
+
+
isFolder && setExpanded(!expanded)}
+ style={{
+ padding: "4px 0",
+ paddingLeft: `${level * 12}px`,
+ cursor: isFolder ? "pointer" : "default",
+ display: "flex",
+ alignItems: "center",
+ gap: "6px",
+ color: isFolder ? "#EDEDED" : "#A1A1AA",
+ whiteSpace: "nowrap"
+ }}
+ >
+ {icon}
+ {node.name}
+
+
+ {isFolder && expanded && (
+
+ {node.children.map(child => (
+
+ ))}
+
+ )}
+
+ );
+}
+
/**
 * Convert a flat list of { path, type } file records into a nested tree of
 * { name, path, type, children } nodes.
 *
 * Intermediate path segments become "tree" nodes; the final segment keeps the
 * record's own `type`. At every level siblings are ordered folders-first
 * (a node counts as a folder when it has children), then alphabetically.
 */
function buildTree(files) {
  const root = [];

  for (const file of files) {
    const segments = file.path.split("/");
    let siblings = root;
    let accumulated = "";

    segments.forEach((segment, depth) => {
      accumulated = accumulated ? `${accumulated}/${segment}` : segment;
      const isLeaf = depth === segments.length - 1;

      // Reuse an already-created node for this segment at this level, if any.
      let node = siblings.find((candidate) => candidate.name === segment);
      if (!node) {
        node = {
          name: segment,
          path: accumulated,
          type: isLeaf ? file.type : "tree",
          children: [],
        };
        siblings.push(node);
      }

      // Descend into the node's children for the next path segment.
      if (!isLeaf) {
        siblings = node.children;
      }
    });
  }

  // Recursively order every sibling list: folders first, then by name.
  const orderSiblings = (nodes) => {
    nodes.sort((left, right) => {
      const leftIsFolder = left.children.length > 0;
      const rightIsFolder = right.children.length > 0;
      if (leftIsFolder !== rightIsFolder) {
        return leftIsFolder ? -1 : 1;
      }
      return left.name.localeCompare(right.name);
    });
    for (const node of nodes) {
      if (node.children.length > 0) {
        orderSiblings(node.children);
      }
    }
  };

  orderSiblings(root);
  return root;
}
\ No newline at end of file
diff --git a/frontend/components/FlowViewer.jsx b/frontend/components/FlowViewer.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..71ef1e0b71603c85dd38ed37b2e930ebf6405bde
--- /dev/null
+++ b/frontend/components/FlowViewer.jsx
@@ -0,0 +1,659 @@
+import React, { useEffect, useState, useCallback, useRef } from "react";
+import ReactFlow, { Background, Controls, MiniMap } from "reactflow";
+import "reactflow/dist/style.css";
+
+/* ------------------------------------------------------------------ */
+/* Node type → colour mapping */
+/* ------------------------------------------------------------------ */
+// Border/background palette per workflow node type, applied to ReactFlow nodes.
+const NODE_COLOURS = {
+ agent: { border: "#ff7a3c", bg: "#20141a" },
+ router: { border: "#6c8cff", bg: "#141828" },
+ tool: { border: "#3a3b4d", bg: "#141821" },
+ tool_group: { border: "#3a3b4d", bg: "#141821" },
+ user: { border: "#4caf88", bg: "#14211a" },
+ output: { border: "#9c6cff", bg: "#1a1428" },
+};
+// Neutral fallback palette for node types not listed above.
+const DEFAULT_COLOUR = { border: "#3a3b4d", bg: "#141821" };
+
// Look up the palette for a node type, falling back to the neutral default
// when the type has no dedicated entry in NODE_COLOURS.
function colourFor(type) {
  const palette = NODE_COLOURS[type];
  return palette ? palette : DEFAULT_COLOUR;
}
+
+// Accent colour per execution style, used for topology badges and headers.
+const STYLE_COLOURS = {
+ single_task: "#6c8cff",
+ react_loop: "#ff7a3c",
+ crew_pipeline: "#4caf88",
+};
+
+// Short human-readable badge label per execution style id.
+const STYLE_LABELS = {
+ single_task: "Dispatch",
+ react_loop: "ReAct Loop",
+ crew_pipeline: "Pipeline",
+};
+
+/* ------------------------------------------------------------------ */
+/* TopologyCard — single clickable topology card */
+/* ------------------------------------------------------------------ */
+function TopologyCard({ topology, isActive, onClick }) {
+ const styleColor = STYLE_COLOURS[topology.execution_style] || "#9a9bb0";
+ const agentCount = topology.agents_used?.length || 0;
+
+ return (
+
+
+ {topology.icon}
+
+ {STYLE_LABELS[topology.execution_style] || topology.execution_style}
+
+
+
+ {topology.name}
+
+ {topology.description}
+
+ {agentCount} agent{agentCount !== 1 ? "s" : ""}
+
+
+ );
+}
+
+// Inline styles for a single TopologyCard in the selector grid.
+const cardStyles = {
+ // Clickable card container (fixed-ish width, no shrink inside the scroll row).
+ card: {
+ display: "flex",
+ flexDirection: "column",
+ gap: 4,
+ padding: "10px 12px",
+ borderRadius: 8,
+ border: "1px solid #1e1f30",
+ cursor: "pointer",
+ textAlign: "left",
+ minWidth: 170,
+ maxWidth: 200,
+ flexShrink: 0,
+ transition: "border-color 0.2s, background-color 0.2s",
+ },
+ // Top row: topology icon on the left, style badge on the right.
+ cardTop: {
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "space-between",
+ gap: 6,
+ },
+ icon: {
+ fontSize: 18,
+ },
+ // Small uppercase execution-style badge; border colour is set inline.
+ styleBadge: {
+ fontSize: 9,
+ fontWeight: 700,
+ textTransform: "uppercase",
+ letterSpacing: "0.05em",
+ padding: "1px 6px",
+ borderRadius: 4,
+ border: "1px solid",
+ },
+ name: {
+ fontSize: 12,
+ fontWeight: 600,
+ lineHeight: 1.3,
+ },
+ // Description clamped to two lines via the legacy -webkit-box pattern.
+ desc: {
+ fontSize: 10,
+ color: "#71717A",
+ lineHeight: 1.3,
+ overflow: "hidden",
+ display: "-webkit-box",
+ WebkitLineClamp: 2,
+ WebkitBoxOrient: "vertical",
+ },
+ // "N agents" footer line.
+ agentCount: {
+ fontSize: 9,
+ color: "#52525B",
+ fontWeight: 600,
+ marginTop: 2,
+ },
+};
+
+/* ------------------------------------------------------------------ */
+/* TopologyPanel — card grid grouped by category */
+/* ------------------------------------------------------------------ */
+function TopologyPanel({
+ topologies,
+ activeTopology,
+ autoMode,
+ autoResult,
+ onSelect,
+ onToggleAuto,
+}) {
+ const systems = topologies.filter((t) => t.category === "system");
+ const pipelines = topologies.filter((t) => t.category === "pipeline");
+
+ return (
+
+ {/* Auto-detect toggle */}
+
+
+
+
+
+
+ Auto
+
+ {autoMode && autoResult && (
+
+ Detected: {autoResult.icon} {autoResult.name}
+ {autoResult.confidence != null && (
+
+ {" "}({Math.round(autoResult.confidence * 100)}%)
+
+ )}
+
+ )}
+
+
+ {/* System architectures */}
+
+
System Architectures
+
+ {systems.map((t) => (
+ onSelect(t.id)}
+ />
+ ))}
+
+
+
+ {/* Task pipelines */}
+
+
Task Pipelines
+
+ {pipelines.map((t) => (
+ onSelect(t.id)}
+ />
+ ))}
+
+
+
+ );
+}
+
+// Inline styles for the TopologyPanel (auto-detect row + grouped card rows).
+const panelStyles = {
+ root: {
+ padding: "8px 16px 12px",
+ borderBottom: "1px solid #1e1f30",
+ backgroundColor: "#08090e",
+ },
+ // Row holding the Auto toggle and the detection hint text.
+ autoRow: {
+ display: "flex",
+ alignItems: "center",
+ gap: 10,
+ marginBottom: 10,
+ },
+ autoBtn: {
+ display: "flex",
+ alignItems: "center",
+ gap: 5,
+ padding: "4px 10px",
+ borderRadius: 6,
+ border: "1px solid #27272A",
+ background: "transparent",
+ fontSize: 11,
+ fontWeight: 600,
+ cursor: "pointer",
+ transition: "border-color 0.15s, color 0.15s",
+ },
+ autoHint: {
+ fontSize: 11,
+ color: "#9a9bb0",
+ },
+ section: {
+ marginBottom: 8,
+ },
+ // "System Architectures" / "Task Pipelines" group headings.
+ sectionLabel: {
+ fontSize: 9,
+ fontWeight: 700,
+ textTransform: "uppercase",
+ letterSpacing: "0.08em",
+ color: "#52525B",
+ marginBottom: 6,
+ },
+ // Horizontally scrollable card strip (scrollbar hidden on Firefox).
+ cardRow: {
+ display: "flex",
+ gap: 8,
+ overflowX: "auto",
+ scrollbarWidth: "none",
+ paddingBottom: 2,
+ },
+};
+
+/* ------------------------------------------------------------------ */
+/* Main FlowViewer component */
+/* ------------------------------------------------------------------ */
+export default function FlowViewer() {
+ const [nodes, setNodes] = useState([]);
+ const [edges, setEdges] = useState([]);
+ const [loading, setLoading] = useState(false);
+ const [error, setError] = useState("");
+
+ // Topology state
+ const [topologies, setTopologies] = useState([]);
+ const [activeTopology, setActiveTopology] = useState(null);
+ const [topologyMeta, setTopologyMeta] = useState(null);
+
+ // Auto-detection state
+ const [autoMode, setAutoMode] = useState(false);
+ const [autoResult, setAutoResult] = useState(null);
+ const [autoTestMessage, setAutoTestMessage] = useState("");
+
+ const initialLoadDone = useRef(false);
+
+ /* ---------- Load topology list on mount ---------- */
+ useEffect(() => {
+ (async () => {
+ try {
+ const [topoRes, prefRes] = await Promise.all([
+ fetch("/api/flow/topologies"),
+ fetch("/api/settings/topology"),
+ ]);
+ if (topoRes.ok) {
+ const data = await topoRes.json();
+ setTopologies(data);
+ }
+ if (prefRes.ok) {
+ const { topology } = await prefRes.json();
+ if (topology) {
+ setActiveTopology(topology);
+ }
+ }
+ } catch (e) {
+ console.warn("Failed to load topologies:", e);
+ }
+ initialLoadDone.current = true;
+ })();
+ }, []);
+
+ /* ---------- Load graph when topology changes ---------- */
+ const loadGraph = useCallback(async (topologyId) => {
+ setLoading(true);
+ setError("");
+ try {
+ const url = topologyId
+ ? `/api/flow/current?topology=${encodeURIComponent(topologyId)}`
+ : "/api/flow/current";
+ const res = await fetch(url);
+ const data = await res.json();
+ if (!res.ok) throw new Error(data.error || "Failed to load flow");
+
+ // Track topology metadata from response
+ if (data.topology_id) {
+ setTopologyMeta({
+ id: data.topology_id,
+ name: data.topology_name,
+ icon: data.topology_icon,
+ description: data.topology_description,
+ execution_style: data.execution_style,
+ agents_used: topologies.find((t) => t.id === data.topology_id)?.agents_used || [],
+ });
+ }
+
+ // Build ReactFlow nodes
+ const RFnodes = data.nodes.map((n, i) => {
+ const nodeType = n.type || "default";
+ const colour = colourFor(nodeType);
+ const d = n.data || {};
+
+ const label = d.label || n.label || n.id;
+ const description = d.description || n.description || "";
+ const model = d.model;
+ const mode = d.mode;
+
+ const pos = n.position || {
+ x: 50 + (i % 3) * 250,
+ y: 50 + Math.floor(i / 3) * 180,
+ };
+
+ return {
+ id: n.id,
+ data: {
+ label: (
+
+
+ {label}
+
+ {model && (
+
+ {model}
+
+ )}
+ {mode && (
+
+ {mode}
+
+ )}
+
+ {description}
+
+
+ ),
+ },
+ position: pos,
+ type: "default",
+ style: {
+ borderRadius: 12,
+ padding: "12px 16px",
+ border: `2px solid ${colour.border}`,
+ background: colour.bg,
+ color: "#f5f5f7",
+ fontSize: 13,
+ minWidth: 180,
+ maxWidth: 220,
+ },
+ };
+ });
+
+ // Build ReactFlow edges
+ const RFedges = data.edges.map((e) => ({
+ id: e.id,
+ source: e.source,
+ target: e.target,
+ label: e.label,
+ animated: e.animated !== false,
+ style: { stroke: "#7a7b8e", strokeWidth: 2 },
+ labelStyle: { fill: "#c3c5dd", fontSize: 11, fontWeight: 500 },
+ labelBgStyle: { fill: "#101117", fillOpacity: 0.9 },
+ ...(e.type === "bidirectional" && {
+ markerEnd: { type: "arrowclosed", color: "#7a7b8e" },
+ markerStart: { type: "arrowclosed", color: "#7a7b8e" },
+ animated: false,
+ style: { stroke: "#555670", strokeWidth: 1.5, strokeDasharray: "5 5" },
+ }),
+ }));
+
+ setNodes(RFnodes);
+ setEdges(RFedges);
+ } catch (e) {
+ console.error(e);
+ setError(e.message);
+ } finally {
+ setLoading(false);
+ }
+ }, [topologies]);
+
+ // Load graph whenever activeTopology changes
+ useEffect(() => {
+ loadGraph(activeTopology);
+ }, [activeTopology, loadGraph]);
+
+ /* ---------- Topology selection handler ---------- */
+ const handleTopologyChange = useCallback(
+ async (newTopologyId) => {
+ setActiveTopology(newTopologyId);
+ setAutoMode(false); // Manual selection disables auto
+ // Persist preference (fire-and-forget)
+ try {
+ await fetch("/api/settings/topology", {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({ topology: newTopologyId }),
+ });
+ } catch (e) {
+ console.warn("Failed to save topology preference:", e);
+ }
+ },
+ []
+ );
+
+ /* ---------- Auto-detection ---------- */
+ const handleToggleAuto = useCallback(() => {
+ setAutoMode((prev) => !prev);
+ if (!autoMode) {
+ setAutoResult(null);
+ }
+ }, [autoMode]);
+
+ const handleAutoClassify = useCallback(
+ async (message) => {
+ if (!message.trim()) return;
+ try {
+ const res = await fetch("/api/flow/classify", {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({ message }),
+ });
+ if (!res.ok) return;
+ const data = await res.json();
+ const recommendedId = data.recommended_topology;
+ const topo = topologies.find((t) => t.id === recommendedId);
+ setAutoResult({
+ id: recommendedId,
+ name: topo?.name || recommendedId,
+ icon: topo?.icon || "",
+ confidence: data.confidence,
+ alternatives: data.alternatives || [],
+ });
+ setActiveTopology(recommendedId);
+ } catch (e) {
+ console.warn("Auto-classify failed:", e);
+ }
+ },
+ [topologies]
+ );
+
+ // Debounced auto-classify when test message changes
+ useEffect(() => {
+ if (!autoMode || !autoTestMessage.trim()) return;
+ const t = setTimeout(() => handleAutoClassify(autoTestMessage), 500);
+ return () => clearTimeout(t);
+ }, [autoTestMessage, autoMode, handleAutoClassify]);
+
+ /* ---------- Render ---------- */
+ const activeStyleColor = STYLE_COLOURS[topologyMeta?.execution_style] || "#9a9bb0";
+
+ return (
+
+ {/* Header */}
+
+
+
Agent Workflow
+
+ Visual view of the multi-agent system that GitPilot uses to
+ plan and apply changes to your repositories.
+
+
+
+ {topologyMeta && (
+
+ {topologyMeta.icon}
+ {topologyMeta.name}
+
+ {STYLE_LABELS[topologyMeta.execution_style] || topologyMeta.execution_style}
+
+ {topologyMeta.agents_used?.length || 0} agents
+
+ )}
+ {loading &&
Loading... }
+
+
+
+ {/* Topology selector panel */}
+ {topologies.length > 0 && (
+
+ )}
+
+ {/* Auto-detection test input (shown when auto mode is on) */}
+ {autoMode && (
+
+
+ Test auto-detection: type a task description to see which topology is recommended
+
+
setAutoTestMessage(e.target.value)}
+ style={autoInputStyles.input}
+ />
+ {autoResult && autoResult.alternatives?.length > 0 && (
+
+ Alternatives:
+ {autoResult.alternatives.slice(0, 3).map((alt) => {
+ const altTopo = topologies.find((t) => t.id === alt.id);
+ return (
+ handleTopologyChange(alt.id)}
+ >
+ {altTopo?.icon} {altTopo?.name || alt.id}
+
+ {alt.confidence != null ? ` ${Math.round(alt.confidence * 100)}%` : ""}
+
+
+ );
+ })}
+
+ )}
+
+ )}
+
+ {/* Description bar */}
+ {topologyMeta && topologyMeta.description && !autoMode && (
+
+ {topologyMeta.icon} {topologyMeta.description}
+
+ )}
+
+ {/* ReactFlow canvas */}
+
+ {error ? (
+
+ ) : (
+
+
+ {
+ const border = node.style?.border || "";
+ if (border.includes("#ff7a3c")) return "#ff7a3c";
+ if (border.includes("#6c8cff")) return "#6c8cff";
+ if (border.includes("#4caf88")) return "#4caf88";
+ if (border.includes("#9c6cff")) return "#9c6cff";
+ return "#3a3b4d";
+ }}
+ maskColor="rgba(0, 0, 0, 0.6)"
+ />
+
+
+ )}
+
+
+ );
+}
+
+// Inline styles for FlowViewer's auto-detection test input area.
+const autoInputStyles = {
+ wrap: {
+ padding: "8px 16px 10px",
+ borderBottom: "1px solid #1e1f30",
+ backgroundColor: "#0c0d14",
+ },
+ label: {
+ fontSize: 10,
+ color: "#71717A",
+ marginBottom: 6,
+ },
+ // Monospace text input for the sample task description.
+ input: {
+ width: "100%",
+ padding: "8px 12px",
+ borderRadius: 6,
+ border: "1px solid #27272A",
+ background: "#08090e",
+ color: "#e0e1f0",
+ fontSize: 12,
+ fontFamily: "monospace",
+ outline: "none",
+ boxSizing: "border-box",
+ },
+ // Wrapping row listing alternative topology suggestions.
+ altRow: {
+ display: "flex",
+ alignItems: "center",
+ gap: 6,
+ marginTop: 6,
+ flexWrap: "wrap",
+ },
+ // Small pill button for selecting an alternative topology.
+ altBtn: {
+ padding: "2px 8px",
+ borderRadius: 4,
+ border: "1px solid #27272A",
+ background: "transparent",
+ color: "#9a9bb0",
+ fontSize: 10,
+ cursor: "pointer",
+ },
+};
diff --git a/frontend/components/Footer.jsx b/frontend/components/Footer.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..71e8a0bca926c639c885bde541d6b3f7c9c10fbd
--- /dev/null
+++ b/frontend/components/Footer.jsx
@@ -0,0 +1,48 @@
+import React from "react";
+
+export default function Footer() {
+ return (
+
+ );
+}
\ No newline at end of file
diff --git a/frontend/components/LlmSettings.jsx b/frontend/components/LlmSettings.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..e90b2180af222ad7fda08a9cd18852ed806eaf44
--- /dev/null
+++ b/frontend/components/LlmSettings.jsx
@@ -0,0 +1,623 @@
+import React, { useEffect, useMemo, useState } from "react";
+import { testProvider } from "../utils/api";
+
+// Provider ids recognised by the LLM settings UI, in display order.
+const PROVIDERS = ["ollabridge", "openai", "claude", "watsonx", "ollama"];
+
+// Human-readable display name for each provider id.
+const PROVIDER_LABELS = {
+ ollabridge: "OllaBridge Cloud",
+ openai: "OpenAI",
+ claude: "Claude",
+ watsonx: "Watsonx",
+ ollama: "Ollama",
+};
+
+// Authentication modes offered for the OllaBridge provider.
+const AUTH_MODES = [
+ { id: "device", label: "Device Pairing", icon: "📱" },
+ { id: "apikey", label: "API Key", icon: "🔑" },
+ { id: "local", label: "Local Trust", icon: "🏠" },
+];
+
+function LoadingState({ loadingMessage, loadingSlow, onRetry }) {
+ return (
+
+
+
+
AI Providers
+
Admin / LLM Settings
+
{loadingMessage}
+
+ {loadingSlow && (
+
+
+ This is taking longer than expected. The backend may still be
+ starting or the settings endpoint may be slow.
+
+
+ Retry
+
+
+ )}
+
+
+ );
+}
+
+export default function LlmSettings() {
+ const [settings, setSettings] = useState(null);
+ const [initialLoading, setInitialLoading] = useState(true);
+ const [loadingSlow, setLoadingSlow] = useState(false);
+
+ const [saving, setSaving] = useState(false);
+ const [error, setError] = useState("");
+ const [savedMsg, setSavedMsg] = useState("");
+
+ const [modelsByProvider, setModelsByProvider] = useState({});
+ const [modelsError, setModelsError] = useState("");
+ const [loadingModelsFor, setLoadingModelsFor] = useState("");
+
+ const [testResult, setTestResult] = useState(null);
+ const [testing, setTesting] = useState(false);
+
+ const [authMode, setAuthMode] = useState("local");
+ const [pairCode, setPairCode] = useState("");
+ const [pairing, setPairing] = useState(false);
+ const [pairResult, setPairResult] = useState(null);
+
+ const loadingMessage = useMemo(() => {
+ if (loadingSlow) {
+ return "Still loading provider configuration…";
+ }
+ return "Loading current configuration…";
+ }, [loadingSlow]);
+
+ const loadSettings = async () => {
+ setInitialLoading(true);
+ setError("");
+ setLoadingSlow(false);
+
+ let slowTimer;
+ try {
+ slowTimer = window.setTimeout(() => {
+ setLoadingSlow(true);
+ }, 1500);
+
+ const res = await fetch("/api/settings");
+ const data = await res.json();
+
+ if (!res.ok) {
+ throw new Error(data.error || "Failed to load settings");
+ }
+
+ setSettings(data);
+ } catch (e) {
+ console.error(e);
+ setError(e.message || "Failed to load settings");
+ } finally {
+ window.clearTimeout(slowTimer);
+ setInitialLoading(false);
+ }
+ };
+
+ useEffect(() => {
+ loadSettings();
+ }, []);
+
+ const updateField = (section, field, value) => {
+ setSettings((prev) => ({
+ ...prev,
+ [section]: {
+ ...prev[section],
+ [field]: value,
+ },
+ }));
+ };
+
+ const handleSave = async () => {
+ setSaving(true);
+ setError("");
+ setSavedMsg("");
+
+ try {
+ const res = await fetch("/api/settings/llm", {
+ method: "PUT",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify(settings),
+ });
+
+ const data = await res.json();
+ if (!res.ok) throw new Error(data.error || "Failed to save settings");
+
+ setSettings(data);
+ setSavedMsg("Settings saved successfully!");
+ setTimeout(() => setSavedMsg(""), 3000);
+ } catch (e) {
+ console.error(e);
+ setError(e.message || "Failed to save settings");
+ } finally {
+ setSaving(false);
+ }
+ };
+
+ const loadModelsForProvider = async (provider) => {
+ setModelsError("");
+ setLoadingModelsFor(provider);
+
+ try {
+ const res = await fetch(`/api/settings/models?provider=${provider}`);
+ const data = await res.json();
+
+ if (!res.ok || data.error) {
+ throw new Error(data.error || "Failed to load models");
+ }
+
+ setModelsByProvider((prev) => ({
+ ...prev,
+ [provider]: data.models || [],
+ }));
+ } catch (e) {
+ console.error(e);
+ setModelsError(e.message || "Failed to load models");
+ } finally {
+ setLoadingModelsFor("");
+ }
+ };
+
+ const handlePair = async () => {
+ if (!pairCode.trim()) return;
+
+ setPairing(true);
+ setPairResult(null);
+
+ try {
+ const baseUrl =
+ settings?.ollabridge?.base_url || "https://ruslanmv-ollabridge.hf.space";
+
+ const res = await fetch("/api/ollabridge/pair", {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({ base_url: baseUrl, code: pairCode.trim() }),
+ });
+
+ const data = await res.json();
+
+ if (data.success) {
+ setPairResult({ ok: true, message: "Paired successfully!" });
+ if (data.token) {
+ updateField("ollabridge", "api_key", data.token);
+ }
+ } else {
+ setPairResult({
+ ok: false,
+ message: data.error || "Pairing failed",
+ });
+ }
+ } catch (e) {
+ setPairResult({ ok: false, message: e.message || "Pairing failed" });
+ } finally {
+ setPairing(false);
+ }
+ };
+
+ const handleTestConnection = async () => {
+ setTesting(true);
+ setTestResult(null);
+
+ try {
+ const activeProvider = settings?.provider || "ollama";
+ const config = { provider: activeProvider };
+
+ if (activeProvider === "openai" && settings?.openai) {
+ config.openai = {
+ api_key: settings.openai.api_key,
+ base_url: settings.openai.base_url,
+ model: settings.openai.model,
+ };
+ } else if (activeProvider === "claude" && settings?.claude) {
+ config.claude = {
+ api_key: settings.claude.api_key,
+ base_url: settings.claude.base_url,
+ model: settings.claude.model,
+ };
+ } else if (activeProvider === "watsonx" && settings?.watsonx) {
+ config.watsonx = {
+ api_key: settings.watsonx.api_key,
+ project_id: settings.watsonx.project_id,
+ base_url: settings.watsonx.base_url,
+ model_id: settings.watsonx.model_id,
+ };
+ } else if (activeProvider === "ollama" && settings?.ollama) {
+ config.ollama = {
+ base_url: settings.ollama.base_url,
+ model: settings.ollama.model,
+ };
+ } else if (activeProvider === "ollabridge" && settings?.ollabridge) {
+ config.ollabridge = {
+ base_url: settings.ollabridge.base_url,
+ model: settings.ollabridge.model,
+ api_key: settings.ollabridge.api_key,
+ };
+ }
+
+ const result = await testProvider(config);
+ setTestResult(result);
+ } catch (err) {
+ setTestResult({
+ health: "error",
+ warning: err.message || "Test failed",
+ });
+ } finally {
+ setTesting(false);
+ }
+ };
+
+ if (initialLoading) {
+ return (
+
+ );
+ }
+
+ if (!settings) {
+ return (
+
+
+
AI Providers
+
Admin / LLM Settings
+
+ {error || "Unable to load current configuration."}
+
+
+ Retry
+
+
+
+ );
+ }
+
+ const { provider } = settings;
+ const availableModels = modelsByProvider[provider] || [];
+
+ return (
+
+
AI Providers
+
+ Choose which LLM provider GitPilot should use for planning and agent
+ workflows. Provider settings are stored on the server.
+
+
+ {error &&
{error}
}
+ {savedMsg &&
{savedMsg}
}
+
+
+
Active provider
+
+ {PROVIDERS.map((p) => (
+ setSettings((prev) => ({ ...prev, provider: p }))}
+ >
+ {PROVIDER_LABELS[p] || p}
+
+ ))}
+
+
+
+ {provider === "ollabridge" && (
+
+
OllaBridge Cloud Configuration
+
+ Connect to OllaBridge Cloud or any OllaBridge instance for LLM
+ inference. No API key required for public endpoints.
+
+
+
Authentication Mode
+
+ {AUTH_MODES.map((m) => (
+ setAuthMode(m.id)}
+ >
+ {m.icon}
+ {m.label}
+
+ ))}
+
+
+ {authMode === "device" && (
+
+
+ Enter the pairing code from your OllaBridge console and click
+ Pair.
+
+
+ setPairCode(e.target.value.toUpperCase())}
+ onKeyDown={(e) => e.key === "Enter" && handlePair()}
+ />
+
+ {pairing ? "Pairing…" : "Pair"}
+
+
+ {pairResult && (
+
+ {pairResult.message}
+
+ )}
+
+ )}
+
+
Base URL
+
+ updateField("ollabridge", "base_url", e.target.value)
+ }
+ placeholder="https://your-ollabridge-endpoint"
+ />
+
+ {(authMode === "apikey" || authMode === "local") && (
+ <>
+
API Key
+
+ updateField("ollabridge", "api_key", e.target.value)
+ }
+ placeholder="Optional API key"
+ />
+ >
+ )}
+
+
Model
+
+
+ updateField("ollabridge", "model", e.target.value)
+ }
+ placeholder="qwen2.5:1.5b"
+ />
+ loadModelsForProvider("ollabridge")}
+ disabled={loadingModelsFor === "ollabridge"}
+ >
+ {loadingModelsFor === "ollabridge" ? "Loading…" : "Load Models"}
+
+
+
+ )}
+
+ {provider === "openai" && (
+
+
OpenAI Configuration
+
+
API Key
+
updateField("openai", "api_key", e.target.value)}
+ placeholder="sk-..."
+ />
+
+
Base URL
+
updateField("openai", "base_url", e.target.value)}
+ placeholder="Optional custom base URL"
+ />
+
+
Model
+
updateField("openai", "model", e.target.value)}
+ placeholder="gpt-4o-mini"
+ />
+
+ )}
+
+ {provider === "claude" && (
+
+
Claude Configuration
+
+
API Key
+
updateField("claude", "api_key", e.target.value)}
+ placeholder="Anthropic API key"
+ />
+
+
Base URL
+
updateField("claude", "base_url", e.target.value)}
+ placeholder="Optional custom base URL"
+ />
+
+
Model
+
updateField("claude", "model", e.target.value)}
+ placeholder="claude-sonnet-4-5"
+ />
+
+ )}
+
+ {provider === "watsonx" && (
+
+ )}
+
+ {provider === "ollama" && (
+
+
Ollama Configuration
+
+
Base URL
+
updateField("ollama", "base_url", e.target.value)}
+ placeholder="http://localhost:11434"
+ />
+
+
Model
+
+ updateField("ollama", "model", e.target.value)}
+ placeholder="llama3"
+ />
+ loadModelsForProvider("ollama")}
+ disabled={loadingModelsFor === "ollama"}
+ >
+ {loadingModelsFor === "ollama" ? "Loading…" : "Load Models"}
+
+
+
+ )}
+
+ {availableModels.length > 0 && (
+
+
Available Models
+
+ {availableModels.map((model) => (
+ updateField(provider, "model", model)}
+ >
+ {model}
+
+ ))}
+
+
+ )}
+
+ {modelsError &&
{modelsError}
}
+
+ {testResult && (
+
+ {testResult.health === "ok"
+ ? testResult.details || "Provider connection successful."
+ : testResult.warning || "Provider connection failed."}
+
+ )}
+
+
+
+ {saving ? "Saving…" : "Save Settings"}
+
+
+
+ {testing ? "Testing…" : "Test Connection"}
+
+
+
+ );
+}
\ No newline at end of file
diff --git a/frontend/components/LoginPage.jsx b/frontend/components/LoginPage.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..9d0fd67d1300c080de52c6ce91b0e03a0ce503dc
--- /dev/null
+++ b/frontend/components/LoginPage.jsx
@@ -0,0 +1,544 @@
+// frontend/components/LoginPage.jsx
+import React, { useState, useEffect, useRef } from "react";
+import { apiUrl, safeFetchJSON } from "../utils/api.js";
+import { initApp } from "../utils/appInit.js";
+
+/**
+ * GitPilot – Enterprise Agentic Login
+ * Theme: "Claude Code" / Anthropic Enterprise (Dark + Warm Orange)
+ */
+
+export default function LoginPage({ onAuthenticated, backendReady = false }) {
+ // Auth State
+ const [authProcessing, setAuthProcessing] = useState(false);
+ const [error, setError] = useState("");
+
+ // Mode State: 'loading' | 'web' (Has Secret) | 'device' (No Secret)
+ const [mode, setMode] = useState("loading");
+
+ // Device Flow State
+ const [deviceData, setDeviceData] = useState(null);
+ const pollTimer = useRef(null);
+ const stopPolling = useRef(false); // Flag to safely stop async polling
+
+ // Web Flow State
+ const [missingClientId, setMissingClientId] = useState(false);
+
+ // REF FIX: Prevents React StrictMode from running the auth exchange twice
+ const processingRef = useRef(false);
+ const authCheckDone = useRef(false);
+
+ // 1. Initialization Effect — runs once on mount AND when backendReady changes
+ useEffect(() => {
+ // Skip if already resolved
+ if (authCheckDone.current && mode !== "loading") return;
+
+ const params = new URLSearchParams(window.location.search);
+ const code = params.get("code");
+ const state = params.get("state");
+
+ // A. If returning from GitHub (Web Flow Callback)
+ if (code) {
+ if (!processingRef.current) {
+ processingRef.current = true;
+ setMode("web");
+ consumeOAuthCallback(code, state);
+ }
+ return;
+ }
+
+ // B. Use the shared singleton init — reuses App.jsx's result.
+ // No duplicate /api/auth/status calls, no separate retry loops.
+ initApp().then((result) => {
+ authCheckDone.current = true;
+ if (result.ready) {
+ setError("");
+ setMode(result.authMode === "web" ? "web" : "device");
+ } else {
+ // Backend unreachable — allow device flow as fallback
+ setError(result.error || "Backend unavailable");
+ setMode("device");
+ }
+ });
+
+ // Cleanup polling on unmount
+ return () => {
+ stopPolling.current = true;
+ if (pollTimer.current) clearTimeout(pollTimer.current);
+ };
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [backendReady]);
+
+ // ===========================================================================
+ // WEB FLOW LOGIC (Standard OAuth2)
+ // ===========================================================================
+
+ async function consumeOAuthCallback(code, state) {
+ const expectedState = sessionStorage.getItem("gitpilot_oauth_state");
+ if (state && expectedState && expectedState !== state) {
+ console.warn("OAuth state mismatch - proceeding with caution.");
+ }
+
+ setAuthProcessing(true);
+ setError("");
+ window.history.replaceState({}, document.title, window.location.pathname);
+
+ try {
+ const data = await safeFetchJSON(apiUrl("/api/auth/callback"), {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({ code, state: state || "" }),
+ });
+
+ handleSuccess(data);
+ } catch (err) {
+ console.error("Login Error:", err);
+ setError(err instanceof Error ? err.message : "Login failed.");
+ setAuthProcessing(false);
+ }
+ }
+
+ async function handleSignInWithGitHub() {
+ setError("");
+ setMissingClientId(false);
+ setAuthProcessing(true);
+
+ try {
+ const data = await safeFetchJSON(apiUrl("/api/auth/url"));
+
+ if (data.state) {
+ sessionStorage.setItem("gitpilot_oauth_state", data.state);
+ }
+
+ window.location.href = data.authorization_url;
+ } catch (err) {
+ console.error("Auth Start Error:", err);
+ // Check for missing client ID (404/500 errors)
+ if (err.message && (err.message.includes('404') || err.message.includes('500'))) {
+ setMissingClientId(true);
+ } else {
+ setError(err instanceof Error ? err.message : "Could not start sign-in.");
+ }
+ setAuthProcessing(false);
+ }
+ }
+
+ // ===========================================================================
+ // DEVICE FLOW LOGIC (No Client Secret Required)
+ // ===========================================================================
+
+ const startDeviceFlow = async () => {
+ setError("");
+ setAuthProcessing(true);
+ stopPolling.current = false; // Reset stop flag
+
+ try {
+ const data = await safeFetchJSON(apiUrl("/api/auth/device/code"), { method: "POST" });
+
+ // Handle Errors
+ if (data.error) {
+ if (data.error.includes("400") || data.error.includes("Bad Request")) {
+ throw new Error("Device Flow is disabled in GitHub. Please go to your GitHub App Settings > 'General' > 'Identifying and authorizing users' and check the box 'Enable Device Flow'.");
+ }
+ throw new Error(data.error);
+ }
+
+ if (!data.device_code) throw new Error("Invalid device code response");
+
+ setDeviceData(data);
+ setAuthProcessing(false);
+
+ // Start Polling (Recursive Timeout Pattern)
+ pollDeviceToken(data.device_code, data.interval || 5);
+
+ } catch (err) {
+ setError(err.message);
+ setAuthProcessing(false);
+ }
+ };
+
+ const pollDeviceToken = async (deviceCode, interval) => {
+ if (stopPolling.current) return;
+
+ try {
+ const response = await fetch(apiUrl("/api/auth/device/poll"), {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({ device_code: deviceCode })
+ });
+
+ // 1. Success (200)
+ if (response.status === 200) {
+ const data = await response.json();
+ handleSuccess(data);
+ return;
+ }
+
+ // 2. Pending (202) -> Continue Polling
+ if (response.status === 202) {
+ // Schedule next poll
+ pollTimer.current = setTimeout(
+ () => pollDeviceToken(deviceCode, interval),
+ interval * 1000
+ );
+ return;
+ }
+
+ // 3. Error (4xx/5xx) -> Stop Polling & Show Error
+ const errData = await response.json().catch(() => ({ error: "Unknown polling error" }));
+
+ // Special case: If it's just a 'slow_down' warning (sometimes 400), we just wait longer
+ if (errData.error === "slow_down") {
+ pollTimer.current = setTimeout(
+ () => pollDeviceToken(deviceCode, interval + 5),
+ (interval + 5) * 1000
+ );
+ return;
+ }
+
+ // Terminal errors
+ throw new Error(errData.error || `Polling failed: ${response.status}`);
+
+ } catch (e) {
+ console.error("Poll error:", e);
+ if (!stopPolling.current) {
+ setError(e.message || "Failed to connect to authentication server.");
+ setDeviceData(null); // Return to initial state
+ }
+ }
+ };
+
+ const handleManualCheck = async () => {
+ if (!deviceData?.device_code) return;
+
+ try {
+ const response = await fetch(apiUrl("/api/auth/device/poll"), {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({ device_code: deviceData.device_code })
+ });
+
+ if (response.status === 200) {
+ const data = await response.json();
+ handleSuccess(data);
+ } else if (response.status === 202) {
+ // Visual feedback for pending state
+ const btn = document.getElementById("manual-check-btn");
+ if (btn) {
+ const originalText = btn.innerText;
+ btn.innerText = "Still Pending...";
+ btn.disabled = true;
+ setTimeout(() => {
+ btn.innerText = originalText;
+ btn.disabled = false;
+ }, 2000);
+ }
+ }
+ } catch (e) {
+ console.error("Manual check failed", e);
+ }
+ };
+
+ const handleCancelDeviceFlow = () => {
+ stopPolling.current = true;
+ if (pollTimer.current) clearTimeout(pollTimer.current);
+ setDeviceData(null);
+ setError("");
+ };
+
+ // ===========================================================================
+ // SHARED HELPERS
+ // ===========================================================================
+
+ function handleSuccess(data) {
+ stopPolling.current = true; // Ensure polling stops
+ if (pollTimer.current) clearTimeout(pollTimer.current);
+
+ if (!data.access_token || !data.user) {
+ setError("Server returned incomplete session data.");
+ return;
+ }
+
+ try {
+ localStorage.setItem("github_token", data.access_token);
+ localStorage.setItem("github_user", JSON.stringify(data.user));
+ } catch (e) {
+ console.warn("LocalStorage access denied:", e);
+ }
+
+ if (typeof onAuthenticated === "function") {
+ onAuthenticated({
+ access_token: data.access_token,
+ user: data.user,
+ });
+ }
+ }
+
+ // --- Design Token System ---
+ const theme = {
+ bg: "#131316",
+ cardBg: "#1C1C1F",
+ border: "#27272A",
+ accent: "#D95C3D",
+ accentHover: "#C44F32",
+ textPrimary: "#EDEDED",
+ textSecondary: "#A1A1AA",
+ font: '"Söhne", "Inter", -apple-system, sans-serif',
+ };
+
+ const styles = {
+ container: {
+ minHeight: "100vh",
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "center",
+ backgroundColor: theme.bg,
+ fontFamily: theme.font,
+ color: theme.textPrimary,
+ letterSpacing: "-0.01em",
+ },
+ card: {
+ backgroundColor: theme.cardBg,
+ width: "100%",
+ maxWidth: "440px",
+ borderRadius: "12px",
+ border: `1px solid ${theme.border}`,
+ boxShadow: "0 24px 48px -12px rgba(0, 0, 0, 0.6)",
+ padding: "48px 40px",
+ textAlign: "center",
+ position: "relative",
+ },
+ logoBadge: {
+ width: "48px",
+ height: "48px",
+ backgroundColor: "rgba(217, 92, 61, 0.15)",
+ color: theme.accent,
+ borderRadius: "10px",
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "center",
+ fontSize: "22px",
+ fontWeight: "700",
+ margin: "0 auto 32px auto",
+ border: "1px solid rgba(217, 92, 61, 0.2)",
+ },
+ h1: {
+ fontSize: "24px",
+ fontWeight: "600",
+ marginBottom: "12px",
+ color: theme.textPrimary,
+ },
+ p: {
+ fontSize: "14px",
+ color: theme.textSecondary,
+ lineHeight: "1.6",
+ marginBottom: "40px",
+ },
+ button: {
+ width: "100%",
+ height: "48px",
+ backgroundColor: theme.accent,
+ color: "#FFFFFF",
+ border: "none",
+ borderRadius: "8px",
+ fontSize: "14px",
+ fontWeight: "500",
+ cursor: (authProcessing || (mode === 'loading')) ? "not-allowed" : "pointer",
+ opacity: (authProcessing || (mode === 'loading')) ? 0.7 : 1,
+ transition: "background-color 0.2s ease",
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "center",
+ gap: "10px",
+ boxShadow: "0 4px 12px rgba(217, 92, 61, 0.25)",
+ },
+ secondaryButton: {
+ backgroundColor: "transparent",
+ color: "#A1A1AA",
+ border: "1px solid #3F3F46",
+ padding: "8px 16px",
+ borderRadius: "6px",
+ fontSize: "12px",
+ cursor: "pointer",
+ marginTop: "16px",
+ minWidth: "100px"
+ },
+ errorBox: {
+ backgroundColor: "rgba(185, 28, 28, 0.15)",
+ border: "1px solid rgba(185, 28, 28, 0.3)",
+ color: "#FCA5A5",
+ padding: "12px",
+ borderRadius: "8px",
+ fontSize: "13px",
+ marginBottom: "24px",
+ textAlign: "left",
+ },
+ configCard: {
+ textAlign: "left",
+ backgroundColor: "#111",
+ border: "1px solid #333",
+ padding: "24px",
+ borderRadius: "8px",
+ marginBottom: "24px",
+ },
+ codeDisplay: {
+ backgroundColor: "#27272A",
+ color: theme.accent,
+ fontSize: "20px",
+ fontWeight: "700",
+ padding: "12px",
+ borderRadius: "6px",
+ textAlign: "center",
+ letterSpacing: "2px",
+ margin: "12px 0",
+ border: `1px dashed ${theme.accent}`,
+ cursor: "pointer",
+ },
+ footer: {
+ marginTop: "48px",
+ fontSize: "12px",
+ color: "#52525B",
+ }
+ };
+
+ // --- RENDER: Device Flow UI ---
+ const renderDeviceFlow = () => {
+ if (!deviceData) {
+ return (
+ !authProcessing && (e.currentTarget.style.backgroundColor = theme.accentHover)}
+ onMouseOut={(e) => !authProcessing && (e.currentTarget.style.backgroundColor = theme.accent)}
+ >
+ {authProcessing ? "Connecting..." : "Sign in with GitHub"}
+
+ );
+ }
+
+ return (
+
+
Authorize Device
+
+ GitPilot needs authorization to access your repositories.
+
+
+
+
1. Copy code:
+
{
+ navigator.clipboard.writeText(deviceData.user_code);
+ }}
+ title="Click to copy"
+ >
+ {deviceData.user_code}
+
+
+
+
+
+
+ ↻
+ Waiting for authorization...
+
+
+
+
+
+ Check Status
+
+
+ Cancel
+
+
+
+ );
+ };
+
+ // --- RENDER: Config Error ---
+ if (missingClientId) {
+ return (
+
+
+
⚠️
+
Configuration Error
+
Could not connect to GitHub Authentication services.
+
setMissingClientId(false)} style={{...styles.button, backgroundColor: "#3F3F46"}}>Retry
+
+
+ );
+ }
+
+ // --- RENDER: Main ---
+ return (
+
+
+
GP
+
+
GitPilot Enterprise
+
+ Agentic AI workflow for your repositories.
+ Secure. Context-aware. Automated.
+
+
+ {error &&
{error}
}
+
+ {mode === "loading" && (
+
Initializing...
+ )}
+
+ {mode === "web" && (
+
!authProcessing && (e.currentTarget.style.backgroundColor = theme.accentHover)}
+ onMouseOut={(e) => !authProcessing && (e.currentTarget.style.backgroundColor = theme.accent)}
+ >
+ {authProcessing ? "Connecting..." : (
+ <>
+
+ Sign in with GitHub
+ >
+ )}
+
+ )}
+
+ {mode === "device" && renderDeviceFlow()}
+
+
+ © {new Date().getFullYear()} GitPilot Inc.
+
+
+
+ );
+}
\ No newline at end of file
diff --git a/frontend/components/PlanView.jsx b/frontend/components/PlanView.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..a67efb237c5204e69cd1cec98248e8702ade6e25
--- /dev/null
+++ b/frontend/components/PlanView.jsx
@@ -0,0 +1,231 @@
+import React from "react";
+
+export default function PlanView({ plan }) {
+ if (!plan) return null;
+
+ // Calculate totals for each action type
+ const totals = { CREATE: 0, MODIFY: 0, DELETE: 0 };
+ plan.steps.forEach((step) => {
+ step.files.forEach((file) => {
+ totals[file.action] = (totals[file.action] || 0) + 1;
+ });
+ });
+
+ const theme = {
+ bg: "#18181B",
+ border: "#27272A",
+ textPrimary: "#EDEDED",
+ textSecondary: "#A1A1AA",
+ successBg: "rgba(16, 185, 129, 0.1)",
+ successText: "#10B981",
+ warningBg: "rgba(245, 158, 11, 0.1)",
+ warningText: "#F59E0B",
+ dangerBg: "rgba(239, 68, 68, 0.1)",
+ dangerText: "#EF4444",
+ };
+
+ const styles = {
+ container: {
+ display: "flex",
+ flexDirection: "column",
+ gap: "20px",
+ fontFamily: '-apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif',
+ },
+ header: {
+ display: "flex",
+ flexDirection: "column",
+ gap: "8px",
+ paddingBottom: "16px",
+ borderBottom: `1px solid ${theme.border}`,
+ },
+ goal: {
+ fontSize: "14px",
+ fontWeight: "600",
+ color: theme.textPrimary,
+ },
+ summary: {
+ fontSize: "13px",
+ color: theme.textSecondary,
+ lineHeight: "1.5",
+ },
+ totals: {
+ display: "flex",
+ gap: "12px",
+ flexWrap: "wrap",
+ },
+ totalBadge: {
+ fontSize: "11px",
+ fontWeight: "500",
+ padding: "4px 8px",
+ borderRadius: "4px",
+ border: "1px solid transparent",
+ },
+ totalCreate: {
+ backgroundColor: theme.successBg,
+ color: theme.successText,
+ borderColor: "rgba(16, 185, 129, 0.2)",
+ },
+ totalModify: {
+ backgroundColor: theme.warningBg,
+ color: theme.warningText,
+ borderColor: "rgba(245, 158, 11, 0.2)",
+ },
+ totalDelete: {
+ backgroundColor: theme.dangerBg,
+ color: theme.dangerText,
+ borderColor: "rgba(239, 68, 68, 0.2)",
+ },
+ stepsList: {
+ listStyle: "none",
+ padding: 0,
+ margin: 0,
+ display: "flex",
+ flexDirection: "column",
+ gap: "24px",
+ },
+ step: {
+ display: "flex",
+ flexDirection: "column",
+ gap: "8px",
+ position: "relative",
+ },
+ stepHeader: {
+ display: "flex",
+ alignItems: "baseline",
+ gap: "8px",
+ fontSize: "13px",
+ fontWeight: "600",
+ color: theme.textPrimary,
+ },
+ stepNumber: {
+ color: theme.textSecondary,
+ fontSize: "11px",
+ textTransform: "uppercase",
+ letterSpacing: "0.05em",
+ },
+ stepDescription: {
+ fontSize: "13px",
+ color: theme.textSecondary,
+ lineHeight: "1.5",
+ margin: 0,
+ },
+ fileList: {
+ marginTop: "8px",
+ display: "flex",
+ flexDirection: "column",
+ gap: "4px",
+ backgroundColor: "#131316",
+ padding: "8px 12px",
+ borderRadius: "6px",
+ border: `1px solid ${theme.border}`,
+ },
+ fileItem: {
+ display: "flex",
+ alignItems: "center",
+ gap: "10px",
+ fontSize: "12px",
+ fontFamily: "ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, monospace",
+ },
+ actionBadge: {
+ padding: "2px 6px",
+ borderRadius: "4px",
+ fontSize: "10px",
+ fontWeight: "bold",
+ textTransform: "uppercase",
+ minWidth: "55px",
+ textAlign: "center",
+ letterSpacing: "0.02em",
+ },
+ path: {
+ color: "#D4D4D8",
+ whiteSpace: "nowrap",
+ overflow: "hidden",
+ textOverflow: "ellipsis",
+ },
+ risks: {
+ marginTop: "8px",
+ fontSize: "12px",
+ color: theme.warningText,
+ backgroundColor: "rgba(245, 158, 11, 0.05)",
+ padding: "8px 12px",
+ borderRadius: "6px",
+ border: "1px solid rgba(245, 158, 11, 0.1)",
+ display: "flex",
+ gap: "6px",
+ alignItems: "flex-start",
+ },
+ };
+
+ const getActionStyle = (action) => {
+ switch (action) {
+ case "CREATE": return styles.totalCreate;
+ case "MODIFY": return styles.totalModify;
+ case "DELETE": return styles.totalDelete;
+ default: return {};
+ }
+ };
+
+ return (
+
+ {/* Header & Summary */}
+
+
Goal: {plan.goal}
+
{plan.summary}
+
+
+ {/* Totals Summary */}
+
+ {totals.CREATE > 0 && (
+
+ {totals.CREATE} to create
+
+ )}
+ {totals.MODIFY > 0 && (
+
+ {totals.MODIFY} to modify
+
+ )}
+ {totals.DELETE > 0 && (
+
+ {totals.DELETE} to delete
+
+ )}
+
+
+ {/* Steps List */}
+
+ {plan.steps.map((s) => (
+
+
+ Step {s.step_number}
+ {s.title}
+
+ {s.description}
+
+ {/* Files List */}
+ {s.files && s.files.length > 0 && (
+
+ {s.files.map((file, idx) => (
+
+
+ {file.action}
+
+ {file.path}
+
+ ))}
+
+ )}
+
+ {/* Risks */}
+ {s.risks && (
+
+ ⚠️
+ {s.risks}
+
+ )}
+
+ ))}
+
+
+ );
+}
\ No newline at end of file
diff --git a/frontend/components/ProjectContextPanel.jsx b/frontend/components/ProjectContextPanel.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..0a480e9b6003a3a9e1c4ffbc80c86564d6ccbe55
--- /dev/null
+++ b/frontend/components/ProjectContextPanel.jsx
@@ -0,0 +1,572 @@
+import React, { useEffect, useMemo, useRef, useState } from "react";
+import FileTree from "./FileTree.jsx";
+import BranchPicker from "./BranchPicker.jsx";
+
+// --- INJECTED STYLES FOR ANIMATIONS ---
+const animationStyles = `
+ @keyframes highlight-pulse {
+ 0% { background-color: rgba(59, 130, 246, 0.10); }
+ 50% { background-color: rgba(59, 130, 246, 0.22); }
+ 100% { background-color: transparent; }
+ }
+ .pulse-context {
+ animation: highlight-pulse 1.1s ease-out;
+ }
+`;
+
+/**
+ * ProjectContextPanel (Production-ready)
+ *
+ * Controlled component:
+ * - Branch source of truth is App.jsx:
+ * - defaultBranch (prod)
+ * - currentBranch (what user sees)
+ * - sessionBranches (list of all active AI session branches)
+ *
+ * Responsibilities:
+ * - Show project context + branch dropdown + AI badge/banner
+ * - Fetch access status + file count for the currentBranch
+ * - Trigger visual pulse on pulseNonce (Hard Switch)
+ */
+export default function ProjectContextPanel({
+ repo,
+ defaultBranch,
+ currentBranch,
+ sessionBranch, // Active session branch (optional, for specific highlighting)
+ sessionBranches = [], // List of all AI branches
+ onBranchChange,
+ pulseNonce,
+ onSettingsClick,
+}) {
+ const [appUrl, setAppUrl] = useState("");
+ const [fileCount, setFileCount] = useState(0);
+
+ const [isDropdownOpen, setIsDropdownOpen] = useState(false);
+
+ // Data Loading State
+ const [analyzing, setAnalyzing] = useState(false);
+ const [accessInfo, setAccessInfo] = useState(null);
+ const [treeError, setTreeError] = useState(null);
+
+ // Retry / Refresh Logic
+ const [refreshTrigger, setRefreshTrigger] = useState(0);
+ const [retryCount, setRetryCount] = useState(0);
+ const retryTimeoutRef = useRef(null);
+
+ // UX State
+ const [animateHeader, setAnimateHeader] = useState(false);
+ const [toast, setToast] = useState({ visible: false, title: "", msg: "" });
+
+ // Calculate effective default to prevent 'main' fallback errors
+ const effectiveDefaultBranch = defaultBranch || repo?.default_branch || "main";
+ const branch = currentBranch || effectiveDefaultBranch;
+
+ // Determine if we are currently viewing an AI Session branch
+ const isAiSession = (sessionBranches.includes(branch)) || (sessionBranch === branch && branch !== effectiveDefaultBranch);
+
+ // Fetch App URL on mount
+ useEffect(() => {
+ fetch("/api/auth/app-url")
+ .then((res) => res.json())
+ .then((data) => {
+ if (data.app_url) setAppUrl(data.app_url);
+ })
+ .catch((err) => console.error("Failed to fetch App URL:", err));
+ }, []);
+
+ // Hard Switch pulse: whenever App increments pulseNonce
+ useEffect(() => {
+ if (!pulseNonce) return;
+ setAnimateHeader(true);
+ const t = window.setTimeout(() => setAnimateHeader(false), 1100);
+ return () => window.clearTimeout(t);
+ }, [pulseNonce]);
+
+ // Main data fetcher (Access + Tree stats) for currentBranch
+ // Stale-while-revalidate: keep previous data visible during fetch
+ useEffect(() => {
+ if (!repo) return;
+
+ // Only show full "analyzing" spinner if we have no data yet
+ if (!accessInfo) setAnalyzing(true);
+ setTreeError(null);
+
+ if (retryTimeoutRef.current) {
+ clearTimeout(retryTimeoutRef.current);
+ retryTimeoutRef.current = null;
+ }
+
+ let headers = {};
+ try {
+ const token = localStorage.getItem("github_token");
+ if (token) headers = { Authorization: `Bearer ${token}` };
+ } catch (e) {
+ console.warn("Unable to read github_token:", e);
+ }
+
+ let cancelled = false;
+ const cacheBuster = `&_t=${Date.now()}&retry=${retryCount}`;
+
+ // A) Access Check (with Stale Cache Fix)
+ fetch(`/api/auth/repo-access?owner=${repo.owner}&repo=${repo.name}${cacheBuster}`, {
+ headers,
+ cache: "no-cache",
+ })
+ .then(async (res) => {
+ if (cancelled) return;
+ const data = await res.json().catch(() => ({}));
+
+ if (!res.ok) {
+ setAccessInfo({ can_write: false, app_installed: false, auth_type: "none" });
+ return;
+ }
+
+ setAccessInfo(data);
+
+ // Auto-retry if user has push access but App is not detected yet (Stale Cache)
+ if (data.can_write && !data.app_installed && retryCount === 0) {
+ retryTimeoutRef.current = setTimeout(() => {
+ setRetryCount(1);
+ }, 1000);
+ }
+ })
+ .catch(() => {
+ if (!cancelled) setAccessInfo({ can_write: false, app_installed: false, auth_type: "none" });
+ });
+
+ // B) Tree count for the selected branch
+ // Don't clear fileCount — keep stale value visible until new one arrives
+ const hadFileCount = fileCount > 0;
+ if (!hadFileCount) setAnalyzing(true);
+
+ fetch(`/api/repos/${repo.owner}/${repo.name}/tree?ref=${encodeURIComponent(branch)}&_t=${Date.now()}`, {
+ headers,
+ cache: "no-cache",
+ })
+ .then(async (res) => {
+ if (cancelled) return;
+ const data = await res.json().catch(() => ({}));
+ if (!res.ok) {
+ setTreeError(data.detail || "Failed to load tree");
+ setFileCount(0);
+ return;
+ }
+ setFileCount(Array.isArray(data.files) ? data.files.length : 0);
+ })
+ .catch((err) => {
+ if (cancelled) return;
+ setTreeError(err.message);
+ setFileCount(0);
+ })
+ .finally(() => { if (!cancelled) setAnalyzing(false); });
+
+ return () => {
+ cancelled = true;
+ if (retryTimeoutRef.current) clearTimeout(retryTimeoutRef.current);
+ };
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [repo?.owner, repo?.name, branch, refreshTrigger, retryCount]);
+
+ const showToast = (title, msg) => {
+ setToast({ visible: true, title, msg });
+ setTimeout(() => setToast((prev) => ({ ...prev, visible: false })), 3000);
+ };
+
+ const handleManualSwitch = (targetBranch) => {
+ if (!targetBranch || targetBranch === branch) {
+ setIsDropdownOpen(false);
+ return;
+ }
+
+ // Local UI feedback (App.jsx will handle the actual state change)
+ const goingAi = sessionBranches.includes(targetBranch);
+ showToast(
+ goingAi ? "Context Switched" : "Switched to Production",
+ goingAi ? `Viewing AI Session: ${targetBranch}` : `Viewing ${targetBranch}.`
+ );
+
+ setIsDropdownOpen(false);
+ if (onBranchChange) onBranchChange(targetBranch);
+ };
+
+ const handleRefresh = () => {
+ setAnalyzing(true);
+ setRetryCount(0);
+ setRefreshTrigger((prev) => prev + 1);
+ };
+
+ const handleInstallClick = () => {
+ if (!appUrl) return;
+ const targetUrl = appUrl.endsWith("/") ? `${appUrl}installations/new` : `${appUrl}/installations/new`;
+ window.open(targetUrl, "_blank", "noopener,noreferrer");
+ };
+
+ // --- STYLES ---
+ const theme = useMemo(
+ () => ({
+ bg: "#131316",
+ border: "#27272A",
+ textPrimary: "#EDEDED",
+ textSecondary: "#A1A1AA",
+ accent: "#3b82f6",
+ warningBorder: "rgba(245, 158, 11, 0.2)",
+ warningText: "#F59E0B",
+ successColor: "#10B981",
+ cardBg: "#18181B",
+ aiBg: "rgba(59, 130, 246, 0.10)",
+ aiBorder: "rgba(59, 130, 246, 0.30)",
+ aiText: "#60a5fa",
+ }),
+ []
+ );
+
+ const styles = useMemo(
+ () => ({
+ container: {
+ height: "100%",
+ borderRight: `1px solid ${theme.border}`,
+ backgroundColor: theme.bg,
+ display: "flex",
+ flexDirection: "column",
+ fontFamily: '"Söhne", "Inter", sans-serif',
+ position: "relative",
+ overflow: "hidden",
+ },
+ header: {
+ padding: "16px 20px",
+ borderBottom: `1px solid ${theme.border}`,
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "space-between",
+ transition: "background-color 0.3s ease",
+ },
+ titleGroup: { display: "flex", alignItems: "center", gap: "8px" },
+ title: { fontSize: "13px", fontWeight: "600", color: theme.textPrimary },
+ repoBadge: {
+ backgroundColor: "#27272A",
+ color: theme.textSecondary,
+ fontSize: "11px",
+ padding: "2px 8px",
+ borderRadius: "12px",
+ border: `1px solid ${theme.border}`,
+ fontFamily: "monospace",
+ },
+ aiBadge: {
+ display: "flex",
+ alignItems: "center",
+ gap: "6px",
+ backgroundColor: theme.aiBg,
+ color: theme.aiText,
+ fontSize: "10px",
+ fontWeight: "bold",
+ padding: "2px 8px",
+ borderRadius: "12px",
+ border: `1px solid ${theme.aiBorder}`,
+ textTransform: "uppercase",
+ letterSpacing: "0.5px",
+ },
+ content: {
+ padding: "16px 20px 12px 20px",
+ display: "flex",
+ flexDirection: "column",
+ gap: "12px",
+ },
+ statRow: { display: "flex", justifyContent: "space-between", fontSize: "13px", marginBottom: "4px" },
+ label: { color: theme.textSecondary },
+ value: { color: theme.textPrimary, fontWeight: "500" },
+ dropdownContainer: { position: "relative" },
+ branchButton: {
+ display: "flex",
+ alignItems: "center",
+ gap: "6px",
+ padding: "4px 8px",
+ borderRadius: "4px",
+ border: `1px solid ${isAiSession ? theme.aiBorder : theme.border}`,
+ backgroundColor: isAiSession ? "rgba(59, 130, 246, 0.05)" : "transparent",
+ color: isAiSession ? theme.aiText : theme.textPrimary,
+ fontSize: "13px",
+ cursor: "pointer",
+ fontFamily: "monospace",
+ },
+ dropdownMenu: {
+ position: "absolute",
+ top: "100%",
+ left: 0,
+ marginTop: "4px",
+ width: "240px",
+ backgroundColor: "#1F1F23",
+ border: `1px solid ${theme.border}`,
+ borderRadius: "6px",
+ boxShadow: "0 4px 12px rgba(0,0,0,0.5)",
+ zIndex: 50,
+ display: isDropdownOpen ? "block" : "none",
+ overflow: "hidden",
+ },
+ dropdownItem: {
+ padding: "8px 12px",
+ fontSize: "13px",
+ color: theme.textSecondary,
+ cursor: "pointer",
+ display: "flex",
+ alignItems: "center",
+ gap: "8px",
+ borderBottom: `1px solid ${theme.border}`,
+ },
+ contextBanner: {
+ backgroundColor: theme.aiBg,
+ borderTop: `1px solid ${theme.aiBorder}`,
+ padding: "8px 20px",
+ fontSize: "11px",
+ color: theme.aiText,
+ display: "flex",
+ justifyContent: "space-between",
+ alignItems: "center",
+ },
+ toast: {
+ position: "absolute",
+ top: "16px",
+ right: "16px",
+ backgroundColor: "#18181B",
+ border: `1px solid ${theme.border}`,
+ borderLeft: `3px solid ${theme.accent}`,
+ borderRadius: "6px",
+ padding: "12px",
+ boxShadow: "0 4px 12px rgba(0,0,0,0.5)",
+ zIndex: 100,
+ minWidth: "240px",
+ transition: "all 0.3s cubic-bezier(0.16, 1, 0.3, 1)",
+ transform: toast.visible ? "translateX(0)" : "translateX(120%)",
+ opacity: toast.visible ? 1 : 0,
+ },
+ toastTitle: { fontSize: "13px", fontWeight: "bold", color: theme.textPrimary, marginBottom: "2px" },
+ toastMsg: { fontSize: "11px", color: theme.textSecondary },
+ refreshButton: {
+ marginTop: "8px",
+ height: "32px",
+ padding: "0 12px",
+ backgroundColor: "transparent",
+ color: theme.textSecondary,
+ border: `1px solid ${theme.border}`,
+ borderRadius: "6px",
+ fontSize: "12px",
+ cursor: analyzing ? "not-allowed" : "pointer",
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "center",
+ gap: "6px",
+ },
+ settingsBtn: {
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "center",
+ width: "28px",
+ height: "28px",
+ borderRadius: "6px",
+ border: `1px solid ${theme.border}`,
+ backgroundColor: "transparent",
+ color: theme.textSecondary,
+ cursor: "pointer",
+ padding: 0,
+ transition: "color 0.15s, border-color 0.15s",
+ },
+ treeWrapper: { flex: 1, overflow: "auto", borderTop: `1px solid ${theme.border}` },
+ installCard: {
+ marginTop: "8px",
+ padding: "12px",
+ borderRadius: "8px",
+ backgroundColor: theme.cardBg,
+ border: `1px solid ${theme.warningBorder}`,
+ },
+ installHeader: {
+ display: "flex",
+ alignItems: "center",
+ gap: "10px",
+ fontSize: "14px",
+ fontWeight: "600",
+ color: theme.textPrimary,
+ },
+ installText: {
+ fontSize: "13px",
+ color: theme.textSecondary,
+ lineHeight: "1.5",
+ },
+ }),
+ [analyzing, isAiSession, isDropdownOpen, theme, toast.visible]
+ );
+
+ // Determine status text
+ let statusText = "Checking...";
+ let statusColor = theme.textSecondary;
+ let showInstallCard = false;
+
+ if (!analyzing && accessInfo) {
+ if (accessInfo.app_installed) {
+ statusText = "Write Access ✓";
+ statusColor = theme.successColor;
+ } else if (accessInfo.can_write && retryCount === 0) {
+ statusText = "Verifying...";
+ } else if (accessInfo.can_write) {
+ statusText = "Push Access (No App)";
+ statusColor = theme.warningText;
+ showInstallCard = true;
+ } else {
+ statusText = "Read Only";
+ statusColor = theme.warningText;
+ showInstallCard = true;
+ }
+ }
+
+ if (!repo) {
+ return (
+
+ );
+ }
+
+ return (
+
+
+
+ {/* TOAST */}
+
+
{toast.title}
+
{toast.msg}
+
+
+ {/* HEADER */}
+
+
+
Project context
+ {isAiSession && (
+
+
+
+
+ AI Session
+
+ )}
+
+
+ {!isAiSession &&
{repo.name} }
+ {onSettingsClick && (
+
+
+
+
+
+
+ )}
+
+
+
+ {/* CONTENT */}
+
+ {/* Branch selector (Claude-Code-on-Web parity — uses BranchPicker with search) */}
+
+ Branch:
+
+
+
+ {/* Stats */}
+
+ Files:
+ {analyzing ? "…" : fileCount}
+
+
+
+ Status:
+ {statusText}
+
+
+ {/* Tree error (optional display) */}
+ {treeError && (
+
+ {treeError}
+
+ )}
+
+ {/* Refresh */}
+
+
+
+
+ {analyzing ? "Refreshing..." : "Refresh"}
+
+
+ {/* Install card */}
+ {showInstallCard && (
+
+
+ ⚡
+ Enable Write Access
+
+
+ Install the GitPilot App to enable AI agent operations.
+
+
+ Alternatively, use Folder or Local Git mode for local-first workflows without GitHub.
+
+
+ Install App
+
+
+ )}
+
+
+ {/* Context banner */}
+ {isAiSession && (
+
+
+
+
+
+
+
+ You are viewing an AI Session branch.
+
+ handleManualSwitch(effectiveDefaultBranch)}>
+ Return to {effectiveDefaultBranch}
+
+
+ )}
+
+ {/* File tree (branch-aware) */}
+
+
+
+
+ );
+}
\ No newline at end of file
diff --git a/frontend/components/ProjectSettings/ContextTab.jsx b/frontend/components/ProjectSettings/ContextTab.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..5272c846b181fb9c17031215e4056b565a865f09
--- /dev/null
+++ b/frontend/components/ProjectSettings/ContextTab.jsx
@@ -0,0 +1,352 @@
+import React, { useEffect, useMemo, useRef, useState } from "react";
+
+export default function ContextTab({ owner, repo }) {
+ const [assets, setAssets] = useState([]);
+ const [busy, setBusy] = useState(false);
+ const [error, setError] = useState("");
+ const [uploadHint, setUploadHint] = useState("");
+ const inputRef = useRef(null);
+
+ const canUse = useMemo(() => Boolean(owner && repo), [owner, repo]);
+
+ async function loadAssets() {
+ if (!canUse) return;
+ setError("");
+ try {
+ const res = await fetch(`/api/repos/${owner}/${repo}/context/assets`);
+ if (!res.ok) throw new Error(`Failed to list assets (${res.status})`);
+ const data = await res.json();
+ setAssets(data.assets || []);
+ } catch (e) {
+ setError(e?.message || "Failed to load assets");
+ }
+ }
+
+ useEffect(() => {
+ loadAssets();
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [owner, repo]);
+
+ async function uploadFiles(fileList) {
+ if (!canUse) return;
+ const files = Array.from(fileList || []);
+ if (!files.length) return;
+
+ setBusy(true);
+ setError("");
+ setUploadHint(`Uploading ${files.length} file(s)...`);
+
+ try {
+ for (const f of files) {
+ const form = new FormData();
+ form.append("file", f);
+
+ const res = await fetch(
+ `/api/repos/${owner}/${repo}/context/assets/upload`,
+ { method: "POST", body: form }
+ );
+ if (!res.ok) {
+ const txt = await res.text().catch(() => "");
+ throw new Error(`Upload failed (${res.status}) ${txt}`);
+ }
+ }
+ setUploadHint("Upload complete. Refreshing list...");
+ await loadAssets();
+ setUploadHint("");
+ } catch (e) {
+ setError(e?.message || "Upload failed");
+ setUploadHint("");
+ } finally {
+ setBusy(false);
+ if (inputRef.current) inputRef.current.value = "";
+ }
+ }
+
+ async function deleteAsset(assetId) {
+ if (!canUse) return;
+ const ok = window.confirm("Delete this asset? This cannot be undone.");
+ if (!ok) return;
+
+ setBusy(true);
+ setError("");
+ try {
+ const res = await fetch(
+ `/api/repos/${owner}/${repo}/context/assets/${assetId}`,
+ { method: "DELETE" }
+ );
+ if (!res.ok) throw new Error(`Delete failed (${res.status})`);
+ await loadAssets();
+ } catch (e) {
+ setError(e?.message || "Delete failed");
+ } finally {
+ setBusy(false);
+ }
+ }
+
+ function downloadAsset(assetId) {
+ if (!canUse) return;
+ window.open(
+ `/api/repos/${owner}/${repo}/context/assets/${assetId}/download`,
+ "_blank"
+ );
+ }
+
+ const empty = !assets || assets.length === 0;
+
+ return (
+
+
+
+
Project Context
+
+ Upload documents, transcripts, screenshots, etc. (non-destructive,
+ additive).
+
+
+
+
+ uploadFiles(e.target.files)}
+ style={styles.fileInput}
+ />
+ inputRef.current?.click()}
+ >
+ Upload
+
+
+ Refresh
+
+
+
+
+
{
+ e.preventDefault();
+ e.stopPropagation();
+ }}
+ onDrop={(e) => {
+ e.preventDefault();
+ e.stopPropagation();
+ if (busy) return;
+ uploadFiles(e.dataTransfer.files);
+ }}
+ >
+
+ Drag & drop files here, or click Upload .
+
+
+ Tip: For audio/video, upload a transcript file too.
+
+
+
+ {uploadHint ?
{uploadHint}
: null}
+ {error ?
{error}
: null}
+
+
+
+
File
+
Type
+
Size
+
Indexed
+
Actions
+
+
+ {empty ? (
+
+ No context assets yet. Upload docs, transcripts, and screenshots to
+ improve planning quality.
+
+ ) : (
+ assets.map((a) => (
+
+
+
{a.filename}
+
+ Added: {a.created_at || "-"} | Extracted:{" "}
+ {Number(a.extracted_chars || 0).toLocaleString()} chars
+
+
+
+
+ {a.mime || "unknown"}
+
+
+
+ {formatBytes(a.size_bytes || 0)}
+
+
+
+ {a.indexed_chunks || 0} chunks
+
+
+
+ downloadAsset(a.asset_id)}
+ >
+ Download
+
+ deleteAsset(a.asset_id)}
+ >
+ Delete
+
+
+
+ ))
+ )}
+
+
+ );
+}
+
+function formatBytes(bytes) {
+ const b = Number(bytes || 0);
+ if (!b) return "0 B";
+ const units = ["B", "KB", "MB", "GB", "TB"];
+ let i = 0;
+ let v = b;
+ while (v >= 1024 && i < units.length - 1) {
+ v /= 1024;
+ i += 1;
+ }
+ return `${v.toFixed(v >= 10 || i === 0 ? 0 : 1)} ${units[i]}`;
+}
+
+const styles = {
+ wrap: { display: "flex", flexDirection: "column", gap: 12 },
+ topRow: {
+ display: "flex",
+ justifyContent: "space-between",
+ gap: 12,
+ alignItems: "flex-start",
+ flexWrap: "wrap",
+ },
+ left: { minWidth: 280 },
+ right: { display: "flex", gap: 8, alignItems: "center", flexWrap: "wrap" },
+ h1: { fontSize: 14, fontWeight: 800, color: "#fff" },
+ h2: { fontSize: 12, color: "rgba(255,255,255,0.65)", marginTop: 4 },
+ fileInput: { display: "none" },
+ btn: {
+ background: "rgba(255,255,255,0.10)",
+ border: "1px solid rgba(255,255,255,0.18)",
+ color: "#fff",
+ borderRadius: 10,
+ padding: "8px 10px",
+ cursor: "pointer",
+ fontSize: 13,
+ },
+ dropzone: {
+ border: "1px dashed rgba(255,255,255,0.22)",
+ borderRadius: 12,
+ padding: 16,
+ background: "rgba(255,255,255,0.03)",
+ },
+ dropText: { color: "rgba(255,255,255,0.85)", fontSize: 13 },
+ dropSub: { color: "rgba(255,255,255,0.55)", fontSize: 12, marginTop: 6 },
+ hint: {
+ color: "rgba(255,255,255,0.75)",
+ fontSize: 12,
+ padding: "8px 10px",
+ border: "1px solid rgba(255,255,255,0.12)",
+ borderRadius: 10,
+ background: "rgba(255,255,255,0.03)",
+ },
+ error: {
+ color: "#ffb3b3",
+ fontSize: 12,
+ padding: "8px 10px",
+ border: "1px solid rgba(255,120,120,0.25)",
+ borderRadius: 10,
+ background: "rgba(255,80,80,0.08)",
+ },
+ tableWrap: {
+ border: "1px solid rgba(255,255,255,0.12)",
+ borderRadius: 12,
+ overflow: "hidden",
+ },
+ tableHeader: {
+ display: "grid",
+ gridTemplateColumns: "1.6fr 1fr 0.6fr 0.6fr 0.8fr",
+ gap: 0,
+ padding: "10px 12px",
+ background: "rgba(255,255,255,0.03)",
+ borderBottom: "1px solid rgba(255,255,255,0.10)",
+ fontSize: 12,
+ color: "rgba(255,255,255,0.65)",
+ },
+ row: {
+ display: "grid",
+ gridTemplateColumns: "1.6fr 1fr 0.6fr 0.6fr 0.8fr",
+ padding: "10px 12px",
+ borderBottom: "1px solid rgba(255,255,255,0.08)",
+ alignItems: "center",
+ },
+ col: { minWidth: 0 },
+ colName: {},
+ colMeta: { color: "rgba(255,255,255,0.75)", fontSize: 12 },
+ colActions: { display: "flex", gap: 8, justifyContent: "flex-end" },
+ fileName: {
+ color: "#fff",
+ fontSize: 13,
+ fontWeight: 700,
+ overflow: "hidden",
+ textOverflow: "ellipsis",
+ whiteSpace: "nowrap",
+ },
+ small: {
+ color: "rgba(255,255,255,0.55)",
+ fontSize: 11,
+ marginTop: 4,
+ overflow: "hidden",
+ textOverflow: "ellipsis",
+ whiteSpace: "nowrap",
+ },
+ badge: {
+ display: "inline-flex",
+ alignItems: "center",
+ padding: "2px 8px",
+ borderRadius: 999,
+ border: "1px solid rgba(255,255,255,0.16)",
+ background: "rgba(255,255,255,0.04)",
+ fontSize: 11,
+ color: "rgba(255,255,255,0.80)",
+ maxWidth: "100%",
+ overflow: "hidden",
+ textOverflow: "ellipsis",
+ whiteSpace: "nowrap",
+ },
+ smallBtn: {
+ background: "rgba(255,255,255,0.08)",
+ border: "1px solid rgba(255,255,255,0.16)",
+ color: "#fff",
+ borderRadius: 10,
+ padding: "6px 8px",
+ cursor: "pointer",
+ fontSize: 12,
+ },
+ dangerBtn: {
+ border: "1px solid rgba(255,90,90,0.35)",
+ background: "rgba(255,90,90,0.10)",
+ },
+ empty: {
+ padding: 14,
+ color: "rgba(255,255,255,0.65)",
+ fontSize: 13,
+ },
+};
diff --git a/frontend/components/ProjectSettings/ConventionsTab.jsx b/frontend/components/ProjectSettings/ConventionsTab.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..d508ccf1a65817f1b5a44dbebcdfc1861c42b8b6
--- /dev/null
+++ b/frontend/components/ProjectSettings/ConventionsTab.jsx
@@ -0,0 +1,151 @@
+import React, { useEffect, useMemo, useState } from "react";
+
+export default function ConventionsTab({ owner, repo }) {
+ const [content, setContent] = useState("");
+ const [busy, setBusy] = useState(false);
+ const [error, setError] = useState("");
+
+ const canUse = useMemo(() => Boolean(owner && repo), [owner, repo]);
+
+ async function load() {
+ if (!canUse) return;
+ setError("");
+ setBusy(true);
+ try {
+ const res = await fetch(`/api/repos/${owner}/${repo}/context`);
+ if (!res.ok) throw new Error(`Failed to load conventions (${res.status})`);
+ const data = await res.json();
+ // backend may return { context: "..."} or { conventions: "..."} depending on implementation
+ setContent(data.context || data.conventions || data.memory || data.text || "");
+ } catch (e) {
+ setError(e?.message || "Failed to load conventions");
+ } finally {
+ setBusy(false);
+ }
+ }
+
+ async function initialize() {
+ if (!canUse) return;
+ setError("");
+ setBusy(true);
+ try {
+ const res = await fetch(`/api/repos/${owner}/${repo}/context/init`, {
+ method: "POST",
+ });
+ if (!res.ok) {
+ const txt = await res.text().catch(() => "");
+ throw new Error(`Init failed (${res.status}) ${txt}`);
+ }
+ await load();
+ } catch (e) {
+ setError(e?.message || "Init failed");
+ } finally {
+ setBusy(false);
+ }
+ }
+
+ useEffect(() => {
+ load();
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [owner, repo]);
+
+ return (
+
+
+
+
Project Conventions
+
+ This is the project memory/conventions file used by GitPilot.
+
+
+
+
+ Refresh
+
+
+ Initialize
+
+
+
+
+ {error ?
{error}
: null}
+
+
+ {content ? (
+
{content}
+ ) : (
+
+ No conventions found yet. Click Initialize to create default
+ project memory if supported.
+
+ )}
+
+
+
+ Editing conventions is intentionally not included here to keep this
+ feature additive/non-destructive. You can extend this later with an
+ explicit "Edit" mode.
+
+
+ );
+}
+
+const styles = {
+ wrap: { display: "flex", flexDirection: "column", gap: 12 },
+ topRow: {
+ display: "flex",
+ justifyContent: "space-between",
+ gap: 12,
+ alignItems: "flex-start",
+ flexWrap: "wrap",
+ },
+ actions: { display: "flex", gap: 8, flexWrap: "wrap" },
+ h1: { fontSize: 14, fontWeight: 800, color: "#fff" },
+ h2: { fontSize: 12, color: "rgba(255,255,255,0.65)", marginTop: 4 },
+ btn: {
+ background: "rgba(255,255,255,0.10)",
+ border: "1px solid rgba(255,255,255,0.18)",
+ color: "#fff",
+ borderRadius: 10,
+ padding: "8px 10px",
+ cursor: "pointer",
+ fontSize: 13,
+ },
+ error: {
+ color: "#ffb3b3",
+ fontSize: 12,
+ padding: "8px 10px",
+ border: "1px solid rgba(255,120,120,0.25)",
+ borderRadius: 10,
+ background: "rgba(255,80,80,0.08)",
+ },
+ box: {
+ border: "1px solid rgba(255,255,255,0.12)",
+ borderRadius: 12,
+ overflow: "hidden",
+ background: "rgba(0,0,0,0.22)",
+ },
+ pre: {
+ margin: 0,
+ padding: 12,
+ color: "rgba(255,255,255,0.85)",
+ fontSize: 12,
+ lineHeight: 1.35,
+ whiteSpace: "pre-wrap",
+ overflow: "auto",
+ maxHeight: 520,
+ },
+ empty: {
+ padding: 12,
+ color: "rgba(255,255,255,0.65)",
+ fontSize: 13,
+ },
+ note: {
+ color: "rgba(255,255,255,0.55)",
+ fontSize: 12,
+ },
+};
diff --git a/frontend/components/ProjectSettings/UseCaseTab.jsx b/frontend/components/ProjectSettings/UseCaseTab.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..b01e9a46d3ac15c42d16decf69b93e0cf7192db2
--- /dev/null
+++ b/frontend/components/ProjectSettings/UseCaseTab.jsx
@@ -0,0 +1,637 @@
+import React, { useEffect, useMemo, useRef, useState } from "react";
+
+export default function UseCaseTab({ owner, repo }) {
+ const [useCases, setUseCases] = useState([]);
+ const [selectedId, setSelectedId] = useState("");
+ const [useCase, setUseCase] = useState(null);
+ const [busy, setBusy] = useState(false);
+ const [error, setError] = useState("");
+ const [draftTitle, setDraftTitle] = useState("New Use Case");
+ const [message, setMessage] = useState("");
+ const messagesEndRef = useRef(null);
+
+ const canUse = useMemo(() => Boolean(owner && repo), [owner, repo]);
+ const spec = useCase?.spec || {};
+
+ function scrollToBottom() {
+ requestAnimationFrame(() => {
+ messagesEndRef.current?.scrollIntoView({ behavior: "smooth" });
+ });
+ }
+
+ async function loadUseCases() {
+ if (!canUse) return;
+ setError("");
+ try {
+ const res = await fetch(`/api/repos/${owner}/${repo}/use-cases`);
+ if (!res.ok) throw new Error(`Failed to list use cases (${res.status})`);
+ const data = await res.json();
+ const list = data.use_cases || [];
+ setUseCases(list);
+
+ // auto select active or first
+ const active = list.find((x) => x.is_active);
+ const nextId = active?.use_case_id || list[0]?.use_case_id || "";
+ if (!selectedId && nextId) setSelectedId(nextId);
+ } catch (e) {
+ setError(e?.message || "Failed to load use cases");
+ }
+ }
+
+ async function loadUseCase(id) {
+ if (!canUse || !id) return;
+ setError("");
+ try {
+ const res = await fetch(`/api/repos/${owner}/${repo}/use-cases/${id}`);
+ if (!res.ok) throw new Error(`Failed to load use case (${res.status})`);
+ const data = await res.json();
+ setUseCase(data.use_case || null);
+ scrollToBottom();
+ } catch (e) {
+ setError(e?.message || "Failed to load use case");
+ }
+ }
+
+ useEffect(() => {
+ loadUseCases();
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [owner, repo]);
+
+ useEffect(() => {
+ if (!selectedId) return;
+ loadUseCase(selectedId);
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [selectedId]);
+
+ async function createUseCase() {
+ if (!canUse) return;
+ setBusy(true);
+ setError("");
+ try {
+ const res = await fetch(`/api/repos/${owner}/${repo}/use-cases`, {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({ title: draftTitle || "New Use Case" }),
+ });
+ if (!res.ok) {
+ const txt = await res.text().catch(() => "");
+ throw new Error(`Create failed (${res.status}) ${txt}`);
+ }
+ const data = await res.json();
+ const id = data?.use_case?.use_case_id;
+ await loadUseCases();
+ if (id) setSelectedId(id);
+ setDraftTitle("New Use Case");
+ } catch (e) {
+ setError(e?.message || "Create failed");
+ } finally {
+ setBusy(false);
+ }
+ }
+
+ async function sendMessage() {
+ if (!canUse || !selectedId) return;
+ const msg = (message || "").trim();
+ if (!msg) return;
+
+ setBusy(true);
+ setError("");
+
+ // optimistic UI: append user message immediately
+ setUseCase((prev) => {
+ if (!prev) return prev;
+ const next = { ...prev };
+ next.messages = Array.isArray(next.messages) ? [...next.messages] : [];
+ next.messages.push({ role: "user", content: msg, ts: new Date().toISOString() });
+ return next;
+ });
+ setMessage("");
+ scrollToBottom();
+
+ try {
+ const res = await fetch(
+ `/api/repos/${owner}/${repo}/use-cases/${selectedId}/chat`,
+ {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({ message: msg }),
+ }
+ );
+ if (!res.ok) {
+ const txt = await res.text().catch(() => "");
+ throw new Error(`Chat failed (${res.status}) ${txt}`);
+ }
+ const data = await res.json();
+ setUseCase(data.use_case || null);
+ await loadUseCases();
+ scrollToBottom();
+ } catch (e) {
+ setError(e?.message || "Chat failed");
+ } finally {
+ setBusy(false);
+ }
+ }
+
+ async function finalizeUseCase() {
+ if (!canUse || !selectedId) return;
+ setBusy(true);
+ setError("");
+ try {
+ const res = await fetch(
+ `/api/repos/${owner}/${repo}/use-cases/${selectedId}/finalize`,
+ { method: "POST" }
+ );
+ if (!res.ok) {
+ const txt = await res.text().catch(() => "");
+ throw new Error(`Finalize failed (${res.status}) ${txt}`);
+ }
+ const data = await res.json();
+ setUseCase(data.use_case || null);
+ await loadUseCases();
+ alert(
+ "Use Case finalized and marked active.\n\nA Markdown export was saved in the repo workspace .gitpilot/context/use_cases/."
+ );
+ } catch (e) {
+ setError(e?.message || "Finalize failed");
+ } finally {
+ setBusy(false);
+ }
+ }
+
+ const activeId = useCases.find((x) => x.is_active)?.use_case_id;
+
+ return (
+
+
+
+
Use Case
+
+ Guided chat to clarify requirements and produce a versioned spec.
+
+
+
+
+ setDraftTitle(e.target.value)}
+ placeholder="New use case title..."
+ style={styles.titleInput}
+ disabled={!canUse || busy}
+ />
+
+ New
+
+
+ Finalize
+
+
+ Refresh
+
+
+
+
+ {error ?
{error}
: null}
+
+
+
+
Use Cases
+
+ {useCases.length === 0 ? (
+
+ No use cases yet. Create one with New .
+
+ ) : (
+ useCases.map((uc) => (
+
setSelectedId(uc.use_case_id)}
+ >
+
+
+ {uc.title || "(untitled)"}
+
+ {uc.use_case_id === activeId ? (
+
ACTIVE
+ ) : null}
+
+
+ Updated: {uc.updated_at || uc.created_at || "-"}
+
+
+ ))
+ )}
+
+
+
+
+
Guided Chat
+
+ {Array.isArray(useCase?.messages) && useCase.messages.length ? (
+ useCase.messages.map((m, idx) => (
+
+
+ {m.role === "user" ? "You" : "Assistant"}
+
+
{m.content}
+
+ ))
+ ) : (
+
+ Select a use case and start chatting. You can paste structured
+ info like:
+
+{`Summary: ...
+Problem: ...
+Users: ...
+Requirements:
+- ...
+Acceptance Criteria:
+- ...`}
+
+
+ )}
+
+
+
+
+
+
+
+
+
Spec Preview
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Finalize will save a Markdown spec and mark it ACTIVE for context.
+
+
+ Finalize Spec
+
+
+
+
+
+ );
+}
+
+function Section({ title, value }) {
+ return (
+
+
{title}
+
+ {String(value || "").trim() ? (
+
{value}
+ ) : (
+
(empty)
+ )}
+
+
+ );
+}
+
+function ListSection({ title, items }) {
+ const list = Array.isArray(items) ? items : [];
+ return (
+
+
{title}
+
+ {list.length ? (
+
+ {list.map((x, i) => (
+
+ {x}
+
+ ))}
+
+ ) : (
+
(empty)
+ )}
+
+
+ );
+}
+
+const styles = {
+ wrap: { display: "flex", flexDirection: "column", gap: 12 },
+ topRow: {
+ display: "flex",
+ justifyContent: "space-between",
+ gap: 12,
+ alignItems: "flex-start",
+ flexWrap: "wrap",
+ },
+ left: { minWidth: 280 },
+ right: { display: "flex", gap: 8, alignItems: "center", flexWrap: "wrap" },
+ h1: { fontSize: 14, fontWeight: 800, color: "#fff" },
+ h2: { fontSize: 12, color: "rgba(255,255,255,0.65)", marginTop: 4 },
+ titleInput: {
+ width: 260,
+ maxWidth: "70vw",
+ padding: "8px 10px",
+ borderRadius: 10,
+ border: "1px solid rgba(255,255,255,0.18)",
+ background: "rgba(0,0,0,0.25)",
+ color: "#fff",
+ fontSize: 13,
+ outline: "none",
+ },
+ btn: {
+ background: "rgba(255,255,255,0.10)",
+ border: "1px solid rgba(255,255,255,0.18)",
+ color: "#fff",
+ borderRadius: 10,
+ padding: "8px 10px",
+ cursor: "pointer",
+ fontSize: 13,
+ },
+ primaryBtn: {
+ background: "rgba(255,255,255,0.12)",
+ border: "1px solid rgba(255,255,255,0.22)",
+ color: "#fff",
+ borderRadius: 10,
+ padding: "8px 12px",
+ cursor: "pointer",
+ fontSize: 13,
+ fontWeight: 700,
+ },
+ error: {
+ color: "#ffb3b3",
+ fontSize: 12,
+ padding: "8px 10px",
+ border: "1px solid rgba(255,120,120,0.25)",
+ borderRadius: 10,
+ background: "rgba(255,80,80,0.08)",
+ },
+ grid: {
+ display: "grid",
+ gridTemplateColumns: "300px 1.2fr 0.9fr",
+ gap: 12,
+ alignItems: "stretch",
+ },
+ sidebar: {
+ border: "1px solid rgba(255,255,255,0.12)",
+ borderRadius: 12,
+ overflow: "hidden",
+ background: "rgba(255,255,255,0.02)",
+ display: "flex",
+ flexDirection: "column",
+ minHeight: 520,
+ },
+ sidebarTitle: {
+ padding: 10,
+ borderBottom: "1px solid rgba(255,255,255,0.10)",
+ fontSize: 12,
+ fontWeight: 800,
+ color: "rgba(255,255,255,0.85)",
+ },
+ sidebarList: {
+ padding: 8,
+ display: "flex",
+ flexDirection: "column",
+ gap: 8,
+ overflow: "auto",
+ },
+ sidebarEmpty: {
+ color: "rgba(255,255,255,0.65)",
+ fontSize: 12,
+ padding: 8,
+ },
+ ucItem: {
+ textAlign: "left",
+ background: "rgba(0,0,0,0.25)",
+ border: "1px solid rgba(255,255,255,0.12)",
+ color: "#fff",
+ borderRadius: 12,
+ padding: 10,
+ cursor: "pointer",
+ },
+ ucItemActive: {
+ border: "1px solid rgba(255,255,255,0.25)",
+ background: "rgba(255,255,255,0.06)",
+ },
+ ucTitleRow: { display: "flex", alignItems: "center", gap: 8 },
+ ucTitle: {
+ fontSize: 13,
+ fontWeight: 800,
+ overflow: "hidden",
+ textOverflow: "ellipsis",
+ whiteSpace: "nowrap",
+ flex: 1,
+ },
+ activePill: {
+ fontSize: 10,
+ fontWeight: 800,
+ padding: "2px 8px",
+ borderRadius: 999,
+ border: "1px solid rgba(120,255,180,0.30)",
+ background: "rgba(120,255,180,0.10)",
+ color: "rgba(200,255,220,0.95)",
+ },
+ ucMeta: {
+ marginTop: 6,
+ fontSize: 11,
+ color: "rgba(255,255,255,0.60)",
+ },
+ chatCol: {
+ border: "1px solid rgba(255,255,255,0.12)",
+ borderRadius: 12,
+ overflow: "hidden",
+ display: "flex",
+ flexDirection: "column",
+ background: "rgba(255,255,255,0.02)",
+ minHeight: 520,
+ },
+ specCol: {
+ border: "1px solid rgba(255,255,255,0.12)",
+ borderRadius: 12,
+ overflow: "hidden",
+ display: "flex",
+ flexDirection: "column",
+ background: "rgba(255,255,255,0.02)",
+ minHeight: 520,
+ },
+ panelTitle: {
+ padding: 10,
+ borderBottom: "1px solid rgba(255,255,255,0.10)",
+ fontSize: 12,
+ fontWeight: 800,
+ color: "rgba(255,255,255,0.85)",
+ },
+ chatBox: {
+ flex: 1,
+ overflow: "auto",
+ padding: 10,
+ display: "flex",
+ flexDirection: "column",
+ gap: 10,
+ },
+ chatEmpty: {
+ color: "rgba(255,255,255,0.65)",
+ fontSize: 12,
+ padding: 6,
+ },
+ pre: {
+ marginTop: 10,
+ padding: 10,
+ borderRadius: 10,
+ border: "1px solid rgba(255,255,255,0.12)",
+ background: "rgba(0,0,0,0.25)",
+ color: "rgba(255,255,255,0.8)",
+ overflow: "auto",
+ fontSize: 11,
+ },
+ msg: {
+ border: "1px solid rgba(255,255,255,0.12)",
+ borderRadius: 12,
+ padding: 10,
+ background: "rgba(0,0,0,0.25)",
+ },
+ msgUser: {
+ border: "1px solid rgba(255,255,255,0.18)",
+ background: "rgba(255,255,255,0.04)",
+ },
+ msgAsst: {},
+ msgRole: {
+ fontSize: 11,
+ fontWeight: 800,
+ color: "rgba(255,255,255,0.70)",
+ marginBottom: 6,
+ },
+ msgContent: {
+ whiteSpace: "pre-wrap",
+ fontSize: 13,
+ color: "rgba(255,255,255,0.90)",
+ lineHeight: 1.35,
+ },
+ composer: {
+ borderTop: "1px solid rgba(255,255,255,0.10)",
+ padding: 10,
+ display: "flex",
+ gap: 10,
+ alignItems: "flex-end",
+ },
+ textarea: {
+ flex: 1,
+ minHeight: 52,
+ maxHeight: 120,
+ resize: "vertical",
+ padding: 10,
+ borderRadius: 12,
+ border: "1px solid rgba(255,255,255,0.18)",
+ background: "rgba(0,0,0,0.25)",
+ color: "#fff",
+ fontSize: 13,
+ outline: "none",
+ },
+ sendBtn: {
+ background: "rgba(255,255,255,0.12)",
+ border: "1px solid rgba(255,255,255,0.22)",
+ color: "#fff",
+ borderRadius: 12,
+ padding: "10px 12px",
+ cursor: "pointer",
+ fontSize: 13,
+ fontWeight: 800,
+ },
+ specBox: {
+ flex: 1,
+ overflow: "auto",
+ padding: 10,
+ display: "flex",
+ flexDirection: "column",
+ gap: 10,
+ },
+ specFooter: {
+ borderTop: "1px solid rgba(255,255,255,0.10)",
+ padding: 10,
+ display: "flex",
+ gap: 10,
+ alignItems: "center",
+ justifyContent: "space-between",
+ },
+ specHint: { fontSize: 12, color: "rgba(255,255,255,0.60)" },
+ section: {
+ border: "1px solid rgba(255,255,255,0.10)",
+ borderRadius: 12,
+ background: "rgba(0,0,0,0.22)",
+ overflow: "hidden",
+ },
+ sectionTitle: {
+ padding: "8px 10px",
+ borderBottom: "1px solid rgba(255,255,255,0.08)",
+ fontSize: 12,
+ fontWeight: 800,
+ color: "rgba(255,255,255,0.80)",
+ background: "rgba(255,255,255,0.02)",
+ },
+ sectionBody: { padding: "8px 10px" },
+ sectionText: {
+ whiteSpace: "pre-wrap",
+ fontSize: 12,
+ color: "rgba(255,255,255,0.90)",
+ lineHeight: 1.35,
+ },
+ sectionEmpty: { fontSize: 12, color: "rgba(255,255,255,0.45)" },
+ ul: { margin: 0, paddingLeft: 18 },
+ li: { color: "rgba(255,255,255,0.90)", fontSize: 12, lineHeight: 1.35 },
+};
diff --git a/frontend/components/ProjectSettingsModal.jsx b/frontend/components/ProjectSettingsModal.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..ff6cc365af5a1c1226d544b425ffa7bbc88698b4
--- /dev/null
+++ b/frontend/components/ProjectSettingsModal.jsx
@@ -0,0 +1,230 @@
+import React, { useEffect, useMemo, useState } from "react";
+import ContextTab from "./ProjectSettings/ContextTab.jsx";
+import UseCaseTab from "./ProjectSettings/UseCaseTab.jsx";
+import ConventionsTab from "./ProjectSettings/ConventionsTab.jsx";
+import EnvironmentSelector from "./EnvironmentSelector.jsx";
+
+export default function ProjectSettingsModal({
+ owner,
+ repo,
+ isOpen,
+ onClose,
+ activeEnvId,
+ onEnvChange,
+}) {
+ const [activeTab, setActiveTab] = useState("context");
+
+ useEffect(() => {
+ if (!isOpen) return;
+ // reset to Context each time opened (safe default)
+ setActiveTab("context");
+ }, [isOpen]);
+
+ const title = useMemo(() => {
+ const repoLabel = owner && repo ? `${owner}/${repo}` : "Project";
+ return `Project Settings — ${repoLabel}`;
+ }, [owner, repo]);
+
+ if (!isOpen) return null;
+
+ return (
+ {
+ // click outside closes
+ if (e.target === e.currentTarget) onClose?.();
+ }}
+ >
+
e.stopPropagation()}>
+
+
+
{title}
+
+ Manage context, use cases, and project conventions (additive only).
+
+
+
+ ✕
+
+
+
+
+ setActiveTab("context")}
+ />
+ setActiveTab("usecase")}
+ />
+ setActiveTab("conventions")}
+ />
+ setActiveTab("environment")}
+ />
+
+
+
+ {activeTab === "context" &&
}
+ {activeTab === "usecase" &&
}
+ {activeTab === "conventions" && (
+
+ )}
+ {activeTab === "environment" && (
+
+
+ Select and configure the execution environment for agent operations.
+
+
+
+ )}
+
+
+
+
+ Tip: Upload meeting notes/transcripts in Context, then finalize a Use
+ Case spec.
+
+
+ Done
+
+
+
+
+ );
+}
+
+function TabButton({ label, isActive, onClick }) {
+ return (
+
+ {label}
+
+ );
+}
+
+const styles = {
+ backdrop: {
+ position: "fixed",
+ inset: 0,
+ background: "rgba(0,0,0,0.45)",
+ display: "flex",
+ justifyContent: "center",
+ alignItems: "center",
+ zIndex: 9999,
+ padding: 16,
+ },
+ modal: {
+ width: "min(1100px, 96vw)",
+ height: "min(760px, 90vh)",
+ background: "#111",
+ border: "1px solid rgba(255,255,255,0.12)",
+ borderRadius: 12,
+ overflow: "hidden",
+ display: "flex",
+ flexDirection: "column",
+ boxShadow: "0 12px 40px rgba(0,0,0,0.35)",
+ },
+ header: {
+ padding: "14px 14px 10px",
+ display: "flex",
+ gap: 12,
+ alignItems: "flex-start",
+ justifyContent: "space-between",
+ borderBottom: "1px solid rgba(255,255,255,0.10)",
+ background: "linear-gradient(180deg, rgba(255,255,255,0.04), transparent)",
+ },
+ headerLeft: {
+ display: "flex",
+ flexDirection: "column",
+ gap: 4,
+ minWidth: 0,
+ },
+ title: {
+ fontSize: 16,
+ fontWeight: 700,
+ color: "#fff",
+ lineHeight: 1.2,
+ whiteSpace: "nowrap",
+ overflow: "hidden",
+ textOverflow: "ellipsis",
+ maxWidth: "88vw",
+ },
+ subtitle: {
+ fontSize: 12,
+ color: "rgba(255,255,255,0.65)",
+ },
+ closeBtn: {
+ background: "transparent",
+ border: "1px solid rgba(255,255,255,0.18)",
+ color: "rgba(255,255,255,0.85)",
+ borderRadius: 10,
+ padding: "6px 10px",
+ cursor: "pointer",
+ },
+ tabsRow: {
+ display: "flex",
+ gap: 8,
+ padding: 10,
+ borderBottom: "1px solid rgba(255,255,255,0.10)",
+ background: "rgba(255,255,255,0.02)",
+ },
+ tabBtn: {
+ background: "transparent",
+ border: "1px solid rgba(255,255,255,0.14)",
+ color: "rgba(255,255,255,0.75)",
+ borderRadius: 999,
+ padding: "8px 12px",
+ cursor: "pointer",
+ fontSize: 13,
+ },
+ tabBtnActive: {
+ border: "1px solid rgba(255,255,255,0.28)",
+ color: "#fff",
+ background: "rgba(255,255,255,0.06)",
+ },
+ body: {
+ flex: 1,
+ overflow: "auto",
+ padding: 12,
+ },
+ footer: {
+ padding: 12,
+ borderTop: "1px solid rgba(255,255,255,0.10)",
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "space-between",
+ gap: 12,
+ background: "rgba(255,255,255,0.02)",
+ },
+ footerHint: {
+ color: "rgba(255,255,255,0.6)",
+ fontSize: 12,
+ overflow: "hidden",
+ textOverflow: "ellipsis",
+ whiteSpace: "nowrap",
+ },
+ primaryBtn: {
+ background: "rgba(255,255,255,0.10)",
+ border: "1px solid rgba(255,255,255,0.20)",
+ color: "#fff",
+ borderRadius: 10,
+ padding: "8px 12px",
+ cursor: "pointer",
+ },
+};
diff --git a/frontend/components/RepoSelector.jsx b/frontend/components/RepoSelector.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..dd601f4f7cb9b8a19a7858280d5886607a33271c
--- /dev/null
+++ b/frontend/components/RepoSelector.jsx
@@ -0,0 +1,269 @@
+import React, { useEffect, useState, useCallback } from "react";
+import { authFetch } from "../utils/api.js";
+
+export default function RepoSelector({ onSelect }) {
+ const [query, setQuery] = useState("");
+ const [repos, setRepos] = useState([]);
+ const [loading, setLoading] = useState(false);
+ const [loadingMore, setLoadingMore] = useState(false);
+ const [status, setStatus] = useState("");
+ const [page, setPage] = useState(1);
+ const [hasMore, setHasMore] = useState(false);
+ const [totalCount, setTotalCount] = useState(null);
+
+ /**
+ * Fetch repositories with pagination and optional search
+ * @param {number} pageNum - Page number to fetch
+ * @param {boolean} append - Whether to append or replace results
+ * @param {string} searchQuery - Search query (uses current query if not provided)
+ */
+ const fetchRepos = useCallback(async (pageNum = 1, append = false, searchQuery = query) => {
+ // Set appropriate loading state
+ if (pageNum === 1) {
+ setLoading(true);
+ setStatus("");
+ } else {
+ setLoadingMore(true);
+ }
+
+ try {
+ // Build URL with query parameters
+ const params = new URLSearchParams();
+ params.append("page", pageNum);
+ params.append("per_page", "100");
+ if (searchQuery) {
+ params.append("query", searchQuery);
+ }
+
+ const url = `/api/repos?${params.toString()}`;
+ const res = await authFetch(url);
+ const data = await res.json();
+
+ if (!res.ok) {
+ throw new Error(data.detail || data.error || "Failed to load repositories");
+ }
+
+ // Update repositories - append or replace
+ if (append) {
+ setRepos((prev) => [...prev, ...data.repositories]);
+ } else {
+ setRepos(data.repositories);
+ }
+
+ // Update pagination state
+ setPage(pageNum);
+ setHasMore(data.has_more);
+ setTotalCount(data.total_count);
+
+ // Show status if no results
+ if (!append && data.repositories.length === 0) {
+ if (searchQuery) {
+ setStatus(`No repositories matching "${searchQuery}"`);
+ } else {
+ setStatus("No repositories found");
+ }
+ } else {
+ setStatus("");
+ }
+ } catch (err) {
+ console.error("Error fetching repositories:", err);
+ setStatus(err.message || "Failed to load repositories");
+ } finally {
+ setLoading(false);
+ setLoadingMore(false);
+ }
+ }, [query]);
+
+ /**
+ * Load more repositories (next page)
+ */
+ const loadMore = () => {
+ fetchRepos(page + 1, true);
+ };
+
+ /**
+ * Handle search - resets to page 1
+ */
+ const handleSearch = () => {
+ setPage(1);
+ fetchRepos(1, false, query);
+ };
+
+ /**
+ * Handle input change - trigger search on Enter key
+ */
+ const handleKeyDown = (e) => {
+ if (e.key === "Enter") {
+ handleSearch();
+ }
+ };
+
+ /**
+ * Clear search and show all repos
+ */
+ const clearSearch = () => {
+ setQuery("");
+ setPage(1);
+ fetchRepos(1, false, "");
+ };
+
+ // Initial load on mount
+ useEffect(() => {
+ fetchRepos(1, false, "");
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, []);
+
+ /**
+ * Format repository count for display
+ */
+ const getCountText = () => {
+ if (totalCount !== null) {
+ // Search mode - show filtered count
+ return `${repos.length} of ${totalCount} repositories`;
+ } else {
+ // Pagination mode - show loaded count
+ return `${repos.length} ${repos.length === 1 ? "repository" : "repositories"}${hasMore ? "+" : ""}`;
+ }
+ };
+
+ return (
+
+
+ GitHub repos are optional. Use Folder or Local Git mode for local-first workflows.
+
+ {/* Search Header */}
+
+
+ setQuery(e.target.value)}
+ onKeyDown={handleKeyDown}
+ disabled={loading}
+ />
+
+ {loading ? "..." : "Search"}
+
+
+
+ {/* Search Info Bar */}
+ {(query || repos.length > 0) && (
+
+ {getCountText()}
+ {query && (
+
+ Clear search
+
+ )}
+
+ )}
+
+
+ {/* Status Message */}
+ {status && !loading && (
+
+ {status}
+
+ )}
+
+ {/* Repository List */}
+
+ {repos.map((r) => (
+
onSelect(r)}
+ >
+
+ {r.name}
+ {r.owner}
+
+ {r.private && (
+ Private
+ )}
+
+ ))}
+
+ {/* Loading Indicator */}
+ {loading && repos.length === 0 && (
+
+
+
Loading repositories...
+
+ )}
+
+ {/* Load More Button */}
+ {hasMore && !loading && repos.length > 0 && (
+
+ {loadingMore ? (
+ <>
+
+ Loading more...
+ >
+ ) : (
+ <>
+ Load more repositories
+ ({repos.length} loaded)
+ >
+ )}
+
+ )}
+
+ {/* All Loaded Message */}
+ {!hasMore && !loading && repos.length > 0 && (
+
+ ✓ All repositories loaded ({repos.length} total)
+
+ )}
+
+
+ {/* GitHub App Installation Notice */}
+
+
+
+
+
+
+
+ Repository missing?
+
+
+
+
+
+ );
+}
\ No newline at end of file
diff --git a/frontend/components/SessionItem.jsx b/frontend/components/SessionItem.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..acf2ce3fc670698f9198362a5c81d9d871e99027
--- /dev/null
+++ b/frontend/components/SessionItem.jsx
@@ -0,0 +1,183 @@
+import React, { useState } from "react";
+
+/**
+ * SessionItem — a single row in the sessions sidebar.
+ *
+ * Shows status dot (pulsing/static), title, timestamp, message count.
+ * Claude-Code-on-Web parity: active=amber pulse, completed=green,
+ * failed=red, waiting=blue.
+ */
// NOTE(review): the JSX inside the return below is corrupted in this diff
// view (opening element tags were stripped during extraction). The data
// logic above the return is intact; restore the markup from the original
// file before relying on the rendering code.
export default function SessionItem({ session, isActive, onSelect, onDelete }) {
  // Pointer-hover state: the delete button is only rendered while hovering.
  const [hovering, setHovering] = useState(false);

  // Sessions without an explicit status are treated as "active".
  const status = session.status || "active";

  // Status -> dot color (amber/green/red/blue/gray); unknown statuses fall
  // back to gray.
  const dotColor = {
    active: "#F59E0B",
    completed: "#10B981",
    failed: "#EF4444",
    waiting: "#3B82F6",
    paused: "#6B7280",
  }[status] || "#6B7280";

  // Only in-flight sessions pulse. NOTE(review): isPulsing, dotColor and
  // isActive are not referenced in the visible (corrupted) JSX — presumably
  // consumed by the stripped element attributes; confirm against original.
  const isPulsing = status === "active";

  const timeAgo = formatTimeAgo(session.updated_at);

  // Prefer name (set from first user prompt) over generic fallback
  const title =
    session.name ||
    (session.branch ? `${session.branch}` : `Session ${session.id?.slice(0, 8)}`);

  return (
    setHovering(true)}
      onMouseLeave={() => setHovering(false)}
    >

      {/* Status dot */}

      {/* Content */}

{title}

          {timeAgo}
          {session.mode && (

              {session.mode === "github" ? "GH" : session.mode === "local-git" ? "Git" : "Dir"}

          )}
          {session.message_count > 0 && (
            {session.message_count} msgs
          )}

      {/* Delete button (on hover) */}
      {hovering && (

{
          // Stop the click from bubbling into the row's onSelect handler.
          e.stopPropagation();
          onDelete?.();
        }}
          title="Delete session"
        >
          ×

      )}

  );
}
+
/**
 * Convert an ISO-8601 timestamp into a short relative label:
 * "just now", "Nm ago", "Nh ago", or "Nd ago".
 *
 * Returns "" for missing or unparsable input. The previous implementation
 * relied on try/catch, but `new Date(bad)` never throws — it yields an
 * Invalid Date whose arithmetic is NaN, so bad input rendered as
 * "NaNd ago". Invalid Date is now detected explicitly.
 *
 * @param {string} isoStr - ISO timestamp (e.g. session.updated_at)
 * @returns {string} relative-time label, or "" when not renderable
 */
function formatTimeAgo(isoStr) {
  if (!isoStr) return "";
  const date = new Date(isoStr);
  // Invalid Date -> getTime() is NaN; bail out instead of "NaNd ago".
  if (Number.isNaN(date.getTime())) return "";
  const diffMs = Date.now() - date.getTime();
  const diffMin = Math.floor(diffMs / 60000);
  // diffMin < 1 also covers slight clock skew (timestamps in the future).
  if (diffMin < 1) return "just now";
  if (diffMin < 60) return `${diffMin}m ago`;
  const diffHr = Math.floor(diffMin / 60);
  if (diffHr < 24) return `${diffHr}h ago`;
  const diffDay = Math.floor(diffHr / 24);
  return `${diffDay}d ago`;
}
+
// Inline style objects for SessionItem (dark zinc palette, no CSS classes).
const styles = {
  // One clickable row. The `session-fade-in` keyframes are injected by
  // SessionSidebar's animStyles string (defined in SessionSidebar.jsx).
  row: {
    display: "flex",
    alignItems: "center",
    gap: 8,
    padding: "8px 10px",
    borderRadius: 6,
    cursor: "pointer",
    transition: "background-color 0.15s",
    position: "relative",
    marginBottom: 2,
    animation: "session-fade-in 0.25s ease-out",
  },
  // Status indicator dot; its color is supplied at render time
  // (presumably from dotColor in the corrupted JSX — verify).
  dot: {
    width: 8,
    height: 8,
    borderRadius: "50%",
    flexShrink: 0,
  },
  // Title + meta column. minWidth:0 is required for text-overflow ellipsis
  // to work inside a flex item.
  content: {
    flex: 1,
    minWidth: 0,
    overflow: "hidden",
  },
  // Single-line session title, truncated with an ellipsis.
  title: {
    fontSize: 12,
    fontWeight: 500,
    color: "#E4E4E7",
    whiteSpace: "nowrap",
    overflow: "hidden",
    textOverflow: "ellipsis",
  },
  // Timestamp / mode badge / message-count row under the title.
  meta: {
    fontSize: 10,
    color: "#71717A",
    marginTop: 2,
    display: "flex",
    alignItems: "center",
    gap: 6,
  },
  // Small pill used for the mode label.
  badge: {
    fontSize: 9,
    background: "#27272A",
    padding: "1px 5px",
    borderRadius: 8,
    color: "#A1A1AA",
  },
  // Hover-only delete button pinned to the row's top-right corner.
  deleteBtn: {
    position: "absolute",
    right: 6,
    top: 6,
    width: 18,
    height: 18,
    borderRadius: 3,
    border: "none",
    background: "rgba(239, 68, 68, 0.15)",
    color: "#EF4444",
    fontSize: 14,
    cursor: "pointer",
    display: "flex",
    alignItems: "center",
    justifyContent: "center",
    lineHeight: 1,
  },
};
diff --git a/frontend/components/SessionSidebar.jsx b/frontend/components/SessionSidebar.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..fb63c7526850dba78ce9a5e37a4b0efac32d0e8a
--- /dev/null
+++ b/frontend/components/SessionSidebar.jsx
@@ -0,0 +1,181 @@
+import React, { useEffect, useRef, useState } from "react";
+import SessionItem from "./SessionItem.jsx";
+
+/**
+ * SessionSidebar — Claude-Code-on-Web parity.
+ *
+ * Shows a scrollable list of coding sessions with status indicators,
+ * timestamps, and a "New Session" button. Additive — does not modify
+ * any existing component.
+ */
// NOTE(review): the JSX in the return below is corrupted in this diff view
// (element tags stripped); the fetch/poll/delete logic above it is intact.
export default function SessionSidebar({
  repo,
  activeSessionId,
  onSelectSession,
  onNewSession,
  onDeleteSession,
  refreshNonce = 0,
}) {
  const [sessions, setSessions] = useState([]);
  const [loading, setLoading] = useState(false);
  // Holds the 15s polling interval id so cleanup can clear it.
  const pollRef = useRef(null);

  // Canonical "owner/name" key used to filter the global session list.
  const repoFullName = repo?.full_name || (repo ? `${repo.owner}/${repo.name}` : null);

  // Fetch sessions (initial load + 15s poll); re-runs when the repo changes
  // or the parent bumps refreshNonce.
  useEffect(() => {
    if (!repoFullName) {
      setSessions([]);
      return;
    }

    // Guards against state updates after unmount / repo switch.
    let cancelled = false;

    const fetchSessions = async () => {
      // NOTE(review): setLoading(true) fires on every poll tick, not just the
      // first load — harmless today (the "Loading..." UI only shows when the
      // list is empty) but worth confirming that is intentional.
      setLoading(true);
      try {
        const token = localStorage.getItem("github_token");
        const headers = token ? { Authorization: `Bearer ${token}` } : {};
        const res = await fetch(`/api/sessions`, { headers, cache: "no-cache" });
        // Non-2xx responses are silently ignored; finally still clears loading.
        if (!res.ok) return;
        const data = await res.json();
        if (cancelled) return;

        // Filter to current repo
        const filtered = (data.sessions || []).filter(
          (s) => s.repo === repoFullName
        );
        setSessions(filtered);
      } catch (err) {
        console.warn("Failed to fetch sessions:", err);
      } finally {
        if (!cancelled) setLoading(false);
      }
    };

    fetchSessions();

    // Poll every 15s for status updates
    pollRef.current = setInterval(fetchSessions, 15000);

    return () => {
      cancelled = true;
      if (pollRef.current) clearInterval(pollRef.current);
    };
  }, [repoFullName, refreshNonce]);

  // Delete on the server, then drop the row from local state.
  // NOTE(review): the row is removed even when the DELETE fails — res.ok is
  // never checked, so a failed delete hides the session until the next poll
  // resurrects it. Consider checking the response before mutating state.
  const handleDelete = async (sessionId) => {
    try {
      const token = localStorage.getItem("github_token");
      const headers = token ? { Authorization: `Bearer ${token}` } : {};
      await fetch(`/api/sessions/${sessionId}`, { method: "DELETE", headers });
      setSessions((prev) => prev.filter((s) => s.id !== sessionId));
      // Notify parent so it can clear the chat if this was the active session
      onDeleteSession?.(sessionId);
    } catch (err) {
      console.warn("Failed to delete session:", err);
    }
  };

  return (

      {/* Header */}

        SESSIONS

          +

      {/* Session list */}

        {loading && sessions.length === 0 && (
Loading...
        )}

        {!loading && sessions.length === 0 && (

            No sessions yet.

            Your first message will create one automatically.

        )}

        {sessions.map((s) => (
 onSelectSession?.(s)}
            onDelete={() => handleDelete(s.id)}
          />
        ))}

  );
}
+
// Keyframes used by SessionItem rows, shipped as a raw CSS string.
// NOTE(review): presumably injected via a <style> tag inside the component's
// (corrupted) JSX — confirm against the original markup; nothing in the
// visible code references animStyles.
const animStyles = `
  @keyframes session-fade-in {
    from { opacity: 0; transform: translateY(4px); }
    to { opacity: 1; transform: translateY(0); }
  }
`;

// Inline style objects for the sidebar shell (dark zinc palette).
const styles = {
  // Column layout; flex:1 + minHeight:0 lets the inner list scroll instead
  // of overflowing the sidebar.
  container: {
    display: "flex",
    flexDirection: "column",
    borderTop: "1px solid #27272A",
    flex: 1,
    minHeight: 0,
  },
  // "SESSIONS" caption row with the "+" button on the right.
  header: {
    display: "flex",
    justifyContent: "space-between",
    alignItems: "center",
    padding: "10px 14px 6px",
  },
  label: {
    fontSize: 10,
    fontWeight: 700,
    letterSpacing: "0.08em",
    color: "#71717A",
    textTransform: "uppercase",
  },
  // Dashed "+" new-session button.
  newBtn: {
    width: 22,
    height: 22,
    borderRadius: 4,
    border: "1px dashed #3F3F46",
    background: "transparent",
    color: "#A1A1AA",
    fontSize: 14,
    cursor: "pointer",
    display: "flex",
    alignItems: "center",
    justifyContent: "center",
    lineHeight: 1,
  },
  // Scrollable list region.
  list: {
    flex: 1,
    overflowY: "auto",
    padding: "0 6px 8px",
  },
  // Empty-state message shown before any session exists.
  empty: {
    textAlign: "center",
    color: "#52525B",
    fontSize: 12,
    padding: "20px 8px",
    lineHeight: 1.5,
  },
};
diff --git a/frontend/components/SettingsModal.jsx b/frontend/components/SettingsModal.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..24d43b1dd25831da589d35e7a386b0a7a1aa37a4
--- /dev/null
+++ b/frontend/components/SettingsModal.jsx
@@ -0,0 +1,333 @@
+import React, { useEffect, useState } from "react";
+
// Settings dialog: provider selection, model listing/selection, connection
// test, and the Lite Mode toggle.
// NOTE(review): the JSX in the return is corrupted in this diff view
// (element tags stripped); the handlers above it are intact.
export default function SettingsModal({ onClose }) {
  // Full /api/settings payload; null until first load (component renders null).
  const [settings, setSettings] = useState(null);
  const [models, setModels] = useState([]);
  const [modelsError, setModelsError] = useState(null);
  const [loadingModels, setLoadingModels] = useState(false);
  const [testResult, setTestResult] = useState(null); // { ok: bool, message: string }
  const [testing, setTesting] = useState(false);

  // Load current settings.
  // NOTE(review): no try/catch and no res.ok check — a failed fetch rejects
  // inside useEffect (unhandled) and the modal stays blank forever. Consider
  // the error-handling pattern already used in loadModels/testConnection.
  const loadSettings = async () => {
    const res = await fetch("/api/settings");
    const data = await res.json();
    setSettings(data);
  };

  useEffect(() => {
    loadSettings();
  }, []);

  // Switch the active provider server-side, then adopt the returned settings.
  // NOTE(review): same missing error handling as loadSettings — an error
  // payload would be assigned straight into `settings`.
  const changeProvider = async (provider) => {
    const res = await fetch("/api/settings/provider", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ provider }),
    });
    const data = await res.json();
    setSettings(data);

    // Reset models state when provider changes
    setModels([]);
    setModelsError(null);
  };

  // Fetch the model list for the active provider; surfaces failures via
  // modelsError instead of throwing.
  const loadModels = async () => {
    if (!settings) return;
    setLoadingModels(true);
    setModelsError(null);
    try {
      const res = await fetch(
        `/api/settings/models?provider=${settings.provider}`
      );
      const data = await res.json();
      if (data.error) {
        setModelsError(data.error);
        setModels([]);
      } else {
        setModels(data.models || []);
      }
    } catch (err) {
      console.error(err);
      setModelsError("Failed to load models");
      setModels([]);
    } finally {
      setLoadingModels(false);
    }
  };

  // Map the active provider to its configured model field
  // (watsonx uses model_id; the others use model).
  // NOTE(review): there is no branch for "ollabridge", yet the deployment's
  // Dockerfile defaults GITPILOT_PROVIDER=ollabridge — activeModel will be
  // "" for that provider. Confirm whether a branch is missing.
  const currentModelForActiveProvider = () => {
    if (!settings) return "";
    const p = settings.provider;
    if (p === "openai") return settings.openai?.model || "";
    if (p === "claude") return settings.claude?.model || "";
    if (p === "watsonx") return settings.watsonx?.model_id || "";
    if (p === "ollama") return settings.ollama?.model || "";
    return "";
  };

  // Persist a model choice for the active provider via PUT /api/settings/llm.
  // NOTE(review): no res.ok check — see loadSettings note.
  const changeModel = async (model) => {
    if (!settings) return;
    const provider = settings.provider;

    // Build a provider-specific payload, preserving the provider's other
    // configured fields.
    let payload = {};
    if (provider === "openai") {
      payload = {
        openai: {
          ...settings.openai,
          model,
        },
      };
    } else if (provider === "claude") {
      payload = {
        claude: {
          ...settings.claude,
          model,
        },
      };
    } else if (provider === "watsonx") {
      payload = {
        watsonx: {
          ...settings.watsonx,
          model_id: model,
        },
      };
    } else if (provider === "ollama") {
      payload = {
        ollama: {
          ...settings.ollama,
          model,
        },
      };
    }

    const res = await fetch("/api/settings/llm", {
      method: "PUT",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(payload),
    });
    const data = await res.json();
    setSettings(data);
  };

  // Probe the active provider's connectivity; result is rendered inline.
  const testConnection = async () => {
    if (!settings) return;
    setTesting(true);
    setTestResult(null);
    try {
      const res = await fetch(`/api/settings/test?provider=${settings.provider}`);
      const data = await res.json();
      if (!res.ok || data.error) {
        setTestResult({ ok: false, message: data.error || data.detail || "Connection failed" });
      } else {
        setTestResult({ ok: true, message: data.message || "Connection successful" });
      }
    } catch (err) {
      setTestResult({ ok: false, message: err.message || "Connection test failed" });
    } finally {
      setTesting(false);
    }
  };

  // Flip lite_mode server-side; only mirrors the change locally on success.
  const toggleLiteMode = async () => {
    if (!settings) return;
    const newValue = !settings.lite_mode;
    try {
      const res = await fetch("/api/settings/lite-mode", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ lite_mode: newValue }),
      });
      if (res.ok) {
        setSettings((prev) => ({ ...prev, lite_mode: newValue }));
      }
    } catch (err) {
      console.error("Failed to toggle lite mode:", err);
    }
  };

  // Nothing to render until settings are loaded.
  if (!settings) return null;

  const activeModel = currentModelForActiveProvider();

  return (

 e.stopPropagation()}>

        Select which LLM provider GitPilot should use for planning and chat.

        {settings.providers.map((p) => (

{p}

 changeProvider(p)}
              disabled={settings.provider === p}
            >
              {settings.provider === p ? "Active" : "Use"}

        ))}

      {/* Models section */}

          Active provider: {settings.provider}

          {testing ? "Testing…" : "Test Connection"}

          {loadingModels ? "Loading…" : "Display models"}

        {activeModel && (

            Current model: {activeModel}

        )}

      {modelsError && (

          {modelsError}

      )}

      {testResult && (

          {testResult.ok ? "✓ " : "✗ "}{testResult.message}

      )}

      {models.length > 0 && (

            Select model for {settings.provider}:

 changeModel(e.target.value)}
          >
            -- select a model --
            {models.map((m) => (

                {m}

            ))}

      )}

      {/* Lite Mode section */}

          Lite Mode

          {settings.lite_mode ? "ON" : "OFF"}

          Optimized for small models (under 7B parameters).
          Uses simplified prompts and single-agent execution instead
          of multi-agent pipelines. Recommended for: qwen2.5:1.5b,
          phi-3-mini, gemma-2b, tinyllama, etc.

  );
}
diff --git a/frontend/components/StartupScreen.jsx b/frontend/components/StartupScreen.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..a9c19e897a1b143761b111e4f2a5f46ed9643c61
--- /dev/null
+++ b/frontend/components/StartupScreen.jsx
@@ -0,0 +1,92 @@
+import React from "react";
+
/**
 * Derive a display label from a provider value that may arrive as a plain
 * string, a descriptor object, or nothing at all.
 *
 * Strings are upper-cased; objects yield the first truthy of
 * name/provider/type/label (rendered as-is, no case change); everything
 * else falls back to the "Checking..." placeholder.
 */
function normalizeProvider(provider) {
  const FALLBACK = "Checking...";
  if (typeof provider === "string" && provider) {
    return provider.toUpperCase();
  }
  if (provider && typeof provider === "object") {
    const candidates = [provider.name, provider.provider, provider.type, provider.label];
    const label = candidates.find(Boolean);
    return label || FALLBACK;
  }
  return FALLBACK;
}
+
/** Render a version value as a string, or "Checking..." while unknown/falsy. */
function normalizeVersion(version) {
  return version ? String(version) : "Checking...";
}
+
/**
 * StartupScreen — boot splash shown while the app initializes.
 *
 * Pure presentational component: every prop has a safe default so it can be
 * rendered before any data arrives. Versions/provider are normalized via the
 * helpers above so objects, numbers and missing values all render cleanly.
 *
 * NOTE(review): the JSX in the return is corrupted in this diff view
 * (element tags stripped); restore the markup from the original file.
 * `phase` is referenced only inside the corrupted markup.
 */
export default function StartupScreen({
  appName = "GitPilot",
  subtitle = "Enterprise Workspace Copilot",
  frontendVersion = "Checking...",
  backendVersion = "Checking...",
  provider = "Checking...",
  statusMessage = "Starting application...",
  detailMessage = "Initializing authentication, provider, and workspace context.",
  phase = "booting",
}) {
  const providerLabel = normalizeProvider(provider);
  const frontendLabel = normalizeVersion(frontendVersion);
  const backendLabel = normalizeVersion(backendVersion);

  return (

{appName}
{subtitle}

{statusMessage}
{detailMessage}

      {phase}

Frontend
v{frontendLabel}

Backend
v{backendLabel}

Provider
{providerLabel}

      Preparing workspace services, restoring session state, and checking
      platform readiness.

  );
}
\ No newline at end of file
diff --git a/frontend/components/StreamingMessage.jsx b/frontend/components/StreamingMessage.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..71aaf5918c26d7ae6b097505381767d990357823
--- /dev/null
+++ b/frontend/components/StreamingMessage.jsx
@@ -0,0 +1,182 @@
+import React from "react";
+
+/**
+ * StreamingMessage — Claude-Code-on-Web parity streaming renderer.
+ *
+ * Renders agent messages incrementally as they arrive via WebSocket.
+ * Shows tool use blocks (bash commands + output), explanatory text,
+ * and status indicators.
+ */
// Renders a list of streaming events; returns null for empty/missing input.
// NOTE(review): the element produced inside the map was stripped in this
// diff view — presumably <StreamingEvent event={evt} isLast={...} key={idx}/>;
// confirm against the original file.
export default function StreamingMessage({ events }) {
  if (!events || events.length === 0) return null;

  return (

      {events.map((evt, idx) => (

      ))}

  );
}
+
// Dispatch on event.type and render the matching block. Visible types:
// agent_message, tool_use, tool_result, status_change, diff_update, error;
// anything else renders nothing.
// NOTE(review): the JSX markup is corrupted in this diff view (tags
// stripped); the branching logic is intact.
function StreamingEvent({ event, isLast }) {
  const { type } = event;

  // Plain agent prose; the last streaming chunk shows a blinking caret.
  if (type === "agent_message") {
    return (

        {event.content}
        {isLast && | }

    );
  }

  // Tool invocation: header with tool name, then the "$ command" line.
  if (type === "tool_use") {
    return (

{event.tool || "terminal"}

          $ {event.input}

    );
  }

  // Tool output (markup stripped here — original content unknown).
  if (type === "tool_result") {
    return (

    );
  }

  // Session status transition rendered as an italic line.
  if (type === "status_change") {
    const statusLabels = {
      active: "Working...",
      waiting: "Waiting for input",
      completed: "Completed",
      failed: "Failed",
    };
    return (

{statusLabels[event.status] || event.status}

    );
  }

  if (type === "diff_update") {
    return null; // Handled by DiffStats in parent
  }

  // Red error banner.
  if (type === "error") {
    return (

        {event.message}

    );
  }

  // Unknown event types are ignored.
  return null;
}
+
// Inline styles for the streaming renderer (dark terminal aesthetic).
const styles = {
  container: {
    display: "flex",
    flexDirection: "column",
    gap: 4,
  },
  // Plain agent prose.
  textBlock: {
    fontSize: 14,
    lineHeight: 1.6,
    color: "#D4D4D8",
    whiteSpace: "pre-wrap",
    wordBreak: "break-word",
  },
  // Blinking caret shown while the last chunk streams. NOTE(review): the
  // `blink` keyframes are not defined in this file — presumably global CSS;
  // verify.
  cursor: {
    display: "inline-block",
    animation: "blink 1s step-end infinite",
    color: "#3B82F6",
    fontWeight: 700,
  },
  // Bordered card wrapping a tool invocation.
  toolBlock: {
    margin: "4px 0",
    borderRadius: 6,
    border: "1px solid #27272A",
    overflow: "hidden",
  },
  toolHeader: {
    display: "flex",
    alignItems: "center",
    gap: 6,
    padding: "6px 10px",
    backgroundColor: "#18181B",
    fontSize: 11,
    color: "#71717A",
    fontFamily: "monospace",
  },
  toolName: {
    fontWeight: 600,
  },
  // The "$ command" line (green-on-black).
  toolInput: {
    padding: "8px 10px",
    backgroundColor: "#0D0D0F",
    fontFamily: "monospace",
    fontSize: 12,
    color: "#10B981",
    whiteSpace: "pre-wrap",
    wordBreak: "break-all",
  },
  // Scrollable tool output container (capped height).
  toolOutput: {
    padding: "8px 10px",
    backgroundColor: "#0D0D0F",
    maxHeight: 300,
    overflowY: "auto",
  },
  toolOutputPre: {
    margin: 0,
    fontFamily: "monospace",
    fontSize: 11,
    color: "#A1A1AA",
    whiteSpace: "pre-wrap",
    wordBreak: "break-all",
  },
  // Italic status-transition line with its colored dot.
  statusLine: {
    display: "flex",
    alignItems: "center",
    gap: 6,
    padding: "4px 0",
    fontSize: 12,
    color: "#71717A",
    fontStyle: "italic",
  },
  statusDot: {
    width: 6,
    height: 6,
    borderRadius: "50%",
  },
  // Red error banner.
  errorBlock: {
    padding: "8px 12px",
    borderRadius: 6,
    backgroundColor: "rgba(239, 68, 68, 0.08)",
    border: "1px solid rgba(239, 68, 68, 0.2)",
    color: "#FCA5A5",
    fontSize: 13,
  },
};
diff --git a/frontend/components/UserMenu.jsx b/frontend/components/UserMenu.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..34849df04c66d62e71a434147a6dcaa5e5cf7fac
--- /dev/null
+++ b/frontend/components/UserMenu.jsx
@@ -0,0 +1,424 @@
+// frontend/components/UserMenu.jsx
+import React, { useEffect, useRef, useState, useCallback } from "react";
+
+/**
+ * UserMenu — account dropdown attached to the profile avatar in the
+ * bottom-left of the sidebar. Follows the Claude Code / ChatGPT pattern:
+ * click avatar → popover with Settings, About, Logout.
+ *
+ * Best practices applied:
+ * - Click outside to close (mousedown listener on document)
+ * - Escape key closes
+ * - ARIA: role="menu" + aria-haspopup + aria-expanded on trigger
+ * - Keyboard navigation (Tab / Shift+Tab cycles items, Enter activates)
+ * - Position: absolute popover anchored to trigger, opens upward
+ * - Brand palette: #D95C3D accent, #1C1C1F card, #27272A border
+ * - Respects sidebarCollapsed: when collapsed, only avatar is shown
+ * - Animation: subtle fade+translate for polish
+ */
+
// NOTE(review): the JSX after `return (` is corrupted in this diff view
// (element/SVG tags stripped); the hook logic above it is intact and
// documented below.
export default function UserMenu({
  userInfo,
  sidebarCollapsed = false,
  onOpenSettings,
  onOpenAbout,
  onLogout,
}) {
  const [open, setOpen] = useState(false);
  // Fixed-position coordinates for the popover; only non-null while the
  // sidebar is collapsed (see effect below). NOTE(review): consumed only in
  // the corrupted JSX — confirm against the original markup.
  const [fixedPos, setFixedPos] = useState(null);
  const containerRef = useRef(null);
  const triggerRef = useRef(null);
  const menuRef = useRef(null);

  // When the sidebar is collapsed, the parent .sidebar has overflow-x:hidden
  // which clips an absolutely-positioned popover. Escape the clip by using
  // position:fixed with coordinates measured from the trigger's bounding
  // rect. Recompute on open, window resize, and scroll.
  useEffect(() => {
    if (!open || !sidebarCollapsed) {
      setFixedPos(null);
      return;
    }
    const compute = () => {
      const el = triggerRef.current;
      if (!el) return;
      const rect = el.getBoundingClientRect();
      setFixedPos({
        left: Math.round(rect.right + 8),
        bottom: Math.round(window.innerHeight - rect.bottom),
      });
    };
    compute();
    window.addEventListener("resize", compute);
    // Capture phase so scrolls inside nested containers also retrigger.
    window.addEventListener("scroll", compute, true);
    return () => {
      window.removeEventListener("resize", compute);
      window.removeEventListener("scroll", compute, true);
    };
  }, [open, sidebarCollapsed]);

  // Close on click outside (mousedown so it wins over focus handlers).
  useEffect(() => {
    if (!open) return;
    const handleDocMouseDown = (e) => {
      if (containerRef.current && !containerRef.current.contains(e.target)) {
        setOpen(false);
      }
    };
    document.addEventListener("mousedown", handleDocMouseDown);
    return () => document.removeEventListener("mousedown", handleDocMouseDown);
  }, [open]);

  // Close on Escape and return focus to the trigger for keyboard users.
  useEffect(() => {
    if (!open) return;
    const handleKey = (e) => {
      if (e.key === "Escape") {
        setOpen(false);
        triggerRef.current?.focus();
      }
    };
    document.addEventListener("keydown", handleKey);
    return () => document.removeEventListener("keydown", handleKey);
  }, [open]);

  // Focus the first menu item when opened (ARIA menu convention).
  useEffect(() => {
    if (open && menuRef.current) {
      const firstItem = menuRef.current.querySelector('[role="menuitem"]');
      firstItem?.focus();
    }
  }, [open]);

  // Close the menu, then invoke the item's action on the next tick.
  const handleItemClick = useCallback((action) => {
    setOpen(false);
    // Defer to next tick so the dropdown close animation doesn't jitter
    // against the modal open animation.
    window.setTimeout(() => action?.(), 0);
  }, []);

  // No user, no menu.
  if (!userInfo) return null;

  const displayName = userInfo.name || userInfo.login;
  const login = userInfo.login || "";

  return (

    {/* Trigger: avatar + optional name */}

 setOpen((v) => !v)}
      aria-haspopup="menu"
      aria-expanded={open}
      aria-label={`Account menu for ${displayName}`}
      className="user-menu-trigger"
      style={{
        display: "flex",
        alignItems: "center",
        gap: sidebarCollapsed ? 0 : 10,
        width: "100%",
        padding: sidebarCollapsed ? "6px" : "8px 10px",
        background: open ? "#27272A" : "transparent",
        border: "1px solid",
        borderColor: open ? "#D95C3D" : "transparent",
        borderRadius: 10,
        cursor: "pointer",
        color: "#EDEDED",
        textAlign: "left",
        transition: "background 120ms ease, border-color 120ms ease",
        fontFamily: "inherit",
      }}
      onMouseEnter={(e) => {
        if (!open) e.currentTarget.style.background = "#1C1C1F";
      }}
      onMouseLeave={(e) => {
        if (!open) e.currentTarget.style.background = "transparent";
      }}
    >
      {userInfo.avatar_url ? (

      ) : (

          {(displayName || "?").slice(0, 2).toUpperCase()}

      )}

      {!sidebarCollapsed && (

          {displayName}

          {login && (

            @{login}

          )}

      )}

      {!sidebarCollapsed && (

      )}

    {/* Dropdown popover */}
    {open && (

      {/* Header: show full email/username for context */}

        Signed in as

        {displayName}

 }
        label="Settings"
        onClick={() => handleItemClick(onOpenSettings)}
      />

 }
        label="About GitPilot"
        onClick={() => handleItemClick(onOpenAbout)}
      />

 }
        label="Log out"
        onClick={() => handleItemClick(onLogout)}
        danger
      />

    )}

    {/* Scoped keyframe animation */}

  );
}
+
+// ── Menu item primitive ────────────────────────────────────────────
+function MenuItem({ icon, label, onClick, danger = false }) {
+ const [hover, setHover] = useState(false);
+ const color = danger ? "#f87171" : "#EDEDED";
+ return (
+ setHover(true)}
+ onMouseLeave={() => setHover(false)}
+ style={{
+ display: "flex",
+ alignItems: "center",
+ gap: 12,
+ width: "100%",
+ padding: "9px 12px",
+ background: hover ? "#27272A" : "transparent",
+ border: "none",
+ borderRadius: 8,
+ cursor: "pointer",
+ color: color,
+ fontSize: 13,
+ fontWeight: 500,
+ textAlign: "left",
+ fontFamily: "inherit",
+ transition: "background 80ms ease",
+ }}
+ >
+
+ {icon}
+
+ {label}
+
+ );
+}
+
+// ── Inline icons (no extra asset loads) ────────────────────────────
+function SettingsIcon() {
+ return (
+
+
+
+
+ );
+}
+
+function InfoIcon() {
+ return (
+
+
+
+
+
+ );
+}
+
+function LogoutIcon() {
+ return (
+
+
+
+
+
+ );
+}
diff --git a/frontend/index.html b/frontend/index.html
new file mode 100644
index 0000000000000000000000000000000000000000..497850b3f10fa590f7e06d95843e28e5ce3fd4b5
--- /dev/null
+++ b/frontend/index.html
@@ -0,0 +1,12 @@
+
+
+
+
+ GitPilot
+
+
+
+
+
+
+
diff --git a/frontend/main.jsx b/frontend/main.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..2c2017cf902ef50f84816577bd83e21af501f43e
--- /dev/null
+++ b/frontend/main.jsx
@@ -0,0 +1,11 @@
import React from "react";
import ReactDOM from "react-dom/client";
import App from "./App.jsx";
import "./styles.css"; // base app styles
import "./ollabridge.css"; // OllaBridge provider/pairing UI styles

// NOTE(review): the JSX child passed to render() was stripped in this diff
// view — presumably <React.StrictMode><App /></React.StrictMode>; confirm
// against the original file.
ReactDOM.createRoot(document.getElementById("root")).render(

);
diff --git a/frontend/nginx.conf b/frontend/nginx.conf
new file mode 100644
index 0000000000000000000000000000000000000000..455bb91c50c5c97affbe57cf37fe1f7e07572f1d
--- /dev/null
+++ b/frontend/nginx.conf
@@ -0,0 +1,58 @@
server {
    listen 80;
    server_name _;
    root /usr/share/nginx/html;
    index index.html;

    # DNS resolver for dynamic upstream resolution (127.0.0.11 is Docker's
    # embedded DNS). Lets nginx start even if the backend doesn't exist yet.
    resolver 127.0.0.11 valid=30s ipv6=off;

    # Gzip compression
    gzip on;
    gzip_vary on;
    gzip_min_length 1024;
    gzip_types text/plain text/css text/xml text/javascript application/x-javascript application/xml+rss application/json application/javascript;

    # Security headers.
    # NOTE: nginx add_header directives are inherited from this level ONLY by
    # locations that declare no add_header of their own — any location that
    # adds its own header must repeat these (see static-assets below).
    # X-XSS-Protection is deprecated in modern browsers; kept for legacy UAs.
    add_header X-Frame-Options "SAMEORIGIN" always;
    add_header X-Content-Type-Options "nosniff" always;
    add_header X-XSS-Protection "1; mode=block" always;

    # Handle API requests - proxy to backend (docker-compose only).
    # Uses a variable to force runtime DNS resolution instead of at startup.
    location /api/ {
        set $backend "backend:8000";
        proxy_pass http://$backend;
        proxy_http_version 1.1;
        # WebSocket upgrade. Connection is hardcoded to 'upgrade' because the
        # canonical map on $http_upgrade requires the http{} context, which
        # this server-only include cannot modify.
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_cache_bypass $http_upgrade;
        # Keep long-lived WebSocket/SSE streams alive (nginx default is 60s,
        # which silently drops idle agent sessions).
        proxy_read_timeout 3600s;
        proxy_send_timeout 3600s;

        # Handle backend connection errors gracefully
        proxy_intercept_errors on;
        error_page 502 503 504 = @backend_unavailable;
    }

    # Fallback for when backend is unavailable.
    # default_type (not `add_header Content-Type`) is the correct directive
    # for setting the body type of a `return` response.
    location @backend_unavailable {
        default_type application/json;
        return 503 '{"error": "Backend service unavailable. Configure VITE_BACKEND_URL in frontend or ensure backend container is running."}';
    }

    # Serve static files (SPA fallback to index.html)
    location / {
        try_files $uri $uri/ /index.html;
    }

    # Cache static assets.
    # This location declares add_header, which cancels inheritance of the
    # server-level security headers — so they are repeated here explicitly.
    location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg|woff|woff2|ttf|eot)$ {
        expires 1y;
        add_header Cache-Control "public, immutable";
        add_header X-Frame-Options "SAMEORIGIN" always;
        add_header X-Content-Type-Options "nosniff" always;
        add_header X-XSS-Protection "1; mode=block" always;
    }
}
diff --git a/frontend/ollabridge.css b/frontend/ollabridge.css
new file mode 100644
index 0000000000000000000000000000000000000000..26fc57504d5b22bf2e5384bff838314ab94115db
--- /dev/null
+++ b/frontend/ollabridge.css
@@ -0,0 +1,222 @@
+/* ============================================================================
+ OLLABRIDGE CLOUD - Provider Tabs & Pairing UI
+ ============================================================================ */
+
+/* Provider selection tabs (replaces dropdown) */
+.settings-provider-tabs {
+ display: flex;
+ gap: 4px;
+ flex-wrap: wrap;
+ margin-top: 4px;
+}
+
+.settings-provider-tab {
+ border: 1px solid #272832;
+ outline: none;
+ background: #0a0b0f;
+ color: #9a9bb0;
+ border-radius: 8px;
+ padding: 8px 14px;
+ font-size: 13px;
+ font-weight: 500;
+ cursor: pointer;
+ transition: all 0.2s ease;
+ font-family: inherit;
+}
+
+.settings-provider-tab:hover {
+ background: #1a1b26;
+ color: #c3c5dd;
+ border-color: #3a3b4d;
+}
+
+.settings-provider-tab-active {
+ background: rgba(255, 122, 60, 0.12);
+ color: #ff7a3c;
+ border-color: #ff7a3c;
+ font-weight: 600;
+}
+
+.settings-provider-tab-active:hover {
+ background: rgba(255, 122, 60, 0.18);
+ color: #ff8b52;
+}
+
+/* Auth mode tabs (Device Pairing / API Key / Local Trust) */
+.ob-auth-tabs {
+ display: flex;
+ gap: 4px;
+ margin-top: 4px;
+ margin-bottom: 8px;
+}
+
+.ob-auth-tab {
+ display: flex;
+ align-items: center;
+ gap: 6px;
+ border: 1px solid #272832;
+ outline: none;
+ background: #0a0b0f;
+ color: #9a9bb0;
+ border-radius: 8px;
+ padding: 7px 12px;
+ font-size: 12px;
+ font-weight: 500;
+ cursor: pointer;
+ transition: all 0.2s ease;
+ font-family: inherit;
+ white-space: nowrap;
+}
+
+.ob-auth-tab:hover {
+ background: #1a1b26;
+ color: #c3c5dd;
+ border-color: #3a3b4d;
+}
+
+.ob-auth-tab-active {
+ background: rgba(59, 130, 246, 0.1);
+ color: #60a5fa;
+ border-color: #3B82F6;
+ font-weight: 600;
+}
+
+.ob-auth-tab-active:hover {
+ background: rgba(59, 130, 246, 0.15);
+}
+
+.ob-auth-tab-icon {
+ font-size: 14px;
+ line-height: 1;
+}
+
+/* Auth panel (content below tabs) */
+.ob-auth-panel {
+ padding: 12px;
+ background: #0a0b0f;
+ border: 1px solid #1e1f30;
+ border-radius: 8px;
+ margin-bottom: 4px;
+}
+
+.ob-auth-desc {
+ font-size: 12px;
+ color: #9a9bb0;
+ line-height: 1.5;
+ margin-bottom: 10px;
+}
+
+/* Pairing row */
+.ob-pair-row {
+ display: flex;
+ gap: 8px;
+ align-items: center;
+}
+
+.ob-pair-input {
+ flex: 1;
+ font-family: "SF Mono", Monaco, "Cascadia Code", monospace;
+ font-size: 16px !important;
+ letter-spacing: 2px;
+ text-align: center;
+ text-transform: uppercase;
+}
+
+.ob-pair-btn {
+ display: flex;
+ align-items: center;
+ gap: 6px;
+ border: none;
+ outline: none;
+ background: #3B82F6;
+ color: #fff;
+ border-radius: 8px;
+ padding: 9px 16px;
+ font-size: 13px;
+ font-weight: 600;
+ cursor: pointer;
+ transition: all 0.2s ease;
+ white-space: nowrap;
+ font-family: inherit;
+}
+
+.ob-pair-btn:hover:not(:disabled) {
+ background: #4d93f7;
+ transform: translateY(-1px);
+ box-shadow: 0 4px 12px rgba(59, 130, 246, 0.3);
+}
+
+.ob-pair-btn:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+}
+
/* Pair spinner */
/* NOTE(review): relies on a `spin` @keyframes rule that is not defined in
   this stylesheet — presumably provided by styles.css; verify it exists
   globally or the spinner will not rotate. */
.ob-pair-spinner {
  display: inline-block;
  width: 14px;
  height: 14px;
  border: 2px solid rgba(255, 255, 255, 0.3);
  border-top-color: #fff;
  border-radius: 50%;
  animation: spin 0.6s linear infinite;
}

/* Pair result feedback */
/* NOTE(review): `fadeIn` keyframes are likewise not defined in this file —
   confirm they are declared globally. */
.ob-pair-result {
  margin-top: 8px;
  padding: 8px 12px;
  border-radius: 6px;
  font-size: 12px;
  font-weight: 500;
  animation: fadeIn 0.3s ease;
}

/* Success state (green) */
.ob-pair-result-ok {
  background: rgba(76, 175, 136, 0.12);
  border: 1px solid rgba(76, 175, 136, 0.3);
  color: #7cffb3;
}

/* Failure state (red) */
.ob-pair-result-err {
  background: rgba(255, 82, 82, 0.1);
  border: 1px solid rgba(255, 82, 82, 0.3);
  color: #ff8a8a;
}
+
+/* Model row (input + fetch button) */
+.ob-model-row {
+ display: flex;
+ gap: 8px;
+ align-items: center;
+}
+
+.ob-fetch-btn {
+ display: flex;
+ align-items: center;
+ gap: 5px;
+ border: 1px solid #272832;
+ outline: none;
+ background: #1a1b26;
+ color: #c3c5dd;
+ border-radius: 8px;
+ padding: 8px 12px;
+ font-size: 12px;
+ font-weight: 500;
+ cursor: pointer;
+ transition: all 0.2s ease;
+ white-space: nowrap;
+ font-family: inherit;
+}
+
+.ob-fetch-btn:hover:not(:disabled) {
+ background: #222335;
+ border-color: #3a3b4d;
+ color: #f5f5f7;
+ transform: translateY(-1px);
+}
+
+.ob-fetch-btn:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+}
diff --git a/frontend/package-lock.json b/frontend/package-lock.json
new file mode 100644
index 0000000000000000000000000000000000000000..a59f1ef312e2433e2b84476beb3fec107102aaf7
--- /dev/null
+++ b/frontend/package-lock.json
@@ -0,0 +1,3346 @@
+{
+ "name": "gitpilot-frontend",
+ "version": "0.2.6",
+ "lockfileVersion": 3,
+ "requires": true,
+ "packages": {
+ "": {
+ "name": "gitpilot-frontend",
+ "version": "0.2.6",
+ "dependencies": {
+ "react": "^18.2.0",
+ "react-dom": "^18.2.0",
+ "react-markdown": "^10.1.0",
+ "reactflow": "^11.11.4"
+ },
+ "devDependencies": {
+ "@vitejs/plugin-react": "^4.0.0",
+ "vite": "^5.0.0"
+ }
+ },
+ "node_modules/@babel/code-frame": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz",
+ "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-validator-identifier": "^7.27.1",
+ "js-tokens": "^4.0.0",
+ "picocolors": "^1.1.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/compat-data": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.28.5.tgz",
+ "integrity": "sha512-6uFXyCayocRbqhZOB+6XcuZbkMNimwfVGFji8CTZnCzOHVGvDqzvitu1re2AU5LROliz7eQPhB8CpAMvnx9EjA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/core": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.28.5.tgz",
+ "integrity": "sha512-e7jT4DxYvIDLk1ZHmU/m/mB19rex9sv0c2ftBtjSBv+kVM/902eh0fINUzD7UwLLNR+jU585GxUJ8/EBfAM5fw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/code-frame": "^7.27.1",
+ "@babel/generator": "^7.28.5",
+ "@babel/helper-compilation-targets": "^7.27.2",
+ "@babel/helper-module-transforms": "^7.28.3",
+ "@babel/helpers": "^7.28.4",
+ "@babel/parser": "^7.28.5",
+ "@babel/template": "^7.27.2",
+ "@babel/traverse": "^7.28.5",
+ "@babel/types": "^7.28.5",
+ "@jridgewell/remapping": "^2.3.5",
+ "convert-source-map": "^2.0.0",
+ "debug": "^4.1.0",
+ "gensync": "^1.0.0-beta.2",
+ "json5": "^2.2.3",
+ "semver": "^6.3.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/babel"
+ }
+ },
+ "node_modules/@babel/generator": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.28.5.tgz",
+ "integrity": "sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/parser": "^7.28.5",
+ "@babel/types": "^7.28.5",
+ "@jridgewell/gen-mapping": "^0.3.12",
+ "@jridgewell/trace-mapping": "^0.3.28",
+ "jsesc": "^3.0.2"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-compilation-targets": {
+ "version": "7.27.2",
+ "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.27.2.tgz",
+ "integrity": "sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/compat-data": "^7.27.2",
+ "@babel/helper-validator-option": "^7.27.1",
+ "browserslist": "^4.24.0",
+ "lru-cache": "^5.1.1",
+ "semver": "^6.3.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-globals": {
+ "version": "7.28.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz",
+ "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-module-imports": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz",
+ "integrity": "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/traverse": "^7.27.1",
+ "@babel/types": "^7.27.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-module-transforms": {
+ "version": "7.28.3",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.3.tgz",
+ "integrity": "sha512-gytXUbs8k2sXS9PnQptz5o0QnpLL51SwASIORY6XaBKF88nsOT0Zw9szLqlSGQDP/4TljBAD5y98p2U1fqkdsw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-module-imports": "^7.27.1",
+ "@babel/helper-validator-identifier": "^7.27.1",
+ "@babel/traverse": "^7.28.3"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/helper-plugin-utils": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.27.1.tgz",
+ "integrity": "sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-string-parser": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz",
+ "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-validator-identifier": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz",
+ "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-validator-option": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz",
+ "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helpers": {
+ "version": "7.28.4",
+ "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.4.tgz",
+ "integrity": "sha512-HFN59MmQXGHVyYadKLVumYsA9dBFun/ldYxipEjzA4196jpLZd8UjEEBLkbEkvfYreDqJhZxYAWFPtrfhNpj4w==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/template": "^7.27.2",
+ "@babel/types": "^7.28.4"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/parser": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.5.tgz",
+ "integrity": "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/types": "^7.28.5"
+ },
+ "bin": {
+ "parser": "bin/babel-parser.js"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-react-jsx-self": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.27.1.tgz",
+ "integrity": "sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.27.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-react-jsx-source": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.27.1.tgz",
+ "integrity": "sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.27.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/template": {
+ "version": "7.27.2",
+ "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz",
+ "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/code-frame": "^7.27.1",
+ "@babel/parser": "^7.27.2",
+ "@babel/types": "^7.27.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/traverse": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.28.5.tgz",
+ "integrity": "sha512-TCCj4t55U90khlYkVV/0TfkJkAkUg3jZFA3Neb7unZT8CPok7iiRfaX0F+WnqWqt7OxhOn0uBKXCw4lbL8W0aQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/code-frame": "^7.27.1",
+ "@babel/generator": "^7.28.5",
+ "@babel/helper-globals": "^7.28.0",
+ "@babel/parser": "^7.28.5",
+ "@babel/template": "^7.27.2",
+ "@babel/types": "^7.28.5",
+ "debug": "^4.3.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/types": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.5.tgz",
+ "integrity": "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-string-parser": "^7.27.1",
+ "@babel/helper-validator-identifier": "^7.28.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@esbuild/aix-ppc64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz",
+ "integrity": "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "aix"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/android-arm": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.21.5.tgz",
+ "integrity": "sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/android-arm64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz",
+ "integrity": "sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/android-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.21.5.tgz",
+ "integrity": "sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/darwin-arm64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz",
+ "integrity": "sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/darwin-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz",
+ "integrity": "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/freebsd-arm64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz",
+ "integrity": "sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/freebsd-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz",
+ "integrity": "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-arm": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz",
+ "integrity": "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-arm64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz",
+ "integrity": "sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-ia32": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz",
+ "integrity": "sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-loong64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz",
+ "integrity": "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==",
+ "cpu": [
+ "loong64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-mips64el": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz",
+ "integrity": "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==",
+ "cpu": [
+ "mips64el"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-ppc64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz",
+ "integrity": "sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-riscv64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz",
+ "integrity": "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-s390x": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz",
+ "integrity": "sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==",
+ "cpu": [
+ "s390x"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz",
+ "integrity": "sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/netbsd-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz",
+ "integrity": "sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "netbsd"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/openbsd-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz",
+ "integrity": "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "openbsd"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/sunos-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz",
+ "integrity": "sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "sunos"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/win32-arm64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz",
+ "integrity": "sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/win32-ia32": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz",
+ "integrity": "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/win32-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz",
+ "integrity": "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@jridgewell/gen-mapping": {
+ "version": "0.3.13",
+ "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz",
+ "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/sourcemap-codec": "^1.5.0",
+ "@jridgewell/trace-mapping": "^0.3.24"
+ }
+ },
+ "node_modules/@jridgewell/remapping": {
+ "version": "2.3.5",
+ "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz",
+ "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/gen-mapping": "^0.3.5",
+ "@jridgewell/trace-mapping": "^0.3.24"
+ }
+ },
+ "node_modules/@jridgewell/resolve-uri": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz",
+ "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@jridgewell/sourcemap-codec": {
+ "version": "1.5.5",
+ "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz",
+ "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@jridgewell/trace-mapping": {
+ "version": "0.3.31",
+ "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz",
+ "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/resolve-uri": "^3.1.0",
+ "@jridgewell/sourcemap-codec": "^1.4.14"
+ }
+ },
+ "node_modules/@reactflow/background": {
+ "version": "11.3.14",
+ "resolved": "https://registry.npmjs.org/@reactflow/background/-/background-11.3.14.tgz",
+ "integrity": "sha512-Gewd7blEVT5Lh6jqrvOgd4G6Qk17eGKQfsDXgyRSqM+CTwDqRldG2LsWN4sNeno6sbqVIC2fZ+rAUBFA9ZEUDA==",
+ "license": "MIT",
+ "dependencies": {
+ "@reactflow/core": "11.11.4",
+ "classcat": "^5.0.3",
+ "zustand": "^4.4.1"
+ },
+ "peerDependencies": {
+ "react": ">=17",
+ "react-dom": ">=17"
+ }
+ },
+ "node_modules/@reactflow/controls": {
+ "version": "11.2.14",
+ "resolved": "https://registry.npmjs.org/@reactflow/controls/-/controls-11.2.14.tgz",
+ "integrity": "sha512-MiJp5VldFD7FrqaBNIrQ85dxChrG6ivuZ+dcFhPQUwOK3HfYgX2RHdBua+gx+40p5Vw5It3dVNp/my4Z3jF0dw==",
+ "license": "MIT",
+ "dependencies": {
+ "@reactflow/core": "11.11.4",
+ "classcat": "^5.0.3",
+ "zustand": "^4.4.1"
+ },
+ "peerDependencies": {
+ "react": ">=17",
+ "react-dom": ">=17"
+ }
+ },
+ "node_modules/@reactflow/core": {
+ "version": "11.11.4",
+ "resolved": "https://registry.npmjs.org/@reactflow/core/-/core-11.11.4.tgz",
+ "integrity": "sha512-H4vODklsjAq3AMq6Np4LE12i1I4Ta9PrDHuBR9GmL8uzTt2l2jh4CiQbEMpvMDcp7xi4be0hgXj+Ysodde/i7Q==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3": "^7.4.0",
+ "@types/d3-drag": "^3.0.1",
+ "@types/d3-selection": "^3.0.3",
+ "@types/d3-zoom": "^3.0.1",
+ "classcat": "^5.0.3",
+ "d3-drag": "^3.0.0",
+ "d3-selection": "^3.0.0",
+ "d3-zoom": "^3.0.0",
+ "zustand": "^4.4.1"
+ },
+ "peerDependencies": {
+ "react": ">=17",
+ "react-dom": ">=17"
+ }
+ },
+ "node_modules/@reactflow/minimap": {
+ "version": "11.7.14",
+ "resolved": "https://registry.npmjs.org/@reactflow/minimap/-/minimap-11.7.14.tgz",
+ "integrity": "sha512-mpwLKKrEAofgFJdkhwR5UQ1JYWlcAAL/ZU/bctBkuNTT1yqV+y0buoNVImsRehVYhJwffSWeSHaBR5/GJjlCSQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@reactflow/core": "11.11.4",
+ "@types/d3-selection": "^3.0.3",
+ "@types/d3-zoom": "^3.0.1",
+ "classcat": "^5.0.3",
+ "d3-selection": "^3.0.0",
+ "d3-zoom": "^3.0.0",
+ "zustand": "^4.4.1"
+ },
+ "peerDependencies": {
+ "react": ">=17",
+ "react-dom": ">=17"
+ }
+ },
+ "node_modules/@reactflow/node-resizer": {
+ "version": "2.2.14",
+ "resolved": "https://registry.npmjs.org/@reactflow/node-resizer/-/node-resizer-2.2.14.tgz",
+ "integrity": "sha512-fwqnks83jUlYr6OHcdFEedumWKChTHRGw/kbCxj0oqBd+ekfs+SIp4ddyNU0pdx96JIm5iNFS0oNrmEiJbbSaA==",
+ "license": "MIT",
+ "dependencies": {
+ "@reactflow/core": "11.11.4",
+ "classcat": "^5.0.4",
+ "d3-drag": "^3.0.0",
+ "d3-selection": "^3.0.0",
+ "zustand": "^4.4.1"
+ },
+ "peerDependencies": {
+ "react": ">=17",
+ "react-dom": ">=17"
+ }
+ },
+ "node_modules/@reactflow/node-toolbar": {
+ "version": "1.3.14",
+ "resolved": "https://registry.npmjs.org/@reactflow/node-toolbar/-/node-toolbar-1.3.14.tgz",
+ "integrity": "sha512-rbynXQnH/xFNu4P9H+hVqlEUafDCkEoCy0Dg9mG22Sg+rY/0ck6KkrAQrYrTgXusd+cEJOMK0uOOFCK2/5rSGQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@reactflow/core": "11.11.4",
+ "classcat": "^5.0.3",
+ "zustand": "^4.4.1"
+ },
+ "peerDependencies": {
+ "react": ">=17",
+ "react-dom": ">=17"
+ }
+ },
+ "node_modules/@rolldown/pluginutils": {
+ "version": "1.0.0-beta.27",
+ "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-beta.27.tgz",
+ "integrity": "sha512-+d0F4MKMCbeVUJwG96uQ4SgAznZNSq93I3V+9NHA4OpvqG8mRCpGdKmK8l/dl02h2CCDHwW2FqilnTyDcAnqjA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@rollup/rollup-android-arm-eabi": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.53.2.tgz",
+ "integrity": "sha512-yDPzwsgiFO26RJA4nZo8I+xqzh7sJTZIWQOxn+/XOdPE31lAvLIYCKqjV+lNH/vxE2L2iH3plKxDCRK6i+CwhA==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ]
+ },
+ "node_modules/@rollup/rollup-android-arm64": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.53.2.tgz",
+ "integrity": "sha512-k8FontTxIE7b0/OGKeSN5B6j25EuppBcWM33Z19JoVT7UTXFSo3D9CdU39wGTeb29NO3XxpMNauh09B+Ibw+9g==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ]
+ },
+ "node_modules/@rollup/rollup-darwin-arm64": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.53.2.tgz",
+ "integrity": "sha512-A6s4gJpomNBtJ2yioj8bflM2oogDwzUiMl2yNJ2v9E7++sHrSrsQ29fOfn5DM/iCzpWcebNYEdXpaK4tr2RhfQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ]
+ },
+ "node_modules/@rollup/rollup-darwin-x64": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.53.2.tgz",
+ "integrity": "sha512-e6XqVmXlHrBlG56obu9gDRPW3O3hLxpwHpLsBJvuI8qqnsrtSZ9ERoWUXtPOkY8c78WghyPHZdmPhHLWNdAGEw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ]
+ },
+ "node_modules/@rollup/rollup-freebsd-arm64": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.53.2.tgz",
+ "integrity": "sha512-v0E9lJW8VsrwPux5Qe5CwmH/CF/2mQs6xU1MF3nmUxmZUCHazCjLgYvToOk+YuuUqLQBio1qkkREhxhc656ViA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ]
+ },
+ "node_modules/@rollup/rollup-freebsd-x64": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.53.2.tgz",
+ "integrity": "sha512-ClAmAPx3ZCHtp6ysl4XEhWU69GUB1D+s7G9YjHGhIGCSrsg00nEGRRZHmINYxkdoJehde8VIsDC5t9C0gb6yqA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm-gnueabihf": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.53.2.tgz",
+ "integrity": "sha512-EPlb95nUsz6Dd9Qy13fI5kUPXNSljaG9FiJ4YUGU1O/Q77i5DYFW5KR8g1OzTcdZUqQQ1KdDqsTohdFVwCwjqg==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm-musleabihf": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.53.2.tgz",
+ "integrity": "sha512-BOmnVW+khAUX+YZvNfa0tGTEMVVEerOxN0pDk2E6N6DsEIa2Ctj48FOMfNDdrwinocKaC7YXUZ1pHlKpnkja/Q==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm64-gnu": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.53.2.tgz",
+ "integrity": "sha512-Xt2byDZ+6OVNuREgBXr4+CZDJtrVso5woFtpKdGPhpTPHcNG7D8YXeQzpNbFRxzTVqJf7kvPMCub/pcGUWgBjA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm64-musl": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.53.2.tgz",
+ "integrity": "sha512-+LdZSldy/I9N8+klim/Y1HsKbJ3BbInHav5qE9Iy77dtHC/pibw1SR/fXlWyAk0ThnpRKoODwnAuSjqxFRDHUQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-loong64-gnu": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.53.2.tgz",
+ "integrity": "sha512-8ms8sjmyc1jWJS6WdNSA23rEfdjWB30LH8Wqj0Cqvv7qSHnvw6kgMMXRdop6hkmGPlyYBdRPkjJnj3KCUHV/uQ==",
+ "cpu": [
+ "loong64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-ppc64-gnu": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.53.2.tgz",
+ "integrity": "sha512-3HRQLUQbpBDMmzoxPJYd3W6vrVHOo2cVW8RUo87Xz0JPJcBLBr5kZ1pGcQAhdZgX9VV7NbGNipah1omKKe23/g==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-riscv64-gnu": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.53.2.tgz",
+ "integrity": "sha512-fMjKi+ojnmIvhk34gZP94vjogXNNUKMEYs+EDaB/5TG/wUkoeua7p7VCHnE6T2Tx+iaghAqQX8teQzcvrYpaQA==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-riscv64-musl": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.53.2.tgz",
+ "integrity": "sha512-XuGFGU+VwUUV5kLvoAdi0Wz5Xbh2SrjIxCtZj6Wq8MDp4bflb/+ThZsVxokM7n0pcbkEr2h5/pzqzDYI7cCgLQ==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-s390x-gnu": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.53.2.tgz",
+ "integrity": "sha512-w6yjZF0P+NGzWR3AXWX9zc0DNEGdtvykB03uhonSHMRa+oWA6novflo2WaJr6JZakG2ucsyb+rvhrKac6NIy+w==",
+ "cpu": [
+ "s390x"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-x64-gnu": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.53.2.tgz",
+ "integrity": "sha512-yo8d6tdfdeBArzC7T/PnHd7OypfI9cbuZzPnzLJIyKYFhAQ8SvlkKtKBMbXDxe1h03Rcr7u++nFS7tqXz87Gtw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-x64-musl": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.53.2.tgz",
+ "integrity": "sha512-ah59c1YkCxKExPP8O9PwOvs+XRLKwh/mV+3YdKqQ5AMQ0r4M4ZDuOrpWkUaqO7fzAHdINzV9tEVu8vNw48z0lA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-openharmony-arm64": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.53.2.tgz",
+ "integrity": "sha512-4VEd19Wmhr+Zy7hbUsFZ6YXEiP48hE//KPLCSVNY5RMGX2/7HZ+QkN55a3atM1C/BZCGIgqN+xrVgtdak2S9+A==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "openharmony"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-arm64-msvc": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.53.2.tgz",
+ "integrity": "sha512-IlbHFYc/pQCgew/d5fslcy1KEaYVCJ44G8pajugd8VoOEI8ODhtb/j8XMhLpwHCMB3yk2J07ctup10gpw2nyMA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-ia32-msvc": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.53.2.tgz",
+ "integrity": "sha512-lNlPEGgdUfSzdCWU176ku/dQRnA7W+Gp8d+cWv73jYrb8uT7HTVVxq62DUYxjbaByuf1Yk0RIIAbDzp+CnOTFg==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-x64-gnu": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.53.2.tgz",
+ "integrity": "sha512-S6YojNVrHybQis2lYov1sd+uj7K0Q05NxHcGktuMMdIQ2VixGwAfbJ23NnlvvVV1bdpR2m5MsNBViHJKcA4ADw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-x64-msvc": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.53.2.tgz",
+ "integrity": "sha512-k+/Rkcyx//P6fetPoLMb8pBeqJBNGx81uuf7iljX9++yNBVRDQgD04L+SVXmXmh5ZP4/WOp4mWF0kmi06PW2tA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@types/babel__core": {
+ "version": "7.20.5",
+ "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz",
+ "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/parser": "^7.20.7",
+ "@babel/types": "^7.20.7",
+ "@types/babel__generator": "*",
+ "@types/babel__template": "*",
+ "@types/babel__traverse": "*"
+ }
+ },
+ "node_modules/@types/babel__generator": {
+ "version": "7.27.0",
+ "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz",
+ "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/types": "^7.0.0"
+ }
+ },
+ "node_modules/@types/babel__template": {
+ "version": "7.4.4",
+ "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz",
+ "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/parser": "^7.1.0",
+ "@babel/types": "^7.0.0"
+ }
+ },
+ "node_modules/@types/babel__traverse": {
+ "version": "7.28.0",
+ "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.28.0.tgz",
+ "integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/types": "^7.28.2"
+ }
+ },
+ "node_modules/@types/d3": {
+ "version": "7.4.3",
+ "resolved": "https://registry.npmjs.org/@types/d3/-/d3-7.4.3.tgz",
+ "integrity": "sha512-lZXZ9ckh5R8uiFVt8ogUNf+pIrK4EsWrx2Np75WvF/eTpJ0FMHNhjXk8CKEx/+gpHbNQyJWehbFaTvqmHWB3ww==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-array": "*",
+ "@types/d3-axis": "*",
+ "@types/d3-brush": "*",
+ "@types/d3-chord": "*",
+ "@types/d3-color": "*",
+ "@types/d3-contour": "*",
+ "@types/d3-delaunay": "*",
+ "@types/d3-dispatch": "*",
+ "@types/d3-drag": "*",
+ "@types/d3-dsv": "*",
+ "@types/d3-ease": "*",
+ "@types/d3-fetch": "*",
+ "@types/d3-force": "*",
+ "@types/d3-format": "*",
+ "@types/d3-geo": "*",
+ "@types/d3-hierarchy": "*",
+ "@types/d3-interpolate": "*",
+ "@types/d3-path": "*",
+ "@types/d3-polygon": "*",
+ "@types/d3-quadtree": "*",
+ "@types/d3-random": "*",
+ "@types/d3-scale": "*",
+ "@types/d3-scale-chromatic": "*",
+ "@types/d3-selection": "*",
+ "@types/d3-shape": "*",
+ "@types/d3-time": "*",
+ "@types/d3-time-format": "*",
+ "@types/d3-timer": "*",
+ "@types/d3-transition": "*",
+ "@types/d3-zoom": "*"
+ }
+ },
+ "node_modules/@types/d3-array": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-3.2.2.tgz",
+ "integrity": "sha512-hOLWVbm7uRza0BYXpIIW5pxfrKe0W+D5lrFiAEYR+pb6w3N2SwSMaJbXdUfSEv+dT4MfHBLtn5js0LAWaO6otw==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-axis": {
+ "version": "3.0.6",
+ "resolved": "https://registry.npmjs.org/@types/d3-axis/-/d3-axis-3.0.6.tgz",
+ "integrity": "sha512-pYeijfZuBd87T0hGn0FO1vQ/cgLk6E1ALJjfkC0oJ8cbwkZl3TpgS8bVBLZN+2jjGgg38epgxb2zmoGtSfvgMw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-selection": "*"
+ }
+ },
+ "node_modules/@types/d3-brush": {
+ "version": "3.0.6",
+ "resolved": "https://registry.npmjs.org/@types/d3-brush/-/d3-brush-3.0.6.tgz",
+ "integrity": "sha512-nH60IZNNxEcrh6L1ZSMNA28rj27ut/2ZmI3r96Zd+1jrZD++zD3LsMIjWlvg4AYrHn/Pqz4CF3veCxGjtbqt7A==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-selection": "*"
+ }
+ },
+ "node_modules/@types/d3-chord": {
+ "version": "3.0.6",
+ "resolved": "https://registry.npmjs.org/@types/d3-chord/-/d3-chord-3.0.6.tgz",
+ "integrity": "sha512-LFYWWd8nwfwEmTZG9PfQxd17HbNPksHBiJHaKuY1XeqscXacsS2tyoo6OdRsjf+NQYeB6XrNL3a25E3gH69lcg==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-color": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/@types/d3-color/-/d3-color-3.1.3.tgz",
+ "integrity": "sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-contour": {
+ "version": "3.0.6",
+ "resolved": "https://registry.npmjs.org/@types/d3-contour/-/d3-contour-3.0.6.tgz",
+ "integrity": "sha512-BjzLgXGnCWjUSYGfH1cpdo41/hgdWETu4YxpezoztawmqsvCeep+8QGfiY6YbDvfgHz/DkjeIkkZVJavB4a3rg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-array": "*",
+ "@types/geojson": "*"
+ }
+ },
+ "node_modules/@types/d3-delaunay": {
+ "version": "6.0.4",
+ "resolved": "https://registry.npmjs.org/@types/d3-delaunay/-/d3-delaunay-6.0.4.tgz",
+ "integrity": "sha512-ZMaSKu4THYCU6sV64Lhg6qjf1orxBthaC161plr5KuPHo3CNm8DTHiLw/5Eq2b6TsNP0W0iJrUOFscY6Q450Hw==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-dispatch": {
+ "version": "3.0.7",
+ "resolved": "https://registry.npmjs.org/@types/d3-dispatch/-/d3-dispatch-3.0.7.tgz",
+ "integrity": "sha512-5o9OIAdKkhN1QItV2oqaE5KMIiXAvDWBDPrD85e58Qlz1c1kI/J0NcqbEG88CoTwJrYe7ntUCVfeUl2UJKbWgA==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-drag": {
+ "version": "3.0.7",
+ "resolved": "https://registry.npmjs.org/@types/d3-drag/-/d3-drag-3.0.7.tgz",
+ "integrity": "sha512-HE3jVKlzU9AaMazNufooRJ5ZpWmLIoc90A37WU2JMmeq28w1FQqCZswHZ3xR+SuxYftzHq6WU6KJHvqxKzTxxQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-selection": "*"
+ }
+ },
+ "node_modules/@types/d3-dsv": {
+ "version": "3.0.7",
+ "resolved": "https://registry.npmjs.org/@types/d3-dsv/-/d3-dsv-3.0.7.tgz",
+ "integrity": "sha512-n6QBF9/+XASqcKK6waudgL0pf/S5XHPPI8APyMLLUHd8NqouBGLsU8MgtO7NINGtPBtk9Kko/W4ea0oAspwh9g==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-ease": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/@types/d3-ease/-/d3-ease-3.0.2.tgz",
+ "integrity": "sha512-NcV1JjO5oDzoK26oMzbILE6HW7uVXOHLQvHshBUW4UMdZGfiY6v5BeQwh9a9tCzv+CeefZQHJt5SRgK154RtiA==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-fetch": {
+ "version": "3.0.7",
+ "resolved": "https://registry.npmjs.org/@types/d3-fetch/-/d3-fetch-3.0.7.tgz",
+ "integrity": "sha512-fTAfNmxSb9SOWNB9IoG5c8Hg6R+AzUHDRlsXsDZsNp6sxAEOP0tkP3gKkNSO/qmHPoBFTxNrjDprVHDQDvo5aA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-dsv": "*"
+ }
+ },
+ "node_modules/@types/d3-force": {
+ "version": "3.0.10",
+ "resolved": "https://registry.npmjs.org/@types/d3-force/-/d3-force-3.0.10.tgz",
+ "integrity": "sha512-ZYeSaCF3p73RdOKcjj+swRlZfnYpK1EbaDiYICEEp5Q6sUiqFaFQ9qgoshp5CzIyyb/yD09kD9o2zEltCexlgw==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-format": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/d3-format/-/d3-format-3.0.4.tgz",
+ "integrity": "sha512-fALi2aI6shfg7vM5KiR1wNJnZ7r6UuggVqtDA+xiEdPZQwy/trcQaHnwShLuLdta2rTymCNpxYTiMZX/e09F4g==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-geo": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-geo/-/d3-geo-3.1.0.tgz",
+ "integrity": "sha512-856sckF0oP/diXtS4jNsiQw/UuK5fQG8l/a9VVLeSouf1/PPbBE1i1W852zVwKwYCBkFJJB7nCFTbk6UMEXBOQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/geojson": "*"
+ }
+ },
+ "node_modules/@types/d3-hierarchy": {
+ "version": "3.1.7",
+ "resolved": "https://registry.npmjs.org/@types/d3-hierarchy/-/d3-hierarchy-3.1.7.tgz",
+ "integrity": "sha512-tJFtNoYBtRtkNysX1Xq4sxtjK8YgoWUNpIiUee0/jHGRwqvzYxkq0hGVbbOGSz+JgFxxRu4K8nb3YpG3CMARtg==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-interpolate": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-3.0.4.tgz",
+ "integrity": "sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-color": "*"
+ }
+ },
+ "node_modules/@types/d3-path": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/@types/d3-path/-/d3-path-3.1.1.tgz",
+ "integrity": "sha512-VMZBYyQvbGmWyWVea0EHs/BwLgxc+MKi1zLDCONksozI4YJMcTt8ZEuIR4Sb1MMTE8MMW49v0IwI5+b7RmfWlg==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-polygon": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/@types/d3-polygon/-/d3-polygon-3.0.2.tgz",
+ "integrity": "sha512-ZuWOtMaHCkN9xoeEMr1ubW2nGWsp4nIql+OPQRstu4ypeZ+zk3YKqQT0CXVe/PYqrKpZAi+J9mTs05TKwjXSRA==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-quadtree": {
+ "version": "3.0.6",
+ "resolved": "https://registry.npmjs.org/@types/d3-quadtree/-/d3-quadtree-3.0.6.tgz",
+ "integrity": "sha512-oUzyO1/Zm6rsxKRHA1vH0NEDG58HrT5icx/azi9MF1TWdtttWl0UIUsjEQBBh+SIkrpd21ZjEv7ptxWys1ncsg==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-random": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/@types/d3-random/-/d3-random-3.0.3.tgz",
+ "integrity": "sha512-Imagg1vJ3y76Y2ea0871wpabqp613+8/r0mCLEBfdtqC7xMSfj9idOnmBYyMoULfHePJyxMAw3nWhJxzc+LFwQ==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-scale": {
+ "version": "4.0.9",
+ "resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-4.0.9.tgz",
+ "integrity": "sha512-dLmtwB8zkAeO/juAMfnV+sItKjlsw2lKdZVVy6LRr0cBmegxSABiLEpGVmSJJ8O08i4+sGR6qQtb6WtuwJdvVw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-time": "*"
+ }
+ },
+ "node_modules/@types/d3-scale-chromatic": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-scale-chromatic/-/d3-scale-chromatic-3.1.0.tgz",
+ "integrity": "sha512-iWMJgwkK7yTRmWqRB5plb1kadXyQ5Sj8V/zYlFGMUBbIPKQScw+Dku9cAAMgJG+z5GYDoMjWGLVOvjghDEFnKQ==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-selection": {
+ "version": "3.0.11",
+ "resolved": "https://registry.npmjs.org/@types/d3-selection/-/d3-selection-3.0.11.tgz",
+ "integrity": "sha512-bhAXu23DJWsrI45xafYpkQ4NtcKMwWnAC/vKrd2l+nxMFuvOT3XMYTIj2opv8vq8AO5Yh7Qac/nSeP/3zjTK0w==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-shape": {
+ "version": "3.1.7",
+ "resolved": "https://registry.npmjs.org/@types/d3-shape/-/d3-shape-3.1.7.tgz",
+ "integrity": "sha512-VLvUQ33C+3J+8p+Daf+nYSOsjB4GXp19/S/aGo60m9h1v6XaxjiT82lKVWJCfzhtuZ3yD7i/TPeC/fuKLLOSmg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-path": "*"
+ }
+ },
+ "node_modules/@types/d3-time": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-3.0.4.tgz",
+ "integrity": "sha512-yuzZug1nkAAaBlBBikKZTgzCeA+k1uy4ZFwWANOfKw5z5LRhV0gNA7gNkKm7HoK+HRN0wX3EkxGk0fpbWhmB7g==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-time-format": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/@types/d3-time-format/-/d3-time-format-4.0.3.tgz",
+ "integrity": "sha512-5xg9rC+wWL8kdDj153qZcsJ0FWiFt0J5RB6LYUNZjwSnesfblqrI/bJ1wBdJ8OQfncgbJG5+2F+qfqnqyzYxyg==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-timer": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/@types/d3-timer/-/d3-timer-3.0.2.tgz",
+ "integrity": "sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-transition": {
+ "version": "3.0.9",
+ "resolved": "https://registry.npmjs.org/@types/d3-transition/-/d3-transition-3.0.9.tgz",
+ "integrity": "sha512-uZS5shfxzO3rGlu0cC3bjmMFKsXv+SmZZcgp0KD22ts4uGXp5EVYGzu/0YdwZeKmddhcAccYtREJKkPfXkZuCg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-selection": "*"
+ }
+ },
+ "node_modules/@types/d3-zoom": {
+ "version": "3.0.8",
+ "resolved": "https://registry.npmjs.org/@types/d3-zoom/-/d3-zoom-3.0.8.tgz",
+ "integrity": "sha512-iqMC4/YlFCSlO8+2Ii1GGGliCAY4XdeG748w5vQUbevlbDu0zSjH/+jojorQVBK/se0j6DUFNPBGSqD3YWYnDw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-interpolate": "*",
+ "@types/d3-selection": "*"
+ }
+ },
+ "node_modules/@types/debug": {
+ "version": "4.1.12",
+ "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz",
+ "integrity": "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/ms": "*"
+ }
+ },
+ "node_modules/@types/estree": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz",
+ "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==",
+ "license": "MIT"
+ },
+ "node_modules/@types/estree-jsx": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/@types/estree-jsx/-/estree-jsx-1.0.5.tgz",
+ "integrity": "sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree": "*"
+ }
+ },
+ "node_modules/@types/geojson": {
+ "version": "7946.0.16",
+ "resolved": "https://registry.npmjs.org/@types/geojson/-/geojson-7946.0.16.tgz",
+ "integrity": "sha512-6C8nqWur3j98U6+lXDfTUWIfgvZU+EumvpHKcYjujKH7woYyLj2sUmff0tRhrqM7BohUw7Pz3ZB1jj2gW9Fvmg==",
+ "license": "MIT"
+ },
+ "node_modules/@types/hast": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz",
+ "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "*"
+ }
+ },
+ "node_modules/@types/mdast": {
+ "version": "4.0.4",
+ "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-4.0.4.tgz",
+ "integrity": "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "*"
+ }
+ },
+ "node_modules/@types/ms": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/@types/ms/-/ms-2.1.0.tgz",
+ "integrity": "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==",
+ "license": "MIT"
+ },
+ "node_modules/@types/react": {
+ "version": "19.2.7",
+ "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.7.tgz",
+ "integrity": "sha512-MWtvHrGZLFttgeEj28VXHxpmwYbor/ATPYbBfSFZEIRK0ecCFLl2Qo55z52Hss+UV9CRN7trSeq1zbgx7YDWWg==",
+ "license": "MIT",
+ "peer": true,
+ "dependencies": {
+ "csstype": "^3.2.2"
+ }
+ },
+ "node_modules/@types/unist": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz",
+ "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==",
+ "license": "MIT"
+ },
+ "node_modules/@ungap/structured-clone": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.3.0.tgz",
+ "integrity": "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==",
+ "license": "ISC"
+ },
+ "node_modules/@vitejs/plugin-react": {
+ "version": "4.7.0",
+ "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-4.7.0.tgz",
+ "integrity": "sha512-gUu9hwfWvvEDBBmgtAowQCojwZmJ5mcLn3aufeCsitijs3+f2NsrPtlAWIR6OPiqljl96GVCUbLe0HyqIpVaoA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/core": "^7.28.0",
+ "@babel/plugin-transform-react-jsx-self": "^7.27.1",
+ "@babel/plugin-transform-react-jsx-source": "^7.27.1",
+ "@rolldown/pluginutils": "1.0.0-beta.27",
+ "@types/babel__core": "^7.20.5",
+ "react-refresh": "^0.17.0"
+ },
+ "engines": {
+ "node": "^14.18.0 || >=16.0.0"
+ },
+ "peerDependencies": {
+ "vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0"
+ }
+ },
+ "node_modules/bail": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
+ "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/baseline-browser-mapping": {
+ "version": "2.8.28",
+ "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.8.28.tgz",
+ "integrity": "sha512-gYjt7OIqdM0PcttNYP2aVrr2G0bMALkBaoehD4BuRGjAOtipg0b6wHg1yNL+s5zSnLZZrGHOw4IrND8CD+3oIQ==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "bin": {
+ "baseline-browser-mapping": "dist/cli.js"
+ }
+ },
+ "node_modules/browserslist": {
+ "version": "4.28.0",
+ "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.0.tgz",
+ "integrity": "sha512-tbydkR/CxfMwelN0vwdP/pLkDwyAASZ+VfWm4EOwlB6SWhx1sYnWLqo8N5j0rAzPfzfRaxt0mM/4wPU/Su84RQ==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/browserslist"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "baseline-browser-mapping": "^2.8.25",
+ "caniuse-lite": "^1.0.30001754",
+ "electron-to-chromium": "^1.5.249",
+ "node-releases": "^2.0.27",
+ "update-browserslist-db": "^1.1.4"
+ },
+ "bin": {
+ "browserslist": "cli.js"
+ },
+ "engines": {
+ "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7"
+ }
+ },
+ "node_modules/caniuse-lite": {
+ "version": "1.0.30001754",
+ "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001754.tgz",
+ "integrity": "sha512-x6OeBXueoAceOmotzx3PO4Zpt4rzpeIFsSr6AAePTZxSkXiYDUmpypEl7e2+8NCd9bD7bXjqyef8CJYPC1jfxg==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/caniuse-lite"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "CC-BY-4.0"
+ },
+ "node_modules/ccount": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/ccount/-/ccount-2.0.1.tgz",
+ "integrity": "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/character-entities": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz",
+ "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/character-entities-html4": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/character-entities-html4/-/character-entities-html4-2.1.0.tgz",
+ "integrity": "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/character-entities-legacy": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz",
+ "integrity": "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/character-reference-invalid": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz",
+ "integrity": "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/classcat": {
+ "version": "5.0.5",
+ "resolved": "https://registry.npmjs.org/classcat/-/classcat-5.0.5.tgz",
+ "integrity": "sha512-JhZUT7JFcQy/EzW605k/ktHtncoo9vnyW/2GspNYwFlN1C/WmjuV/xtS04e9SOkL2sTdw0VAZ2UGCcQ9lR6p6w==",
+ "license": "MIT"
+ },
+ "node_modules/comma-separated-tokens": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz",
+ "integrity": "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/convert-source-map": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz",
+ "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/csstype": {
+ "version": "3.2.3",
+ "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz",
+ "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==",
+ "license": "MIT",
+ "peer": true
+ },
+ "node_modules/d3-color": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz",
+ "integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-dispatch": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-dispatch/-/d3-dispatch-3.0.1.tgz",
+ "integrity": "sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-drag": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/d3-drag/-/d3-drag-3.0.0.tgz",
+ "integrity": "sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-dispatch": "1 - 3",
+ "d3-selection": "3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-ease": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-3.0.1.tgz",
+ "integrity": "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==",
+ "license": "BSD-3-Clause",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-interpolate": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz",
+ "integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-color": "1 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-selection": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz",
+ "integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-timer": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz",
+ "integrity": "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-transition": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-transition/-/d3-transition-3.0.1.tgz",
+ "integrity": "sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-color": "1 - 3",
+ "d3-dispatch": "1 - 3",
+ "d3-ease": "1 - 3",
+ "d3-interpolate": "1 - 3",
+ "d3-timer": "1 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "peerDependencies": {
+ "d3-selection": "2 - 3"
+ }
+ },
+ "node_modules/d3-zoom": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/d3-zoom/-/d3-zoom-3.0.0.tgz",
+ "integrity": "sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-dispatch": "1 - 3",
+ "d3-drag": "2 - 3",
+ "d3-interpolate": "1 - 3",
+ "d3-selection": "2 - 3",
+ "d3-transition": "2 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/debug": {
+ "version": "4.4.3",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz",
+ "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==",
+ "license": "MIT",
+ "dependencies": {
+ "ms": "^2.1.3"
+ },
+ "engines": {
+ "node": ">=6.0"
+ },
+ "peerDependenciesMeta": {
+ "supports-color": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/decode-named-character-reference": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.2.0.tgz",
+ "integrity": "sha512-c6fcElNV6ShtZXmsgNgFFV5tVX2PaV4g+MOAkb8eXHvn6sryJBrZa9r0zV6+dtTyoCKxtDy5tyQ5ZwQuidtd+Q==",
+ "license": "MIT",
+ "dependencies": {
+ "character-entities": "^2.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/dequal": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz",
+ "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/devlop": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz",
+ "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==",
+ "license": "MIT",
+ "dependencies": {
+ "dequal": "^2.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/electron-to-chromium": {
+ "version": "1.5.252",
+ "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.252.tgz",
+ "integrity": "sha512-53uTpjtRgS7gjIxZ4qCgFdNO2q+wJt/Z8+xAvxbCqXPJrY6h7ighUkadQmNMXH96crtpa6gPFNP7BF4UBGDuaA==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/esbuild": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.21.5.tgz",
+ "integrity": "sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==",
+ "dev": true,
+ "hasInstallScript": true,
+ "license": "MIT",
+ "bin": {
+ "esbuild": "bin/esbuild"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "optionalDependencies": {
+ "@esbuild/aix-ppc64": "0.21.5",
+ "@esbuild/android-arm": "0.21.5",
+ "@esbuild/android-arm64": "0.21.5",
+ "@esbuild/android-x64": "0.21.5",
+ "@esbuild/darwin-arm64": "0.21.5",
+ "@esbuild/darwin-x64": "0.21.5",
+ "@esbuild/freebsd-arm64": "0.21.5",
+ "@esbuild/freebsd-x64": "0.21.5",
+ "@esbuild/linux-arm": "0.21.5",
+ "@esbuild/linux-arm64": "0.21.5",
+ "@esbuild/linux-ia32": "0.21.5",
+ "@esbuild/linux-loong64": "0.21.5",
+ "@esbuild/linux-mips64el": "0.21.5",
+ "@esbuild/linux-ppc64": "0.21.5",
+ "@esbuild/linux-riscv64": "0.21.5",
+ "@esbuild/linux-s390x": "0.21.5",
+ "@esbuild/linux-x64": "0.21.5",
+ "@esbuild/netbsd-x64": "0.21.5",
+ "@esbuild/openbsd-x64": "0.21.5",
+ "@esbuild/sunos-x64": "0.21.5",
+ "@esbuild/win32-arm64": "0.21.5",
+ "@esbuild/win32-ia32": "0.21.5",
+ "@esbuild/win32-x64": "0.21.5"
+ }
+ },
+ "node_modules/escalade": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz",
+ "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/estree-util-is-identifier-name": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/estree-util-is-identifier-name/-/estree-util-is-identifier-name-3.0.0.tgz",
+ "integrity": "sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg==",
+ "license": "MIT",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/extend": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz",
+ "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==",
+ "license": "MIT"
+ },
+ "node_modules/fsevents": {
+ "version": "2.3.3",
+ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
+ "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==",
+ "dev": true,
+ "hasInstallScript": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": "^8.16.0 || ^10.6.0 || >=11.0.0"
+ }
+ },
+ "node_modules/gensync": {
+ "version": "1.0.0-beta.2",
+ "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz",
+ "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/hast-util-to-jsx-runtime": {
+ "version": "2.3.6",
+ "resolved": "https://registry.npmjs.org/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.6.tgz",
+ "integrity": "sha512-zl6s8LwNyo1P9uw+XJGvZtdFF1GdAkOg8ujOw+4Pyb76874fLps4ueHXDhXWdk6YHQ6OgUtinliG7RsYvCbbBg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree": "^1.0.0",
+ "@types/hast": "^3.0.0",
+ "@types/unist": "^3.0.0",
+ "comma-separated-tokens": "^2.0.0",
+ "devlop": "^1.0.0",
+ "estree-util-is-identifier-name": "^3.0.0",
+ "hast-util-whitespace": "^3.0.0",
+ "mdast-util-mdx-expression": "^2.0.0",
+ "mdast-util-mdx-jsx": "^3.0.0",
+ "mdast-util-mdxjs-esm": "^2.0.0",
+ "property-information": "^7.0.0",
+ "space-separated-tokens": "^2.0.0",
+ "style-to-js": "^1.0.0",
+ "unist-util-position": "^5.0.0",
+ "vfile-message": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-whitespace": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-3.0.0.tgz",
+ "integrity": "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/hast": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/html-url-attributes": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/html-url-attributes/-/html-url-attributes-3.0.1.tgz",
+ "integrity": "sha512-ol6UPyBWqsrO6EJySPz2O7ZSr856WDrEzM5zMqp+FJJLGMW35cLYmmZnl0vztAZxRUoNZJFTCohfjuIJ8I4QBQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/inline-style-parser": {
+ "version": "0.2.7",
+ "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.7.tgz",
+ "integrity": "sha512-Nb2ctOyNR8DqQoR0OwRG95uNWIC0C1lCgf5Naz5H6Ji72KZ8OcFZLz2P5sNgwlyoJ8Yif11oMuYs5pBQa86csA==",
+ "license": "MIT"
+ },
+ "node_modules/is-alphabetical": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz",
+ "integrity": "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/is-alphanumerical": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz",
+ "integrity": "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==",
+ "license": "MIT",
+ "dependencies": {
+ "is-alphabetical": "^2.0.0",
+ "is-decimal": "^2.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/is-decimal": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-2.0.1.tgz",
+ "integrity": "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/is-hexadecimal": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz",
+ "integrity": "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/is-plain-obj": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
+ "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/js-tokens": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
+ "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==",
+ "license": "MIT"
+ },
+ "node_modules/jsesc": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz",
+ "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==",
+ "dev": true,
+ "license": "MIT",
+ "bin": {
+ "jsesc": "bin/jsesc"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/json5": {
+ "version": "2.2.3",
+ "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz",
+ "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==",
+ "dev": true,
+ "license": "MIT",
+ "bin": {
+ "json5": "lib/cli.js"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/longest-streak": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz",
+ "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/loose-envify": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz",
+ "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==",
+ "license": "MIT",
+ "dependencies": {
+ "js-tokens": "^3.0.0 || ^4.0.0"
+ },
+ "bin": {
+ "loose-envify": "cli.js"
+ }
+ },
+ "node_modules/lru-cache": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
+ "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "yallist": "^3.0.2"
+ }
+ },
+ "node_modules/mdast-util-from-markdown": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.2.tgz",
+ "integrity": "sha512-uZhTV/8NBuw0WHkPTrCqDOl0zVe1BIng5ZtHoDk49ME1qqcjYmmLmOf0gELgcRMxN4w2iuIeVso5/6QymSrgmA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "@types/unist": "^3.0.0",
+ "decode-named-character-reference": "^1.0.0",
+ "devlop": "^1.0.0",
+ "mdast-util-to-string": "^4.0.0",
+ "micromark": "^4.0.0",
+ "micromark-util-decode-numeric-character-reference": "^2.0.0",
+ "micromark-util-decode-string": "^2.0.0",
+ "micromark-util-normalize-identifier": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0",
+ "unist-util-stringify-position": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-mdx-expression": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.1.tgz",
+ "integrity": "sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree-jsx": "^1.0.0",
+ "@types/hast": "^3.0.0",
+ "@types/mdast": "^4.0.0",
+ "devlop": "^1.0.0",
+ "mdast-util-from-markdown": "^2.0.0",
+ "mdast-util-to-markdown": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-mdx-jsx": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.2.0.tgz",
+ "integrity": "sha512-lj/z8v0r6ZtsN/cGNNtemmmfoLAFZnjMbNyLzBafjzikOM+glrjNHPlf6lQDOTccj9n5b0PPihEBbhneMyGs1Q==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree-jsx": "^1.0.0",
+ "@types/hast": "^3.0.0",
+ "@types/mdast": "^4.0.0",
+ "@types/unist": "^3.0.0",
+ "ccount": "^2.0.0",
+ "devlop": "^1.1.0",
+ "mdast-util-from-markdown": "^2.0.0",
+ "mdast-util-to-markdown": "^2.0.0",
+ "parse-entities": "^4.0.0",
+ "stringify-entities": "^4.0.0",
+ "unist-util-stringify-position": "^4.0.0",
+ "vfile-message": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-mdxjs-esm": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-2.0.1.tgz",
+ "integrity": "sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree-jsx": "^1.0.0",
+ "@types/hast": "^3.0.0",
+ "@types/mdast": "^4.0.0",
+ "devlop": "^1.0.0",
+ "mdast-util-from-markdown": "^2.0.0",
+ "mdast-util-to-markdown": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-phrasing": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-4.1.0.tgz",
+ "integrity": "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "unist-util-is": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-to-hast": {
+ "version": "13.2.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.1.tgz",
+ "integrity": "sha512-cctsq2wp5vTsLIcaymblUriiTcZd0CwWtCbLvrOzYCDZoWyMNV8sZ7krj09FSnsiJi3WVsHLM4k6Dq/yaPyCXA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "@types/mdast": "^4.0.0",
+ "@ungap/structured-clone": "^1.0.0",
+ "devlop": "^1.0.0",
+ "micromark-util-sanitize-uri": "^2.0.0",
+ "trim-lines": "^3.0.0",
+ "unist-util-position": "^5.0.0",
+ "unist-util-visit": "^5.0.0",
+ "vfile": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-to-markdown": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.2.tgz",
+ "integrity": "sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "@types/unist": "^3.0.0",
+ "longest-streak": "^3.0.0",
+ "mdast-util-phrasing": "^4.0.0",
+ "mdast-util-to-string": "^4.0.0",
+ "micromark-util-classify-character": "^2.0.0",
+ "micromark-util-decode-string": "^2.0.0",
+ "unist-util-visit": "^5.0.0",
+ "zwitch": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-to-string": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz",
+ "integrity": "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/micromark/-/micromark-4.0.2.tgz",
+ "integrity": "sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "@types/debug": "^4.0.0",
+ "debug": "^4.0.0",
+ "decode-named-character-reference": "^1.0.0",
+ "devlop": "^1.0.0",
+ "micromark-core-commonmark": "^2.0.0",
+ "micromark-factory-space": "^2.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-chunked": "^2.0.0",
+ "micromark-util-combine-extensions": "^2.0.0",
+ "micromark-util-decode-numeric-character-reference": "^2.0.0",
+ "micromark-util-encode": "^2.0.0",
+ "micromark-util-normalize-identifier": "^2.0.0",
+ "micromark-util-resolve-all": "^2.0.0",
+ "micromark-util-sanitize-uri": "^2.0.0",
+ "micromark-util-subtokenize": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-core-commonmark": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-2.0.3.tgz",
+ "integrity": "sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "decode-named-character-reference": "^1.0.0",
+ "devlop": "^1.0.0",
+ "micromark-factory-destination": "^2.0.0",
+ "micromark-factory-label": "^2.0.0",
+ "micromark-factory-space": "^2.0.0",
+ "micromark-factory-title": "^2.0.0",
+ "micromark-factory-whitespace": "^2.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-chunked": "^2.0.0",
+ "micromark-util-classify-character": "^2.0.0",
+ "micromark-util-html-tag-name": "^2.0.0",
+ "micromark-util-normalize-identifier": "^2.0.0",
+ "micromark-util-resolve-all": "^2.0.0",
+ "micromark-util-subtokenize": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-factory-destination": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-2.0.1.tgz",
+ "integrity": "sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-factory-label": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-2.0.1.tgz",
+ "integrity": "sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "devlop": "^1.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-factory-space": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz",
+ "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-factory-title": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-2.0.1.tgz",
+ "integrity": "sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-factory-space": "^2.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-factory-whitespace": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.1.tgz",
+ "integrity": "sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-factory-space": "^2.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-character": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz",
+ "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-chunked": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-2.0.1.tgz",
+ "integrity": "sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-symbol": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-classify-character": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-2.0.1.tgz",
+ "integrity": "sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-combine-extensions": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.1.tgz",
+ "integrity": "sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-chunked": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-decode-numeric-character-reference": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.2.tgz",
+ "integrity": "sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-symbol": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-decode-string": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-2.0.1.tgz",
+ "integrity": "sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "decode-named-character-reference": "^1.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-decode-numeric-character-reference": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-encode": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-2.0.1.tgz",
+ "integrity": "sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT"
+ },
+ "node_modules/micromark-util-html-tag-name": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.1.tgz",
+ "integrity": "sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT"
+ },
+ "node_modules/micromark-util-normalize-identifier": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.1.tgz",
+ "integrity": "sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-symbol": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-resolve-all": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.1.tgz",
+ "integrity": "sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-sanitize-uri": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.1.tgz",
+ "integrity": "sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-encode": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-subtokenize": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-2.1.0.tgz",
+ "integrity": "sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "devlop": "^1.0.0",
+ "micromark-util-chunked": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-symbol": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz",
+ "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT"
+ },
+ "node_modules/micromark-util-types": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-2.0.2.tgz",
+ "integrity": "sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT"
+ },
+ "node_modules/ms": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
+ "license": "MIT"
+ },
+ "node_modules/nanoid": {
+ "version": "3.3.11",
+ "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz",
+ "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "bin": {
+ "nanoid": "bin/nanoid.cjs"
+ },
+ "engines": {
+ "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1"
+ }
+ },
+ "node_modules/node-releases": {
+ "version": "2.0.27",
+ "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz",
+ "integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/parse-entities": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-4.0.2.tgz",
+ "integrity": "sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "character-entities-legacy": "^3.0.0",
+ "character-reference-invalid": "^2.0.0",
+ "decode-named-character-reference": "^1.0.0",
+ "is-alphanumerical": "^2.0.0",
+ "is-decimal": "^2.0.0",
+ "is-hexadecimal": "^2.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/parse-entities/node_modules/@types/unist": {
+ "version": "2.0.11",
+ "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz",
+ "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==",
+ "license": "MIT"
+ },
+ "node_modules/picocolors": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz",
+ "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/postcss": {
+ "version": "8.5.6",
+ "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz",
+ "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/postcss"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "nanoid": "^3.3.11",
+ "picocolors": "^1.1.1",
+ "source-map-js": "^1.2.1"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14"
+ }
+ },
+ "node_modules/property-information": {
+ "version": "7.1.0",
+ "resolved": "https://registry.npmjs.org/property-information/-/property-information-7.1.0.tgz",
+ "integrity": "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/react": {
+ "version": "18.3.1",
+ "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz",
+ "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==",
+ "license": "MIT",
+ "dependencies": {
+ "loose-envify": "^1.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/react-dom": {
+ "version": "18.3.1",
+ "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz",
+ "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==",
+ "license": "MIT",
+ "dependencies": {
+ "loose-envify": "^1.1.0",
+ "scheduler": "^0.23.2"
+ },
+ "peerDependencies": {
+ "react": "^18.3.1"
+ }
+ },
+ "node_modules/react-markdown": {
+ "version": "10.1.0",
+ "resolved": "https://registry.npmjs.org/react-markdown/-/react-markdown-10.1.0.tgz",
+ "integrity": "sha512-qKxVopLT/TyA6BX3Ue5NwabOsAzm0Q7kAPwq6L+wWDwisYs7R8vZ0nRXqq6rkueboxpkjvLGU9fWifiX/ZZFxQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "@types/mdast": "^4.0.0",
+ "devlop": "^1.0.0",
+ "hast-util-to-jsx-runtime": "^2.0.0",
+ "html-url-attributes": "^3.0.0",
+ "mdast-util-to-hast": "^13.0.0",
+ "remark-parse": "^11.0.0",
+ "remark-rehype": "^11.0.0",
+ "unified": "^11.0.0",
+ "unist-util-visit": "^5.0.0",
+ "vfile": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ },
+ "peerDependencies": {
+ "@types/react": ">=18",
+ "react": ">=18"
+ }
+ },
+ "node_modules/react-refresh": {
+ "version": "0.17.0",
+ "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.17.0.tgz",
+ "integrity": "sha512-z6F7K9bV85EfseRCp2bzrpyQ0Gkw1uLoCel9XBVWPg/TjRj94SkJzUTGfOa4bs7iJvBWtQG0Wq7wnI0syw3EBQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/reactflow": {
+ "version": "11.11.4",
+ "resolved": "https://registry.npmjs.org/reactflow/-/reactflow-11.11.4.tgz",
+ "integrity": "sha512-70FOtJkUWH3BAOsN+LU9lCrKoKbtOPnz2uq0CV2PLdNSwxTXOhCbsZr50GmZ+Rtw3jx8Uv7/vBFtCGixLfd4Og==",
+ "license": "MIT",
+ "dependencies": {
+ "@reactflow/background": "11.3.14",
+ "@reactflow/controls": "11.2.14",
+ "@reactflow/core": "11.11.4",
+ "@reactflow/minimap": "11.7.14",
+ "@reactflow/node-resizer": "2.2.14",
+ "@reactflow/node-toolbar": "1.3.14"
+ },
+ "peerDependencies": {
+ "react": ">=17",
+ "react-dom": ">=17"
+ }
+ },
+ "node_modules/remark-parse": {
+ "version": "11.0.0",
+ "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-11.0.0.tgz",
+ "integrity": "sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "mdast-util-from-markdown": "^2.0.0",
+ "micromark-util-types": "^2.0.0",
+ "unified": "^11.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-rehype": {
+ "version": "11.1.2",
+ "resolved": "https://registry.npmjs.org/remark-rehype/-/remark-rehype-11.1.2.tgz",
+ "integrity": "sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "@types/mdast": "^4.0.0",
+ "mdast-util-to-hast": "^13.0.0",
+ "unified": "^11.0.0",
+ "vfile": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/rollup": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.53.2.tgz",
+ "integrity": "sha512-MHngMYwGJVi6Fmnk6ISmnk7JAHRNF0UkuucA0CUW3N3a4KnONPEZz+vUanQP/ZC/iY1Qkf3bwPWzyY84wEks1g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree": "1.0.8"
+ },
+ "bin": {
+ "rollup": "dist/bin/rollup"
+ },
+ "engines": {
+ "node": ">=18.0.0",
+ "npm": ">=8.0.0"
+ },
+ "optionalDependencies": {
+ "@rollup/rollup-android-arm-eabi": "4.53.2",
+ "@rollup/rollup-android-arm64": "4.53.2",
+ "@rollup/rollup-darwin-arm64": "4.53.2",
+ "@rollup/rollup-darwin-x64": "4.53.2",
+ "@rollup/rollup-freebsd-arm64": "4.53.2",
+ "@rollup/rollup-freebsd-x64": "4.53.2",
+ "@rollup/rollup-linux-arm-gnueabihf": "4.53.2",
+ "@rollup/rollup-linux-arm-musleabihf": "4.53.2",
+ "@rollup/rollup-linux-arm64-gnu": "4.53.2",
+ "@rollup/rollup-linux-arm64-musl": "4.53.2",
+ "@rollup/rollup-linux-loong64-gnu": "4.53.2",
+ "@rollup/rollup-linux-ppc64-gnu": "4.53.2",
+ "@rollup/rollup-linux-riscv64-gnu": "4.53.2",
+ "@rollup/rollup-linux-riscv64-musl": "4.53.2",
+ "@rollup/rollup-linux-s390x-gnu": "4.53.2",
+ "@rollup/rollup-linux-x64-gnu": "4.53.2",
+ "@rollup/rollup-linux-x64-musl": "4.53.2",
+ "@rollup/rollup-openharmony-arm64": "4.53.2",
+ "@rollup/rollup-win32-arm64-msvc": "4.53.2",
+ "@rollup/rollup-win32-ia32-msvc": "4.53.2",
+ "@rollup/rollup-win32-x64-gnu": "4.53.2",
+ "@rollup/rollup-win32-x64-msvc": "4.53.2",
+ "fsevents": "~2.3.2"
+ }
+ },
+ "node_modules/scheduler": {
+ "version": "0.23.2",
+ "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.2.tgz",
+ "integrity": "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==",
+ "license": "MIT",
+ "dependencies": {
+ "loose-envify": "^1.1.0"
+ }
+ },
+ "node_modules/semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
+ "dev": true,
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ }
+ },
+ "node_modules/source-map-js": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz",
+ "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/space-separated-tokens": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz",
+ "integrity": "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/stringify-entities": {
+ "version": "4.0.4",
+ "resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.4.tgz",
+ "integrity": "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==",
+ "license": "MIT",
+ "dependencies": {
+ "character-entities-html4": "^2.0.0",
+ "character-entities-legacy": "^3.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/style-to-js": {
+ "version": "1.1.21",
+ "resolved": "https://registry.npmjs.org/style-to-js/-/style-to-js-1.1.21.tgz",
+ "integrity": "sha512-RjQetxJrrUJLQPHbLku6U/ocGtzyjbJMP9lCNK7Ag0CNh690nSH8woqWH9u16nMjYBAok+i7JO1NP2pOy8IsPQ==",
+ "license": "MIT",
+ "dependencies": {
+ "style-to-object": "1.0.14"
+ }
+ },
+ "node_modules/style-to-object": {
+ "version": "1.0.14",
+ "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.14.tgz",
+ "integrity": "sha512-LIN7rULI0jBscWQYaSswptyderlarFkjQ+t79nzty8tcIAceVomEVlLzH5VP4Cmsv6MtKhs7qaAiwlcp+Mgaxw==",
+ "license": "MIT",
+ "dependencies": {
+ "inline-style-parser": "0.2.7"
+ }
+ },
+ "node_modules/trim-lines": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz",
+ "integrity": "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/trough": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/trough/-/trough-2.2.0.tgz",
+ "integrity": "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/unified": {
+ "version": "11.0.5",
+ "resolved": "https://registry.npmjs.org/unified/-/unified-11.0.5.tgz",
+ "integrity": "sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "bail": "^2.0.0",
+ "devlop": "^1.0.0",
+ "extend": "^3.0.0",
+ "is-plain-obj": "^4.0.0",
+ "trough": "^2.0.0",
+ "vfile": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-is": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.1.tgz",
+ "integrity": "sha512-LsiILbtBETkDz8I9p1dQ0uyRUWuaQzd/cuEeS1hoRSyW5E5XGmTzlwY1OrNzzakGowI9Dr/I8HVaw4hTtnxy8g==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-position": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-5.0.0.tgz",
+ "integrity": "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-stringify-position": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz",
+ "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-visit": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-5.0.0.tgz",
+ "integrity": "sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "unist-util-is": "^6.0.0",
+ "unist-util-visit-parents": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-visit-parents": {
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-6.0.2.tgz",
+ "integrity": "sha512-goh1s1TBrqSqukSc8wrjwWhL0hiJxgA8m4kFxGlQ+8FYQ3C/m11FcTs4YYem7V664AhHVvgoQLk890Ssdsr2IQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "unist-util-is": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/update-browserslist-db": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.4.tgz",
+ "integrity": "sha512-q0SPT4xyU84saUX+tomz1WLkxUbuaJnR1xWt17M7fJtEJigJeWUNGUqrauFXsHnqev9y9JTRGwk13tFBuKby4A==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/browserslist"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "escalade": "^3.2.0",
+ "picocolors": "^1.1.1"
+ },
+ "bin": {
+ "update-browserslist-db": "cli.js"
+ },
+ "peerDependencies": {
+ "browserslist": ">= 4.21.0"
+ }
+ },
+ "node_modules/use-sync-external-store": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.6.0.tgz",
+ "integrity": "sha512-Pp6GSwGP/NrPIrxVFAIkOQeyw8lFenOHijQWkUTrDvrF4ALqylP2C/KCkeS9dpUM3KvYRQhna5vt7IL95+ZQ9w==",
+ "license": "MIT",
+ "peerDependencies": {
+ "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0"
+ }
+ },
+ "node_modules/vfile": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz",
+ "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "vfile-message": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/vfile-message": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-4.0.3.tgz",
+ "integrity": "sha512-QTHzsGd1EhbZs4AsQ20JX1rC3cOlt/IWJruk893DfLRr57lcnOeMaWG4K0JrRta4mIJZKth2Au3mM3u03/JWKw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "unist-util-stringify-position": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/vite": {
+ "version": "5.4.21",
+ "resolved": "https://registry.npmjs.org/vite/-/vite-5.4.21.tgz",
+ "integrity": "sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "esbuild": "^0.21.3",
+ "postcss": "^8.4.43",
+ "rollup": "^4.20.0"
+ },
+ "bin": {
+ "vite": "bin/vite.js"
+ },
+ "engines": {
+ "node": "^18.0.0 || >=20.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/vitejs/vite?sponsor=1"
+ },
+ "optionalDependencies": {
+ "fsevents": "~2.3.3"
+ },
+ "peerDependencies": {
+ "@types/node": "^18.0.0 || >=20.0.0",
+ "less": "*",
+ "lightningcss": "^1.21.0",
+ "sass": "*",
+ "sass-embedded": "*",
+ "stylus": "*",
+ "sugarss": "*",
+ "terser": "^5.4.0"
+ },
+ "peerDependenciesMeta": {
+ "@types/node": {
+ "optional": true
+ },
+ "less": {
+ "optional": true
+ },
+ "lightningcss": {
+ "optional": true
+ },
+ "sass": {
+ "optional": true
+ },
+ "sass-embedded": {
+ "optional": true
+ },
+ "stylus": {
+ "optional": true
+ },
+ "sugarss": {
+ "optional": true
+ },
+ "terser": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/yallist": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz",
+ "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/zustand": {
+ "version": "4.5.7",
+ "resolved": "https://registry.npmjs.org/zustand/-/zustand-4.5.7.tgz",
+ "integrity": "sha512-CHOUy7mu3lbD6o6LJLfllpjkzhHXSBlX8B9+qPddUsIfeF5S/UZ5q0kmCsnRqT1UHFQZchNFDDzMbQsuesHWlw==",
+ "license": "MIT",
+ "dependencies": {
+ "use-sync-external-store": "^1.2.2"
+ },
+ "engines": {
+ "node": ">=12.7.0"
+ },
+ "peerDependencies": {
+ "@types/react": ">=16.8",
+ "immer": ">=9.0.6",
+ "react": ">=16.8"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ },
+ "immer": {
+ "optional": true
+ },
+ "react": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/zwitch": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz",
+ "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ }
+ }
+}
diff --git a/frontend/package.json b/frontend/package.json
new file mode 100644
index 0000000000000000000000000000000000000000..5ffdd0f207fd4f5ecabbcb943c1610e75dde4f59
--- /dev/null
+++ b/frontend/package.json
@@ -0,0 +1,21 @@
+{
+ "name": "gitpilot-frontend",
+ "version": "0.2.6",
+ "private": true,
+ "scripts": {
+ "dev": "vite --host",
+ "build": "vite build",
+ "vercel-build": "vite build",
+ "preview": "vite preview"
+ },
+ "dependencies": {
+ "react": "^18.2.0",
+ "react-dom": "^18.2.0",
+ "react-markdown": "^10.1.0",
+ "reactflow": "^11.11.4"
+ },
+ "devDependencies": {
+ "@vitejs/plugin-react": "^4.0.0",
+ "vite": "^5.0.0"
+ }
+}
diff --git a/frontend/styles.css b/frontend/styles.css
new file mode 100644
index 0000000000000000000000000000000000000000..0fd9111fdd1971900b71aeb29421204212adb47f
--- /dev/null
+++ b/frontend/styles.css
@@ -0,0 +1,3288 @@
+:root {
+ color-scheme: dark;
+ font-family: system-ui, -apple-system, BlinkMacSystemFont, "SF Pro Text",
+ sans-serif;
+ background: #050608;
+ color: #f5f5f7;
+}
+
+*,
+*::before,
+*::after {
+ box-sizing: border-box;
+}
+
+body {
+ margin: 0;
+ overflow: hidden;
+}
+
+/* Custom scrollbar styling - Claude Code style */
+::-webkit-scrollbar {
+ width: 8px;
+ height: 8px;
+}
+
+::-webkit-scrollbar-track {
+ background: transparent;
+}
+
+::-webkit-scrollbar-thumb {
+ background: #272832;
+ border-radius: 4px;
+ transition: background 0.2s ease;
+}
+
+::-webkit-scrollbar-thumb:hover {
+ background: #3a3b4d;
+}
+
+/* App Root - Fixed height with footer accommodation */
+.app-root {
+ display: flex;
+ flex-direction: column;
+ height: 100vh;
+ background: radial-gradient(circle at top, #171823 0, #050608 55%);
+ color: #f5f5f7;
+ overflow: hidden;
+}
+
+/* Main content wrapper (sidebar + workspace) */
+.main-wrapper {
+ display: flex;
+ flex: 1;
+ min-height: 0;
+ overflow: hidden;
+}
+
+/* Sidebar */
+.sidebar {
+ width: 320px;
+ min-width: 320px;
+ padding: 16px 14px;
+ border-right: 1px solid #272832;
+ background: linear-gradient(180deg, #101117 0, #050608 100%);
+ display: flex;
+ flex-direction: column;
+ gap: 16px;
+ overflow-y: auto;
+ overflow-x: hidden;
+ transition: width 0.25s cubic-bezier(0.4, 0, 0.2, 1),
+ min-width 0.25s cubic-bezier(0.4, 0, 0.2, 1),
+ padding 0.25s cubic-bezier(0.4, 0, 0.2, 1);
+}
+
+.sidebar--collapsed {
+ width: 52px;
+ min-width: 52px;
+ padding: 16px 8px;
+ gap: 8px;
+}
+
+/* ---- Sidebar top row: logo + toggle ---- */
+.sidebar-top-row {
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+ gap: 6px;
+ min-height: 32px;
+}
+
+.sidebar--collapsed .sidebar-top-row {
+ justify-content: center;
+}
+
+.sidebar-toggle-btn {
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ width: 28px;
+ height: 28px;
+ border-radius: 6px;
+ border: none;
+ background: transparent;
+ color: #6b6d82;
+ cursor: pointer;
+ transition: background 0.15s, color 0.15s;
+ flex-shrink: 0;
+}
+
+.sidebar-toggle-btn:hover {
+ background: #1e1f2e;
+ color: #e0e1eb;
+}
+
+.sidebar--collapsed .sidebar-toggle-btn {
+ display: none;
+}
+
+/* ---- Nav buttons: icon + label layout ---- */
+.sidebar .nav-btn {
+ display: flex;
+ align-items: center;
+ gap: 10px;
+}
+
+.sidebar .nav-btn svg {
+ flex-shrink: 0;
+}
+
+.sidebar--collapsed .nav-btn {
+ justify-content: center;
+ padding: 8px;
+}
+
+/* ---- User profile in collapsed state ---- */
+.sidebar--collapsed .user-profile {
+ align-items: center;
+}
+
+.sidebar--collapsed .user-avatar {
+ width: 28px;
+ height: 28px;
+}
+
+/* User Profile Section */
+.user-profile {
+ margin-top: auto;
+ padding-top: 16px;
+ border-top: 1px solid #272832;
+ display: flex;
+ flex-direction: column;
+ gap: 12px;
+ animation: fadeIn 0.3s ease;
+}
+
+.user-profile-header {
+ display: flex;
+ align-items: center;
+ gap: 10px;
+}
+
+.user-avatar {
+ width: 40px;
+ height: 40px;
+ border-radius: 10px;
+ border: 2px solid #272832;
+ transition: all 0.2s ease;
+}
+
+.user-avatar:hover {
+ border-color: #ff7a3c;
+ transform: scale(1.05);
+}
+
+.user-info {
+ flex: 1;
+ min-width: 0;
+}
+
+.user-name {
+ font-size: 13px;
+ font-weight: 600;
+ color: #f5f5f7;
+ white-space: nowrap;
+ overflow: hidden;
+ text-overflow: ellipsis;
+}
+
+.user-login {
+ font-size: 11px;
+ color: #9a9bb0;
+ white-space: nowrap;
+ overflow: hidden;
+ text-overflow: ellipsis;
+}
+
+.btn-logout {
+ border: none;
+ outline: none;
+ background: #1a1b26;
+ color: #c3c5dd;
+ border-radius: 8px;
+ padding: 8px 12px;
+ font-size: 12px;
+ font-weight: 500;
+ cursor: pointer;
+ transition: all 0.2s ease;
+ border: 1px solid #272832;
+}
+
+.btn-logout:hover {
+ background: #2a2b3c;
+ border-color: #ff7a3c;
+ color: #ff7a3c;
+ transform: translateY(-1px);
+}
+
+.logo-row {
+ display: flex;
+ align-items: center;
+ gap: 10px;
+ animation: fadeIn 0.3s ease;
+}
+
+@keyframes fadeIn {
+ from {
+ opacity: 0;
+ transform: translateY(-10px);
+ }
+ to {
+ opacity: 1;
+ transform: translateY(0);
+ }
+}
+
+.logo-square {
+ width: 32px;
+ height: 32px;
+ border-radius: 8px;
+ background: #ff7a3c;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ font-weight: 700;
+ color: #050608;
+ transition: transform 0.2s ease;
+}
+
+.logo-square:hover {
+ transform: scale(1.05);
+}
+
+.logo-title {
+ font-size: 16px;
+ font-weight: 600;
+}
+
+.logo-subtitle {
+ font-size: 12px;
+ color: #a1a2b3;
+}
+
+/* Active context card */
+.sidebar-context-card {
+ padding: 10px 12px;
+ border-radius: 10px;
+ background: #151622;
+ border: 1px solid #272832;
+ display: flex;
+ flex-direction: column;
+ gap: 6px;
+ animation: slideIn 0.3s ease;
+}
+
+.sidebar-context-header {
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+}
+
+.sidebar-context-close {
+ width: 22px;
+ height: 22px;
+ border-radius: 4px;
+ border: none;
+ background: transparent;
+ color: #71717a;
+ cursor: pointer;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ padding: 0;
+ transition: all 0.15s ease;
+}
+
+.sidebar-context-close:hover {
+ background: #272832;
+ color: #f5f5f7;
+}
+
+.sidebar-section-label {
+ font-size: 10px;
+ font-weight: 700;
+ letter-spacing: 0.08em;
+ color: #71717a;
+ text-transform: uppercase;
+}
+
+.sidebar-context-body {
+ display: flex;
+ flex-direction: column;
+ gap: 2px;
+}
+
+.sidebar-context-repo {
+ font-size: 13px;
+ font-weight: 600;
+ color: #f5f5f7;
+ white-space: nowrap;
+ overflow: hidden;
+ text-overflow: ellipsis;
+}
+
+.sidebar-context-meta {
+ font-size: 11px;
+ color: #9a9bb0;
+ display: flex;
+ align-items: center;
+ gap: 6px;
+}
+
+.sidebar-context-dot {
+ width: 3px;
+ height: 3px;
+ border-radius: 50%;
+ background: #4a4b5e;
+ display: inline-block;
+}
+
+.sidebar-context-actions {
+ display: flex;
+ gap: 6px;
+ margin-top: 2px;
+}
+
+/* Action button inside the sidebar context card. */
+.sidebar-context-btn {
+  outline: none;
+  background: #1a1b26;
+  color: #9a9bb0;
+  border-radius: 6px;
+  padding: 4px 10px;
+  font-size: 11px;
+  font-weight: 500;
+  cursor: pointer;
+  transition: all 0.15s ease;
+  /* Single border declaration — the rule previously set `border: none`
+     first, which was dead code immediately overridden by this shorthand. */
+  border: 1px solid #272832;
+}
+
+.sidebar-context-btn:hover {
+ background: #222335;
+ color: #c3c5dd;
+ border-color: #3a3b4d;
+}
+
+/* Per-repo chip list in sidebar context card */
+.sidebar-repo-chips {
+ display: flex;
+ flex-direction: column;
+ gap: 3px;
+}
+
+.sidebar-repo-chip {
+ display: flex;
+ align-items: center;
+ gap: 5px;
+ padding: 5px 6px 5px 8px;
+ border-radius: 6px;
+ border: 1px solid #272832;
+ background: #111220;
+ cursor: pointer;
+ white-space: nowrap;
+ overflow: hidden;
+ transition: border-color 0.15s, background-color 0.15s;
+}
+
+.sidebar-repo-chip:hover {
+ border-color: #3a3b4d;
+ background: #1a1b2e;
+}
+
+.sidebar-repo-chip-active {
+ border-color: #3B82F6;
+ background: rgba(59, 130, 246, 0.06);
+}
+
+.sidebar-chip-name {
+ font-size: 12px;
+ font-weight: 600;
+ color: #c3c5dd;
+ font-family: monospace;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ flex: 1;
+ min-width: 0;
+}
+
+.sidebar-repo-chip-active .sidebar-chip-name {
+ color: #f5f5f7;
+}
+
+.sidebar-chip-dot {
+ width: 2px;
+ height: 2px;
+ border-radius: 50%;
+ background: #4a4b5e;
+ flex-shrink: 0;
+}
+
+.sidebar-chip-branch {
+ font-size: 10px;
+ color: #71717a;
+ font-family: monospace;
+ flex-shrink: 0;
+}
+
+.sidebar-repo-chip-active .sidebar-chip-branch {
+ color: #60a5fa;
+}
+
+.sidebar-chip-write-badge {
+ font-size: 8px;
+ font-weight: 700;
+ text-transform: uppercase;
+ letter-spacing: 0.06em;
+ color: #4caf88;
+ padding: 0 4px;
+ border-radius: 3px;
+ border: 1px solid rgba(76, 175, 136, 0.25);
+ flex-shrink: 0;
+}
+
+/* Per-chip remove button: subtle by default, visible on hover */
+.sidebar-chip-remove {
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ width: 16px;
+ height: 16px;
+ border-radius: 3px;
+ border: none;
+ background: transparent;
+ color: #52525B;
+ cursor: pointer;
+ flex-shrink: 0;
+ padding: 0;
+ opacity: 0;
+ transition: opacity 0.15s, color 0.15s, background 0.15s;
+}
+
+.sidebar-repo-chip:hover .sidebar-chip-remove {
+ opacity: 1;
+}
+
+.sidebar-chip-remove:hover {
+ color: #f87171;
+ background: rgba(248, 113, 113, 0.1);
+}
+
+/* "clear all" link-style button */
+.sidebar-clear-all {
+ font-size: 9px;
+ color: #52525B;
+ width: auto;
+ height: auto;
+ padding: 2px 6px;
+ font-weight: 600;
+ text-transform: uppercase;
+ letter-spacing: 0.04em;
+}
+
+.sidebar-clear-all:hover {
+ color: #f87171;
+ background: rgba(248, 113, 113, 0.08);
+}
+
+@keyframes slideIn {
+ from {
+ opacity: 0;
+ transform: translateX(-10px);
+ }
+ to {
+ opacity: 1;
+ transform: translateX(0);
+ }
+}
+
+/* ContextBar — horizontal chip bar above workspace */
+.ctxbar {
+ display: flex;
+ align-items: center;
+ gap: 8px;
+ padding: 6px 12px;
+ border-bottom: 1px solid #1E1F23;
+ background-color: #0D0D10;
+ min-height: 40px;
+}
+
+.ctxbar-scroll {
+ display: flex;
+ align-items: center;
+ gap: 6px;
+ flex: 1;
+ overflow-x: auto;
+ scrollbar-width: none;
+}
+
+.ctxbar-scroll::-webkit-scrollbar {
+ display: none;
+}
+
+.ctxbar-chip {
+ display: flex;
+ align-items: center;
+ gap: 5px;
+ padding: 4px 6px 4px 8px;
+ border-radius: 6px;
+ border: 1px solid #27272A;
+ background: #18181B;
+ cursor: pointer;
+ white-space: nowrap;
+ position: relative;
+ flex-shrink: 0;
+ transition: border-color 0.15s, background-color 0.15s;
+}
+
+.ctxbar-chip:hover {
+ border-color: #3a3b4d;
+ background: #1e1f30;
+}
+
+.ctxbar-chip-active {
+ border-color: #3B82F6;
+ background: rgba(59, 130, 246, 0.08);
+}
+
+.ctxbar-chip-indicator {
+ position: absolute;
+ left: 0;
+ top: 25%;
+ bottom: 25%;
+ width: 2px;
+ border-radius: 1px;
+ background-color: #3B82F6;
+}
+
+.ctxbar-chip-name {
+ font-size: 12px;
+ font-weight: 600;
+ font-family: monospace;
+ color: #A1A1AA;
+ max-width: 120px;
+ overflow: hidden;
+ text-overflow: ellipsis;
+}
+
+.ctxbar-chip-active .ctxbar-chip-name {
+ color: #E4E4E7;
+}
+
+.ctxbar-chip-dot {
+ width: 2px;
+ height: 2px;
+ border-radius: 50%;
+ background: #4a4b5e;
+ flex-shrink: 0;
+}
+
+.ctxbar-chip-branch {
+ font-size: 10px;
+ font-family: monospace;
+ background: none;
+ border: 1px solid transparent;
+ border-radius: 3px;
+ padding: 1px 4px;
+ cursor: pointer;
+ color: #71717A;
+ transition: border-color 0.15s, color 0.15s;
+}
+
+.ctxbar-chip-branch:hover {
+ border-color: #3a3b4d;
+}
+
+.ctxbar-chip-branch-active {
+ color: #60a5fa;
+}
+
+.ctxbar-chip-write {
+ font-size: 8px;
+ font-weight: 700;
+ text-transform: uppercase;
+ letter-spacing: 0.06em;
+ color: #4caf88;
+ padding: 0 4px;
+ border-radius: 3px;
+ border: 1px solid rgba(76, 175, 136, 0.25);
+ flex-shrink: 0;
+}
+
+/* Hover-reveal remove button (Claude-style: hidden → visible on chip hover → red on X hover) */
+.ctxbar-chip-remove {
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ width: 16px;
+ height: 16px;
+ border-radius: 3px;
+ border: none;
+ background: transparent;
+ color: #52525B;
+ cursor: pointer;
+ flex-shrink: 0;
+ padding: 0;
+ opacity: 0;
+ transition: opacity 0.15s, color 0.15s, background 0.15s;
+}
+
+.ctxbar-chip-remove-visible,
+.ctxbar-chip:hover .ctxbar-chip-remove {
+ opacity: 1;
+}
+
+.ctxbar-chip-remove:hover {
+ color: #f87171;
+ background: rgba(248, 113, 113, 0.1);
+}
+
+.ctxbar-add {
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ width: 28px;
+ height: 28px;
+ border-radius: 6px;
+ border: 1px dashed #3F3F46;
+ background: transparent;
+ color: #71717A;
+ cursor: pointer;
+ flex-shrink: 0;
+ transition: border-color 0.15s, color 0.15s;
+}
+
+.ctxbar-add:hover {
+ border-color: #60a5fa;
+ color: #60a5fa;
+}
+
+.ctxbar-meta {
+ font-size: 10px;
+ color: #52525B;
+ white-space: nowrap;
+ flex-shrink: 0;
+ font-weight: 600;
+ text-transform: uppercase;
+ letter-spacing: 0.04em;
+}
+
+.ctxbar-branch-picker {
+ position: absolute;
+ top: 100%;
+ left: 0;
+ z-index: 100;
+ margin-top: 4px;
+}
+
+/* Legacy compat — kept for other uses */
+.sidebar-repo-info {
+ padding: 10px 12px;
+ border-radius: 10px;
+ background: #151622;
+ border: 1px solid #272832;
+ animation: slideIn 0.3s ease;
+}
+
+.sidebar-repo-name {
+ font-size: 13px;
+ font-weight: 500;
+}
+
+.sidebar-repo-meta {
+ font-size: 11px;
+ color: #9a9bb0;
+ margin-top: 2px;
+}
+
+.settings-button {
+ border: none;
+ outline: none;
+ background: #1a1b26;
+ color: #f5f5f7;
+ border-radius: 8px;
+ padding: 8px 10px;
+ cursor: pointer;
+ font-size: 13px;
+ transition: all 0.2s ease;
+}
+
+.settings-button:hover {
+ background: #222335;
+ transform: translateY(-1px);
+}
+
+/* Repo search */
+.repo-search-box {
+ border-radius: 12px;
+ background: #101117;
+ border: 1px solid #272832;
+ padding: 8px;
+ display: flex;
+ flex-direction: column;
+ gap: 8px;
+}
+
+/* Search header wrapper */
+.repo-search-header {
+ display: flex;
+ flex-direction: column;
+ gap: 8px;
+}
+
+/* Search row with input and button */
+.repo-search-row {
+ display: flex;
+ gap: 6px;
+ align-items: center;
+}
+
+/* Search input */
+.repo-search-input {
+ flex: 1;
+ border-radius: 7px;
+ padding: 8px 10px;
+ border: 1px solid #272832;
+ background: #050608;
+ color: #f5f5f7;
+ font-size: 13px;
+ transition: all 0.2s ease;
+}
+
+.repo-search-input:focus {
+ outline: none;
+ border-color: #ff7a3c;
+ background: #0a0b0f;
+ box-shadow: 0 0 0 3px rgba(255, 122, 60, 0.08);
+}
+
+.repo-search-input::placeholder {
+ color: #676883;
+}
+
+.repo-search-input:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+}
+
+/* Search button */
+.repo-search-btn {
+ border-radius: 7px;
+ border: none;
+ outline: none;
+ padding: 8px 14px;
+ background: #1a1b26;
+ color: #f5f5f7;
+ cursor: pointer;
+ font-size: 13px;
+ font-weight: 500;
+ transition: all 0.2s ease;
+ white-space: nowrap;
+}
+
+.repo-search-btn:hover:not(:disabled) {
+ background: #222335;
+ transform: translateY(-1px);
+}
+
+.repo-search-btn:active:not(:disabled) {
+ transform: translateY(0);
+}
+
+.repo-search-btn:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+}
+
+/* Info bar (shows count and clear button) */
+.repo-info-bar {
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ padding: 6px 10px;
+ background: #0a0b0f;
+ border: 1px solid #272832;
+ border-radius: 7px;
+ font-size: 11px;
+}
+
+.repo-count {
+ color: #9a9bb0;
+ font-weight: 500;
+}
+
+.repo-clear-btn {
+ padding: 3px 10px;
+ background: transparent;
+ border: 1px solid #272832;
+ border-radius: 5px;
+ color: #9a9bb0;
+ font-size: 11px;
+ font-weight: 500;
+ cursor: pointer;
+ transition: all 0.2s ease;
+}
+
+.repo-clear-btn:hover:not(:disabled) {
+ background: #1a1b26;
+ color: #c3c5dd;
+ border-color: #3a3b4d;
+}
+
+.repo-clear-btn:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+}
+
+/* Status message */
+.repo-status {
+ padding: 8px 10px;
+ background: #1a1b26;
+ border: 1px solid #272832;
+ border-radius: 7px;
+ color: #9a9bb0;
+ font-size: 11px;
+ text-align: center;
+}
+
+/* Repository list */
+.repo-list {
+ max-height: 220px;
+ overflow-y: auto;
+ overflow-x: hidden;
+ padding-right: 2px;
+ display: flex;
+ flex-direction: column;
+ gap: 4px;
+}
+
+/* Custom scrollbar for repo list */
+.repo-list::-webkit-scrollbar {
+ width: 6px;
+}
+
+.repo-list::-webkit-scrollbar-track {
+ background: transparent;
+}
+
+.repo-list::-webkit-scrollbar-thumb {
+ background: #272832;
+ border-radius: 3px;
+}
+
+.repo-list::-webkit-scrollbar-thumb:hover {
+ background: #3a3b4d;
+}
+
+/* Repository item */
+/* Clickable repository row in the sidebar search-results list. */
+.repo-item {
+  width: 100%;
+  text-align: left;
+  outline: none;
+  background: transparent;
+  color: #f5f5f7;
+  padding: 8px 8px;
+  border-radius: 7px;
+  cursor: pointer;
+  display: flex;
+  align-items: center;
+  justify-content: space-between;
+  gap: 8px;
+  transition: all 0.15s ease;
+  /* Transparent border reserves space so the hover border (see
+     .repo-item:hover) doesn't shift layout. The rule previously also set
+     `border: none`, a dead declaration shadowed by this one. */
+  border: 1px solid transparent;
+}
+
+.repo-item:hover {
+ background: #1a1b26;
+ border-color: #272832;
+ transform: translateX(2px);
+}
+
+.repo-item-content {
+ display: flex;
+ flex-direction: column;
+ gap: 2px;
+ flex: 1;
+ min-width: 0;
+}
+
+.repo-name {
+ font-size: 13px;
+ font-weight: 500;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ white-space: nowrap;
+}
+
+.repo-owner {
+ font-size: 11px;
+ color: #8e8fac;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ white-space: nowrap;
+}
+
+/* Private badge */
+.repo-badge-private {
+ padding: 2px 6px;
+ background: #1a1b26;
+ border: 1px solid #3a3b4d;
+ border-radius: 4px;
+ color: #9a9bb0;
+ font-size: 9px;
+ font-weight: 600;
+ text-transform: uppercase;
+ letter-spacing: 0.3px;
+ white-space: nowrap;
+ flex-shrink: 0;
+}
+
+/* Loading states */
+.repo-loading {
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ gap: 10px;
+ padding: 30px 20px;
+ color: #9a9bb0;
+ font-size: 12px;
+}
+
+.repo-loading-spinner {
+ width: 24px;
+ height: 24px;
+ border: 2px solid #272832;
+ border-top-color: #ff7a3c;
+ border-radius: 50%;
+ animation: repo-spin 0.8s linear infinite;
+}
+
+.repo-loading-spinner-small {
+ width: 14px;
+ height: 14px;
+ border: 2px solid rgba(255, 122, 60, 0.3);
+ border-top-color: #ff7a3c;
+ border-radius: 50%;
+ animation: repo-spin 0.8s linear infinite;
+}
+
+@keyframes repo-spin {
+ to {
+ transform: rotate(360deg);
+ }
+}
+
+/* Load more button */
+.repo-load-more {
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ gap: 8px;
+ width: 100%;
+ padding: 10px 12px;
+ margin: 4px 0;
+ background: #0a0b0f;
+ border: 1px solid #272832;
+ border-radius: 7px;
+ color: #c3c5dd;
+ font-size: 12px;
+ font-weight: 500;
+ cursor: pointer;
+ transition: all 0.2s ease;
+}
+
+.repo-load-more:hover:not(:disabled) {
+ background: #1a1b26;
+ border-color: #3a3b4d;
+ transform: translateY(-1px);
+}
+
+.repo-load-more:active:not(:disabled) {
+ transform: translateY(0);
+}
+
+.repo-load-more:disabled {
+ opacity: 0.6;
+ cursor: not-allowed;
+}
+
+.repo-load-more-count {
+ color: #7779a0;
+ font-weight: 400;
+}
+
+/* All loaded message */
+.repo-all-loaded {
+ padding: 10px 12px;
+ margin: 4px 0;
+ background: rgba(124, 255, 179, 0.08);
+ border: 1px solid rgba(124, 255, 179, 0.2);
+ border-radius: 7px;
+ color: #7cffb3;
+ font-size: 11px;
+ text-align: center;
+ font-weight: 500;
+}
+
+/* GitHub App installation notice */
+.repo-github-notice {
+ display: flex;
+ align-items: flex-start;
+ gap: 10px;
+ padding: 10px 12px;
+ background: #0a0b0f;
+ border: 1px solid #272832;
+ border-radius: 7px;
+ font-size: 11px;
+ line-height: 1.5;
+ margin-top: 4px;
+}
+
+.repo-github-icon {
+ flex-shrink: 0;
+ margin-top: 1px;
+ opacity: 0.6;
+ color: #9a9bb0;
+ width: 16px;
+ height: 16px;
+}
+
+.repo-github-notice-content {
+ flex: 1;
+ display: flex;
+ flex-direction: column;
+ gap: 3px;
+}
+
+.repo-github-notice-title {
+ color: #c3c5dd;
+ font-weight: 600;
+ font-size: 11px;
+}
+
+.repo-github-notice-text {
+ color: #9a9bb0;
+}
+
+.repo-github-link {
+ color: #ff7a3c;
+ text-decoration: none;
+ font-weight: 500;
+ transition: color 0.2s ease;
+}
+
+.repo-github-link:hover {
+ color: #ff8b52;
+ text-decoration: underline;
+}
+
+/* Focus visible for accessibility */
+.repo-item:focus-visible,
+.repo-search-btn:focus-visible,
+.repo-load-more:focus-visible,
+.repo-clear-btn:focus-visible {
+ outline: 2px solid #ff7a3c;
+ outline-offset: 2px;
+}
+
+/* Reduced motion support */
+@media (prefers-reduced-motion: reduce) {
+ .repo-item,
+ .repo-search-btn,
+ .repo-load-more,
+ .repo-clear-btn {
+ transition: none;
+ }
+
+ .repo-loading-spinner,
+ .repo-loading-spinner-small {
+ animation: none;
+ }
+}
+
+/* Mobile responsive adjustments */
+@media (max-width: 768px) {
+ .repo-search-input {
+ font-size: 16px; /* Prevents zoom on iOS */
+ }
+
+ .repo-item {
+ padding: 7px 7px;
+ }
+
+ .repo-name {
+ font-size: 12px;
+ }
+
+ .repo-owner {
+ font-size: 10px;
+ }
+}
+
+/* Workspace */
+.workspace {
+ flex: 1;
+ display: flex;
+ flex-direction: column;
+ position: relative;
+ overflow: hidden;
+ min-height: 0;
+}
+
+.empty-state {
+ margin: auto;
+ max-width: 420px;
+ text-align: center;
+ color: #c3c5dd;
+ animation: fadeIn 0.5s ease;
+}
+
+.empty-bot {
+ font-size: 36px;
+ margin-bottom: 12px;
+ animation: bounce 2s ease infinite;
+}
+
+@keyframes bounce {
+ 0%, 100% {
+ transform: translateY(0);
+ }
+ 50% {
+ transform: translateY(-10px);
+ }
+}
+
+.empty-state h1 {
+ font-size: 24px;
+ margin-bottom: 6px;
+}
+
+.empty-state p {
+ font-size: 14px;
+ color: #9a9bb0;
+}
+
+/* Workspace grid - Properly constrained */
+.workspace-grid {
+ display: grid;
+ grid-template-columns: 320px minmax(340px, 1fr);
+ height: 100%;
+ overflow: hidden;
+ flex: 1;
+ min-height: 0;
+}
+
+/* Panels */
+.panel-header {
+ height: 40px;
+ padding: 0 16px;
+ border-bottom: 1px solid #272832;
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+ font-size: 13px;
+ font-weight: 500;
+ color: #c3c5dd;
+ background: #0a0b0f;
+ flex-shrink: 0;
+}
+
+.badge {
+ padding: 2px 6px;
+ border-radius: 999px;
+ border: 1px solid #3a3b4d;
+ font-size: 10px;
+}
+
+/* Files */
+.files-panel {
+ border-right: 1px solid #272832;
+ background: #101117;
+ display: flex;
+ flex-direction: column;
+ overflow: hidden;
+}
+
+.files-list {
+ flex: 1;
+ overflow-y: auto;
+ overflow-x: hidden;
+ padding: 6px 4px;
+ min-height: 0;
+}
+
+.files-item {
+ border: none;
+ outline: none;
+ width: 100%;
+ background: transparent;
+ color: #f5f5f7;
+ display: flex;
+ align-items: center;
+ gap: 8px;
+ padding: 4px 8px;
+ border-radius: 6px;
+ cursor: pointer;
+ font-size: 12px;
+ transition: all 0.15s ease;
+}
+
+.files-item:hover {
+ background: #1a1b26;
+ transform: translateX(2px);
+}
+
+.files-item-active {
+ background: #2a2b3c;
+}
+
+.file-icon {
+ width: 16px;
+ flex-shrink: 0;
+}
+
+.file-path {
+ white-space: nowrap;
+ overflow: hidden;
+ text-overflow: ellipsis;
+}
+
+.files-empty {
+ padding: 10px 12px;
+ font-size: 12px;
+ color: #9a9bb0;
+}
+
+/* Chat panel */
+.editor-panel {
+ display: flex;
+ flex-direction: column;
+ background: #050608;
+}
+
+.chat-container {
+ display: flex;
+ flex-direction: column;
+ flex: 1;
+ min-height: 0;
+ overflow: hidden;
+}
+
+.chat-messages {
+ flex: 1;
+ padding: 12px 16px;
+ overflow-y: auto;
+ overflow-x: hidden;
+ font-size: 13px;
+ min-height: 0;
+ scroll-behavior: smooth;
+}
+
+.chat-message-user {
+ margin-bottom: 16px;
+ animation: slideInRight 0.3s ease;
+}
+
+@keyframes slideInRight {
+ from {
+ opacity: 0;
+ transform: translateX(20px);
+ }
+ to {
+ opacity: 1;
+ transform: translateX(0);
+ }
+}
+
+.chat-message-ai {
+ margin-bottom: 16px;
+ animation: slideInLeft 0.3s ease;
+}
+
+@keyframes slideInLeft {
+ from {
+ opacity: 0;
+ transform: translateX(-20px);
+ }
+ to {
+ opacity: 1;
+ transform: translateX(0);
+ }
+}
+
+.chat-message-ai span {
+ display: inline-block;
+ padding: 10px 14px;
+ border-radius: 12px;
+ max-width: 80%;
+ line-height: 1.5;
+}
+
+.chat-message-user span {
+ display: inline;
+ padding: 0;
+ border-radius: 0;
+ background: transparent;
+ border: none;
+ max-width: none;
+ line-height: inherit;
+}
+
+.chat-message-ai span {
+ background: #151622;
+ border: 1px solid #272832;
+}
+
+.chat-empty-state {
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ justify-content: center;
+ min-height: 300px;
+ padding: 40px 20px;
+ text-align: center;
+}
+
+.chat-empty-icon {
+ font-size: 48px;
+ margin-bottom: 16px;
+ opacity: 0.6;
+ animation: pulse 2s ease infinite;
+}
+
+@keyframes pulse {
+ 0%, 100% {
+ opacity: 0.6;
+ }
+ 50% {
+ opacity: 0.8;
+ }
+}
+
+.chat-empty-state p {
+ margin: 0;
+ font-size: 13px;
+ color: #9a9bb0;
+ max-width: 400px;
+}
+
+.chat-input-box {
+ padding: 12px 16px;
+ border-top: 1px solid #272832;
+ display: flex;
+ flex-direction: column;
+ gap: 10px;
+ background: #050608;
+ flex-shrink: 0;
+ min-height: fit-content;
+ box-shadow: 0 -2px 10px rgba(0, 0, 0, 0.3);
+}
+
+.chat-input-row {
+ display: flex;
+ gap: 10px;
+ align-items: center;
+ flex-wrap: wrap;
+}
+
+.chat-input {
+ flex: 1;
+ min-width: 200px;
+ border-radius: 8px;
+ padding: 10px 12px;
+ border: 1px solid #272832;
+ background: #0a0b0f;
+ color: #f5f5f7;
+ font-size: 13px;
+ line-height: 1.5;
+ transition: all 0.2s ease;
+}
+
+.chat-input:focus {
+ outline: none;
+ border-color: #ff7a3c;
+ background: #101117;
+ box-shadow: 0 0 0 3px rgba(255, 122, 60, 0.1);
+}
+
+.chat-input::placeholder {
+ color: #676883;
+}
+
+.chat-btn {
+ border-radius: 8px;
+ border: none;
+ outline: none;
+ padding: 10px 16px;
+ background: #ff7a3c;
+ color: #050608;
+ cursor: pointer;
+ font-size: 13px;
+ font-weight: 600;
+ transition: all 0.2s ease;
+ white-space: nowrap;
+ min-height: 40px;
+}
+
+.chat-btn:hover:not(:disabled) {
+ background: #ff8c52;
+ transform: translateY(-1px);
+ box-shadow: 0 4px 12px rgba(255, 122, 60, 0.3);
+}
+
+.chat-btn:active:not(:disabled) {
+ transform: translateY(0);
+}
+
+.chat-btn.secondary {
+ background: #1a1b26;
+ color: #f5f5f7;
+ border: 1px solid #272832;
+}
+
+.chat-btn.secondary:hover:not(:disabled) {
+ background: #222335;
+ border-color: #3a3b4d;
+}
+
+.chat-btn:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+}
+
+/* Plan rendering */
+.plan-card {
+ border-radius: 12px;
+ background: #101117;
+ border: 1px solid #272832;
+ padding: 10px 12px;
+ margin-top: 6px;
+ animation: fadeIn 0.3s ease;
+}
+
+.plan-steps {
+ margin: 6px 0 0;
+ padding-left: 18px;
+ font-size: 12px;
+}
+
+.plan-steps li {
+ margin-bottom: 4px;
+}
+
+/* Modal */
+.modal-backdrop {
+ position: fixed;
+ inset: 0;
+ background: rgba(0, 0, 0, 0.55);
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ z-index: 20;
+ animation: fadeIn 0.2s ease;
+}
+
+.modal {
+ background: #101117;
+ border-radius: 16px;
+ border: 1px solid #272832;
+ padding: 16px 18px;
+ width: 360px;
+ animation: scaleIn 0.3s ease;
+}
+
+@keyframes scaleIn {
+ from {
+ opacity: 0;
+ transform: scale(0.9);
+ }
+ to {
+ opacity: 1;
+ transform: scale(1);
+ }
+}
+
+.modal-header {
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ margin-bottom: 10px;
+}
+
+.modal-title {
+ font-size: 15px;
+ font-weight: 600;
+}
+
+.modal-close {
+ border: none;
+ outline: none;
+ background: transparent;
+ color: #9a9bb0;
+ cursor: pointer;
+ transition: color 0.2s ease;
+}
+
+.modal-close:hover {
+ color: #ff7a3c;
+}
+
+.provider-list {
+ display: flex;
+ flex-direction: column;
+ gap: 6px;
+ margin-top: 8px;
+}
+
+.provider-item {
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+ padding: 6px 8px;
+ border-radius: 8px;
+ background: #151622;
+ border: 1px solid #272832;
+ font-size: 13px;
+ transition: all 0.2s ease;
+}
+
+.provider-item:hover {
+ border-color: #3a3b4d;
+}
+
+.provider-item.active {
+ border-color: #ff7a3c;
+ background: rgba(255, 122, 60, 0.1);
+}
+
+.provider-name {
+ font-weight: 500;
+}
+
+.provider-badge {
+ font-size: 11px;
+ color: #9a9bb0;
+}
+
+/* Navigation */
+.main-nav {
+ display: flex;
+ flex-direction: column;
+ gap: 2px;
+ margin-top: 10px;
+ margin-bottom: 10px;
+}
+
+.nav-btn {
+ border: none;
+ outline: none;
+ background: transparent;
+ color: #9a9bb0;
+ border-radius: 8px;
+ font-size: 13px;
+ font-weight: 500;
+ padding: 8px 12px;
+ text-align: left;
+ cursor: pointer;
+ transition: all 0.15s ease;
+}
+
+.nav-btn:hover {
+ background: #1a1b26;
+ color: #c3c5dd;
+}
+
+.nav-btn-active {
+ background: #1a1b26;
+ color: #f5f5f7;
+ font-weight: 600;
+ border-left: 2px solid #ff7a3c;
+ padding-left: 10px;
+}
+
+/* Settings page */
+.settings-root {
+ padding: 20px 24px;
+ overflow-y: auto;
+ max-width: 800px;
+}
+
+.settings-root h1 {
+ margin-top: 0;
+ font-size: 24px;
+ margin-bottom: 8px;
+}
+
+.settings-muted {
+ font-size: 13px;
+ color: #9a9bb0;
+ margin-bottom: 20px;
+ line-height: 1.5;
+}
+
+.settings-card {
+ background: #101117;
+ border-radius: 12px;
+ border: 1px solid #272832;
+ padding: 14px 16px;
+ margin-bottom: 14px;
+ display: flex;
+ flex-direction: column;
+ gap: 8px;
+ transition: all 0.2s ease;
+}
+
+.settings-card:hover {
+ border-color: #3a3b4d;
+}
+
+.settings-title {
+ font-size: 15px;
+ font-weight: 600;
+ margin-bottom: 4px;
+}
+
+.settings-label {
+ font-size: 12px;
+ color: #9a9bb0;
+ font-weight: 500;
+ margin-top: 4px;
+}
+
+.settings-input,
+.settings-select {
+ background: #050608;
+ border-radius: 8px;
+ border: 1px solid #272832;
+ padding: 8px 10px;
+ color: #f5f5f7;
+ font-size: 13px;
+ font-family: inherit;
+ transition: all 0.2s ease;
+}
+
+.settings-input:focus,
+.settings-select:focus {
+ outline: none;
+ border-color: #ff7a3c;
+ box-shadow: 0 0 0 3px rgba(255, 122, 60, 0.1);
+}
+
+.settings-input::placeholder {
+ color: #676883;
+}
+
+.settings-hint {
+ font-size: 11px;
+ color: #7a7b8e;
+ margin-top: -2px;
+}
+
+.settings-actions {
+ margin-top: 12px;
+ display: flex;
+ align-items: center;
+ gap: 12px;
+}
+
+.settings-save-btn {
+ background: #ff7a3c;
+ border-radius: 999px;
+ border: none;
+ outline: none;
+ padding: 9px 18px;
+ font-size: 13px;
+ cursor: pointer;
+ color: #050608;
+ font-weight: 600;
+ transition: all 0.2s ease;
+}
+
+.settings-save-btn:hover {
+ background: #ff8b52;
+ transform: translateY(-1px);
+ box-shadow: 0 4px 12px rgba(255, 122, 60, 0.3);
+}
+
+.settings-save-btn:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+ transform: none;
+}
+
+.settings-success {
+ font-size: 12px;
+ color: #7cffb3;
+ font-weight: 500;
+}
+
+.settings-error {
+ font-size: 12px;
+ color: #ff8a8a;
+ font-weight: 500;
+}
+
+/* Flow viewer */
+.flow-root {
+ display: flex;
+ flex-direction: column;
+ height: 100%;
+ overflow: hidden;
+}
+
+.flow-header {
+ padding: 16px 20px;
+ border-bottom: 1px solid #272832;
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+}
+
+.flow-header h1 {
+ margin: 0;
+ font-size: 22px;
+ margin-bottom: 4px;
+}
+
+.flow-header p {
+ margin: 0;
+ font-size: 12px;
+ color: #9a9bb0;
+ max-width: 600px;
+ line-height: 1.5;
+}
+
+.flow-canvas {
+ flex: 1;
+ background: #050608;
+ position: relative;
+}
+
+.flow-error {
+ position: absolute;
+ inset: 0;
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ justify-content: center;
+ gap: 12px;
+}
+
+.error-icon {
+ font-size: 48px;
+}
+
+.error-text {
+ font-size: 14px;
+ color: #ff8a8a;
+}
+
+/* Assistant Message Sections */
+.gp-section {
+ margin-bottom: 16px;
+ border-radius: 12px;
+ background: #101117;
+ border: 1px solid #272832;
+ overflow: hidden;
+ animation: fadeIn 0.3s ease;
+}
+
+.gp-section-header {
+ padding: 8px 12px;
+ background: #151622;
+ border-bottom: 1px solid #272832;
+}
+
+.gp-section-header h3 {
+ margin: 0;
+ font-size: 13px;
+ font-weight: 600;
+ color: #c3c5dd;
+}
+
+.gp-section-content {
+ padding: 12px;
+}
+
+.gp-section-answer .gp-section-content p {
+ margin: 0;
+ font-size: 13px;
+ line-height: 1.6;
+ color: #f5f5f7;
+}
+
+.gp-section-plan {
+ background: #0a0b0f;
+}
+
+/* Plan View Enhanced */
+.plan-header {
+ margin-bottom: 12px;
+}
+
+.plan-goal {
+ font-size: 13px;
+ font-weight: 600;
+ margin-bottom: 4px;
+ color: #f5f5f7;
+}
+
+.plan-summary {
+ font-size: 12px;
+ color: #c3c5dd;
+ line-height: 1.5;
+}
+
+.plan-totals {
+ display: flex;
+ gap: 8px;
+ margin-bottom: 12px;
+ flex-wrap: wrap;
+}
+
+.plan-total {
+ padding: 4px 8px;
+ border-radius: 6px;
+ font-size: 11px;
+ font-weight: 500;
+ animation: fadeIn 0.3s ease;
+}
+
+.plan-total-create {
+ background: rgba(76, 175, 80, 0.15);
+ color: #81c784;
+ border: 1px solid rgba(76, 175, 80, 0.3);
+}
+
+.plan-total-modify {
+ background: rgba(33, 150, 243, 0.15);
+ color: #64b5f6;
+ border: 1px solid rgba(33, 150, 243, 0.3);
+}
+
+.plan-total-delete {
+ background: rgba(244, 67, 54, 0.15);
+ color: #e57373;
+ border: 1px solid rgba(244, 67, 54, 0.3);
+}
+
+.plan-step {
+ margin-bottom: 12px;
+ padding-bottom: 12px;
+ border-bottom: 1px solid #1a1b26;
+}
+
+.plan-step:last-child {
+ border-bottom: none;
+ padding-bottom: 0;
+ margin-bottom: 0;
+}
+
+.plan-step-header {
+ margin-bottom: 6px;
+}
+
+.plan-step-description {
+ font-size: 12px;
+ color: #9a9bb0;
+ margin-bottom: 8px;
+}
+
+.plan-files {
+ list-style: none;
+ padding: 0;
+ margin: 8px 0;
+}
+
+.plan-file {
+ display: flex;
+ align-items: center;
+ gap: 8px;
+ padding: 4px 0;
+}
+
+.gp-pill {
+ padding: 2px 6px;
+ border-radius: 4px;
+ font-size: 10px;
+ font-weight: 600;
+ text-transform: uppercase;
+ letter-spacing: 0.3px;
+}
+
+.gp-pill-create {
+ background: rgba(76, 175, 80, 0.2);
+ color: #81c784;
+ border: 1px solid rgba(76, 175, 80, 0.4);
+}
+
+.gp-pill-modify {
+ background: rgba(33, 150, 243, 0.2);
+ color: #64b5f6;
+ border: 1px solid rgba(33, 150, 243, 0.4);
+}
+
+.gp-pill-delete {
+ background: rgba(244, 67, 54, 0.2);
+ color: #e57373;
+ border: 1px solid rgba(244, 67, 54, 0.4);
+}
+
+.plan-file-path {
+ font-size: 11px;
+ color: #c3c5dd;
+ font-family: "SF Mono", Monaco, "Cascadia Code", monospace;
+ background: #0a0b0f;
+ padding: 2px 6px;
+ border-radius: 4px;
+}
+
+.plan-step-risks {
+ margin-top: 8px;
+ padding: 6px 8px;
+ background: rgba(255, 152, 0, 0.1);
+ border-left: 2px solid #ff9800;
+ border-radius: 4px;
+ font-size: 11px;
+ color: #ffb74d;
+}
+
+.plan-risk-label {
+ font-weight: 600;
+}
+
+/* Execution Log */
+.execution-steps {
+ list-style: none;
+ padding: 0;
+ margin: 0;
+}
+
+.execution-step {
+ padding: 8px;
+ margin-bottom: 6px;
+ background: #0a0b0f;
+ border-radius: 6px;
+ font-size: 11px;
+ font-family: "SF Mono", Monaco, "Cascadia Code", monospace;
+ white-space: pre-wrap;
+}
+
+.execution-step-number {
+ color: #ff7a3c;
+ font-weight: 600;
+ margin-right: 8px;
+}
+
+.execution-step-summary {
+ color: #c3c5dd;
+}
+
+/* Project Context Panel - Properly constrained */
+.gp-context {
+ padding: 12px;
+ height: 100%;
+ overflow: hidden;
+ display: flex;
+ flex-direction: column;
+}
+
+.gp-context-column {
+ background: #0a0b0f;
+ border-right: 1px solid #272832;
+ overflow: hidden;
+ display: flex;
+ flex-direction: column;
+}
+
+.gp-chat-column {
+ display: flex;
+ flex-direction: column;
+ background: #050608;
+ height: 100%;
+ min-width: 0;
+ overflow: hidden;
+}
+
+.gp-card {
+ background: #101117;
+ border-radius: 12px;
+ border: 1px solid #272832;
+ overflow: hidden;
+ display: flex;
+ flex-direction: column;
+ height: 100%;
+ min-height: 0;
+}
+
+.gp-card-header {
+ padding: 10px 12px;
+ background: #151622;
+ border-bottom: 1px solid #272832;
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+ flex-shrink: 0;
+}
+
+.gp-card-header h2 {
+ margin: 0;
+ font-size: 14px;
+ font-weight: 600;
+ color: #f5f5f7;
+}
+
+.gp-badge {
+ padding: 3px 8px;
+ border-radius: 999px;
+ background: #2a2b3c;
+ border: 1px solid #3a3b4d;
+ font-size: 11px;
+ color: #c3c5dd;
+ font-weight: 500;
+ transition: all 0.2s ease;
+}
+
+.gp-badge:hover {
+ border-color: #ff7a3c;
+}
+
+.gp-context-meta {
+ padding: 12px;
+ display: flex;
+ flex-direction: column;
+ gap: 6px;
+ border-bottom: 1px solid #272832;
+ flex-shrink: 0;
+ background: #0a0b0f;
+}
+
+.gp-context-meta-item {
+ display: flex;
+ align-items: center;
+ gap: 6px;
+ font-size: 12px;
+}
+
+.gp-context-meta-label {
+ color: #9a9bb0;
+ min-width: 60px;
+}
+
+.gp-context-meta-item strong {
+ color: #f5f5f7;
+ font-weight: 500;
+}
+
+/* File tree - Properly scrollable */
+.gp-context-tree {
+ flex: 1;
+ overflow-y: auto;
+ overflow-x: hidden;
+ min-height: 0;
+ padding: 4px;
+}
+
+.gp-context-empty {
+ padding: 20px 12px;
+ text-align: center;
+ color: #9a9bb0;
+ font-size: 12px;
+}
+
+/* Footer - Fixed at bottom */
+.gp-footer {
+ position: fixed;
+ bottom: 0;
+ left: 0;
+ right: 0;
+ border-top: 1px solid #272832;
+ padding: 8px 20px;
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ font-size: 11px;
+ color: #9a9bb0;
+ background: #0a0b0f;
+ backdrop-filter: blur(10px);
+ z-index: 10;
+ box-shadow: 0 -2px 10px rgba(0, 0, 0, 0.2);
+}
+
+.gp-footer-left {
+ display: flex;
+ align-items: center;
+ gap: 6px;
+ font-weight: 500;
+ color: #c3c5dd;
+}
+
+.gp-footer-right {
+ display: flex;
+ align-items: center;
+ gap: 12px;
+}
+
+.gp-footer-right a {
+ color: #9a9bb0;
+ text-decoration: none;
+ transition: all 0.2s ease;
+}
+
+.gp-footer-right a:hover {
+ color: #ff7a3c;
+ transform: translateY(-1px);
+}
+
/* Adjust app-root to account for fixed footer */
.app-root > .main-wrapper {
  padding-bottom: 32px; /* Space for fixed footer — assumes .gp-footer renders no taller than 32px (8px padding + one 11px text line); TODO confirm against actual footer height */
}
+
+/* ============================================================================
+ LOGIN PAGE - Enterprise GitHub Authentication
+ ============================================================================ */
+
+.login-page {
+ min-height: 100vh;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ background: radial-gradient(circle at center, #171823 0%, #050608 70%);
+ padding: 20px;
+ animation: fadeIn 0.4s ease;
+}
+
+.login-container {
+ width: 100%;
+ max-width: 480px;
+ background: #101117;
+ border: 1px solid #272832;
+ border-radius: 24px;
+ padding: 40px 36px;
+ box-shadow: 0 20px 60px rgba(0, 0, 0, 0.4);
+ animation: slideUp 0.5s ease;
+}
+
+@keyframes slideUp {
+ from {
+ opacity: 0;
+ transform: translateY(20px);
+ }
+ to {
+ opacity: 1;
+ transform: translateY(0);
+ }
+}
+
+/* Header */
+.login-header {
+ text-align: center;
+ margin-bottom: 32px;
+}
+
+.login-logo {
+ display: flex;
+ justify-content: center;
+ margin-bottom: 16px;
+}
+
+.logo-icon {
+ width: 64px;
+ height: 64px;
+ border-radius: 16px;
+ background: linear-gradient(135deg, #ff7a3c 0%, #ff6b2b 100%);
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ font-weight: 700;
+ font-size: 28px;
+ color: #050608;
+ box-shadow: 0 8px 24px rgba(255, 122, 60, 0.3);
+ transition: transform 0.3s ease;
+}
+
+.logo-icon:hover {
+ transform: scale(1.05) rotate(3deg);
+}
+
+.login-title {
+ margin: 0;
+ font-size: 28px;
+ font-weight: 700;
+ color: #f5f5f7;
+ margin-bottom: 8px;
+ letter-spacing: -0.5px;
+}
+
+.login-subtitle {
+ margin: 0;
+ font-size: 14px;
+ color: #9a9bb0;
+ font-weight: 500;
+}
+
+/* Welcome Section */
+.login-welcome {
+ margin-bottom: 28px;
+ padding-bottom: 28px;
+ border-bottom: 1px solid #272832;
+}
+
+.login-welcome h2 {
+ margin: 0 0 12px 0;
+ font-size: 20px;
+ font-weight: 600;
+ color: #f5f5f7;
+}
+
+.login-welcome p {
+ margin: 0;
+ font-size: 14px;
+ line-height: 1.6;
+ color: #c3c5dd;
+}
+
+/* Error Message */
+.login-error {
+ display: flex;
+ align-items: center;
+ gap: 10px;
+ padding: 12px 14px;
+ background: rgba(255, 82, 82, 0.1);
+ border: 1px solid rgba(255, 82, 82, 0.3);
+ border-radius: 10px;
+ color: #ff8a8a;
+ font-size: 13px;
+ margin-bottom: 20px;
+ animation: shake 0.4s ease;
+}
+
+@keyframes shake {
+ 0%, 100% { transform: translateX(0); }
+ 25% { transform: translateX(-5px); }
+ 75% { transform: translateX(5px); }
+}
+
+.login-error svg {
+ flex-shrink: 0;
+}
+
+/* Login Actions */
+.login-actions {
+ display: flex;
+ flex-direction: column;
+ gap: 14px;
+ margin-bottom: 28px;
+}
+
+/* Buttons */
+.btn-primary,
+.btn-secondary,
+.btn-text {
+ border: none;
+ outline: none;
+ cursor: pointer;
+ font-family: inherit;
+ font-weight: 600;
+ transition: all 0.2s ease;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ gap: 10px;
+}
+
+.btn-large {
+ padding: 14px 24px;
+ font-size: 15px;
+ border-radius: 12px;
+}
+
+.btn-primary {
+ background: linear-gradient(135deg, #ff7a3c 0%, #ff6b2b 100%);
+ color: #fff;
+ box-shadow: 0 4px 12px rgba(255, 122, 60, 0.25);
+}
+
+.btn-primary:hover:not(:disabled) {
+ transform: translateY(-2px);
+ box-shadow: 0 8px 20px rgba(255, 122, 60, 0.35);
+}
+
+.btn-primary:active:not(:disabled) {
+ transform: translateY(0);
+}
+
+.btn-primary:disabled {
+ opacity: 0.6;
+ cursor: not-allowed;
+}
+
+.btn-secondary {
+ background: #1a1b26;
+ color: #f5f5f7;
+ border: 1px solid #3a3b4d;
+}
+
+.btn-secondary:hover {
+ background: #2a2b3c;
+ border-color: #4a4b5d;
+ transform: translateY(-1px);
+}
+
+.btn-text {
+ background: transparent;
+ color: #9a9bb0;
+ padding: 10px;
+ font-size: 14px;
+ font-weight: 500;
+}
+
+.btn-text:hover {
+ color: #ff7a3c;
+}
+
/* Button Spinner — small inline spinner shown inside action buttons
   (e.g. the login button) while an async request is in flight. */
.btn-spinner {
  width: 16px;
  height: 16px;
  border: 2px solid rgba(255, 255, 255, 0.3); /* faint full ring */
  border-top-color: #fff; /* one bright arc — rotating it reads as progress */
  border-radius: 50%;
  animation: spin 0.6s linear infinite;
}

/* Shared rotation keyframes used by all circular spinners in this file. */
@keyframes spin {
  to { transform: rotate(360deg); }
}
+
+/* Loading Spinner (Page) */
+.loading-spinner {
+ width: 48px;
+ height: 48px;
+ border: 4px solid #272832;
+ border-top-color: #ff7a3c;
+ border-radius: 50%;
+ animation: spin 0.8s linear infinite;
+ margin: 0 auto;
+}
+
+/* Divider */
+.login-divider {
+ position: relative;
+ text-align: center;
+ margin: 8px 0;
+}
+
+.login-divider::before {
+ content: '';
+ position: absolute;
+ top: 50%;
+ left: 0;
+ right: 0;
+ height: 1px;
+ background: #272832;
+}
+
+.login-divider span {
+ position: relative;
+ display: inline-block;
+ padding: 0 16px;
+ background: #101117;
+ color: #9a9bb0;
+ font-size: 12px;
+ font-weight: 500;
+}
+
+/* Form */
+.login-form {
+ display: flex;
+ flex-direction: column;
+ gap: 18px;
+ margin-bottom: 28px;
+}
+
+.form-group {
+ display: flex;
+ flex-direction: column;
+ gap: 8px;
+}
+
+.form-group label {
+ font-size: 13px;
+ font-weight: 600;
+ color: #f5f5f7;
+}
+
+.form-input {
+ background: #0a0b0f;
+ border: 1px solid #272832;
+ border-radius: 10px;
+ padding: 12px 14px;
+ color: #f5f5f7;
+ font-size: 14px;
+ font-family: "SF Mono", Monaco, monospace;
+ transition: all 0.2s ease;
+}
+
+.form-input:focus {
+ outline: none;
+ border-color: #ff7a3c;
+ box-shadow: 0 0 0 4px rgba(255, 122, 60, 0.1);
+}
+
+.form-input:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+}
+
+.form-input::placeholder {
+ color: #676883;
+}
+
+.form-hint {
+ font-size: 12px;
+ color: #9a9bb0;
+ line-height: 1.5;
+ margin: 0;
+}
+
+.form-link {
+ color: #ff7a3c;
+ text-decoration: none;
+ font-weight: 500;
+ transition: color 0.2s ease;
+}
+
+.form-link:hover {
+ color: #ff8b52;
+ text-decoration: underline;
+}
+
+.form-hint code {
+ background: #1a1b26;
+ padding: 2px 6px;
+ border-radius: 4px;
+ font-family: "SF Mono", Monaco, monospace;
+ font-size: 11px;
+ color: #ff7a3c;
+}
+
+/* Notice (for no auth configured) */
+.login-notice {
+ padding: 20px;
+ background: rgba(255, 152, 0, 0.1);
+ border: 1px solid rgba(255, 152, 0, 0.3);
+ border-radius: 12px;
+ margin-bottom: 28px;
+}
+
+.login-notice h3 {
+ margin: 0 0 12px 0;
+ font-size: 16px;
+ color: #ffb74d;
+}
+
+.login-notice p {
+ margin: 0 0 12px 0;
+ font-size: 13px;
+ color: #c3c5dd;
+ line-height: 1.6;
+}
+
+.login-notice ul {
+ margin: 0;
+ padding-left: 20px;
+ font-size: 13px;
+ color: #c3c5dd;
+ line-height: 1.8;
+}
+
+.login-notice code {
+ background: #1a1b26;
+ padding: 2px 6px;
+ border-radius: 4px;
+ font-family: "SF Mono", Monaco, monospace;
+ font-size: 12px;
+ color: #ff7a3c;
+}
+
+/* Features List */
+.login-features {
+ display: flex;
+ flex-direction: column;
+ gap: 12px;
+ padding: 20px 0;
+ border-top: 1px solid #272832;
+ border-bottom: 1px solid #272832;
+ margin-bottom: 20px;
+}
+
+.feature-item {
+ display: flex;
+ align-items: flex-start;
+ gap: 12px;
+}
+
+.feature-icon {
+ flex-shrink: 0;
+ width: 20px;
+ height: 20px;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ color: #7cffb3;
+}
+
+.feature-text {
+ display: flex;
+ flex-direction: column;
+ gap: 2px;
+}
+
+.feature-text strong {
+ font-size: 13px;
+ font-weight: 600;
+ color: #f5f5f7;
+}
+
+.feature-text span {
+ font-size: 12px;
+ color: #9a9bb0;
+}
+
/* Login page footer: small, muted legal/info text centered under the card. */
.login-footer {
  text-align: center;
}

.login-footer p {
  margin: 0;
  font-size: 11px;
  color: #7a7b8e;
  line-height: 1.6;
}

/* ============================================================================
   INSTALLATION MODAL - Claude Code Style
   ============================================================================ */
+
+.install-modal-backdrop {
+ position: fixed;
+ inset: 0;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ background: rgba(0, 0, 0, 0.7);
+ backdrop-filter: blur(8px);
+ z-index: 9999;
+ animation: fadeIn 0.2s ease;
+}
+
+.install-modal {
+ width: 480px;
+ max-width: 90vw;
+ background: #101117;
+ border: 1px solid #272832;
+ border-radius: 16px;
+ box-shadow: 0 20px 60px rgba(0, 0, 0, 0.5);
+ animation: modalSlideIn 0.3s ease;
+ overflow: hidden;
+}
+
+@keyframes modalSlideIn {
+ from {
+ opacity: 0;
+ transform: translateY(-20px) scale(0.95);
+ }
+ to {
+ opacity: 1;
+ transform: translateY(0) scale(1);
+ }
+}
+
+/* Modal Header */
+.install-modal-header {
+ padding: 32px 32px 24px;
+ text-align: center;
+ border-bottom: 1px solid #272832;
+}
+
+.install-modal-logo {
+ display: flex;
+ justify-content: center;
+ margin-bottom: 16px;
+}
+
+.logo-icon-large {
+ width: 56px;
+ height: 56px;
+ border-radius: 12px;
+ background: linear-gradient(135deg, #ff7a3c 0%, #ff6b2b 100%);
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ font-weight: 700;
+ font-size: 24px;
+ color: #050608;
+ box-shadow: 0 4px 16px rgba(255, 122, 60, 0.3);
+}
+
+.install-modal-title {
+ margin: 0 0 8px 0;
+ font-size: 20px;
+ font-weight: 600;
+ color: #f5f5f7;
+}
+
+.install-modal-subtitle {
+ margin: 0;
+ font-size: 13px;
+ color: #9a9bb0;
+ line-height: 1.5;
+}
+
+/* Status Indicator */
+.install-status {
+ display: flex;
+ align-items: center;
+ gap: 10px;
+ padding: 12px 16px;
+ margin: 16px 24px;
+ border-radius: 8px;
+ font-size: 13px;
+ transition: all 0.2s ease;
+}
+
+.install-status-error {
+ background: rgba(255, 82, 82, 0.1);
+ border: 1px solid rgba(255, 82, 82, 0.3);
+ color: #ff8a8a;
+}
+
+.install-status-pending {
+ background: rgba(255, 152, 0, 0.1);
+ border: 1px solid rgba(255, 152, 0, 0.3);
+ color: #ffb74d;
+}
+
+.status-icon {
+ flex-shrink: 0;
+}
+
+.status-spinner {
+ width: 16px;
+ height: 16px;
+ border: 2px solid rgba(255, 180, 77, 0.3);
+ border-top-color: #ffb74d;
+ border-radius: 50%;
+ animation: spin 0.6s linear infinite;
+}
+
+/* Installation Steps */
+.install-steps {
+ padding: 24px 32px;
+ display: flex;
+ flex-direction: column;
+ gap: 16px;
+}
+
+.install-step {
+ display: flex;
+ align-items: flex-start;
+ gap: 12px;
+}
+
+.step-number {
+ flex-shrink: 0;
+ width: 28px;
+ height: 28px;
+ border-radius: 8px;
+ background: #1a1b26;
+ border: 1px solid #3a3b4d;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ font-size: 13px;
+ font-weight: 600;
+ color: #ff7a3c;
+}
+
+.step-content h3 {
+ margin: 0 0 4px 0;
+ font-size: 14px;
+ font-weight: 600;
+ color: #f5f5f7;
+}
+
+.step-content p {
+ margin: 0;
+ font-size: 12px;
+ color: #9a9bb0;
+ line-height: 1.5;
+}
+
+/* Action Buttons */
+.install-modal-actions {
+ display: flex;
+ align-items: center;
+ justify-content: flex-end;
+ gap: 10px;
+ padding: 16px 24px;
+ border-top: 1px solid #272832;
+ background: #0a0b0f;
+}
+
+.btn-install-primary {
+ border: none;
+ outline: none;
+ background: #000;
+ color: #fff;
+ padding: 10px 18px;
+ border-radius: 8px;
+ font-size: 13px;
+ font-weight: 600;
+ cursor: pointer;
+ display: flex;
+ align-items: center;
+ gap: 8px;
+ transition: all 0.2s ease;
+}
+
+.btn-install-primary:hover:not(:disabled) {
+ background: #1a1a1a;
+ transform: translateY(-1px);
+}
+
+.btn-install-primary:active:not(:disabled) {
+ transform: translateY(0);
+}
+
+.btn-install-primary:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+}
+
+.btn-check-status {
+ border: 1px solid #3a3b4d;
+ outline: none;
+ background: #1a1b26;
+ color: #f5f5f7;
+ padding: 10px 18px;
+ border-radius: 8px;
+ font-size: 13px;
+ font-weight: 500;
+ cursor: pointer;
+ display: flex;
+ align-items: center;
+ gap: 8px;
+ transition: all 0.2s ease;
+}
+
+.btn-check-status:hover:not(:disabled) {
+ background: #2a2b3c;
+ border-color: #4a4b5d;
+}
+
+.btn-check-status:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+}
+
+.btn-install-secondary {
+ border: 1px solid #3a3b4d;
+ outline: none;
+ background: transparent;
+ color: #c3c5dd;
+ padding: 10px 18px;
+ border-radius: 8px;
+ font-size: 13px;
+ font-weight: 500;
+ cursor: pointer;
+ display: flex;
+ align-items: center;
+ gap: 8px;
+ transition: all 0.2s ease;
+}
+
+.btn-install-secondary:hover:not(:disabled) {
+ background: #1a1b26;
+ border-color: #4a4b5d;
+}
+
+.btn-install-secondary:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+}
+
+/* Footer */
+.install-modal-footer {
+ padding: 16px 32px 24px;
+ text-align: center;
+}
+
+.install-modal-footer p {
+ margin: 0;
+ font-size: 12px;
+ color: #7a7b8e;
+ line-height: 1.6;
+}
+
+.install-modal-footer strong {
+ color: #c3c5dd;
+ font-weight: 600;
+}
+
/* Install-modal button spinner (14px).
   Scoped to .install-modal: the previous unscoped `.btn-spinner` selector,
   appearing later in the cascade, silently overrode the 16px `.btn-spinner`
   used by the login page everywhere in the app. */
.install-modal .btn-spinner {
  width: 14px;
  height: 14px;
  border: 2px solid rgba(255, 255, 255, 0.3);
  border-top-color: #fff;
  border-radius: 50%;
  animation: spin 0.6s linear infinite;
}

/* NOTE: the byte-identical duplicate `@keyframes spin` that was declared here
   has been removed — the keyframes are already defined earlier in this file. */
+
/* "Load available models" action button — uses the same orange palette as the Save button, just slightly smaller */
+.settings-load-btn {
+ margin-top: 8px;
+
+ /* Make it hug the text, not full width */
+ display: inline-flex;
+ align-items: center;
+ justify-content: center;
+ width: auto !important;
+ min-width: 0;
+ align-self: flex-start;
+
+ /* Size: slightly smaller than Save but same family */
+ padding: 7px 14px;
+ border-radius: 999px;
+
+ font-size: 12px;
+ font-weight: 600;
+ letter-spacing: 0.01em;
+
+ border: none;
+ outline: none;
+ cursor: pointer;
+
+ /* Match Save button color palette */
+ background: #ff7a3c;
+ color: #050608;
+
+ transition:
+ background 0.2s ease,
+ box-shadow 0.2s ease,
+ transform 0.15s ease,
+ opacity 0.2s ease;
+}
+
+.settings-load-btn:hover {
+ background: #ff8b52;
+ transform: translateY(-1px);
+ box-shadow: 0 3px 10px rgba(255, 122, 60, 0.28);
+}
+
+.settings-load-btn:active {
+ transform: translateY(0);
+ box-shadow: 0 1px 4px rgba(255, 122, 60, 0.25);
+}
+
+.settings-load-btn:disabled {
+ opacity: 0.55;
+ cursor: not-allowed;
+ transform: none;
+ box-shadow: none;
+}
+
+/* ------------------------------
+ LLM Settings Loading Experience
+ ------------------------------ */
+
+.settings-loading-shell {
+ min-height: calc(100vh - 32px);
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ padding: 32px 24px;
+}
+
+.settings-loading-card {
+ width: 100%;
+ max-width: 520px;
+ background: #101117;
+ border: 1px solid #272832;
+ border-radius: 20px;
+ padding: 40px 28px;
+ text-align: center;
+ box-shadow: 0 24px 80px rgba(0, 0, 0, 0.28);
+}
+
+.settings-loading-card h1 {
+ margin: 0 0 6px 0;
+ font-size: 28px;
+ line-height: 1.1;
+}
+
+.settings-loading-subtitle {
+ font-size: 13px;
+ color: #9a9bb0;
+ margin-bottom: 18px;
+}
+
+.settings-loading-text {
+ font-size: 14px;
+ color: #d4d7e1;
+ line-height: 1.6;
+ margin: 0;
+}
+
+.settings-loading-spinner {
+ width: 56px;
+ height: 56px;
+ border: 4px solid #272832;
+ border-top-color: #ff7a3c;
+ border-right-color: rgba(255, 122, 60, 0.7);
+ border-radius: 50%;
+ animation: spin 0.8s linear infinite;
+ margin: 0 auto 18px;
+}
+
+.settings-loading-slow {
+ margin-top: 18px;
+ padding: 14px 16px;
+ background: #0b0c11;
+ border: 1px solid #272832;
+ border-radius: 12px;
+}
+
+.settings-loading-slow p {
+ margin: 0 0 12px 0;
+ color: #9a9bb0;
+ font-size: 13px;
+ line-height: 1.5;
+}
+
+.settings-inline-error-card {
+ width: 100%;
+ max-width: 620px;
+ margin: 60px auto 0;
+ background: #101117;
+ border: 1px solid #272832;
+ border-radius: 16px;
+ padding: 28px 24px;
+}
+
+.settings-error-banner,
+.settings-success-banner {
+ border-radius: 12px;
+ padding: 12px 14px;
+ margin-bottom: 14px;
+ font-size: 13px;
+ line-height: 1.5;
+}
+
+.settings-error-banner {
+ background: rgba(255, 87, 87, 0.08);
+ border: 1px solid rgba(255, 87, 87, 0.24);
+ color: #ffb0b0;
+}
+
+.settings-success-banner {
+ background: rgba(67, 181, 129, 0.08);
+ border: 1px solid rgba(67, 181, 129, 0.24);
+ color: #9ce7c2;
+}
+
+.settings-error-text {
+ color: #ffb0b0;
+ font-size: 14px;
+ line-height: 1.6;
+ margin: 12px 0 18px;
+}
+
+.settings-secondary-btn {
+ background: transparent;
+ border: 1px solid #313244;
+ color: #f5f5f7;
+ border-radius: 999px;
+ padding: 9px 16px;
+ font-size: 13px;
+ cursor: pointer;
+ transition: all 0.2s ease;
+}
+
+.settings-secondary-btn:hover {
+ border-color: #ff7a3c;
+ color: #fff;
+ background: rgba(255, 122, 60, 0.08);
+}
+
+.settings-inline-row {
+ display: flex;
+ gap: 10px;
+ align-items: center;
+}
+
+.settings-inline-row .settings-input {
+ flex: 1;
+}
+
+.settings-model-list {
+ display: flex;
+ flex-wrap: wrap;
+ gap: 8px;
+}
+
+.settings-model-chip {
+ background: #090a0e;
+ border: 1px solid #2b2c36;
+ border-radius: 999px;
+ color: #f5f5f7;
+ padding: 8px 12px;
+ font-size: 12px;
+ cursor: pointer;
+ transition: all 0.18s ease;
+}
+
+.settings-model-chip:hover {
+ border-color: #ff7a3c;
+ background: rgba(255, 122, 60, 0.08);
+}
+
+/* =========================================================
+ Startup Screen — Enterprise Loader
+ ========================================================= */
+
+.startup-screen {
+ min-height: 100vh;
+ width: 100%;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ padding: 32px;
+ box-sizing: border-box;
+ background:
+ radial-gradient(circle at top center, rgba(255, 122, 60, 0.10), transparent 18%),
+ linear-gradient(180deg, #050814 0%, #03060f 100%);
+}
+
+.startup-card {
+ width: min(100%, 520px);
+ display: flex;
+ flex-direction: column;
+ gap: 20px;
+ padding: 28px 28px 24px;
+ border-radius: 20px;
+ border: 1px solid rgba(255, 255, 255, 0.08);
+ background:
+ linear-gradient(180deg, rgba(18, 24, 42, 0.94) 0%, rgba(10, 15, 28, 0.96) 100%);
+ box-shadow:
+ 0 10px 40px rgba(0, 0, 0, 0.45),
+ 0 0 0 1px rgba(255, 255, 255, 0.02) inset;
+ backdrop-filter: blur(12px);
+}
+
+.startup-brand-row {
+ display: flex;
+ align-items: center;
+ gap: 16px;
+}
+
+.startup-brand-mark {
+ position: relative;
+ width: 52px;
+ height: 52px;
+ flex: 0 0 52px;
+}
+
+.startup-brand-ring {
+ position: absolute;
+ inset: 0;
+ border-radius: 50%;
+ border: 3px solid rgba(255, 122, 60, 0.22);
+ border-top-color: #ff7a3c;
+ animation: startup-spin 1.1s linear infinite;
+}
+
+.startup-brand-core {
+ position: absolute;
+ inset: 11px;
+ border-radius: 50%;
+ background: radial-gradient(circle, rgba(255, 122, 60, 0.95) 0%, rgba(255, 122, 60, 0.25) 72%, transparent 100%);
+ box-shadow: 0 0 24px rgba(255, 122, 60, 0.28);
+}
+
+.startup-brand-copy {
+ min-width: 0;
+}
+
+.startup-title {
+ font-size: 26px;
+ line-height: 1.1;
+ font-weight: 700;
+ color: #f8fafc;
+ letter-spacing: 0.01em;
+}
+
+.startup-subtitle {
+ margin-top: 4px;
+ font-size: 13px;
+ line-height: 1.5;
+ color: #94a3b8;
+}
+
+.startup-loader-wrap {
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ padding-top: 4px;
+}
+
+.startup-loader {
+ position: relative;
+ width: 72px;
+ height: 72px;
+}
+
+.startup-loader-ring {
+ position: absolute;
+ inset: 0;
+ border-radius: 50%;
+}
+
+.startup-loader-ring-outer {
+ border: 4px solid rgba(255, 255, 255, 0.08);
+ border-top-color: #ff7a3c;
+ animation: startup-spin 1s linear infinite;
+}
+
+.startup-loader-ring-inner {
+ inset: 10px;
+ border: 3px solid rgba(255, 122, 60, 0.14);
+ border-bottom-color: rgba(255, 122, 60, 0.9);
+ animation: startup-spin-reverse 1.4s linear infinite;
+}
+
+.startup-status-block {
+ text-align: center;
+}
+
+.startup-status {
+ font-size: 18px;
+ font-weight: 600;
+ color: #f8fafc;
+ letter-spacing: 0.01em;
+}
+
+.startup-detail {
+ margin-top: 8px;
+ font-size: 13px;
+ line-height: 1.6;
+ color: #94a3b8;
+}
+
+.startup-phase-row {
+ display: flex;
+ justify-content: center;
+}
+
+.startup-phase-badge {
+ display: inline-flex;
+ align-items: center;
+ justify-content: center;
+ min-height: 28px;
+ padding: 0 12px;
+ border-radius: 999px;
+ background: rgba(255, 122, 60, 0.12);
+ border: 1px solid rgba(255, 122, 60, 0.24);
+ color: #ffb089;
+ font-size: 12px;
+ font-weight: 600;
+ letter-spacing: 0.04em;
+ text-transform: uppercase;
+}
+
+.startup-meta-grid {
+ display: grid;
+ grid-template-columns: repeat(2, minmax(0, 1fr));
+ gap: 12px;
+}
+
+.startup-meta-item {
+ padding: 12px 14px;
+ border-radius: 14px;
+ background: rgba(255, 255, 255, 0.035);
+ border: 1px solid rgba(255, 255, 255, 0.05);
+}
+
+.startup-meta-item-wide {
+ grid-column: 1 / -1;
+}
+
+.startup-meta-label {
+ font-size: 11px;
+ font-weight: 600;
+ letter-spacing: 0.05em;
+ text-transform: uppercase;
+ color: #64748b;
+}
+
+.startup-meta-value {
+ margin-top: 6px;
+ font-size: 14px;
+ font-weight: 600;
+ color: #e2e8f0;
+ word-break: break-word;
+}
+
+.startup-footer {
+ font-size: 12px;
+ line-height: 1.6;
+ color: #64748b;
+ text-align: center;
+}
+
+/* Clockwise full-turn rotation — drives .startup-brand-ring and the
+   outer loader ring. */
+@keyframes startup-spin {
+  from {
+    transform: rotate(0deg);
+  }
+  to {
+    transform: rotate(360deg);
+  }
+}
+
+/* Counter-clockwise variant for the inner loader ring so the two rings
+   visibly spin in opposite directions. */
+@keyframes startup-spin-reverse {
+  from {
+    transform: rotate(360deg);
+  }
+  to {
+    transform: rotate(0deg);
+  }
+}
+
+/* Compact layout for narrow viewports (phones): tighter padding,
+   smaller title, and a single-column meta grid. */
+@media (max-width: 640px) {
+  .startup-screen {
+    padding: 20px;
+  }
+
+  .startup-card {
+    padding: 22px 20px 20px;
+    border-radius: 18px;
+  }
+
+  .startup-title {
+    font-size: 22px;
+  }
+
+  .startup-meta-grid {
+    grid-template-columns: 1fr;
+  }
+
+  /* Wide items span nothing special once the grid is one column. */
+  .startup-meta-item-wide {
+    grid-column: auto;
+  }
+}
\ No newline at end of file
diff --git a/frontend/utils/api.js b/frontend/utils/api.js
new file mode 100644
index 0000000000000000000000000000000000000000..305c26364b52912f76e34fcff6610ec021444b1f
--- /dev/null
+++ b/frontend/utils/api.js
@@ -0,0 +1,251 @@
+/**
+ * API utilities for authenticated requests
+ */
+
+/**
+ * Get backend URL from environment or use relative path (for local dev)
+ * - Production (Vercel): Uses VITE_BACKEND_URL env var (e.g., https://gitpilot-backend.onrender.com)
+ * - Development (local): Uses relative paths (proxied by Vite to localhost:8000)
+ */
+const BACKEND_URL = import.meta.env.VITE_BACKEND_URL || '';
+
+/**
+ * Check if backend URL is configured
+ * @returns {boolean} True if backend URL is set
+ */
+export function isBackendConfigured() {
+ return BACKEND_URL !== '' && BACKEND_URL !== undefined;
+}
+
+/**
+ * Get the configured backend URL.
+ * @returns {string} Absolute backend URL in production, or '' when using
+ *   relative paths (local dev behind the Vite proxy)
+ */
+export function getBackendUrl() {
+  return BACKEND_URL;
+}
+
+/**
+ * Construct full API URL
+ * @param {string} path - API endpoint path (e.g., '/api/chat/plan')
+ * @returns {string} Full URL to API endpoint
+ */
+export function apiUrl(path) {
+ // Ensure path starts with /
+ const cleanPath = path.startsWith('/') ? path : `/${path}`;
+ return `${BACKEND_URL}${cleanPath}`;
+}
+
+/**
+ * Enhanced fetch with better error handling for JSON parsing
+ * @param {string} url - URL to fetch
+ * @param {Object} options - Fetch options
+ * @returns {Promise} Parsed JSON response
+ */
+export async function safeFetchJSON(url, options = {}) {
+ try {
+ // Add timeout to prevent hanging when backend is starting up.
+ // Default raised to 15s to tolerate first-load GitHub API checks.
+ const timeout = options.timeout || 15000;
+ const controller = new AbortController();
+ const timer = setTimeout(() => controller.abort(), timeout);
+ const fetchOptions = { ...options, signal: options.signal || controller.signal };
+ delete fetchOptions.timeout;
+
+ let response;
+ try {
+ response = await fetch(url, fetchOptions);
+ } finally {
+ clearTimeout(timer);
+ }
+ const contentType = response.headers.get('content-type');
+
+ // Check if response is actually JSON
+ if (!contentType || !contentType.includes('application/json')) {
+ // If not JSON, it might be an HTML error page
+ const text = await response.text();
+
+ // Check if it looks like HTML (starts with } Fetch response
+ */
+export async function authFetch(url, options = {}) {
+ const headers = {
+ ...getAuthHeaders(),
+ ...options.headers,
+ };
+
+ return fetch(url, {
+ ...options,
+ headers,
+ });
+}
+
+/**
+ * Make an authenticated JSON request
+ * @param {string} url - API endpoint URL
+ * @param {Object} options - Fetch options
+ * @returns {Promise} Parsed JSON response
+ */
+export async function authFetchJSON(url, options = {}) {
+ const headers = {
+ 'Content-Type': 'application/json',
+ ...getAuthHeaders(),
+ ...options.headers,
+ };
+
+ const response = await fetch(url, {
+ ...options,
+ headers,
+ });
+
+ if (!response.ok) {
+ const error = await response.json().catch(() => ({ detail: 'Request failed' }));
+ throw new Error(error.detail || error.message || 'Request failed');
+ }
+
+ return response.json();
+}
+
+// ─── Redesigned API Endpoints ────────────────────────────
+
+/**
+ * Get normalized server status.
+ * Uses safeFetchJSON's default timeout; for startup polling (backend may
+ * still be booting) prefer fetchStatusWithRetry().
+ */
+export async function fetchStatus() {
+  return safeFetchJSON(apiUrl("/api/status"));
+}
+
+/**
+ * Get server status with retry (for startup when backend may still be booting).
+ * Retries up to `maxRetries` times with `delayMs` between attempts.
+ * @param {number} maxRetries - Maximum retry attempts (default: 8)
+ * @param {number} delayMs - Delay between retries in ms (default: 2000)
+ * @returns {Promise} Parsed status response or null
+ */
+export async function fetchStatusWithRetry(maxRetries = 8, delayMs = 2000) {
+ for (let i = 0; i < maxRetries; i++) {
+ try {
+ return await safeFetchJSON(apiUrl("/api/status"), { timeout: 5000 });
+ } catch {
+ if (i < maxRetries - 1) {
+ await new Promise((r) => setTimeout(r, delayMs));
+ }
+ }
+ }
+ return null;
+}
+
+/**
+ * Get detailed provider status
+ */
+export async function fetchProviderStatus() {
+ return safeFetchJSON(apiUrl("/api/providers/status"));
+}
+
+/**
+ * Test a provider configuration
+ */
+export async function testProvider(providerConfig) {
+ return safeFetchJSON(apiUrl("/api/providers/test"), {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify(providerConfig),
+ });
+}
+
+/**
+ * Start a session by mode
+ */
+export async function startSession(sessionConfig) {
+ return safeFetchJSON(apiUrl("/api/session/start"), {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify(sessionConfig),
+ });
+}
+
+/**
+ * Send a chat message (redesigned endpoint)
+ */
+export async function sendChatMessage(messageConfig) {
+ return safeFetchJSON(apiUrl("/api/chat/send"), {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify(messageConfig),
+ });
+}
+
+/**
+ * Get workspace summary
+ */
+export async function fetchWorkspaceSummary(folderPath) {
+ const query = folderPath ? `?folder_path=${encodeURIComponent(folderPath)}` : "";
+ return safeFetchJSON(apiUrl(`/api/workspace/summary${query}`));
+}
+
+/**
+ * Run security scan on workspace
+ */
+export async function scanWorkspace(path) {
+ const query = path ? `?path=${encodeURIComponent(path)}` : "";
+ return safeFetchJSON(apiUrl(`/api/security/scan-workspace${query}`));
+}
\ No newline at end of file
diff --git a/frontend/utils/appInit.js b/frontend/utils/appInit.js
new file mode 100644
index 0000000000000000000000000000000000000000..72abf41f7916c1bcc9e572c2b721bf43e23fa822
--- /dev/null
+++ b/frontend/utils/appInit.js
@@ -0,0 +1,157 @@
+/**
+ * GitPilot App Initialization — Single Source of Truth.
+ *
+ * Best-practice bootstrap pattern:
+ * - Runs EXACTLY ONCE per page load (even with React StrictMode)
+ * - Two-phase strategy: fast ping → then parallel status fetch
+ * - Long retry budget for slow backends (WSL, HF Spaces cold start)
+ * - Shared result between App.jsx and LoginPage.jsx
+ * - No duplicate polling, no race conditions
+ *
+ * Phase 1 — Readiness probe (/api/ping):
+ * /api/ping is a zero-dependency endpoint that responds instantly
+ * once uvicorn is listening. We poll it with short timeouts and many
+ * retries to detect backend readiness WITHOUT wasting time on the
+ * heavy /api/status endpoint (which does GitHub API checks).
+ *
+ * Phase 2 — Data fetch (/api/status + /api/auth/status):
+ * Only after ping succeeds, we fetch the real status data in parallel.
+ * These can still be slow (GitHub API, LLM provider probes) but the
+ * user already sees the login page.
+ *
+ * API call budget per page load:
+ * - Best case: 2-3 × /api/ping + 1 × /api/status + 1 × /api/auth/status
+ * - Worst case: up to 30 × /api/ping + 1 + 1 (60s timeout budget)
+ */
+import { safeFetchJSON, apiUrl } from './api.js';
+
+// Module-level singleton — survives React StrictMode double-mount
+let _initPromise = null;
+let _initResult = null;
+
+const PING_MAX_ATTEMPTS = 30; // up to ~60s of readiness polling
+const PING_INTERVAL_MS = 2000; // 2s between pings
+const PING_TIMEOUT_MS = 4000; // each ping gives up after 4s
+const STATUS_TIMEOUT_MS = 15000; // once ready, status fetch has 15s
+
+/**
+ * Wait for the backend to become reachable by polling /api/ping.
+ * This is a zero-dependency endpoint that responds instantly once
+ * uvicorn is listening — much faster than /api/status which does
+ * GitHub API checks.
+ *
+ * @returns {Promise} true if backend became reachable, false otherwise
+ */
+async function waitForBackend() {
+ for (let i = 0; i < PING_MAX_ATTEMPTS; i++) {
+ try {
+ const result = await safeFetchJSON(
+ apiUrl('/api/ping'),
+ { timeout: PING_TIMEOUT_MS }
+ );
+ if (result && (result.ok === true || result.service)) {
+ console.log(
+ `[initApp] ✅ Backend reachable after ${i + 1} ping attempt(s) ` +
+ `(${(i * PING_INTERVAL_MS) / 1000}s elapsed)`
+ );
+ return true;
+ }
+ } catch (err) {
+ // Silent — we expect failures during cold start
+ if (i === 0 || i % 5 === 0) {
+ console.log(
+ `[initApp] Waiting for backend... ` +
+ `attempt ${i + 1}/${PING_MAX_ATTEMPTS}`
+ );
+ }
+ }
+ // Wait before next ping (except after last attempt)
+ if (i < PING_MAX_ATTEMPTS - 1) {
+ await new Promise((r) => setTimeout(r, PING_INTERVAL_MS));
+ }
+ }
+ return false;
+}
+
+/**
+ * Initialize the app.
+ * Phase 1: poll /api/ping until backend is reachable
+ * Phase 2: fetch /api/status and /api/auth/status in parallel
+ *
+ * @returns {Promise<{status: object|null, authMode: string, ready: boolean, error: string|null}>}
+ */
+export function initApp() {
+  // Idempotent: if a bootstrap is already in flight (or finished),
+  // return the same promise so all callers share one result.
+  if (_initPromise) {
+    return _initPromise;
+  }
+
+  // Assign the promise SYNCHRONOUSLY (before any await) so concurrent
+  // callers — e.g. React StrictMode's double-mount — cannot both start
+  // a bootstrap.
+  _initPromise = (async () => {
+    // ── Phase 1: wait for backend to be reachable ──
+    const reachable = await waitForBackend();
+
+    if (!reachable) {
+      console.error(
+        `[initApp] ❌ Backend did not respond after ${PING_MAX_ATTEMPTS} ping attempts ` +
+        `(${(PING_MAX_ATTEMPTS * PING_INTERVAL_MS) / 1000}s). Giving up.`
+      );
+      _initResult = {
+        status: null,
+        authMode: 'device',
+        ready: false,
+        error: 'Backend did not become reachable. Please check that the server is running.',
+      };
+      return _initResult;
+    }
+
+    // ── Phase 2: fetch real data in parallel ──
+    try {
+      console.log('[initApp] Fetching /api/status + /api/auth/status in parallel...');
+      const [status, authStatus] = await Promise.all([
+        safeFetchJSON(apiUrl('/api/status'), { timeout: STATUS_TIMEOUT_MS }),
+        // auth status is optional — a failure here must not fail init
+        safeFetchJSON(apiUrl('/api/auth/status'), { timeout: STATUS_TIMEOUT_MS })
+          .catch(() => null),
+      ]);
+
+      console.log('[initApp] ✅ Init complete');
+      _initResult = {
+        status,
+        authMode: (authStatus && authStatus.mode) || 'device',
+        ready: true,
+        error: null,
+      };
+      return _initResult;
+    } catch (err) {
+      // Backend was reachable via ping but status fetch failed
+      // Still return ready:true so UI can proceed with limited state
+      console.warn(
+        `[initApp] Status fetch failed after ping succeeded: ${err.message || err}. ` +
+        `Proceeding with limited state.`
+      );
+      _initResult = {
+        status: null,
+        authMode: 'device',
+        ready: true, // backend is up, just slow
+        error: null,
+      };
+      return _initResult;
+    }
+  })();
+
+  return _initPromise;
+}
+
+/**
+ * Get the cached init result (null if init hasn't completed yet).
+ * Synchronous accessor for components that render after initApp() settled.
+ */
+export function getInitResult() {
+  return _initResult;
+}
+
+/**
+ * Reset the init singleton. Call this only when you need to force
+ * a re-initialization (e.g., after the user manually clicks "Retry").
+ * The next initApp() call starts a fresh two-phase bootstrap.
+ */
+export function resetInit() {
+  _initPromise = null;
+  _initResult = null;
+}
diff --git a/frontend/utils/sse.js b/frontend/utils/sse.js
new file mode 100644
index 0000000000000000000000000000000000000000..99a99f138e12ec1b045598d3140bad4035985714
--- /dev/null
+++ b/frontend/utils/sse.js
@@ -0,0 +1,183 @@
+/**
+ * SSE (Server-Sent Events) client for GitPilot V2 streaming API.
+ *
+ * Usage:
+ * import { streamChat, cancelStream } from '../utils/sse';
+ *
+ * const unsubscribe = streamChat(sessionId, message, {
+ * onTextDelta: (text) => appendToChat(text),
+ * onToolStart: (data) => showToolActivity(data),
+ * onToolResult: (data) => updateToolActivity(data),
+ * onApprovalNeeded: (data) => showApprovalModal(data),
+ * onTerminalOutput: (data) => appendTerminal(data),
+ * onTestResult: (data) => showTestBadges(data),
+ * onDiagnostics: (data) => showDiagnostics(data),
+ * onDone: (data) => finalize(data),
+ * onError: (error) => showError(error),
+ * });
+ *
+ * // To cancel:
+ * cancelStream(sessionId);
+ */
+
+const BACKEND_URL = import.meta.env.VITE_BACKEND_URL || '';
+
+function apiUrl(path) {
+ return BACKEND_URL ? `${BACKEND_URL}${path}` : path;
+}
+
+// Active abort controllers keyed by sessionId
+const activeControllers = new Map();
+
+/**
+ * Stream a chat message via the V2 SSE endpoint.
+ * Returns a cleanup function to abort the stream.
+ */
+export function streamChat(sessionId, message, handlers = {}) {
+ const controller = new AbortController();
+ activeControllers.set(sessionId, controller);
+
+ const run = async () => {
+ try {
+ const res = await fetch(apiUrl('/api/v2/chat/stream'), {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify({
+ session_id: sessionId,
+ message,
+ permission_mode: 'normal',
+ }),
+ signal: controller.signal,
+ });
+
+ if (!res.ok || !res.body) {
+ handlers.onError?.({ error: `Server returned ${res.status}` });
+ return;
+ }
+
+ const reader = res.body.getReader();
+ const decoder = new TextDecoder();
+ let buffer = '';
+
+ while (true) {
+ const { done, value } = await reader.read();
+ if (done) break;
+
+ buffer += decoder.decode(value, { stream: true });
+ const parts = buffer.split('\n\n');
+ buffer = parts.pop() || '';
+
+ for (const part of parts) {
+ if (!part.startsWith('data: ')) continue;
+ let event;
+ try {
+ event = JSON.parse(part.slice(6));
+ } catch {
+ continue;
+ }
+
+ switch (event.type) {
+ case 'text_delta':
+ handlers.onTextDelta?.(event.text);
+ break;
+ case 'tool_start':
+ handlers.onToolStart?.(event);
+ break;
+ case 'tool_result':
+ handlers.onToolResult?.(event);
+ break;
+ case 'approval_needed':
+ handlers.onApprovalNeeded?.(event);
+ break;
+ case 'plan_step':
+ handlers.onPlanStep?.(event);
+ break;
+ case 'terminal_output':
+ handlers.onTerminalOutput?.(event);
+ break;
+ case 'terminal_exit':
+ handlers.onTerminalExit?.(event);
+ break;
+ case 'test_result':
+ handlers.onTestResult?.(event);
+ break;
+ case 'diagnostics':
+ handlers.onDiagnostics?.(event);
+ break;
+ case 'status_change':
+ handlers.onStatusChange?.(event.status, event.message);
+ break;
+ case 'done':
+ handlers.onDone?.(event);
+ break;
+ case 'error':
+ handlers.onError?.(event);
+ break;
+ }
+ }
+ }
+ } catch (err) {
+ if (controller.signal.aborted) {
+ // User cancelled — not an error
+ return;
+ }
+ handlers.onError?.({ error: String(err) });
+ } finally {
+ activeControllers.delete(sessionId);
+ }
+ };
+
+ run();
+
+ return () => {
+ controller.abort();
+ activeControllers.delete(sessionId);
+ };
+}
+
+/**
+ * Cancel the active SSE stream for a session.
+ */
+export function cancelStream(sessionId) {
+ const controller = activeControllers.get(sessionId);
+ if (controller) {
+ controller.abort();
+ activeControllers.delete(sessionId);
+ }
+}
+
+/**
+ * Send an approval response to the backend.
+ */
+export async function respondToApproval(sessionId, requestId, approved, scope = 'once') {
+ try {
+ await fetch(apiUrl('/api/v2/approval/respond'), {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify({
+ session_id: sessionId,
+ request_id: requestId,
+ approved,
+ scope,
+ }),
+ });
+ } catch (err) {
+ console.error('[GitPilot] Approval response failed:', err);
+ }
+}
+
+/**
+ * Check if the backend supports the V2 streaming API.
+ * Call this once on startup to decide whether to use SSE or batch.
+ *
+ * NOTE(review): this only verifies that /api/status responds — it assumes
+ * the v2 endpoints always ship alongside /api/status in the same api.py.
+ * Confirm this still holds if the backend is ever split across services.
+ *
+ * @returns {Promise<boolean>} true if /api/status responded with an OK status
+ */
+export async function checkV2Support() {
+  try {
+    const res = await fetch(apiUrl('/api/status'));
+    if (!res.ok) return false;
+    // If the server is running, v2 endpoints are available
+    // (they're part of the same api.py)
+    return true;
+  } catch {
+    return false;
+  }
+}
diff --git a/frontend/utils/ws.js b/frontend/utils/ws.js
new file mode 100644
index 0000000000000000000000000000000000000000..18396cfb03f9c315ca0cbc66979ee0164f6d762a
--- /dev/null
+++ b/frontend/utils/ws.js
@@ -0,0 +1,168 @@
+/**
+ * WebSocket client for real-time session streaming.
+ *
+ * Provides auto-reconnection, heartbeat, and event dispatching.
+ * Falls back gracefully — callers should always have an HTTP fallback.
+ */
+
+const WS_RECONNECT_DELAYS = [1000, 2000, 4000, 8000, 16000];
+const HEARTBEAT_INTERVAL = 30000;
+const MAX_RECONNECT_ATTEMPTS = 5;
+// If a connection dies within this window it counts as unstable
+const MIN_STABLE_DURATION_MS = 3000;
+
+// Real-time session channel with auto-reconnect, backoff, and heartbeat.
+// Lifecycle: connect() → onopen/onmessage/onclose → close().
+export class SessionWebSocket {
+  /**
+   * @param {string} sessionId - Session to subscribe to (used in the WS path)
+   * @param {Object} handlers - Optional callbacks: onMessage, onStatusChange,
+   *   onError, onConnect, onDisconnect
+   */
+  constructor(sessionId, { onMessage, onStatusChange, onError, onConnect, onDisconnect } = {}) {
+    this._sessionId = sessionId;
+    this._handlers = { onMessage, onStatusChange, onError, onConnect, onDisconnect };
+    this._ws = null;              // current WebSocket instance (null when idle)
+    this._reconnectAttempt = 0;   // index into WS_RECONNECT_DELAYS backoff table
+    this._heartbeatTimer = null;  // interval id for periodic pings
+    this._closed = false;         // set by close(); suppresses reconnection
+    this._connectTime = 0;        // epoch ms of the last successful open
+  }
+
+  // Open (or re-open) the socket. No-op after close() has been called.
+  connect() {
+    if (this._closed) return;
+
+    const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
+    const backendUrl = import.meta.env.VITE_BACKEND_URL || '';
+    let wsUrl;
+
+    if (backendUrl) {
+      // Production: replace http(s) with ws(s)
+      wsUrl = backendUrl.replace(/^http/, 'ws') + `/ws/sessions/${this._sessionId}`;
+    } else {
+      // Dev: same host (Vite proxy forwards /ws to backend)
+      wsUrl = `${protocol}//${window.location.host}/ws/sessions/${this._sessionId}`;
+    }
+
+    try {
+      this._ws = new WebSocket(wsUrl);
+    } catch {
+      // WebSocket constructor can throw if URL is invalid
+      this._scheduleReconnect();
+      return;
+    }
+
+    this._ws.onopen = () => {
+      // A successful open resets the backoff schedule.
+      this._connectTime = Date.now();
+      this._reconnectAttempt = 0;
+      this._startHeartbeat();
+      this._handlers.onConnect?.();
+    };
+
+    this._ws.onmessage = (event) => {
+      try {
+        const data = JSON.parse(event.data);
+        this._dispatch(data);
+      } catch (e) {
+        console.warn('[ws] Failed to parse message:', e);
+      }
+    };
+
+    this._ws.onclose = (event) => {
+      this._stopHeartbeat();
+      this._handlers.onDisconnect?.(event);
+
+      if (!this._closed) {
+        // If connection died very quickly, count it as unstable
+        const lived = Date.now() - (this._connectTime || 0);
+        if (lived < MIN_STABLE_DURATION_MS) {
+          // NOTE(review): _scheduleReconnect() below ALSO increments
+          // _reconnectAttempt, so unstable connections advance the backoff
+          // table twice per close. Possibly intentional (back off faster on
+          // flapping links) — confirm before changing.
+          this._reconnectAttempt++;
+        }
+
+        if (this._reconnectAttempt < MAX_RECONNECT_ATTEMPTS) {
+          this._scheduleReconnect();
+        } else {
+          console.warn('[ws] Max reconnect attempts reached, giving up.');
+        }
+      }
+    };
+
+    this._ws.onerror = () => {
+      // Suppress noisy console errors during reconnection attempts.
+      // The onclose handler already manages reconnection logic.
+      // Only notify the caller if we had a stable connection that broke.
+      if (this._connectTime && Date.now() - this._connectTime > MIN_STABLE_DURATION_MS) {
+        this._handlers.onError?.(new Error('WebSocket connection lost'));
+      }
+    };
+  }
+
+  // Send a JSON-serializable payload; returns false when the socket
+  // is not open (callers should fall back to HTTP).
+  send(data) {
+    if (this._ws?.readyState === WebSocket.OPEN) {
+      this._ws.send(JSON.stringify(data));
+      return true;
+    }
+    return false;
+  }
+
+  // Send a user chat message over the socket.
+  sendMessage(content) {
+    return this.send({ type: 'user_message', content });
+  }
+
+  // Ask the backend to cancel the in-flight turn.
+  cancel() {
+    return this.send({ type: 'cancel' });
+  }
+
+  // Permanently close the channel; no further reconnects are attempted.
+  close() {
+    this._closed = true;
+    this._stopHeartbeat();
+    if (this._ws) {
+      this._ws.close();
+      this._ws = null;
+    }
+  }
+
+  // True while the underlying socket is open.
+  get connected() {
+    return this._ws?.readyState === WebSocket.OPEN;
+  }
+
+  // Route a parsed server event to the appropriate handler.
+  _dispatch(data) {
+    const { type } = data;
+
+    switch (type) {
+      case 'agent_message':
+      case 'tool_use':
+      case 'tool_result':
+      case 'diff_update':
+      case 'session_restored':
+      case 'message_received':
+        this._handlers.onMessage?.(data);
+        break;
+      case 'status_change':
+        this._handlers.onStatusChange?.(data.status);
+        break;
+      case 'error':
+        this._handlers.onError?.(new Error(data.message));
+        break;
+      case 'pong':
+        // Heartbeat reply — nothing to do.
+        break;
+      default:
+        // Unknown types are forwarded to onMessage so new server events
+        // degrade gracefully instead of being dropped.
+        this._handlers.onMessage?.(data);
+    }
+  }
+
+  // (Re)start the periodic ping that keeps proxies from idling us out.
+  _startHeartbeat() {
+    this._stopHeartbeat();
+    this._heartbeatTimer = setInterval(() => {
+      this.send({ type: 'ping' });
+    }, HEARTBEAT_INTERVAL);
+  }
+
+  _stopHeartbeat() {
+    if (this._heartbeatTimer) {
+      clearInterval(this._heartbeatTimer);
+      this._heartbeatTimer = null;
+    }
+  }
+
+  // Schedule the next connect() using the capped backoff table.
+  _scheduleReconnect() {
+    const delay = WS_RECONNECT_DELAYS[
+      Math.min(this._reconnectAttempt, WS_RECONNECT_DELAYS.length - 1)
+    ];
+    this._reconnectAttempt++;
+    setTimeout(() => this.connect(), delay);
+  }
+}
diff --git a/frontend/vite.config.js b/frontend/vite.config.js
new file mode 100644
index 0000000000000000000000000000000000000000..e6af0e66dfa29001f58cfe462f36500f56982d2c
--- /dev/null
+++ b/frontend/vite.config.js
@@ -0,0 +1,25 @@
+// frontend/vite.config.js
+import { defineConfig } from "vite";
+import react from "@vitejs/plugin-react";
+
+export default defineConfig({
+  plugins: [react()],
+  define: {
+    // Compile-time constant: app version from package.json
+    // (npm_package_version is only set when run via an npm script).
+    __APP_VERSION__: JSON.stringify(process.env.npm_package_version || "unknown"),
+  },
+  server: {
+    port: 5173,
+    // Listen on all interfaces so the dev server is reachable from LAN/WSL.
+    host: true,
+    // Only proxy API requests when NOT running in Vercel dev
+    // (Vercel dev handles API routing to serverless functions)
+    proxy: process.env.VERCEL
+      ? undefined
+      : {
+          "/api": "http://localhost:8000",
+          // WebSocket sessions need ws: true for the protocol upgrade.
+          "/ws": {
+            target: "ws://localhost:8000",
+            ws: true,
+          },
+        },
+  },
+});
\ No newline at end of file
diff --git a/gitpilot/__init__.py b/gitpilot/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..febd3bec09bbdd4c708d9dde1a6513130281818e
--- /dev/null
+++ b/gitpilot/__init__.py
@@ -0,0 +1,5 @@
+"""GitPilot package."""
+
+from .version import __version__
+
+__all__ = ["__version__"]
diff --git a/gitpilot/__main__.py b/gitpilot/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..46c73041d1fd9224b478b6fe032b704a206b565b
--- /dev/null
+++ b/gitpilot/__main__.py
@@ -0,0 +1,5 @@
+"""Allow running gitpilot as a module: python -m gitpilot"""
+from .cli import main
+
+if __name__ == "__main__":
+ main()
diff --git a/gitpilot/_api_core.py b/gitpilot/_api_core.py
new file mode 100644
index 0000000000000000000000000000000000000000..bea4091952425af8fb916c8fcb6faf4495787bff
--- /dev/null
+++ b/gitpilot/_api_core.py
@@ -0,0 +1,2417 @@
+# gitpilot/_api_core.py -- Original API module (re-exported by api.py)
+from __future__ import annotations
+
+from pathlib import Path
+from typing import List, Optional
+
+from fastapi import FastAPI, Query, Path as FPath, Header, HTTPException, UploadFile, File
+from fastapi.responses import FileResponse, JSONResponse
+from fastapi.staticfiles import StaticFiles
+from fastapi.middleware.cors import CORSMiddleware
+from pydantic import BaseModel, Field
+
+from .version import __version__
+from .github_api import (
+ list_user_repos,
+ list_user_repos_paginated, # Pagination support
+ search_user_repos, # Search across all repos
+ get_repo_tree,
+ get_file,
+ put_file,
+ execution_context,
+ github_request,
+)
+from .github_app import check_repo_write_access
+from .settings import AppSettings, get_settings, set_provider, update_settings, LLMProvider
+from .agentic import (
+ generate_plan,
+ execute_plan,
+ generate_plan_lite,
+ execute_plan_lite,
+ PlanResult,
+ get_flow_definition,
+ dispatch_request,
+ create_pr_after_execution,
+)
+from .agent_router import route as route_request
+from . import github_issues
+from . import github_pulls
+from . import github_search
+from .session import SessionManager, Session
+from .hooks import HookManager, HookEvent
+from .permissions import PermissionManager, PermissionMode
+from .memory import MemoryManager
+from .context_vault import ContextVault
+from .use_case import UseCaseManager
+from .mcp_client import MCPClient
+from .plugins import PluginManager
+from .skills import SkillManager
+from .smart_model_router import ModelRouter, ModelRouterConfig
+from .topology_registry import (
+ list_topologies as _list_topologies,
+ get_topology_graph as _get_topology_graph,
+ classify_message as _classify_message,
+ get_saved_topology_preference,
+ save_topology_preference,
+)
+from .agent_teams import AgentTeam
+
+
+# NOTE(review): this helper is defined in the middle of the module's import
+# block (imports continue below it) — consider moving it after the imports.
+def _is_lite_mode_active() -> bool:
+    """Check if Lite Mode should be used (setting OR topology).
+
+    Returns True when the explicit ``lite_mode`` setting is enabled, or when
+    the user's saved topology preference is ``"lite_mode"``.
+    """
+    s = get_settings()
+    if s.lite_mode:
+        return True
+    return get_saved_topology_preference() == "lite_mode"
+from .learning import LearningEngine
+from .cross_repo import CrossRepoAnalyzer
+from .predictions import PredictiveEngine
+from .security import SecurityScanner
+from .nl_database import NLQueryEngine, QueryDialect, SafetyLevel, TableSchema
+from .github_oauth import (
+ generate_authorization_url,
+ exchange_code_for_token,
+ validate_token,
+ initiate_device_flow,
+ poll_device_token,
+ AuthSession,
+ GitHubUser,
+)
+import os
+import logging
+from .model_catalog import list_models_for_provider
+
+# Optional A2A adapter (MCP ContextForge)
+from .a2a_adapter import router as a2a_router
+
+logger = logging.getLogger(__name__)
+
+# --- Phase 1 singletons ---
+_session_mgr = SessionManager()
+_hook_mgr = HookManager()
+_perm_mgr = PermissionManager()
+
+# --- Phase 2 singletons ---
+_mcp_client = MCPClient()
+_plugin_mgr = PluginManager()
+_skill_mgr = SkillManager()
+_model_router = ModelRouter()
+
+# --- Phase 3 singletons ---
+_agent_team = AgentTeam()
+_learning_engine = LearningEngine()
+_cross_repo = CrossRepoAnalyzer()
+_predictive_engine = PredictiveEngine()
+_security_scanner = SecurityScanner()
+_nl_engine = NLQueryEngine()
+
+app = FastAPI(
+ title="GitPilot API",
+ version=__version__,
+ description="Agentic AI assistant for GitHub repositories.",
+)
+
+# ==========================================================================
+# Optional A2A Adapter (MCP ContextForge)
+# ==========================================================================
+# This is feature-flagged and does not affect the existing UI/REST API unless
+# explicitly enabled.
+def _env_bool(name: str, default: bool) -> bool:
+    """Read an environment variable as a boolean feature flag.
+
+    Returns ``default`` when the variable is unset. Otherwise the value is
+    truthy iff it matches one of the usual spellings ("1", "true", "yes",
+    "y", "on"), case-insensitively and ignoring surrounding whitespace;
+    any other value (including empty string) is treated as False.
+    """
+    raw = os.getenv(name)
+    if raw is None:
+        return default
+    return raw.strip().lower() in {"1", "true", "yes", "y", "on"}
+
+
+if _env_bool("GITPILOT_ENABLE_A2A", False):
+ logger.info("A2A adapter enabled (mounting /a2a/* endpoints)")
+ app.include_router(a2a_router)
+else:
+ logger.info("A2A adapter disabled (set GITPILOT_ENABLE_A2A=true to enable)")
+
+# ============================================================================
+# CORS Configuration
+# ============================================================================
+# Enable CORS to allow frontend (local dev or Vercel) to connect to backend.
+# CORS_ORIGINS is a comma-separated list of allowed origins.
+allowed_origins_str = os.getenv("CORS_ORIGINS", "http://localhost:5173")
+allowed_origins = [origin.strip() for origin in allowed_origins_str.split(",")]
+
+logger.info(f"CORS enabled for origins: {allowed_origins}")
+
+# NOTE(review): the deployment default sets CORS_ORIGINS="*"; per the Fetch/
+# CORS spec a wildcard origin cannot be combined with credentialed requests,
+# so allow_credentials=True is ineffective in that configuration — confirm
+# whether explicit origins should be required when credentials are needed.
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=allowed_origins,
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+
+
+def get_github_token(authorization: Optional[str] = Header(None)) -> Optional[str]:
+    """
+    Extract GitHub token from Authorization header.
+
+    Supports formats:
+    - "Bearer <token>"
+    - "token <token>"
+    - "<token>" (bare token, no scheme prefix)
+
+    Returns None when the header is absent or empty.
+    """
+    if not authorization:
+        return None
+
+    if authorization.startswith("Bearer "):
+        return authorization[7:]  # strip the 7-char "Bearer " prefix
+    elif authorization.startswith("token "):
+        return authorization[6:]  # strip the 6-char "token " prefix
+    else:
+        return authorization  # assume the raw header value is the token
+
+
+# --- FIXED: Added default_branch to model ---
+class RepoSummary(BaseModel):
+ id: int
+ name: str
+ full_name: str
+ private: bool
+ owner: str
+ default_branch: str = "main" # <--- CRITICAL FIX: Defaults to main, but can be master/dev
+
+
+class PaginatedReposResponse(BaseModel):
+ """Response model for paginated repository listing."""
+ repositories: List[RepoSummary]
+ page: int
+ per_page: int
+ total_count: Optional[int] = None
+ has_more: bool
+ query: Optional[str] = None
+
+
+class FileEntry(BaseModel):
+ path: str
+ type: str
+
+
+class FileTreeResponse(BaseModel):
+ files: List[FileEntry] = Field(default_factory=list)
+
+
+class FileContent(BaseModel):
+ path: str
+ encoding: str = "utf-8"
+ content: str
+
+
+class CommitRequest(BaseModel):
+ path: str
+ content: str
+ message: str
+
+
+class CommitResponse(BaseModel):
+ path: str
+ commit_sha: str
+ commit_url: Optional[str] = None
+
+
+class SettingsResponse(BaseModel):
+ provider: LLMProvider
+ providers: List[LLMProvider]
+ openai: dict
+ claude: dict
+ watsonx: dict
+ ollama: dict
+ langflow_url: str
+ has_langflow_plan_flow: bool
+ lite_mode: bool = False
+
+
+class ProviderModelsResponse(BaseModel):
+ provider: LLMProvider
+ models: List[str] = Field(default_factory=list)
+ error: Optional[str] = None
+
+
+class ProviderUpdate(BaseModel):
+ provider: LLMProvider
+
+
+class ChatPlanRequest(BaseModel):
+ repo_owner: str
+ repo_name: str
+ goal: str
+ branch_name: Optional[str] = None
+
+
+class ExecutePlanRequest(BaseModel):
+ repo_owner: str
+ repo_name: str
+ plan: PlanResult
+ branch_name: Optional[str] = None
+
+
+class AuthUrlResponse(BaseModel):
+ authorization_url: str
+ state: str
+
+
+class AuthCallbackRequest(BaseModel):
+ code: str
+ state: str
+
+
+class TokenValidationRequest(BaseModel):
+ access_token: str
+
+
+class UserInfoResponse(BaseModel):
+ user: GitHubUser
+ authenticated: bool
+
+
+class RepoAccessResponse(BaseModel):
+ can_write: bool
+ app_installed: bool
+ auth_type: str
+
+
+# --- v2 Request/Response models ---
+
+class ChatRequest(BaseModel):
+ """Unified chat request for the conversational dispatcher."""
+ repo_owner: str
+ repo_name: str
+ message: str
+ branch_name: Optional[str] = None
+ auto_pr: bool = False
+ topology_id: Optional[str] = None # Override topology for this request
+
+
+class IssueCreateRequest(BaseModel):
+ title: str
+ body: Optional[str] = None
+ labels: Optional[List[str]] = None
+ assignees: Optional[List[str]] = None
+ milestone: Optional[int] = None
+
+
+class IssueUpdateRequest(BaseModel):
+ title: Optional[str] = None
+ body: Optional[str] = None
+ state: Optional[str] = None
+ labels: Optional[List[str]] = None
+ assignees: Optional[List[str]] = None
+ milestone: Optional[int] = None
+
+
+class IssueCommentRequest(BaseModel):
+ body: str
+
+
+class PRCreateRequest(BaseModel):
+ title: str
+ head: str
+ base: str
+ body: Optional[str] = None
+ draft: bool = False
+
+
+class PRMergeRequest(BaseModel):
+ merge_method: str = "merge"
+ commit_title: Optional[str] = None
+ commit_message: Optional[str] = None
+
+
+class SearchRequest(BaseModel):
+ query: str
+ per_page: int = 30
+ page: int = 1
+
+
+# ============================================================================
+# Repository Endpoints - Enterprise Grade with Pagination & Search
+# ============================================================================
+
+@app.get("/api/repos", response_model=PaginatedReposResponse)
+async def api_list_repos(
+ query: Optional[str] = Query(None, description="Search query (searches across ALL repositories)"),
+ page: int = Query(1, ge=1, description="Page number (starts at 1)"),
+ per_page: int = Query(100, ge=1, le=100, description="Results per page (max 100)"),
+ authorization: Optional[str] = Header(None),
+):
+ """
+ List user repositories with enterprise-grade pagination and search.
+ Includes default_branch information for correct frontend routing.
+ """
+ token = get_github_token(authorization)
+
+ try:
+ if query:
+ # SEARCH MODE: Search across ALL repositories
+ result = await search_user_repos(
+ query=query,
+ page=page,
+ per_page=per_page,
+ token=token
+ )
+ else:
+ # PAGINATION MODE: Return repos page by page
+ result = await list_user_repos_paginated(
+ page=page,
+ per_page=per_page,
+ token=token
+ )
+
+ # --- FIXED: Mapping default_branch ---
+ repos = [
+ RepoSummary(
+ id=r["id"],
+ name=r["name"],
+ full_name=r["full_name"],
+ private=r["private"],
+ owner=r["owner"],
+ default_branch=r.get("default_branch", "main"), # <--- CRITICAL FIX
+ )
+ for r in result["repositories"]
+ ]
+
+ return PaginatedReposResponse(
+ repositories=repos,
+ page=result["page"],
+ per_page=result["per_page"],
+ total_count=result.get("total_count"),
+ has_more=result["has_more"],
+ query=query,
+ )
+
+ except Exception as e:
+ logging.exception("Error fetching repositories")
+ return JSONResponse(
+ content={
+ "error": f"Failed to fetch repositories: {str(e)}",
+ "repositories": [],
+ "page": page,
+ "per_page": per_page,
+ "has_more": False,
+ },
+ status_code=500
+ )
+
+
+@app.get("/api/repos/all")
+async def api_list_all_repos(
+ query: Optional[str] = Query(None, description="Search query"),
+ authorization: Optional[str] = Header(None),
+):
+ """
+ Fetch ALL user repositories at once (no pagination).
+ Useful for quick searches, but paginated endpoint is preferred.
+ """
+ token = get_github_token(authorization)
+
+ try:
+ # Fetch all repositories (this will make multiple API calls)
+ all_repos = []
+ page = 1
+ max_pages = 15 # Safety limit: 1500 repos max (15 * 100)
+
+ while page <= max_pages:
+ result = await list_user_repos_paginated(
+ page=page,
+ per_page=100,
+ token=token
+ )
+
+ all_repos.extend(result["repositories"])
+
+ if not result["has_more"]:
+ break
+
+ page += 1
+
+ # Filter by query if provided
+ if query:
+ query_lower = query.lower()
+ all_repos = [
+ r for r in all_repos
+ if query_lower in r["name"].lower() or query_lower in r["full_name"].lower()
+ ]
+
+ # --- FIXED: Mapping default_branch ---
+ repos = [
+ RepoSummary(
+ id=r["id"],
+ name=r["name"],
+ full_name=r["full_name"],
+ private=r["private"],
+ owner=r["owner"],
+ default_branch=r.get("default_branch", "main"), # <--- CRITICAL FIX
+ )
+ for r in all_repos
+ ]
+
+ return {
+ "repositories": repos,
+ "total_count": len(repos),
+ "query": query,
+ }
+
+ except Exception as e:
+ logging.exception("Error fetching all repositories")
+ return JSONResponse(
+ content={"error": f"Failed to fetch repositories: {str(e)}"},
+ status_code=500
+ )
+
+
+@app.get("/api/repos/{owner}/{repo}/tree", response_model=FileTreeResponse)
+async def api_repo_tree(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ ref: Optional[str] = Query(
+ None,
+ description="Git reference (branch, tag, or commit SHA). If omitted, defaults to HEAD.",
+ ),
+ authorization: Optional[str] = Header(None),
+):
+ """
+ Get the file tree for a repository.
+ Handles 'main' vs 'master' discrepancies and empty repositories gracefully.
+ """
+ token = get_github_token(authorization)
+
+ # Keep legacy behavior: missing/empty ref behaves like HEAD.
+ ref_value = (ref or "").strip() or "HEAD"
+
+ try:
+ tree = await get_repo_tree(owner, repo, token=token, ref=ref_value)
+ return FileTreeResponse(files=[FileEntry(**f) for f in tree])
+
+ except HTTPException as e:
+ if e.status_code == 409:
+ return FileTreeResponse(files=[])
+
+ if e.status_code == 404:
+ return JSONResponse(
+ status_code=404,
+ content={
+ "detail": f"Ref '{ref_value}' not found. The repository might be using a different default branch (e.g., 'master')."
+ }
+ )
+
+ raise e
+
+
+@app.get("/api/repos/{owner}/{repo}/file", response_model=FileContent)
+async def api_get_file(
+    owner: str = FPath(...),
+    repo: str = FPath(...),
+    path: str = Query(...),
+    authorization: Optional[str] = Header(None),
+):
+    """Return the contents of a single file from the repository.
+
+    The response's ``encoding`` field keeps its model default ("utf-8");
+    ``get_file`` is assumed to return decoded text — TODO confirm how
+    binary files are handled.
+    """
+    token = get_github_token(authorization)
+    content = await get_file(owner, repo, path, token=token)
+    return FileContent(path=path, content=content)
+
+
+@app.post("/api/repos/{owner}/{repo}/file", response_model=CommitResponse)
+async def api_put_file(
+    owner: str = FPath(...),
+    repo: str = FPath(...),
+    payload: CommitRequest = ...,
+    authorization: Optional[str] = Header(None),
+):
+    """Create or update a file and commit it with the provided message.
+
+    Delegates to ``put_file``; the returned dict is expected to carry the
+    ``CommitResponse`` fields (path, commit_sha, optional commit_url).
+    """
+    token = get_github_token(authorization)
+    result = await put_file(
+        owner, repo, payload.path, payload.content, payload.message, token=token
+    )
+    return CommitResponse(**result)
+
+
+# ============================================================================
+# Settings Endpoints
+# ============================================================================
+
+@app.get("/api/settings", response_model=SettingsResponse)
+async def api_get_settings():
+    """Return the current LLM settings plus the list of selectable providers.
+
+    NOTE(review): the ``providers`` list is hardcoded to four entries here
+    (and repeated in the other settings endpoints) — confirm it stays in
+    sync with ``LLMProvider`` when providers are added.
+    """
+    s: AppSettings = get_settings()
+    return SettingsResponse(
+        provider=s.provider,
+        providers=[LLMProvider.openai, LLMProvider.claude, LLMProvider.watsonx, LLMProvider.ollama],
+        openai=s.openai.model_dump(),
+        claude=s.claude.model_dump(),
+        watsonx=s.watsonx.model_dump(),
+        ollama=s.ollama.model_dump(),
+        langflow_url=s.langflow_url,
+        has_langflow_plan_flow=bool(s.langflow_plan_flow_id),
+        lite_mode=s.lite_mode,
+    )
+
+
+@app.get("/api/settings/models", response_model=ProviderModelsResponse)
+async def api_list_models(provider: Optional[LLMProvider] = Query(None)):
+ """
+ Return the list of LLM models available for a provider.
+
+ If 'provider' is not given, use the currently active provider from settings.
+ """
+ s: AppSettings = get_settings()
+ effective_provider = provider or s.provider
+
+ models, error = list_models_for_provider(effective_provider, s)
+
+ return ProviderModelsResponse(
+ provider=effective_provider,
+ models=models,
+ error=error,
+ )
+
+
+@app.post("/api/settings/provider", response_model=SettingsResponse)
+async def api_set_provider(update: ProviderUpdate):
+ s = set_provider(update.provider)
+ return SettingsResponse(
+ provider=s.provider,
+ providers=[LLMProvider.openai, LLMProvider.claude, LLMProvider.watsonx, LLMProvider.ollama],
+ openai=s.openai.model_dump(),
+ claude=s.claude.model_dump(),
+ watsonx=s.watsonx.model_dump(),
+ ollama=s.ollama.model_dump(),
+ langflow_url=s.langflow_url,
+ has_langflow_plan_flow=bool(s.langflow_plan_flow_id),
+ lite_mode=s.lite_mode,
+ )
+
+
+@app.put("/api/settings/llm", response_model=SettingsResponse)
+async def api_update_llm_settings(updates: dict):
+ """Update full LLM settings including provider-specific configs."""
+ s = update_settings(updates)
+ return SettingsResponse(
+ provider=s.provider,
+ providers=[LLMProvider.openai, LLMProvider.claude, LLMProvider.watsonx, LLMProvider.ollama],
+ openai=s.openai.model_dump(),
+ claude=s.claude.model_dump(),
+ watsonx=s.watsonx.model_dump(),
+ ollama=s.ollama.model_dump(),
+ langflow_url=s.langflow_url,
+ has_langflow_plan_flow=bool(s.langflow_plan_flow_id),
+ lite_mode=s.lite_mode,
+ )
+
+
+# ============================================================================
+# Chat Endpoints
+# ============================================================================
+
+@app.post("/api/chat/plan", response_model=PlanResult)
+async def api_chat_plan(req: ChatPlanRequest, authorization: Optional[str] = Header(None)):
+    """Generate (but do not execute) an agent plan for the given goal.
+
+    Planning runs inside ``execution_context`` with ``ref=branch_name`` so
+    any tool calls made while planning read from the requested branch
+    rather than HEAD/default.
+    """
+    token = get_github_token(authorization)
+
+    # Log the requested branch so sticky-branch issues can be diagnosed.
+    logger.info(
+        "PLAN REQUEST: %s/%s | branch_name=%r",
+        req.repo_owner,
+        req.repo_name,
+        req.branch_name,
+    )
+
+    with execution_context(token, ref=req.branch_name):  # set ref context
+        full_name = f"{req.repo_owner}/{req.repo_name}"
+        plan = await generate_plan(req.goal, full_name, token=token, branch_name=req.branch_name)
+        return plan
+
+
+@app.post("/api/chat/execute")
+async def api_chat_execute(
+ req: ExecutePlanRequest,
+ authorization: Optional[str] = Header(None)
+):
+ token = get_github_token(authorization)
+
+ # ✅ FIX: use execution_context(token, ref=req.branch_name) so tool calls that rely on context
+ # never accidentally run on HEAD/default when branch_name is provided.
+ with execution_context(token, ref=req.branch_name):
+ full_name = f"{req.repo_owner}/{req.repo_name}"
+ # Use lite executor when Lite Mode is active
+ _executor = execute_plan_lite if _is_lite_mode_active() else execute_plan
+ result = await _executor(
+ req.plan, full_name, token=token, branch_name=req.branch_name
+ )
+ if isinstance(result, dict):
+ result.setdefault(
+ "mode",
+ "sticky" if req.branch_name else "hard-switch",
+ )
+ return result
+
+
+@app.get("/api/flow/current")
+async def api_get_flow(topology: Optional[str] = Query(None)):
+ """Return the agent flow definition as a graph.
+
+ If ``topology`` query param is provided, returns the graph for that
+ topology. Otherwise falls back to the user's saved preference, and
+ finally to the legacy ``get_flow_definition()`` output for full
+ backward compatibility.
+ """
+ tid = topology or get_saved_topology_preference()
+ if tid:
+ return _get_topology_graph(tid)
+ # Legacy path — returns the original hardcoded graph
+ flow = await get_flow_definition()
+ return flow
+
+
+# ============================================================================
+# Topology Registry Endpoints (additive — no existing behaviour changed)
+# ============================================================================
+
+@app.get("/api/flow/topologies")
+async def api_list_topologies():
+ """Return lightweight summaries of all available topology presets."""
+ return _list_topologies()
+
+
+@app.get("/api/flow/topology/{topology_id}")
+async def api_get_topology(topology_id: str):
+ """Return the full flow graph for a specific topology."""
+ return _get_topology_graph(topology_id)
+
+
+class ClassifyRequest(BaseModel):
+ message: str
+
+
+@app.post("/api/flow/classify")
+async def api_classify_message(req: ClassifyRequest):
+ """Auto-detect the best topology for a given user message.
+
+ Returns the recommended topology, confidence score, and up to 4
+ alternatives ranked by relevance.
+ """
+ result = _classify_message(req.message)
+ return result.to_dict()
+
+
+class TopologyPrefRequest(BaseModel):
+ topology: str
+
+
+@app.get("/api/settings/topology")
+async def api_get_topology_pref():
+ """Return the user's saved topology preference (or null)."""
+ pref = get_saved_topology_preference()
+ return {"topology": pref}
+
+
+@app.post("/api/settings/topology")
+async def api_set_topology_pref(req: TopologyPrefRequest):
+ """Save the user's preferred topology."""
+ save_topology_preference(req.topology)
+ return {"status": "ok", "topology": req.topology}
+
+
+class LiteModeRequest(BaseModel):
+ lite_mode: bool
+
+
+@app.get("/api/settings/lite-mode")
+async def api_get_lite_mode():
+ """Return current Lite Mode status."""
+ s = get_settings()
+ return {"lite_mode": s.lite_mode}
+
+
+@app.post("/api/settings/lite-mode")
+async def api_set_lite_mode(req: LiteModeRequest):
+ """Toggle Lite Mode on or off."""
+ s = update_settings({"lite_mode": req.lite_mode})
+ return {"status": "ok", "lite_mode": s.lite_mode}
+
+
+# ============================================================================
+# Conversational Chat Endpoint (v2 upgrade)
+# ============================================================================
+
+@app.post("/api/chat/message")
+async def api_chat_message(req: ChatRequest, authorization: Optional[str] = Header(None)):
+ """
+ Unified conversational endpoint. The router analyses the message and
+ dispatches to the appropriate agent (issue, PR, search, review, learning,
+ or the existing plan+execute pipeline).
+ """
+ token = get_github_token(authorization)
+
+ logger.info(
+ "CHAT MESSAGE: %s/%s | message=%r | branch=%r",
+ req.repo_owner,
+ req.repo_name,
+ req.message[:80],
+ req.branch_name,
+ )
+
+ with execution_context(token, ref=req.branch_name):
+ full_name = f"{req.repo_owner}/{req.repo_name}"
+ result = await dispatch_request(
+ req.message, full_name, token=token, branch_name=req.branch_name,
+ topology_id=req.topology_id,
+ )
+
+ # If auto_pr is requested and execution completed, create PR
+ if (
+ req.auto_pr
+ and isinstance(result, dict)
+ and result.get("category") == "plan_execute"
+ and result.get("plan")
+ ):
+ result["auto_pr_hint"] = (
+ "Plan generated. Execute it first, then auto-PR will be created."
+ )
+
+ return result
+
+
+@app.post("/api/chat/execute-with-pr")
+async def api_chat_execute_with_pr(
+ req: ExecutePlanRequest,
+ authorization: Optional[str] = Header(None),
+):
+ """Execute a plan AND automatically create a pull request afterwards."""
+ token = get_github_token(authorization)
+
+ with execution_context(token, ref=req.branch_name):
+ full_name = f"{req.repo_owner}/{req.repo_name}"
+ _executor = execute_plan_lite if _is_lite_mode_active() else execute_plan
+ result = await _executor(
+ req.plan, full_name, token=token, branch_name=req.branch_name,
+ )
+
+ if isinstance(result, dict) and result.get("status") == "completed":
+ branch = result.get("branch", req.branch_name)
+ if branch:
+ pr = await create_pr_after_execution(
+ full_name,
+ branch,
+ req.plan.goal,
+ result.get("executionLog", {}),
+ token=token,
+ )
+ if pr:
+ result["pull_request"] = {
+ "number": pr.get("number"),
+ "url": pr.get("html_url"),
+ "title": pr.get("title"),
+ }
+
+ result.setdefault(
+ "mode",
+ "sticky" if req.branch_name else "hard-switch",
+ )
+
+ return result
+
+
+# ============================================================================
+# Issue Endpoints (v2 upgrade)
+# ============================================================================
+
+@app.get("/api/repos/{owner}/{repo}/issues")
+async def api_list_issues(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ state: str = Query("open"),
+ labels: Optional[str] = Query(None),
+ per_page: int = Query(30, ge=1, le=100),
+ page: int = Query(1, ge=1),
+ authorization: Optional[str] = Header(None),
+):
+ """List issues for a repository."""
+ token = get_github_token(authorization)
+ issues = await github_issues.list_issues(
+ owner, repo, state=state, labels=labels,
+ per_page=per_page, page=page, token=token,
+ )
+ return {"issues": issues, "page": page, "per_page": per_page}
+
+
+@app.get("/api/repos/{owner}/{repo}/issues/{issue_number}")
+async def api_get_issue(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ issue_number: int = FPath(...),
+ authorization: Optional[str] = Header(None),
+):
+ """Get a single issue."""
+ token = get_github_token(authorization)
+ return await github_issues.get_issue(owner, repo, issue_number, token=token)
+
+
+@app.post("/api/repos/{owner}/{repo}/issues")
+async def api_create_issue(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ payload: IssueCreateRequest = ...,
+ authorization: Optional[str] = Header(None),
+):
+ """Create a new issue."""
+ token = get_github_token(authorization)
+ return await github_issues.create_issue(
+ owner, repo, payload.title,
+ body=payload.body, labels=payload.labels,
+ assignees=payload.assignees, milestone=payload.milestone,
+ token=token,
+ )
+
+
+@app.patch("/api/repos/{owner}/{repo}/issues/{issue_number}")
+async def api_update_issue(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ issue_number: int = FPath(...),
+ payload: IssueUpdateRequest = ...,
+ authorization: Optional[str] = Header(None),
+):
+ """Update an existing issue."""
+ token = get_github_token(authorization)
+ return await github_issues.update_issue(
+ owner, repo, issue_number,
+ title=payload.title, body=payload.body, state=payload.state,
+ labels=payload.labels, assignees=payload.assignees,
+ milestone=payload.milestone, token=token,
+ )
+
+
+@app.get("/api/repos/{owner}/{repo}/issues/{issue_number}/comments")
+async def api_list_issue_comments(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ issue_number: int = FPath(...),
+ authorization: Optional[str] = Header(None),
+):
+ """List comments on an issue."""
+ token = get_github_token(authorization)
+ return await github_issues.list_issue_comments(owner, repo, issue_number, token=token)
+
+
+@app.post("/api/repos/{owner}/{repo}/issues/{issue_number}/comments")
+async def api_add_issue_comment(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ issue_number: int = FPath(...),
+ payload: IssueCommentRequest = ...,
+ authorization: Optional[str] = Header(None),
+):
+ """Add a comment to an issue."""
+ token = get_github_token(authorization)
+ return await github_issues.add_issue_comment(
+ owner, repo, issue_number, payload.body, token=token,
+ )
+
+
+# ============================================================================
+# Pull Request Endpoints (v2 upgrade)
+# ============================================================================
+
+@app.get("/api/repos/{owner}/{repo}/pulls")
+async def api_list_pulls(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ state: str = Query("open"),
+ per_page: int = Query(30, ge=1, le=100),
+ page: int = Query(1, ge=1),
+ authorization: Optional[str] = Header(None),
+):
+ """List pull requests."""
+ token = get_github_token(authorization)
+ prs = await github_pulls.list_pull_requests(
+ owner, repo, state=state, per_page=per_page, page=page, token=token,
+ )
+ return {"pull_requests": prs, "page": page, "per_page": per_page}
+
+
+@app.get("/api/repos/{owner}/{repo}/pulls/{pull_number}")
+async def api_get_pull(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ pull_number: int = FPath(...),
+ authorization: Optional[str] = Header(None),
+):
+ """Get a single pull request."""
+ token = get_github_token(authorization)
+ return await github_pulls.get_pull_request(owner, repo, pull_number, token=token)
+
+
+@app.post("/api/repos/{owner}/{repo}/pulls")
+async def api_create_pull(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ payload: PRCreateRequest = ...,
+ authorization: Optional[str] = Header(None),
+):
+ """Create a new pull request."""
+ token = get_github_token(authorization)
+ return await github_pulls.create_pull_request(
+ owner, repo, title=payload.title, head=payload.head,
+ base=payload.base, body=payload.body, draft=payload.draft,
+ token=token,
+ )
+
+
+@app.put("/api/repos/{owner}/{repo}/pulls/{pull_number}/merge")
+async def api_merge_pull(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ pull_number: int = FPath(...),
+ payload: PRMergeRequest = ...,
+ authorization: Optional[str] = Header(None),
+):
+ """Merge a pull request."""
+ token = get_github_token(authorization)
+ return await github_pulls.merge_pull_request(
+ owner, repo, pull_number,
+ merge_method=payload.merge_method,
+ commit_title=payload.commit_title,
+ commit_message=payload.commit_message,
+ token=token,
+ )
+
+
+@app.get("/api/repos/{owner}/{repo}/pulls/{pull_number}/files")
+async def api_list_pr_files(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ pull_number: int = FPath(...),
+ authorization: Optional[str] = Header(None),
+):
+ """List files changed in a pull request."""
+ token = get_github_token(authorization)
+ return await github_pulls.list_pr_files(owner, repo, pull_number, token=token)
+
+
+# ============================================================================
+# Search Endpoints (v2 upgrade)
+# ============================================================================
+
+@app.get("/api/search/code")
+async def api_search_code(
+ q: str = Query(..., description="Search query"),
+ owner: Optional[str] = Query(None),
+ repo: Optional[str] = Query(None),
+ language: Optional[str] = Query(None),
+ per_page: int = Query(30, ge=1, le=100),
+ page: int = Query(1, ge=1),
+ authorization: Optional[str] = Header(None),
+):
+ """Search for code across GitHub."""
+ token = get_github_token(authorization)
+ return await github_search.search_code(
+ q, owner=owner, repo=repo, language=language,
+ per_page=per_page, page=page, token=token,
+ )
+
+
+@app.get("/api/search/issues")
+async def api_search_issues(
+ q: str = Query(..., description="Search query"),
+ owner: Optional[str] = Query(None),
+ repo: Optional[str] = Query(None),
+ state: Optional[str] = Query(None),
+ label: Optional[str] = Query(None),
+ per_page: int = Query(30, ge=1, le=100),
+ page: int = Query(1, ge=1),
+ authorization: Optional[str] = Header(None),
+):
+ """Search issues and pull requests."""
+ token = get_github_token(authorization)
+ return await github_search.search_issues(
+ q, owner=owner, repo=repo, state=state, label=label,
+ per_page=per_page, page=page, token=token,
+ )
+
+
+@app.get("/api/search/repositories")
+async def api_search_repositories(
+ q: str = Query(..., description="Search query"),
+ language: Optional[str] = Query(None),
+ sort: Optional[str] = Query(None),
+ per_page: int = Query(30, ge=1, le=100),
+ page: int = Query(1, ge=1),
+ authorization: Optional[str] = Header(None),
+):
+ """Search for repositories."""
+ token = get_github_token(authorization)
+ return await github_search.search_repositories(
+ q, language=language, sort=sort,
+ per_page=per_page, page=page, token=token,
+ )
+
+
+@app.get("/api/search/users")
+async def api_search_users(
+ q: str = Query(..., description="Search query"),
+ type_filter: Optional[str] = Query(None, alias="type"),
+ location: Optional[str] = Query(None),
+ language: Optional[str] = Query(None),
+ per_page: int = Query(30, ge=1, le=100),
+ page: int = Query(1, ge=1),
+ authorization: Optional[str] = Header(None),
+):
+ """Search for GitHub users and organizations."""
+ token = get_github_token(authorization)
+ return await github_search.search_users(
+ q, type_filter=type_filter, location=location, language=language,
+ per_page=per_page, page=page, token=token,
+ )
+
+
+# ============================================================================
+# Route Analysis Endpoint (v2 upgrade)
+# ============================================================================
+
+@app.post("/api/chat/route")
+async def api_chat_route(payload: dict):
+ """Preview how a message would be routed without executing it.
+
+ Useful for the frontend to display which agent(s) will handle the request.
+ """
+ message = payload.get("message", "")
+ if not message:
+ return JSONResponse({"error": "message is required"}, status_code=400)
+
+ workflow = route_request(message)
+ return {
+ "category": workflow.category.value,
+ "agents": [a.value for a in workflow.agents],
+ "description": workflow.description,
+ "requires_repo_context": workflow.requires_repo_context,
+ "entity_number": workflow.entity_number,
+ "metadata": workflow.metadata,
+ }
+
+
+# ============================================================================
+# Authentication Endpoints (Web Flow + Device Flow)
+# ============================================================================
+
@app.get("/api/auth/url", response_model=AuthUrlResponse)
async def api_get_auth_url():
    """Build the GitHub OAuth (Web Flow) authorization URL.

    Only meaningful when a Client Secret is configured.
    """
    url, state = generate_authorization_url()
    return AuthUrlResponse(authorization_url=url, state=state)
+
+
@app.post("/api/auth/callback", response_model=AuthSession)
async def api_auth_callback(request: AuthCallbackRequest):
    """Complete the OAuth Web Flow: swap the authorization code for a token.

    A ValueError from the exchange is surfaced as a 400 payload.
    """
    try:
        return await exchange_code_for_token(request.code, request.state)
    except ValueError as e:
        return JSONResponse({"error": str(e)}, status_code=400)
+
+
@app.post("/api/auth/validate", response_model=UserInfoResponse)
async def api_validate_token(request: TokenValidationRequest):
    """Check a GitHub access token; return the owning user when valid."""
    user = await validate_token(request.access_token)
    if not user:
        # Placeholder user keeps the response model satisfied when invalid.
        return UserInfoResponse(
            user=GitHubUser(login="", id=0, avatar_url=""),
            authenticated=False,
        )
    return UserInfoResponse(user=user, authenticated=True)
+
+
@app.post("/api/auth/device/code")
async def api_device_code():
    """Begin the device login flow (step 1); no client secret required."""
    try:
        return await initiate_device_flow()
    except Exception as e:
        # Surface upstream failures as a 500 payload instead of raising.
        return JSONResponse({"error": str(e)}, status_code=500)
+
+
@app.post("/api/auth/device/poll")
async def api_device_poll(payload: dict):
    """Poll GitHub for device-flow authorization (step 2 of the flow)."""
    device_code = payload.get("device_code")
    if not device_code:
        return JSONResponse({"error": "Missing device_code"}, status_code=400)

    try:
        session = await poll_device_token(device_code)
    except ValueError as e:
        return JSONResponse({"error": str(e)}, status_code=400)

    if session:
        return session
    # Not authorized yet — 202 tells the client to keep polling.
    return JSONResponse({"status": "pending"}, status_code=202)
+
+
@app.get("/api/auth/status")
async def api_auth_status():
    """Report which auth UI the frontend should render.

    "web" when a client secret is present (full OAuth), otherwise "device".
    """
    has_secret = bool(os.getenv("GITHUB_CLIENT_SECRET"))
    # NOTE(review): the hardcoded fallback client ID makes this always True —
    # confirm whether an unset GITHUB_CLIENT_ID should really count as configured.
    has_id = bool(os.getenv("GITHUB_CLIENT_ID", "Iv23litmRp80Z6wmlyRn"))
    pat_present = bool(
        os.getenv("GITPILOT_GITHUB_TOKEN") or os.getenv("GITHUB_TOKEN")
    )

    return {
        "mode": "web" if has_secret else "device",
        "configured": has_id,
        "oauth_configured": has_secret,
        "pat_configured": pat_present,
    }
+
+
@app.get("/api/auth/app-url")
async def api_get_app_url():
    """Return the public installation URL for the GitHub App."""
    slug = os.getenv("GITHUB_APP_SLUG", "gitpilota")
    return {
        "app_url": f"https://github.com/apps/{slug}",
        "app_slug": slug,
    }
+
+
@app.get("/api/auth/installation-status")
async def api_check_installation_status():
    """Check if GitHub App is installed for the current user.

    Resolution order: a configured PAT (validated live against GitHub)
    wins; otherwise the response reports GitHub App installation status.
    """
    # Prefer a personal access token when one is configured via env.
    pat_token = os.getenv("GITPILOT_GITHUB_TOKEN") or os.getenv("GITHUB_TOKEN")

    if pat_token:
        user = await validate_token(pat_token)
        if user:
            # A valid PAT short-circuits the GitHub App check entirely.
            return {
                "installed": True,
                "access_token": pat_token,
                "user": user,
                "auth_type": "pat",
            }

    # NOTE(review): the hardcoded default makes github_app_id always truthy,
    # so the "not configured" branch below is unreachable as written —
    # confirm whether the default should be removed.
    github_app_id = os.getenv("GITHUB_APP_ID", "2313985")
    if not github_app_id:
        return {
            "installed": False,
            "message": "GitHub authentication not configured.",
            "auth_type": "none",
        }

    return {
        "installed": False,
        "message": "GitHub App not installed.",
        "auth_type": "github_app",
    }
+
+
@app.get("/api/auth/repo-access", response_model=RepoAccessResponse)
async def api_check_repo_access(
    owner: str = Query(...),
    repo: str = Query(...),
    authorization: Optional[str] = Header(None),
):
    """Report whether we can write to owner/repo (user token or GitHub App).

    The frontend uses this to decide between showing an installation
    prompt and proceeding with write operations.
    """
    user_token = get_github_token(authorization)
    info = await check_repo_write_access(owner, repo, user_token=user_token)
    return RepoAccessResponse(
        can_write=info["can_write"],
        app_installed=info["app_installed"],
        auth_type=info["auth_type"],
    )
+
+
+# ============================================================================
+# Session Endpoints (Phase 1)
+# ============================================================================
+
@app.get("/api/sessions")
async def api_list_sessions():
    """Return every persisted session known to the session manager."""
    sessions = _session_mgr.list_sessions()
    return {"sessions": sessions}
+
+
@app.post("/api/sessions")
async def api_create_session(payload: dict):
    """Create a session from either the legacy or the multi-repo payload.

    Legacy: {"repo_full_name": "owner/repo", "branch": "main"}
    Multi:  {"repos": [{full_name, branch, mode}], "active_repo": "owner/repo"}
    """
    repo_full_name = payload.get("repo_full_name", "")
    branch = payload.get("branch")
    name = payload.get("name")  # optional — derived from first user prompt
    session = _session_mgr.create(
        repo_full_name=repo_full_name, branch=branch, name=name
    )

    repos = payload.get("repos")
    if repos:
        # Multi-repo context supplied explicitly by the caller.
        session.repos = repos
        session.active_repo = payload.get("active_repo", repo_full_name)
    elif repo_full_name:
        # Legacy single-repo payload → synthesize a one-entry context.
        session.repos = [{
            "full_name": repo_full_name,
            "branch": branch or "main",
            "mode": "write",
        }]
        session.active_repo = repo_full_name

    _session_mgr.save(session)
    return {"session_id": session.id, "status": session.status}
+
+
@app.get("/api/sessions/{session_id}")
async def api_get_session(session_id: str):
    """Return summary details for one session (404 when unknown)."""
    session = _session_mgr.load(session_id)
    if not session:
        raise HTTPException(status_code=404, detail="Session not found")
    summary = {
        "id": session.id,
        "status": session.status,
        "repo_full_name": session.repo_full_name,
        "branch": session.branch,
        "created_at": session.created_at,
        "message_count": len(session.messages),
        "checkpoint_count": len(session.checkpoints),
        "repos": session.repos,
        "active_repo": session.active_repo,
    }
    return summary
+
+
@app.delete("/api/sessions/{session_id}")
async def api_delete_session(session_id: str):
    """Delete a session; 404 when it does not exist."""
    if not _session_mgr.delete(session_id):
        raise HTTPException(status_code=404, detail="Session not found")
    return {"deleted": True}
+
+
@app.patch("/api/sessions/{session_id}/context")
async def api_update_session_context(session_id: str, payload: dict):
    """Mutate a session's multi-repo context.

    Supported payloads:
        {"action": "add", "repo_full_name": "owner/repo", "branch": "main"}
        {"action": "remove", "repo_full_name": "owner/repo"}
        {"action": "set_active", "repo_full_name": "owner/repo"}
    """
    session = _session_mgr.load(session_id)
    if not session:
        raise HTTPException(status_code=404, detail="Session not found")

    action = payload.get("action")
    repo_name = payload.get("repo_full_name")
    if not action or not repo_name:
        raise HTTPException(status_code=400, detail="action and repo_full_name required")

    # Is the repo already part of this session's context?
    known = any(r.get("full_name") == repo_name for r in session.repos)

    if action == "add":
        if not known:
            session.repos.append({
                "full_name": repo_name,
                "branch": payload.get("branch", "main"),
                "mode": "read",  # newly added repos join as read-only context
            })
        if not session.active_repo:
            session.active_repo = repo_name
    elif action == "remove":
        session.repos = [
            r for r in session.repos if r.get("full_name") != repo_name
        ]
        if session.active_repo == repo_name:
            # Fall back to the first remaining repo, if any.
            session.active_repo = (
                session.repos[0]["full_name"] if session.repos else None
            )
    elif action == "set_active":
        if not known:
            raise HTTPException(status_code=400, detail="Repo not in session context")
        # Exactly one repo holds write mode at a time.
        for r in session.repos:
            r["mode"] = "write" if r.get("full_name") == repo_name else "read"
        session.active_repo = repo_name
    else:
        raise HTTPException(status_code=400, detail=f"Unknown action: {action}")

    _session_mgr.save(session)
    return {"repos": session.repos, "active_repo": session.active_repo}
+
+
@app.post("/api/sessions/{session_id}/checkpoint")
async def api_create_checkpoint(session_id: str, payload: dict):
    """Snapshot a session under an optional label (default "checkpoint")."""
    session = _session_mgr.load(session_id)
    if not session:
        raise HTTPException(status_code=404, detail="Session not found")
    checkpoint = _session_mgr.create_checkpoint(
        session, label=payload.get("label", "checkpoint")
    )
    return {
        "checkpoint_id": checkpoint.id,
        "label": checkpoint.label,
        "created_at": checkpoint.created_at,
    }
+
+
+# ============================================================================
+# Hooks Endpoints (Phase 1)
+# ============================================================================
+
@app.get("/api/hooks")
async def api_list_hooks():
    """Return every hook currently registered with the hook manager."""
    hooks = _hook_mgr.list_hooks()
    return {"hooks": hooks}
+
+
@app.post("/api/hooks")
async def api_register_hook(payload: dict):
    """Register a hook; "event" and "name" are required, the rest optional."""
    from .hooks import HookDefinition

    try:
        definition = HookDefinition(
            event=HookEvent(payload["event"]),
            name=payload["name"],
            command=payload.get("command"),
            blocking=payload.get("blocking", False),
            timeout=payload.get("timeout", 30),
        )
        _hook_mgr.register(definition)
    except (KeyError, ValueError) as e:
        # Missing keys or an unknown event value → client error.
        raise HTTPException(status_code=400, detail=str(e))
    return {"registered": True, "name": definition.name, "event": definition.event.value}
+
+
@app.delete("/api/hooks/{event}/{name}")
async def api_unregister_hook(event: str, name: str):
    """Remove a hook identified by its event and name."""
    try:
        _hook_mgr.unregister(HookEvent(event), name)
    except ValueError as e:
        # Unknown event string (or manager rejection) → client error.
        raise HTTPException(status_code=400, detail=str(e))
    return {"unregistered": True}
+
+
+# ============================================================================
+# Permissions Endpoints (Phase 1)
+# ============================================================================
+
@app.get("/api/permissions")
async def api_get_permissions():
    """Serialize and return the active permission policy."""
    policy = _perm_mgr.to_dict()
    return policy
+
+
@app.put("/api/permissions/mode")
async def api_set_permission_mode(payload: dict):
    """Switch the permission mode; accepts "normal", "plan", or "auto"."""
    mode_str = payload.get("mode", "normal")
    try:
        new_mode = PermissionMode(mode_str)
    except ValueError:
        raise HTTPException(status_code=400, detail=f"Invalid mode: {mode_str}")
    _perm_mgr.policy.mode = new_mode
    return {"mode": new_mode.value}
+
+
+# ============================================================================
+# Project Context / Memory Endpoints (Phase 1)
+# ============================================================================
+
@app.get("/api/repos/{owner}/{repo}/context")
async def api_get_project_context(
    owner: str = FPath(...),
    repo: str = FPath(...),
):
    """Return conventions, rules, and auto-memory for a repo workspace.

    An empty context is returned when the workspace was never created.
    """
    from pathlib import Path as StdPath

    workspace = StdPath.home() / ".gitpilot" / "workspaces" / owner / repo
    if not workspace.exists():
        return {"conventions": "", "rules": [], "auto_memory": {}, "system_prompt": ""}
    context = MemoryManager(workspace).load_context()
    return {
        "conventions": context.conventions,
        "rules": context.rules,
        "auto_memory": context.auto_memory,
        "system_prompt": context.to_system_prompt(),
    }
+
+
@app.post("/api/repos/{owner}/{repo}/context/init")
async def api_init_project_context(
    owner: str = FPath(...),
    repo: str = FPath(...),
):
    """Create the workspace and seed it with a template GITPILOT.md."""
    from pathlib import Path as StdPath

    workspace = StdPath.home() / ".gitpilot" / "workspaces" / owner / repo
    workspace.mkdir(parents=True, exist_ok=True)
    created_path = MemoryManager(workspace).init_project()
    return {"initialized": True, "path": str(created_path)}
+
+
@app.post("/api/repos/{owner}/{repo}/context/pattern")
async def api_add_learned_pattern(
    owner: str = FPath(...),
    repo: str = FPath(...),
    payload: dict = ...,
):
    """Append a learned pattern to the repo workspace's auto-memory."""
    from pathlib import Path as StdPath

    pattern = payload.get("pattern", "")
    if not pattern:
        raise HTTPException(status_code=400, detail="pattern is required")
    workspace = StdPath.home() / ".gitpilot" / "workspaces" / owner / repo
    workspace.mkdir(parents=True, exist_ok=True)
    MemoryManager(workspace).add_learned_pattern(pattern)
    return {"added": True, "pattern": pattern}
+
+
+# ============================================================================
+# Context Vault Endpoints (additive — Context + Use Case system)
+# ============================================================================
+
+def _workspace_path(owner: str, repo: str) -> Path:
+ """Resolve the local workspace path for a repo."""
+ return Path.home() / ".gitpilot" / "workspaces" / owner / repo
+
+
@app.get("/api/repos/{owner}/{repo}/context/assets")
async def api_list_context_assets(
    owner: str = FPath(...),
    repo: str = FPath(...),
):
    """List metadata for every asset in the repo's context vault."""
    vault = ContextVault(_workspace_path(owner, repo))
    return {"assets": [asset.to_dict() for asset in vault.list_assets()]}
+
+
@app.post("/api/repos/{owner}/{repo}/context/assets/upload")
async def api_upload_context_asset(
    owner: str = FPath(...),
    repo: str = FPath(...),
    file: UploadFile = File(...),
):
    """Store an uploaded file in the project context vault."""
    vault = ContextVault(_workspace_path(owner, repo))
    data = await file.read()
    try:
        meta = vault.upload_asset(
            file.filename or "upload",
            data,
            mime=file.content_type or "",
        )
    except ValueError as e:
        # The vault rejected the upload → client error.
        raise HTTPException(status_code=400, detail=str(e))
    return {"asset": meta.to_dict()}
+
+
@app.delete("/api/repos/{owner}/{repo}/context/assets/{asset_id}")
async def api_delete_context_asset(
    owner: str = FPath(...),
    repo: str = FPath(...),
    asset_id: str = FPath(...),
):
    """Remove an asset from the context vault."""
    ContextVault(_workspace_path(owner, repo)).delete_asset(asset_id)
    return {"deleted": True, "asset_id": asset_id}
+
+
@app.get("/api/repos/{owner}/{repo}/context/assets/{asset_id}/download")
async def api_download_context_asset(
    owner: str = FPath(...),
    repo: str = FPath(...),
    asset_id: str = FPath(...),
):
    """Stream a raw context asset file back to the caller."""
    vault = ContextVault(_workspace_path(owner, repo))
    path = vault.get_asset_path(asset_id)
    if not path:
        raise HTTPException(status_code=404, detail="Asset not found")
    return FileResponse(path, filename=vault.get_asset_filename(asset_id))
+
+
+# ============================================================================
+# Use Case Endpoints (additive — guided requirement clarification)
+# ============================================================================
+
@app.get("/api/repos/{owner}/{repo}/use-cases")
async def api_list_use_cases(
    owner: str = FPath(...),
    repo: str = FPath(...),
):
    """Return all use cases recorded for this repository."""
    manager = UseCaseManager(_workspace_path(owner, repo))
    return {"use_cases": manager.list_use_cases()}
+
+
@app.post("/api/repos/{owner}/{repo}/use-cases")
async def api_create_use_case(
    owner: str = FPath(...),
    repo: str = FPath(...),
    payload: dict = ...,
):
    """Create a use case; title defaults to "New Use Case"."""
    manager = UseCaseManager(_workspace_path(owner, repo))
    use_case = manager.create_use_case(
        title=payload.get("title", "New Use Case"),
        initial_notes=payload.get("initial_notes", ""),
    )
    return {"use_case": use_case.to_dict()}
+
+
@app.get("/api/repos/{owner}/{repo}/use-cases/{use_case_id}")
async def api_get_use_case(
    owner: str = FPath(...),
    repo: str = FPath(...),
    use_case_id: str = FPath(...),
):
    """Fetch one use case, including its messages and spec."""
    manager = UseCaseManager(_workspace_path(owner, repo))
    use_case = manager.get_use_case(use_case_id)
    if not use_case:
        raise HTTPException(status_code=404, detail="Use case not found")
    return {"use_case": use_case.to_dict()}
+
+
@app.post("/api/repos/{owner}/{repo}/use-cases/{use_case_id}/chat")
async def api_use_case_chat(
    owner: str = FPath(...),
    repo: str = FPath(...),
    use_case_id: str = FPath(...),
    payload: dict = ...,
):
    """Append a guided chat message; returns the updated use case + spec."""
    message = payload.get("message", "")
    if not message:
        raise HTTPException(status_code=400, detail="message is required")
    manager = UseCaseManager(_workspace_path(owner, repo))
    use_case = manager.chat(use_case_id, message)
    if not use_case:
        raise HTTPException(status_code=404, detail="Use case not found")
    return {"use_case": use_case.to_dict()}
+
+
@app.post("/api/repos/{owner}/{repo}/use-cases/{use_case_id}/finalize")
async def api_finalize_use_case(
    owner: str = FPath(...),
    repo: str = FPath(...),
    use_case_id: str = FPath(...),
):
    """Mark a use case active and export its markdown spec."""
    manager = UseCaseManager(_workspace_path(owner, repo))
    use_case = manager.finalize(use_case_id)
    if not use_case:
        raise HTTPException(status_code=404, detail="Use case not found")
    return {"use_case": use_case.to_dict()}
+
+
+# ============================================================================
+# MCP Endpoints (Phase 2)
+# ============================================================================
+
@app.get("/api/mcp/servers")
async def api_mcp_list_servers():
    """Report configured MCP servers and their connection status."""
    status = _mcp_client.to_dict()
    return status
+
+
@app.post("/api/mcp/connect/{server_name}")
async def api_mcp_connect(server_name: str):
    """Open a connection to a named MCP server and list its tools."""
    try:
        connection = await _mcp_client.connect(server_name)
        tool_list = [
            {"name": tool.name, "description": tool.description}
            for tool in connection.tools
        ]
    except Exception as e:
        # Any connection/introspection failure becomes a client-visible 400.
        raise HTTPException(status_code=400, detail=str(e))
    return {"connected": True, "server": server_name, "tools": tool_list}
+
+
@app.post("/api/mcp/disconnect/{server_name}")
async def api_mcp_disconnect(server_name: str):
    """Disconnect from a named MCP server.

    Delegates to the shared MCP client; presumably a no-op for unknown
    or already-disconnected servers — confirm in the MCP client.
    """
    await _mcp_client.disconnect(server_name)
    return {"disconnected": True, "server": server_name}
+
+
@app.post("/api/mcp/call")
async def api_mcp_call_tool(payload: dict):
    """Invoke a tool on an already-connected MCP server."""
    server = payload.get("server", "")
    tool_name = payload.get("tool", "")
    params = payload.get("params", {})
    if not server or not tool_name:
        raise HTTPException(status_code=400, detail="server and tool are required")
    # NOTE(review): reaches into the client's private connection map;
    # consider exposing a public lookup on the MCP client instead.
    conn = _mcp_client._connections.get(server)
    if not conn:
        raise HTTPException(status_code=404, detail=f"Not connected to server: {server}")
    try:
        result = await _mcp_client.call_tool(conn, tool_name, params)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    return {"result": result}
+
+
+# ============================================================================
+# Plugin Endpoints (Phase 2)
+# ============================================================================
+
@app.get("/api/plugins")
async def api_list_plugins():
    """List every installed plugin's metadata."""
    installed = _plugin_mgr.list_installed()
    return {"plugins": [plugin.to_dict() for plugin in installed]}
+
+
@app.post("/api/plugins/install")
async def api_install_plugin(payload: dict):
    """Install a plugin from a git URL or a local path."""
    source = payload.get("source", "")
    if not source:
        raise HTTPException(status_code=400, detail="source is required")
    try:
        info = _plugin_mgr.install(source)
        return {"installed": True, "plugin": info.to_dict()}
    except Exception as e:
        # Installation failures are reported as client errors.
        raise HTTPException(status_code=400, detail=str(e))
+
+
@app.delete("/api/plugins/{name}")
async def api_uninstall_plugin(name: str):
    """Uninstall a plugin; 404 when no such plugin exists."""
    if not _plugin_mgr.uninstall(name):
        raise HTTPException(status_code=404, detail=f"Plugin not found: {name}")
    return {"uninstalled": True, "name": name}
+
+
+# ============================================================================
+# Skills Endpoints (Phase 2)
+# ============================================================================
+
@app.get("/api/skills")
async def api_list_skills():
    """Enumerate every skill the skill manager knows about."""
    skills = _skill_mgr.list_skills()
    return {"skills": skills}
+
+
@app.post("/api/skills/invoke")
async def api_invoke_skill(payload: dict):
    """Render a skill's prompt by name with the supplied context."""
    name = payload.get("name", "")
    if not name:
        raise HTTPException(status_code=400, detail="name is required")
    rendered = _skill_mgr.invoke(name, payload.get("context", {}))
    if rendered is None:
        raise HTTPException(status_code=404, detail=f"Skill not found: {name}")
    return {"skill": name, "rendered_prompt": rendered}
+
+
@app.post("/api/skills/reload")
async def api_reload_skills():
    """Re-scan all skill sources and report how many were loaded."""
    loaded = _skill_mgr.load_all()
    return {"reloaded": True, "count": loaded}
+
+
+# ============================================================================
+# Vision Endpoints (Phase 2)
+# ============================================================================
+
@app.post("/api/vision/analyze")
async def api_vision_analyze(payload: dict):
    """Run a vision model over an image file with a text prompt."""
    from .vision import VisionAnalyzer

    image_path = payload.get("image_path", "")
    if not image_path:
        raise HTTPException(status_code=400, detail="image_path is required")
    prompt = payload.get("prompt", "Describe this image.")
    provider = payload.get("provider", "openai")
    try:
        analyzer = VisionAnalyzer(provider=provider)
        result = await analyzer.analyze_image(Path(image_path), prompt)
        return result.to_dict()
    except Exception as e:
        # Bad provider, unreadable file, or upstream failure → 400.
        raise HTTPException(status_code=400, detail=str(e))
+
+
+# ============================================================================
+# Model Router Endpoints (Phase 2)
+# ============================================================================
+
@app.post("/api/model-router/select")
async def api_model_select(payload: dict):
    """Preview the model-routing decision for a request without running it."""
    request = payload.get("request", "")
    if not request:
        raise HTTPException(status_code=400, detail="request is required")
    choice = _model_router.select(request, payload.get("category"))
    return {
        "model": choice.model,
        "tier": choice.tier.value,
        "complexity": choice.complexity.value,
        "provider": choice.provider,
        "reason": choice.reason,
    }
+
+
@app.get("/api/model-router/usage")
async def api_model_usage():
    """Summarize model usage and remaining budget."""
    summary = _model_router.get_usage_summary()
    return summary
+
+
+# ============================================================================
+# Agent Teams Endpoints (Phase 3)
+# ============================================================================
+
@app.post("/api/agent-teams/plan")
async def api_team_plan(payload: dict):
    """Break a complex task into independent subtasks (no execution)."""
    task = payload.get("task", "")
    if not task:
        raise HTTPException(status_code=400, detail="task is required")
    planned = _agent_team.plan_and_split(task)
    return {
        "subtasks": [
            {"id": s.id, "title": s.title, "description": s.description}
            for s in planned
        ]
    }
+
+
@app.post("/api/agent-teams/execute")
async def api_team_execute(payload: dict):
    """Plan a task, run the subtasks in parallel, and merge the results."""
    task = payload.get("task", "")
    if not task:
        raise HTTPException(status_code=400, detail="task is required")
    planned = _agent_team.plan_and_split(task)
    merged = await _agent_team.execute_parallel(planned)
    return merged.to_dict()
+
+
+# ============================================================================
+# Learning Engine Endpoints (Phase 3)
+# ============================================================================
+
@app.post("/api/learning/evaluate")
async def api_learning_evaluate(payload: dict):
    """Score an action's outcome so the learning engine can adapt."""
    action = payload.get("action", "")
    if not action:
        raise HTTPException(status_code=400, detail="action is required")
    evaluation = _learning_engine.evaluate_outcome(
        action,
        payload.get("outcome", {}),
        repo=payload.get("repo", ""),
    )
    return {
        "action": evaluation.action,
        "success": evaluation.success,
        "score": evaluation.score,
        "feedback": evaluation.feedback,
    }
+
+
@app.get("/api/learning/insights/{owner}/{repo}")
async def api_learning_insights(owner: str = FPath(...), repo: str = FPath(...)):
    """Return learned patterns, style, and success stats for owner/repo."""
    full_name = f"{owner}/{repo}"
    insights = _learning_engine.get_repo_insights(full_name)
    return {
        "repo": full_name,
        "patterns": insights.patterns,
        "preferred_style": insights.preferred_style,
        "success_rate": insights.success_rate,
        "total_evaluations": insights.total_evaluations,
    }
+
+
@app.post("/api/learning/style")
async def api_learning_set_style(payload: dict):
    """Record the preferred coding style for a repository."""
    repo = payload.get("repo", "")
    style = payload.get("style", {})
    if not repo:
        raise HTTPException(status_code=400, detail="repo is required")
    _learning_engine.set_preferred_style(repo, style)
    return {"repo": repo, "style": style}
+
+
+# ============================================================================
+# Cross-Repo Intelligence Endpoints (Phase 3)
+# ============================================================================
+
@app.post("/api/cross-repo/dependencies")
async def api_cross_repo_dependencies(payload: dict):
    """Build a dependency graph from a filename -> content mapping."""
    files = payload.get("files", {})
    if not files:
        raise HTTPException(status_code=400, detail="files dict is required (filename -> content)")
    dependency_graph = _cross_repo.analyze_dependencies_from_files(files)
    return dependency_graph.to_dict()
+
+
@app.post("/api/cross-repo/impact")
async def api_cross_repo_impact(payload: dict):
    """Estimate the blast radius of bumping a package version."""
    package_name = payload.get("package", "")
    if not package_name:
        raise HTTPException(status_code=400, detail="package is required")
    files = payload.get("files", {})
    graph = _cross_repo.analyze_dependencies_from_files(files)
    report = _cross_repo.impact_analysis(
        graph, package_name, payload.get("new_version")
    )
    return report.to_dict()
+
+
+# ============================================================================
+# Predictions Endpoints (Phase 3)
+# ============================================================================
+
@app.post("/api/predictions/suggest")
async def api_predictions_suggest(payload: dict):
    """Produce proactive suggestions for the given context string."""
    context = payload.get("context", "")
    if not context:
        raise HTTPException(status_code=400, detail="context is required")
    predictions = _predictive_engine.predict(context)
    return {"suggestions": [p.to_dict() for p in predictions]}
+
+
@app.get("/api/predictions/rules")
async def api_predictions_rules():
    """List every rule the predictive engine evaluates."""
    rules = _predictive_engine.list_rules()
    return {"rules": rules}
+
+
+# ============================================================================
+# Security Scanner Endpoints (Phase 3)
+# ============================================================================
+
@app.post("/api/security/scan-file")
async def api_security_scan_file(payload: dict):
    """Run the security scanner over a single file path."""
    file_path = payload.get("file_path", "")
    if not file_path:
        raise HTTPException(status_code=400, detail="file_path is required")
    serialized = [f.to_dict() for f in _security_scanner.scan_file(file_path)]
    return {"findings": serialized, "count": len(serialized)}
+
+
@app.post("/api/security/scan-directory")
async def api_security_scan_directory(payload: dict):
    """Recursively scan a directory tree for security issues."""
    directory = payload.get("directory", "")
    if not directory:
        raise HTTPException(status_code=400, detail="directory is required")
    scan = _security_scanner.scan_directory(directory)
    return scan.to_dict()
+
+
@app.post("/api/security/scan-diff")
async def api_security_scan_diff(payload: dict):
    """Scan the added lines of a git diff for security issues."""
    diff_text = payload.get("diff", "")
    if not diff_text:
        raise HTTPException(status_code=400, detail="diff is required")
    serialized = [f.to_dict() for f in _security_scanner.scan_diff(diff_text)]
    return {"findings": serialized, "count": len(serialized)}
+
+
+# ============================================================================
+# Natural Language Database Endpoints (Phase 3)
+# ============================================================================
+
@app.post("/api/nl-database/translate")
async def api_nl_translate(payload: dict):
    """Translate a natural-language question into SQL and validate it."""
    question = payload.get("question", "")
    if not question:
        raise HTTPException(status_code=400, detail="question is required")
    dialect = payload.get("dialect", "postgresql")
    engine = NLQueryEngine(dialect=QueryDialect(dialect))
    # Register any caller-provided table schemas before translating.
    for table in payload.get("tables", []):
        engine.add_table(TableSchema(
            name=table["name"],
            columns=table.get("columns", []),
            primary_key=table.get("primary_key"),
        ))
    sql = engine.translate(question)
    error = engine.validate_query(sql)
    return {"question": question, "sql": sql, "valid": error is None, "error": error}
+
+
@app.post("/api/nl-database/explain")
async def api_nl_explain(payload: dict):
    """Describe in plain English what a SQL statement does."""
    sql = payload.get("sql", "")
    if not sql:
        raise HTTPException(status_code=400, detail="sql is required")
    explanation = _nl_engine.explain(sql)
    return {"sql": sql, "explanation": explanation}
+
+
+# ============================================================================
+# Branch Listing Endpoint (Claude-Code-on-Web Parity)
+# ============================================================================
+
class BranchInfo(BaseModel):
    """A single branch entry returned by the branch-listing endpoint."""
    # Branch name as reported by the GitHub API.
    name: str
    # True when this is the repository's default branch.
    is_default: bool = False
    # Mirrors GitHub's branch "protected" flag.
    protected: bool = False
    # SHA of the branch's head commit, when available.
    commit_sha: Optional[str] = None
+
+
class BranchListResponse(BaseModel):
    """Response body for GET /api/repos/{owner}/{repo}/branches."""
    # "owner/repo" identifier of the queried repository.
    repository: str
    # Name of the repository's default branch.
    default_branch: str
    # Page number the caller requested (echoed back).
    page: int
    # Page size the caller requested (echoed back).
    per_page: int
    # Whether more branches exist beyond what was returned.
    has_more: bool
    # Branches after filtering/sorting.
    branches: List[BranchInfo]
+
+
@app.get("/api/repos/{owner}/{repo}/branches", response_model=BranchListResponse)
async def api_list_branches(
    owner: str = FPath(...),
    repo: str = FPath(...),
    page: int = Query(1, ge=1),
    per_page: int = Query(100, ge=1, le=100),
    query: Optional[str] = Query(None, description="Substring filter"),
    authorization: Optional[str] = Header(None),
):
    """List branches for a repository with optional substring filtering.

    Starting at `page`, GitHub branch pages are fetched and concatenated,
    capped at 10 pages per call as a safety limit. The default branch is
    sorted first, the rest case-insensitively alphabetical.

    Raises:
        HTTPException: 401 when no token is available; otherwise the
            upstream GitHub status code on lookup/listing failures.
    """
    import httpx as _httpx

    token = get_github_token(authorization)
    if not token:
        raise HTTPException(status_code=401, detail="GitHub token required")

    headers = {
        "Authorization": f"Bearer {token}",
        "Accept": "application/vnd.github+json",
        "User-Agent": "gitpilot",
    }
    timeout = _httpx.Timeout(connect=10.0, read=30.0, write=30.0, pool=10.0)

    max_pages = 10  # safety cap: at most 10 * per_page branches per call

    async with _httpx.AsyncClient(
        base_url="https://api.github.com", headers=headers, timeout=timeout
    ) as client:
        # Repo lookup supplies the default branch name.
        repo_resp = await client.get(f"/repos/{owner}/{repo}")
        if repo_resp.status_code >= 400:
            logging.warning(
                "branches: repo lookup failed %s/%s → %s %s",
                owner, repo, repo_resp.status_code, repo_resp.text[:200],
            )
            raise HTTPException(
                status_code=repo_resp.status_code,
                detail=f"Cannot access repository: {repo_resp.status_code}",
            )

        default_branch_name = repo_resp.json().get("default_branch", "main")

        # Fetch branch pages until GitHub stops advertising a "next" page
        # (GitHub caps page size at 100).
        all_raw = []
        current_page = page
        truncated = False  # set when the page cap stops us with pages left
        while True:
            branch_resp = await client.get(
                f"/repos/{owner}/{repo}/branches",
                params={"page": current_page, "per_page": per_page},
            )
            if branch_resp.status_code >= 400:
                logging.warning(
                    "branches: list failed %s/%s page=%s → %s %s",
                    owner, repo, current_page, branch_resp.status_code, branch_resp.text[:200],
                )
                raise HTTPException(
                    status_code=branch_resp.status_code,
                    detail=f"Failed to list branches: {branch_resp.status_code}",
                )

            # Bug fix: parse the body once (it was parsed twice before).
            raw = branch_resp.json()
            page_data = raw if isinstance(raw, list) else []
            all_raw.extend(page_data)

            # GitHub signals further pages via the Link header.
            link_header = branch_resp.headers.get("Link", "") or ""
            if 'rel="next"' not in link_header or len(page_data) < per_page:
                break
            current_page += 1
            if current_page - page >= max_pages:
                # Bug fix: record that results were cut short so has_more is
                # reported honestly instead of a hard-coded False.
                truncated = True
                break

    needle = (query or "").strip().lower()

    branches = []
    for b in all_raw:
        name = (b.get("name") or "").strip()
        if not name:
            continue
        if needle and needle not in name.lower():
            continue
        branches.append(BranchInfo(
            name=name,
            is_default=(name == default_branch_name),
            protected=bool(b.get("protected", False)),
            commit_sha=(b.get("commit") or {}).get("sha"),
        ))

    # Default branch first, then case-insensitive alphabetical.
    branches.sort(key=lambda x: (0 if x.is_default else 1, x.name.lower()))

    return BranchListResponse(
        repository=f"{owner}/{repo}",
        default_branch=default_branch_name,
        page=page,
        per_page=per_page,
        has_more=truncated,
        branches=branches,
    )
+
+
+# ============================================================================
+# Environment Configuration Endpoints (Claude-Code-on-Web Parity)
+# ============================================================================
+
import json as _json
# On-disk store for EnvironmentConfig records: one <id>.json file each.
# NOTE(review): this ignores the GITPILOT_CONFIG_DIR env var — confirm whether
# environment configs should live under that directory instead of $HOME.
_ENV_ROOT = Path.home() / ".gitpilot" / "environments"
+
+
class EnvironmentConfig(BaseModel):
    """A named execution-environment profile persisted as JSON under _ENV_ROOT."""

    # Filled with a random 12-char hex id on create when the client omits it.
    id: Optional[str] = None
    name: str = "Default"
    network_access: str = Field("limited", description="limited | full | none")
    # Arbitrary key/value pairs; consumers of this config are not visible here.
    env_vars: dict = Field(default_factory=dict)
+
+
class EnvironmentListResponse(BaseModel):
    """Response envelope for GET /api/environments."""

    environments: List[EnvironmentConfig]
+
+
@app.get("/api/environments", response_model=EnvironmentListResponse)
async def api_list_environments():
    """List all environment configurations stored on disk.

    Unreadable or malformed config files are skipped silently; when nothing
    is stored yet a synthetic "Default" environment is returned.
    """
    _ENV_ROOT.mkdir(parents=True, exist_ok=True)

    configs = []
    for config_file in sorted(_ENV_ROOT.glob("*.json")):
        try:
            parsed = _json.loads(config_file.read_text())
            configs.append(EnvironmentConfig(**parsed))
        except Exception:
            # Best-effort listing: a single corrupt file must not break the endpoint.
            continue

    if not configs:
        configs.append(EnvironmentConfig(id="default", name="Default", network_access="limited"))
    return EnvironmentListResponse(environments=configs)
+
+
@app.post("/api/environments")
async def api_create_environment(config: EnvironmentConfig):
    """Create a new environment configuration.

    Persists the config as ``<id>.json`` under ``_ENV_ROOT``. A random
    12-char hex id is generated when the client omits one.

    Raises:
        HTTPException: 400 when a client-supplied id contains characters
            outside [A-Za-z0-9_-].
    """
    import uuid
    _ENV_ROOT.mkdir(parents=True, exist_ok=True)
    config.id = config.id or uuid.uuid4().hex[:12]
    # SECURITY FIX: the id becomes a filename — reject "/", "..", etc. to
    # prevent a crafted id from writing outside _ENV_ROOT.
    if not all(c.isalnum() or c in "-_" for c in config.id):
        raise HTTPException(status_code=400, detail="Invalid environment id")
    path = _ENV_ROOT / f"{config.id}.json"
    path.write_text(_json.dumps(config.model_dump(), indent=2))
    return config.model_dump()
+
+
@app.put("/api/environments/{env_id}")
async def api_update_environment(env_id: str, config: EnvironmentConfig):
    """Update (upsert) an environment configuration by id.

    The stored config's id is forced to the path's ``env_id``.

    Raises:
        HTTPException: 400 when env_id contains characters outside [A-Za-z0-9_-].
    """
    # SECURITY FIX: env_id becomes a filename — reject path-traversal characters.
    if not env_id or not all(c.isalnum() or c in "-_" for c in env_id):
        raise HTTPException(status_code=400, detail="Invalid environment id")
    _ENV_ROOT.mkdir(parents=True, exist_ok=True)
    path = _ENV_ROOT / f"{env_id}.json"
    config.id = env_id
    path.write_text(_json.dumps(config.model_dump(), indent=2))
    return config.model_dump()
+
+
@app.delete("/api/environments/{env_id}")
async def api_delete_environment(env_id: str):
    """Delete an environment configuration by id.

    Raises:
        HTTPException: 400 on a malformed id, 404 when no such config exists.
    """
    # SECURITY FIX: env_id becomes a filename — reject path-traversal characters
    # so a crafted id cannot unlink files outside _ENV_ROOT.
    if not env_id or not all(c.isalnum() or c in "-_" for c in env_id):
        raise HTTPException(status_code=400, detail="Invalid environment id")
    path = _ENV_ROOT / f"{env_id}.json"
    if path.exists():
        path.unlink()
        return {"deleted": True}
    raise HTTPException(status_code=404, detail="Environment not found")
+
+
+# ============================================================================
+# Session Messages + Diff Endpoints (Claude-Code-on-Web Parity)
+# ============================================================================
+
@app.post("/api/sessions/{session_id}/message")
async def api_add_session_message(session_id: str, payload: dict):
    """Add a message to a session's conversation history.

    Expected payload keys: ``role`` (default "user"), ``content`` (default ""),
    and an optional ``metadata`` object expanded into keyword arguments.

    Raises:
        HTTPException: 404 when the session does not exist, 400 when
            ``metadata`` is present but not an object.
    """
    try:
        session = _session_mgr.load(session_id)
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail="Session not found")
    role = payload.get("role", "user")
    content = payload.get("content", "")
    # BUG FIX: a non-dict (or explicit null) "metadata" previously crashed the
    # **-expansion with an unhandled 500; reject it with a clear 400 instead.
    metadata = payload.get("metadata") or {}
    if not isinstance(metadata, dict):
        raise HTTPException(status_code=400, detail="metadata must be an object")
    session.add_message(role, content, **metadata)
    _session_mgr.save(session)
    return {"message_count": len(session.messages)}
+
+
@app.get("/api/sessions/{session_id}/messages")
async def api_get_session_messages(session_id: str):
    """Return the full conversation history for a session."""
    try:
        session = _session_mgr.load(session_id)
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail="Session not found")

    history = []
    for msg in session.messages:
        history.append({
            "role": msg.role,
            "content": msg.content,
            "timestamp": msg.timestamp,
            "metadata": msg.metadata,
        })
    return {"session_id": session.id, "messages": history}
+
+
@app.get("/api/sessions/{session_id}/diff")
async def api_get_session_diff(session_id: str):
    """Get diff stats for a session (placeholder for sandbox integration)."""
    try:
        session = _session_mgr.load(session_id)
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail="Session not found")

    # Zeroed stats are returned until something records a real diff in metadata.
    empty_diff = {
        "files_changed": 0,
        "additions": 0,
        "deletions": 0,
        "files": [],
    }
    return {"session_id": session.id, "diff": session.metadata.get("diff", empty_diff)}
+
+
@app.post("/api/sessions/{session_id}/status")
async def api_update_session_status(session_id: str, payload: dict):
    """Update session status (active, paused, completed, failed, waiting)."""
    try:
        session = _session_mgr.load(session_id)
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail="Session not found")

    requested = payload.get("status", "active")
    allowed = ("active", "paused", "completed", "failed", "waiting")
    if requested not in allowed:
        raise HTTPException(status_code=400, detail="Invalid status")

    session.status = requested
    _session_mgr.save(session)
    return {"session_id": session.id, "status": session.status}
+
+
+# ============================================================================
+# WebSocket Streaming Endpoint (Claude-Code-on-Web Parity)
+# ============================================================================
+
+from fastapi import WebSocket, WebSocketDisconnect
+
+
@app.websocket("/ws/sessions/{session_id}")
async def session_websocket(websocket: WebSocket, session_id: str):
    """
    Real-time bidirectional communication for a coding session.

    Server events:
        { type: "agent_message", content: "..." }
        { type: "tool_use", tool: "bash", input: "npm test" }
        { type: "tool_result", tool: "bash", output: "All tests passed" }
        { type: "diff_update", stats: { additions: N, deletions: N, files: N } }
        { type: "status_change", status: "completed" }
        { type: "error", message: "..." }

    Client events:
        { type: "user_message", content: "..." }
        { type: "cancel" }
    """
    # NOTE(review): no authentication happens on connect — any client that
    # knows a session id can attach. Confirm access control is enforced upstream.
    await websocket.accept()

    # Verify session exists
    try:
        session = _session_mgr.load(session_id)
    except FileNotFoundError:
        await websocket.send_json({"type": "error", "message": "Session not found"})
        await websocket.close()
        return

    # Send session history on connect
    await websocket.send_json({
        "type": "session_restored",
        "session_id": session.id,
        "status": session.status,
        "message_count": len(session.messages),
    })

    try:
        # Event loop: one receive_json per client event until disconnect.
        while True:
            data = await websocket.receive_json()
            event_type = data.get("type", "")

            if event_type == "user_message":
                # Persist the user's message before invoking the agent so it
                # survives even if dispatch below fails.
                content = data.get("content", "")
                session.add_message("user", content)
                _session_mgr.save(session)

                # Acknowledge receipt
                await websocket.send_json({
                    "type": "message_received",
                    "message_index": len(session.messages) - 1,
                })

                # Stream agent response (integration point for agentic.py)
                await websocket.send_json({
                    "type": "status_change",
                    "status": "active",
                })

                # Agent processing hook — when the agent orchestrator is wired,
                # replace this with actual streaming from agentic.py
                try:
                    repo_full = session.repo_full_name or ""
                    parts = repo_full.split("/", 1)
                    if len(parts) == 2 and content.strip():
                        # Use canonical dispatcher signature
                        result = await dispatch_request(
                            user_request=content,
                            repo_full_name=f"{parts[0]}/{parts[1]}",
                            branch_name=session.branch,
                        )
                        # Pull the best available text field out of the result;
                        # falls back to str(result) when no known key is set.
                        answer = ""
                        if isinstance(result, dict):
                            answer = (
                                result.get("result")
                                or result.get("answer")
                                or result.get("message")
                                or result.get("summary")
                                or (result.get("plan", {}) or {}).get("summary")
                                or str(result)
                            )
                        else:
                            answer = str(result)

                        # Stream the response
                        await websocket.send_json({
                            "type": "agent_message",
                            "content": answer,
                        })

                        session.add_message("assistant", answer)
                        _session_mgr.save(session)
                    else:
                        await websocket.send_json({
                            "type": "agent_message",
                            "content": "Session is not connected to a repository.",
                        })
                except Exception as agent_err:
                    # Agent failures are reported in-band; the socket stays open.
                    logger.error(f"Agent error in WS session {session_id}: {agent_err}")
                    await websocket.send_json({
                        "type": "error",
                        "message": str(agent_err),
                    })

                await websocket.send_json({
                    "type": "status_change",
                    "status": "waiting",
                })

            elif event_type == "cancel":
                # No in-flight work is actually interrupted here — the handler
                # processes one event at a time — so cancel just flips status.
                await websocket.send_json({
                    "type": "status_change",
                    "status": "waiting",
                })

            elif event_type == "ping":
                await websocket.send_json({"type": "pong"})

    except WebSocketDisconnect:
        logger.info(f"WebSocket disconnected for session {session_id}")
    except Exception as e:
        logger.error(f"WebSocket error for session {session_id}: {e}")
        # Best-effort error report; the socket may already be gone.
        try:
            await websocket.send_json({"type": "error", "message": str(e)})
        except Exception:
            pass
+
+
+# ============================================================================
+# Static Files & Frontend Serving (SPA Support)
+# ============================================================================
+
# Frontend build output is expected in ./web next to this module
# (presumably copied there from the Vite build — confirm against the Dockerfile).
STATIC_DIR = Path(__file__).resolve().parent / "web"
ASSETS_DIR = STATIC_DIR / "assets"

# Mount hashed bundle assets only when a build actually exists,
# so a backend-only deployment still starts cleanly.
if ASSETS_DIR.exists():
    app.mount("/assets", StaticFiles(directory=ASSETS_DIR), name="assets")

# The whole build directory is also exposed under /static.
if STATIC_DIR.exists():
    app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
+
+
@app.get("/api/health")
async def health_check():
    """Health check endpoint for monitoring and diagnostics."""
    payload = {"status": "healthy", "service": "gitpilot-backend"}
    return payload
+
+
@app.get("/healthz")
async def healthz():
    """Health check endpoint (Render/Kubernetes standard)."""
    payload = {"status": "healthy", "service": "gitpilot-backend"}
    return payload
+
+
@app.get("/", include_in_schema=False)
async def index():
    """Serve the React App entry point."""
    entry = STATIC_DIR / "index.html"
    if not entry.exists():
        # Build output missing — surface a clear JSON error instead of a 404.
        return JSONResponse(
            {"message": "GitPilot UI not built. The static files directory is missing."},
            status_code=500,
        )
    return FileResponse(entry)
+
+
@app.get("/{full_path:path}", include_in_schema=False)
async def catch_all_spa_routes(full_path: str):
    """
    Catch-all route to serve index.html for frontend routing.
    Excludes '/api' paths to ensure genuine API 404s are returned as JSON.
    """
    if full_path.startswith("api/"):
        return JSONResponse({"detail": "Not Found"}, status_code=404)

    entry = STATIC_DIR / "index.html"
    if not entry.exists():
        return JSONResponse(
            {"message": "GitPilot UI not built. The static files directory is missing."},
            status_code=500,
        )

    # Unknown path + existing build → let the SPA router handle it client-side.
    return FileResponse(entry)
diff --git a/gitpilot/a2a_adapter.py b/gitpilot/a2a_adapter.py
new file mode 100644
index 0000000000000000000000000000000000000000..46ca4659db601f09477b71755cca8309c4f678b0
--- /dev/null
+++ b/gitpilot/a2a_adapter.py
@@ -0,0 +1,560 @@
+"""Optional A2A adapter for GitPilot (MCP ContextForge compatible).
+
+This module is feature-flagged. Nothing changes in GitPilot unless the main app
+mounts this router when GITPILOT_ENABLE_A2A=true.
+
+Supported protocols
+- JSON-RPC 2.0 (preferred)
+- ContextForge custom A2A envelope (fallback)
+
Security model (recommended)
- Gateway injects a shared secret:
  X-A2A-Secret: <shared-secret>
  or
  Authorization: Bearer <shared-secret>

- GitHub token (if needed) should be provided via:
  X-Github-Token: <github-token>
  (avoid passing tokens in JSON bodies to reduce leak risk in logs)

Environment
- GITPILOT_A2A_REQUIRE_AUTH=true
- GITPILOT_A2A_SHARED_SECRET=<shared-secret>
- GITPILOT_A2A_MAX_BODY_MB=2
- GITPILOT_A2A_ALLOW_GITHUB_TOKEN_IN_PARAMS=false
+"""
+
+from __future__ import annotations
+
+import os
+import time
+import uuid
+from typing import Any, Dict, Optional, Tuple
+
+from fastapi import APIRouter, Header, HTTPException, Request
+from fastapi.responses import JSONResponse
+
+from .agentic import PlanResult, execute_plan, generate_plan, dispatch_request
+from .github_api import get_file, get_repo_tree, github_request, put_file
+from . import github_issues
+from . import github_pulls
+from . import github_search
+
+router = APIRouter(tags=["a2a"])
+
+
+def _env_bool(name: str, default: bool) -> bool:
+ raw = os.getenv(name)
+ if raw is None:
+ return default
+ return raw.strip().lower() in {"1", "true", "yes", "y", "on"}
+
+
+def _env_int(name: str, default: int) -> int:
+ raw = os.getenv(name)
+ if raw is None:
+ return default
+ try:
+ return int(raw.strip())
+ except Exception:
+ return default
+
+
+def _extract_bearer(value: Optional[str]) -> Optional[str]:
+ if not value:
+ return None
+ if value.startswith("Bearer "):
+ return value[7:]
+ if value.startswith("token "):
+ return value[6:]
+ return value
+
+
+def _get_trace_id(x_request_id: Optional[str]) -> str:
+ return (x_request_id or "").strip() or str(uuid.uuid4())
+
+
def _require_gateway_secret(authorization: Optional[str], x_a2a_secret: Optional[str]) -> None:
    """Authenticate the calling gateway against the shared secret.

    The secret may arrive via Authorization (Bearer/token prefix stripped)
    or the X-A2A-Secret header. A no-op when GITPILOT_A2A_REQUIRE_AUTH is
    disabled.

    Raises:
        HTTPException: 500 when auth is required but no secret is configured,
            401 when the supplied credential does not match.
    """
    import hmac  # local import: only needed for the constant-time comparison

    require_auth = _env_bool("GITPILOT_A2A_REQUIRE_AUTH", True)
    if not require_auth:
        return

    expected = os.getenv("GITPILOT_A2A_SHARED_SECRET", "").strip()
    if not expected:
        raise HTTPException(
            status_code=500,
            detail="A2A is enabled but GITPILOT_A2A_SHARED_SECRET is not set",
        )

    candidate = _extract_bearer(authorization) or (x_a2a_secret or "").strip()
    # SECURITY FIX: use a constant-time comparison instead of `==` so the
    # secret cannot be recovered via a timing side channel.
    if not candidate or not hmac.compare_digest(
        candidate.encode("utf-8"), expected.encode("utf-8")
    ):
        raise HTTPException(status_code=401, detail="Unauthorized")
+
+
+def _split_full_name(repo_full_name: str) -> Tuple[str, str]:
+ if not repo_full_name or "/" not in repo_full_name:
+ raise HTTPException(status_code=400, detail="repo_full_name must be 'owner/repo'")
+ owner, repo = repo_full_name.split("/", 1)
+ owner, repo = owner.strip(), repo.strip()
+ if not owner or not repo:
+ raise HTTPException(status_code=400, detail="repo_full_name must be 'owner/repo'")
+ return owner, repo
+
+
+def _jsonrpc_error(id_value: Any, code: int, message: str, data: Any = None) -> Dict[str, Any]:
+ err: Dict[str, Any] = {"code": code, "message": message}
+ if data is not None:
+ err["data"] = data
+ return {"jsonrpc": "2.0", "error": err, "id": id_value}
+
+
+def _jsonrpc_result(id_value: Any, result: Any) -> Dict[str, Any]:
+ return {"jsonrpc": "2.0", "result": result, "id": id_value}
+
+
async def _dispatch(method: str, params: Dict[str, Any], github_token: Optional[str]) -> Any:
    """Route a single A2A method call to the matching GitPilot operation.

    Args:
        method: Dotted method name, e.g. "repo.read" or "issue.create".
        params: JSON-RPC params object; required keys vary per method.
        github_token: Optional GitHub token forwarded to every GitHub call.

    Returns:
        A JSON-serializable result whose shape depends on the method
        (see /a2a/manifest for best-effort schemas).

    Raises:
        HTTPException: 400 for missing/malformed params, 404 for an unknown
            method; errors from the GitHub helpers propagate unchanged.
    """
    if method == "repo.connect":
        repo_full_name = params.get("repo_full_name")
        owner, repo = _split_full_name(str(repo_full_name))
        info = await github_request(f"/repos/{owner}/{repo}", token=github_token)
        # Return only a stable subset of the GitHub repo payload.
        return {
            "repo": {
                "id": info.get("id"),
                "full_name": info.get("full_name"),
                "private": info.get("private"),
                "html_url": info.get("html_url"),
            },
            "default_branch": info.get("default_branch"),
            "permissions": info.get("permissions"),
        }

    if method == "repo.tree":
        repo_full_name = params.get("repo_full_name")
        ref = (params.get("ref") or "").strip() or "HEAD"
        owner, repo = _split_full_name(str(repo_full_name))
        tree = await get_repo_tree(owner, repo, token=github_token, ref=ref)
        return {"entries": tree, "ref": ref}

    if method == "repo.read":
        repo_full_name = params.get("repo_full_name")
        path = params.get("path")
        if not path:
            raise HTTPException(status_code=400, detail="Missing required param: path")
        owner, repo = _split_full_name(str(repo_full_name))
        # NOTE: current get_file() reads from default branch/ref in this repo.
        # You can extend github_api.get_file to accept ref and pass it here later.
        content = await get_file(owner, repo, str(path), token=github_token)
        return {"path": str(path), "content": content, "encoding": "utf-8"}

    if method == "repo.write":
        repo_full_name = params.get("repo_full_name")
        path = params.get("path")
        content = params.get("content")
        message = params.get("message") or "Update via GitPilot A2A"
        # Both "branch" and "branch_name" spellings are accepted throughout.
        branch = params.get("branch") or params.get("branch_name")
        if not path:
            raise HTTPException(status_code=400, detail="Missing required param: path")
        if content is None:
            raise HTTPException(status_code=400, detail="Missing required param: content")
        owner, repo = _split_full_name(str(repo_full_name))
        result = await put_file(
            owner,
            repo,
            str(path),
            str(content),
            str(message),
            token=github_token,
            branch=branch,
        )
        return result

    if method == "plan.generate":
        repo_full_name = params.get("repo_full_name")
        goal = params.get("goal")
        branch_name = params.get("branch") or params.get("branch_name")
        if not goal:
            raise HTTPException(status_code=400, detail="Missing required param: goal")
        if not repo_full_name:
            raise HTTPException(status_code=400, detail="Missing required param: repo_full_name")
        plan = await generate_plan(str(goal), str(repo_full_name), token=github_token, branch_name=branch_name)
        # PlanResult serialization: pydantic v2 exposes model_dump().
        return plan.model_dump() if hasattr(plan, "model_dump") else plan

    if method == "plan.execute":
        repo_full_name = params.get("repo_full_name")
        branch_name = params.get("branch") or params.get("branch_name")
        plan_raw = params.get("plan")
        if not repo_full_name:
            raise HTTPException(status_code=400, detail="Missing required param: repo_full_name")
        if plan_raw is None:
            raise HTTPException(status_code=400, detail="Missing required param: plan")
        if isinstance(plan_raw, PlanResult):
            plan_obj = plan_raw
        else:
            try:
                plan_obj = PlanResult.model_validate(plan_raw)  # pydantic v2
            except Exception:
                plan_obj = PlanResult.parse_obj(plan_raw)  # pydantic v1
        result = await execute_plan(plan_obj, str(repo_full_name), token=github_token, branch_name=branch_name)
        return result

    if method == "repo.search":
        query = params.get("query")
        if not query:
            raise HTTPException(status_code=400, detail="Missing required param: query")
        result = await github_request(
            "/search/repositories",
            params={"q": str(query), "per_page": 20},
            token=github_token,
        )
        items = (result or {}).get("items", []) if isinstance(result, dict) else []
        return {
            "repos": [
                {
                    "full_name": i.get("full_name"),
                    "private": i.get("private"),
                    "html_url": i.get("html_url"),
                    "description": i.get("description"),
                    "default_branch": i.get("default_branch"),
                }
                for i in items
            ]
        }

    # --- v2 methods: issues, pulls, search, chat --------------------------

    if method == "issue.list":
        repo_full_name = params.get("repo_full_name")
        owner, repo = _split_full_name(str(repo_full_name))
        issues = await github_issues.list_issues(
            owner, repo, state=params.get("state", "open"),
            labels=params.get("labels"), per_page=params.get("per_page", 30),
            token=github_token,
        )
        return {"issues": issues}

    if method == "issue.get":
        repo_full_name = params.get("repo_full_name")
        issue_number = params.get("issue_number")
        if not issue_number:
            raise HTTPException(status_code=400, detail="Missing required param: issue_number")
        owner, repo = _split_full_name(str(repo_full_name))
        return await github_issues.get_issue(owner, repo, int(issue_number), token=github_token)

    if method == "issue.create":
        repo_full_name = params.get("repo_full_name")
        title = params.get("title")
        if not title:
            raise HTTPException(status_code=400, detail="Missing required param: title")
        owner, repo = _split_full_name(str(repo_full_name))
        return await github_issues.create_issue(
            owner, repo, str(title),
            body=params.get("body"), labels=params.get("labels"),
            assignees=params.get("assignees"), token=github_token,
        )

    if method == "issue.update":
        repo_full_name = params.get("repo_full_name")
        issue_number = params.get("issue_number")
        if not issue_number:
            raise HTTPException(status_code=400, detail="Missing required param: issue_number")
        owner, repo = _split_full_name(str(repo_full_name))
        return await github_issues.update_issue(
            owner, repo, int(issue_number),
            title=params.get("title"), body=params.get("body"),
            state=params.get("state"), labels=params.get("labels"),
            assignees=params.get("assignees"), token=github_token,
        )

    if method == "issue.comment":
        repo_full_name = params.get("repo_full_name")
        issue_number = params.get("issue_number")
        body = params.get("body")
        if not issue_number:
            raise HTTPException(status_code=400, detail="Missing required param: issue_number")
        if not body:
            raise HTTPException(status_code=400, detail="Missing required param: body")
        owner, repo = _split_full_name(str(repo_full_name))
        return await github_issues.add_issue_comment(
            owner, repo, int(issue_number), str(body), token=github_token,
        )

    if method == "pr.list":
        repo_full_name = params.get("repo_full_name")
        owner, repo = _split_full_name(str(repo_full_name))
        return await github_pulls.list_pull_requests(
            owner, repo, state=params.get("state", "open"),
            per_page=params.get("per_page", 30), token=github_token,
        )

    if method == "pr.create":
        repo_full_name = params.get("repo_full_name")
        title = params.get("title")
        head = params.get("head")
        base = params.get("base")
        if not title or not head or not base:
            raise HTTPException(status_code=400, detail="Missing required params: title, head, base")
        owner, repo = _split_full_name(str(repo_full_name))
        return await github_pulls.create_pull_request(
            owner, repo, title=str(title), head=str(head), base=str(base),
            body=params.get("body"), token=github_token,
        )

    if method == "pr.merge":
        repo_full_name = params.get("repo_full_name")
        pull_number = params.get("pull_number")
        if not pull_number:
            raise HTTPException(status_code=400, detail="Missing required param: pull_number")
        owner, repo = _split_full_name(str(repo_full_name))
        return await github_pulls.merge_pull_request(
            owner, repo, int(pull_number),
            merge_method=params.get("merge_method", "merge"),
            token=github_token,
        )

    if method == "search.code":
        query = params.get("query")
        if not query:
            raise HTTPException(status_code=400, detail="Missing required param: query")
        return await github_search.search_code(
            str(query), owner=params.get("owner"), repo=params.get("repo"),
            language=params.get("language"), per_page=params.get("per_page", 20),
            token=github_token,
        )

    if method == "search.issues":
        query = params.get("query")
        if not query:
            raise HTTPException(status_code=400, detail="Missing required param: query")
        return await github_search.search_issues(
            str(query), owner=params.get("owner"), repo=params.get("repo"),
            state=params.get("state"), label=params.get("label"),
            per_page=params.get("per_page", 20), token=github_token,
        )

    if method == "search.users":
        query = params.get("query")
        if not query:
            raise HTTPException(status_code=400, detail="Missing required param: query")
        return await github_search.search_users(
            str(query), type_filter=params.get("type"),
            location=params.get("location"), language=params.get("language"),
            per_page=params.get("per_page", 20), token=github_token,
        )

    if method == "chat.message":
        repo_full_name = params.get("repo_full_name")
        message = params.get("message")
        if not message:
            raise HTTPException(status_code=400, detail="Missing required param: message")
        if not repo_full_name:
            raise HTTPException(status_code=400, detail="Missing required param: repo_full_name")
        return await dispatch_request(
            str(message), str(repo_full_name),
            token=github_token,
            branch_name=params.get("branch") or params.get("branch_name"),
        )

    raise HTTPException(status_code=404, detail=f"Unknown method: {method}")
+
+
@router.get("/a2a/health")
async def a2a_health() -> Dict[str, Any]:
    """Liveness probe for the A2A adapter."""
    now = int(time.time())
    return {"status": "ok", "ts": now}
+
+
@router.get("/a2a/manifest")
async def a2a_manifest() -> Dict[str, Any]:
    """Advertise supported methods, protocols, and auth for gateway discovery.

    The returned schemas are informal strings ("string?", "object") rather
    than JSON Schema — consumers should treat them as hints only.
    """
    # Best-effort schemas (kept intentionally simple and stable)
    return {
        "name": "gitpilot",
        "a2a_version": "1.0",
        "protocols": ["jsonrpc-2.0", "a2a-envelope-1.0"],
        "auth": {"type": "shared_secret", "header": "X-A2A-Secret"},
        "rate_limits": {"hint": "apply gateway rate limiting; server enforces body size"},
        "methods": {
            "repo.connect": {
                "params": {"repo_full_name": "string"},
                "result": {"repo": "object", "default_branch": "string", "permissions": "object?"},
            },
            "repo.tree": {
                "params": {"repo_full_name": "string", "ref": "string?"},
                "result": {"entries": "array", "ref": "string"},
            },
            "repo.read": {
                "params": {"repo_full_name": "string", "path": "string"},
                "result": {"path": "string", "content": "string"},
            },
            "repo.write": {
                "params": {
                    "repo_full_name": "string",
                    "path": "string",
                    "content": "string",
                    "message": "string?",
                    "branch": "string?",
                },
                "result": "object",
            },
            "plan.generate": {
                "params": {"repo_full_name": "string", "goal": "string", "branch": "string?"},
                "result": "PlanResult",
            },
            "plan.execute": {
                "params": {"repo_full_name": "string", "plan": "PlanResult", "branch": "string?"},
                "result": "object",
            },
            "repo.search": {
                "params": {"query": "string"},
                "result": {"repos": "array"},
            },
            # v2 methods
            "issue.list": {
                "params": {"repo_full_name": "string", "state": "string?", "labels": "string?"},
                "result": {"issues": "array"},
            },
            "issue.get": {
                "params": {"repo_full_name": "string", "issue_number": "integer"},
                "result": "object",
            },
            "issue.create": {
                "params": {"repo_full_name": "string", "title": "string", "body": "string?", "labels": "array?", "assignees": "array?"},
                "result": "object",
            },
            "issue.update": {
                "params": {"repo_full_name": "string", "issue_number": "integer", "title": "string?", "body": "string?", "state": "string?"},
                "result": "object",
            },
            "issue.comment": {
                "params": {"repo_full_name": "string", "issue_number": "integer", "body": "string"},
                "result": "object",
            },
            "pr.list": {
                "params": {"repo_full_name": "string", "state": "string?"},
                "result": "array",
            },
            "pr.create": {
                "params": {"repo_full_name": "string", "title": "string", "head": "string", "base": "string", "body": "string?"},
                "result": "object",
            },
            "pr.merge": {
                "params": {"repo_full_name": "string", "pull_number": "integer", "merge_method": "string?"},
                "result": "object",
            },
            "search.code": {
                "params": {"query": "string", "owner": "string?", "repo": "string?", "language": "string?"},
                "result": {"total_count": "integer", "items": "array"},
            },
            "search.issues": {
                "params": {"query": "string", "owner": "string?", "repo": "string?", "state": "string?"},
                "result": {"total_count": "integer", "items": "array"},
            },
            "search.users": {
                "params": {"query": "string", "type": "string?", "location": "string?"},
                "result": {"total_count": "integer", "items": "array"},
            },
            "chat.message": {
                "params": {"repo_full_name": "string", "message": "string", "branch": "string?"},
                "result": "object",
            },
        },
    }
+
+
async def _handle_invoke(
    request: Request,
    authorization: Optional[str],
    x_a2a_secret: Optional[str],
    x_github_token: Optional[str],
    x_request_id: Optional[str],
) -> JSONResponse:
    """Shared handler behind /a2a/invoke and /a2a/v1/invoke.

    Accepts two payload shapes: JSON-RPC 2.0 (preferred) and the
    ContextForge envelope fallback ({"interaction_type": "query", ...}).
    JSON-RPC dispatch errors are reported in-band with HTTP 200; transport
    problems (auth, size, malformed body) raise HTTPException.

    Raises:
        HTTPException: 401/500 from gateway auth, 413 for oversized bodies,
            400 for malformed JSON or unrecognized payloads, 404 for an
            unsupported envelope interaction_type.
    """
    trace_id = _get_trace_id(x_request_id)
    _require_gateway_secret(authorization=authorization, x_a2a_secret=x_a2a_secret)

    # Body size guard (helps protect from abuse). A malformed Content-Length
    # is ignored rather than rejected.
    max_mb = _env_int("GITPILOT_A2A_MAX_BODY_MB", 2)
    cl = request.headers.get("content-length")
    if cl:
        try:
            declared = int(cl)
        except ValueError:
            declared = None
        if declared is not None and declared > max_mb * 1024 * 1024:
            raise HTTPException(status_code=413, detail="Request entity too large")

    # BUG FIX: a malformed JSON body previously escaped request.json() as an
    # unhandled 500; surface it as a client error instead.
    try:
        payload = await request.json()
    except Exception:
        raise HTTPException(status_code=400, detail=f"Invalid JSON body (trace_id={trace_id})")

    # GitHub token preference: dedicated header first, then Authorization.
    github_token = _extract_bearer(x_github_token) or None
    if not github_token:
        github_token = _extract_bearer(authorization)

    # JSON-RPC mode
    if isinstance(payload, dict) and payload.get("jsonrpc") == "2.0" and "method" in payload:
        rpc_id = payload.get("id")
        method = payload.get("method")
        params = payload.get("params") or {}
        if not isinstance(params, dict):
            return JSONResponse(_jsonrpc_error(rpc_id, -32602, "Invalid params"), status_code=400)

        # Opt-in escape hatch: tokens in params risk leaking into logs.
        allow_in_params = _env_bool("GITPILOT_A2A_ALLOW_GITHUB_TOKEN_IN_PARAMS", False)
        if allow_in_params and not github_token:
            github_token = _extract_bearer(params.get("github_token"))

        try:
            result = await _dispatch(str(method), params, github_token)
            return JSONResponse(_jsonrpc_result(rpc_id, result), headers={"X-Trace-Id": trace_id})
        except HTTPException as e:
            # JSON-RPC carries errors in-band; keep HTTP status 200.
            resp = _jsonrpc_error(rpc_id, e.status_code, str(e.detail), {"trace_id": trace_id})
            return JSONResponse(resp, status_code=200, headers={"X-Trace-Id": trace_id})
        except Exception as e:
            resp = _jsonrpc_error(rpc_id, -32000, "Server error", {"trace_id": trace_id, "error": str(e)})
            return JSONResponse(resp, status_code=200, headers={"X-Trace-Id": trace_id})

    # Custom envelope fallback
    if isinstance(payload, dict) and payload.get("interaction_type"):
        interaction_type = str(payload.get("interaction_type"))
        parameters = payload.get("parameters") or {}
        if not isinstance(parameters, dict):
            raise HTTPException(status_code=400, detail="Invalid parameters")

        if interaction_type == "query":
            repo_full_name = parameters.get("repo_full_name")
            goal = parameters.get("query") or parameters.get("goal")
            params = {
                "repo_full_name": repo_full_name,
                "goal": goal,
                "branch": parameters.get("branch") or parameters.get("branch_name"),
            }
            result = await _dispatch("plan.generate", params, github_token)
            return JSONResponse(
                {"response": result, "protocol_version": payload.get("protocol_version", "1.0")},
                headers={"X-Trace-Id": trace_id},
            )

        raise HTTPException(status_code=404, detail=f"Unsupported interaction_type: {interaction_type}")

    raise HTTPException(status_code=400, detail=f"Invalid A2A payload (trace_id={trace_id})")
+
+
@router.post("/a2a/invoke")
async def a2a_invoke(
    request: Request,
    authorization: Optional[str] = Header(None),
    x_a2a_secret: Optional[str] = Header(None, alias="X-A2A-Secret"),
    x_github_token: Optional[str] = Header(None, alias="X-Github-Token"),
    x_request_id: Optional[str] = Header(None, alias="X-Request-Id"),
) -> JSONResponse:
    """Primary A2A entry point; payload handling lives in _handle_invoke."""
    return await _handle_invoke(request, authorization, x_a2a_secret, x_github_token, x_request_id)
+
+
@router.post("/a2a/v1/invoke")
async def a2a_v1_invoke(
    request: Request,
    authorization: Optional[str] = Header(None),
    x_a2a_secret: Optional[str] = Header(None, alias="X-A2A-Secret"),
    x_github_token: Optional[str] = Header(None, alias="X-Github-Token"),
    x_request_id: Optional[str] = Header(None, alias="X-Request-Id"),
) -> JSONResponse:
    """Versioned alias of /a2a/invoke with identical behavior."""
    # Alias for versioned clients. Keep behavior identical to /a2a/invoke.
    return await _handle_invoke(request, authorization, x_a2a_secret, x_github_token, x_request_id)
diff --git a/gitpilot/agent_events.py b/gitpilot/agent_events.py
new file mode 100644
index 0000000000000000000000000000000000000000..65faa7652f10322747e7577eafc6a1730868ea8c
--- /dev/null
+++ b/gitpilot/agent_events.py
@@ -0,0 +1,297 @@
+# gitpilot/agent_events.py
+"""
+Unified agent event protocol.
+
+Every agent action emits events through an AgentEventBus. Consumers
+(WebSocket v2, SSE, VS Code postMessage) subscribe and forward to clients.
+
+Event types mirror Claude Code's streaming model:
+ - text_delta incremental response text
+ - tool_start agent is calling a tool
+ - tool_result tool returned a result
+ - file_write agent wrote/edited a file
+ - approval_needed agent needs user permission
+ - approval_resolved user responded to approval
+ - plan_step plan step status change
+ - terminal_output shell stdout/stderr line
+ - terminal_exit shell process exited
+ - test_result structured test pass/fail
+ - diagnostics lint/type errors
+ - status_change idle/planning/generating/etc.
+ - done agent finished
+ - error agent failed
+
+All platforms consume the same JSON event shape. The only difference
+is the transport: WebSocket, SSE, or VS Code postMessage.
+"""
+from __future__ import annotations
+
+import asyncio
+import enum
+import logging
+import time
+import uuid
+from dataclasses import dataclass, field
+from typing import Any, AsyncIterator, Dict
+
+logger = logging.getLogger(__name__)
+
+
+class EventType(str, enum.Enum):
+    """Wire-level event type tags; semantics are listed in the module docstring.
+
+    Subclasses ``str`` so ``EventType.X == "x"`` holds and the values
+    serialize to plain strings in JSON payloads.
+    """
+
+    TEXT_DELTA = "text_delta"
+    TOOL_START = "tool_start"
+    TOOL_RESULT = "tool_result"
+    FILE_WRITE = "file_write"
+    APPROVAL_NEEDED = "approval_needed"
+    APPROVAL_RESOLVED = "approval_resolved"
+    PLAN_STEP = "plan_step"
+    TERMINAL_OUTPUT = "terminal_output"
+    TERMINAL_EXIT = "terminal_exit"
+    TEST_RESULT = "test_result"
+    DIAGNOSTICS = "diagnostics"
+    STATUS_CHANGE = "status_change"
+    DONE = "done"
+    ERROR = "error"
+
+
+@dataclass
+class AgentEvent:
+    """Single event emitted by the agent during execution."""
+
+    type: EventType
+    # Event-specific payload; merged into the wire dict by to_dict().
+    data: Dict[str, Any] = field(default_factory=dict)
+    # Unix timestamp captured at construction time.
+    timestamp: float = field(default_factory=time.time)
+    # Short random id (12 hex chars) for client-side correlation.
+    id: str = field(default_factory=lambda: uuid.uuid4().hex[:12])
+
+    def to_dict(self) -> dict:
+        """Flatten the event into the wire JSON shape (type/id/ts + payload)."""
+        # NOTE(review): `**self.data` is spread last, so a payload key named
+        # "type", "id" or "ts" would clobber the envelope fields. None of the
+        # factories in this module emit such keys — confirm before adding new
+        # ones.
+        return {
+            "type": self.type.value,
+            "id": self.id,
+            "ts": self.timestamp,
+            **self.data,
+        }
+
+    def to_sse(self) -> str:
+        """Format as a Server-Sent Event line."""
+        # json is stdlib; the local import mirrors this module's top-level
+        # imports, which deliberately omit it.
+        import json
+
+        return f"data: {json.dumps(self.to_dict())}\n\n"
+
+
+# ---------------------------------------------------------------------------
+# Factory functions for clean event creation
+# ---------------------------------------------------------------------------
+
+
+def text_delta(text: str) -> AgentEvent:
+ return AgentEvent(type=EventType.TEXT_DELTA, data={"text": text})
+
+
+def tool_start(tool_id: str, name: str, args: dict) -> AgentEvent:
+    """Build a TOOL_START event announcing that tool `name` is being invoked."""
+    return AgentEvent(
+        type=EventType.TOOL_START,
+        data={"tool_id": tool_id, "name": name, "arguments": args},
+    )
+
+
+def tool_result(
+    tool_id: str, name: str, result: str, is_error: bool = False
+) -> AgentEvent:
+    """Build a TOOL_RESULT event; `result` is truncated to 2000 chars for transport."""
+    return AgentEvent(
+        type=EventType.TOOL_RESULT,
+        data={
+            "tool_id": tool_id,
+            "name": name,
+            "result": result[:2000],  # cap payload size for the wire
+            "is_error": is_error,
+        },
+    )
+
+
+def file_write(path: str, action: str = "modify") -> AgentEvent:
+ return AgentEvent(
+ type=EventType.FILE_WRITE, data={"path": path, "action": action}
+ )
+
+
+def approval_needed(
+    request_id: str,
+    tool: str,
+    args: dict,
+    summary: str,
+    diff_preview: str | None = None,
+    risk: str = "medium",
+) -> AgentEvent:
+    """Build an APPROVAL_NEEDED event asking the user to permit a tool call.
+
+    `request_id` correlates with the matching approval_resolved event;
+    `risk` defaults to "medium" when the caller does not classify it.
+    """
+    return AgentEvent(
+        type=EventType.APPROVAL_NEEDED,
+        data={
+            "request_id": request_id,
+            "tool": tool,
+            "arguments": args,
+            "summary": summary,
+            "diff_preview": diff_preview,
+            "risk_level": risk,
+        },
+    )
+
+
+def approval_resolved(request_id: str, approved: bool) -> AgentEvent:
+ return AgentEvent(
+ type=EventType.APPROVAL_RESOLVED,
+ data={"request_id": request_id, "approved": approved},
+ )
+
+
+def plan_step(index: int, title: str, status: str, action: str = "") -> AgentEvent:
+    """Build a PLAN_STEP event reporting a status change for step `index`."""
+    return AgentEvent(
+        type=EventType.PLAN_STEP,
+        data={
+            "step_index": index,
+            "title": title,
+            "status": status,
+            "action": action,
+        },
+    )
+
+
+def terminal_output(stream: str, text: str) -> AgentEvent:
+ return AgentEvent(
+ type=EventType.TERMINAL_OUTPUT, data={"stream": stream, "text": text}
+ )
+
+
+def terminal_exit(command: str, exit_code: int, duration_ms: int = 0) -> AgentEvent:
+    """Build a TERMINAL_EXIT event when a shell process finishes."""
+    return AgentEvent(
+        type=EventType.TERMINAL_EXIT,
+        data={
+            "command": command,
+            "exit_code": exit_code,
+            "duration_ms": duration_ms,
+        },
+    )
+
+
+def test_result(
+    framework: str,
+    passed: int,
+    failed: int,
+    skipped: int = 0,
+    output: str = "",
+    exit_code: int = 0,
+) -> AgentEvent:
+    """Build a TEST_RESULT event with structured pass/fail/skip counts.
+
+    `output` is truncated to 5000 chars to bound the payload.
+    """
+    return AgentEvent(
+        type=EventType.TEST_RESULT,
+        data={
+            "framework": framework,
+            "passed": passed,
+            "failed": failed,
+            "skipped": skipped,
+            "output": output[:5000],  # cap payload size
+            "exit_code": exit_code,
+        },
+    )
+
+
+def diagnostics(
+    errors: int, warnings: int, entries: list
+) -> AgentEvent:
+    """Build a DIAGNOSTICS event with lint/type results; only the first 30
+    entries are shipped, while the counts reflect the full totals."""
+    return AgentEvent(
+        type=EventType.DIAGNOSTICS,
+        data={
+            "errors": errors,
+            "warnings": warnings,
+            "entries": entries[:30],  # cap entry list for the wire
+        },
+    )
+
+
+def status_change(status: str, message: str = "") -> AgentEvent:
+ return AgentEvent(
+ type=EventType.STATUS_CHANGE, data={"status": status, "message": message}
+ )
+
+
+def agent_done(usage: dict | None = None, summary: str = "") -> AgentEvent:
+ return AgentEvent(
+ type=EventType.DONE,
+ data={"usage": usage or {}, "summary": summary},
+ )
+
+
+def agent_error(error: str, recoverable: bool = True) -> AgentEvent:
+    """Build an ERROR event; `recoverable` hints whether the client may retry."""
+    payload = {"error": error, "recoverable": recoverable}
+    return AgentEvent(type=EventType.ERROR, data=payload)
+
+
+# ---------------------------------------------------------------------------
+# AgentEventBus — fan-out event bus per session
+# ---------------------------------------------------------------------------
+
+
+class AgentEventBus:
+    """
+    Fan-out event bus. The executor emits events; multiple consumers
+    (WebSocket, SSE, polling) subscribe via async queues.
+
+    NOTE(review): safety here relies on all emits/subscribes happening on
+    the event loop that owns the queues (asyncio.Queue.put_nowait is not
+    thread-safe across loops) — confirm no cross-thread emitters exist.
+    """
+
+    def __init__(self) -> None:
+        # sub_id -> bounded queue (maxsize 2000; emit drops when full).
+        self._subscribers: Dict[str, asyncio.Queue[AgentEvent]] = {}
+
+    def subscribe(self) -> tuple[str, asyncio.Queue[AgentEvent]]:
+        """Create a new subscription. Returns (sub_id, queue)."""
+        sub_id = uuid.uuid4().hex[:8]
+        queue: asyncio.Queue[AgentEvent] = asyncio.Queue(maxsize=2000)
+        self._subscribers[sub_id] = queue
+        return sub_id, queue
+
+    def unsubscribe(self, sub_id: str) -> None:
+        # Idempotent: pop with a default tolerates double-unsubscribe.
+        self._subscribers.pop(sub_id, None)
+
+    async def emit(self, event: AgentEvent) -> None:
+        """Push event to all subscribers (non-blocking drop on full)."""
+        # No await inside the loop, so the dict cannot mutate mid-iteration
+        # within a single event loop.
+        for queue in self._subscribers.values():
+            try:
+                queue.put_nowait(event)
+            except asyncio.QueueFull:
+                # Slow consumer: drop rather than back-pressure the agent.
+                logger.warning("AgentEventBus: subscriber queue full, dropping event")
+
+    async def stream(self, sub_id: str) -> AsyncIterator[AgentEvent]:
+        """Yield events for a subscriber. Sends keepalive every 25s.
+
+        Terminates (auto-unsubscribing via the finally block) once sub_id
+        is removed from the registry or the caller closes the generator.
+        """
+        queue = self._subscribers.get(sub_id)
+        if not queue:
+            return
+        try:
+            while sub_id in self._subscribers:
+                try:
+                    event = await asyncio.wait_for(queue.get(), timeout=25.0)
+                    yield event
+                except asyncio.TimeoutError:
+                    # Synthetic keepalive so idle transports stay open.
+                    yield AgentEvent(
+                        type=EventType.STATUS_CHANGE,
+                        data={"status": "keepalive"},
+                    )
+        finally:
+            self.unsubscribe(sub_id)
+
+
+# ---------------------------------------------------------------------------
+# Global bus registry (one bus per session)
+# ---------------------------------------------------------------------------
+
+_session_buses: Dict[str, AgentEventBus] = {}
+
+
+def get_bus(session_id: str) -> AgentEventBus:
+ """Get or create the event bus for a session."""
+ if session_id not in _session_buses:
+ _session_buses[session_id] = AgentEventBus()
+ return _session_buses[session_id]
+
+
+def remove_bus(session_id: str) -> None:
+    """Clean up the event bus for a session."""
+    # NOTE(review): live consumers holding a reference to the bus keep
+    # streaming until their own loop exits; they are not force-closed here.
+    _session_buses.pop(session_id, None)
diff --git a/gitpilot/agent_executor.py b/gitpilot/agent_executor.py
new file mode 100644
index 0000000000000000000000000000000000000000..b4a5d20b34b7b5c9a2357d50b1249e1d1a312394
--- /dev/null
+++ b/gitpilot/agent_executor.py
@@ -0,0 +1,344 @@
+# gitpilot/agent_executor.py
+"""
+Streaming multi-step agent executor.
+
+Wraps existing CrewAI plan+execute functions with granular event streaming
+via the AgentEventBus. Does NOT modify agentic.py or any existing module.
+
+Execution pipeline:
+ 1. Plan (reuses generate_plan_lite / generate_plan)
+ 2. Execute (reuses execute_plan_lite / execute_plan, with streaming shim)
+ 3. Validate (run detected linter/tests, feed errors back)
+
+All events flow through AgentEventBus -> consumers (WS v2, SSE, VS Code).
+"""
+from __future__ import annotations
+
+import asyncio
+import json
+import logging
+import re
+import uuid
+from pathlib import Path
+from typing import Optional
+
+from . import agent_events as evt
+from .agent_events import AgentEventBus
+from .approval_protocol import ApprovalGate
+from .terminal import TerminalExecutor, TerminalSession
+from .workspace import WorkspaceManager, WorkspaceInfo
+from .test_detection import detect_test_command, detect_framework_name
+from .diagnostics_runner import run_linter, parse_diagnostics
+
+logger = logging.getLogger(__name__)
+
+# Maximum retries for self-correction after validation errors
+MAX_VALIDATION_RETRIES = 2
+
+
+class StreamingAgentExecutor:
+ """
+ Autonomous multi-step agent executor with real-time streaming.
+
+ Orchestrates the full pipeline:
+ plan -> per-step execution -> validation -> self-correction -> done
+
+ Emits all events through the AgentEventBus so any transport
+ (WebSocket v2, SSE, VS Code postMessage) can forward them.
+ """
+
+ def __init__(
+ self,
+ bus: AgentEventBus,
+ gate: ApprovalGate,
+ workspace: Optional[WorkspaceInfo] = None,
+ ws_manager: Optional[WorkspaceManager] = None,
+ terminal: Optional[TerminalExecutor] = None,
+ ) -> None:
+ self._bus = bus
+ self._gate = gate
+ self._workspace = workspace
+ self._ws_manager = ws_manager or WorkspaceManager()
+ self._terminal = terminal or TerminalExecutor()
+ self._cancelled = False
+
+ def cancel(self) -> None:
+ """Signal the executor to stop after the current step."""
+ self._cancelled = True
+
+ async def execute(
+ self,
+ user_message: str,
+ repo_full_name: str,
+ branch: Optional[str] = None,
+ token: Optional[str] = None,
+ mode: str = "auto",
+ ) -> Optional[dict]:
+ """
+ Main entry point. Streams events throughout.
+
+ Args:
+ user_message: The user's request
+ repo_full_name: "owner/repo" (required for multi-agent path)
+ branch: Git branch (default HEAD)
+ token: GitHub token (for remote repos)
+ mode: "auto" | "plan_only"
+
+ Returns:
+ The plan dict on success, None on failure.
+ """
+ # Validate repo_full_name BEFORE hitting CrewAI/generate_plan_lite.
+ # The legacy planners do `owner, repo = repo_full_name.split("/")`
+ # which raises ValueError("not enough values to unpack") on empty
+ # or slashless strings. Catch this at the boundary.
+ #
+ # For folder-only / local-git sessions (no GitHub remote) the
+ # multi-agent CrewAI planner is simply not applicable. Instead of
+ # emitting a scary `agent_error` (which every SSE consumer renders
+ # as a user-visible notice), close the stream cleanly with an
+ # empty `done` event. Callers that support a batch fallback — e.g.
+ # the VS Code extension's sendChatToBackend() flow — will treat
+ # the empty stream as "streaming unavailable" and transparently
+ # fall through to /api/chat/send, which is the correct path for
+ # folder-only sessions. No regression for strict clients: they
+ # still observe a terminal event and the method returns None.
+ if not self._is_valid_repo_full_name(repo_full_name):
+ logger.debug(
+ "StreamingAgentExecutor: skipping CrewAI planner for "
+ "non-GitHub session (repo_full_name=%r); clients should "
+ "fall back to batch chat.",
+ repo_full_name,
+ )
+ await self._bus.emit(evt.status_change("done"))
+ await self._bus.emit(evt.agent_done(summary=""))
+ return None
+
+ try:
+ # ── Phase 1: Plan ──
+ await self._bus.emit(evt.status_change("planning", "Analyzing your request..."))
+
+ plan = await self._generate_plan(user_message, repo_full_name, token, branch)
+ if not plan:
+ await self._bus.emit(evt.agent_error("Failed to generate plan"))
+ return None
+
+ # Emit plan structure
+ steps = plan.get("steps", []) if isinstance(plan, dict) else []
+ if hasattr(plan, "steps"):
+ steps = [
+ {"title": s.title, "description": s.description}
+ for s in plan.steps
+ ]
+
+ goal = plan.get("goal", user_message) if isinstance(plan, dict) else getattr(plan, "goal", user_message)
+ summary = plan.get("summary", "") if isinstance(plan, dict) else getattr(plan, "summary", "")
+
+ for i, step in enumerate(steps):
+ title = step.get("title", f"Step {i + 1}") if isinstance(step, dict) else getattr(step, "title", f"Step {i + 1}")
+ await self._bus.emit(evt.plan_step(i, title, "pending"))
+
+ if self._cancelled:
+ await self._bus.emit(evt.agent_error("Cancelled by user", recoverable=True))
+ return None
+
+ if mode == "plan_only":
+ await self._bus.emit(evt.agent_done(summary="Plan generated."))
+ return plan if isinstance(plan, dict) else {"goal": goal, "summary": summary, "steps": steps}
+
+ # ── Phase 2: Execute ──
+ await self._bus.emit(evt.status_change("generating", "Executing plan..."))
+
+ result = await self._execute_plan(plan, repo_full_name, token, branch)
+
+ # Stream result text in chunks (simulate token streaming from batch)
+ answer = self._extract_answer(result)
+ chunk_size = 80
+ for i in range(0, len(answer), chunk_size):
+ if self._cancelled:
+ break
+ await self._bus.emit(evt.text_delta(answer[i : i + chunk_size]))
+ await asyncio.sleep(0.015)
+
+ if self._cancelled:
+ await self._bus.emit(evt.agent_error("Cancelled by user", recoverable=True))
+ return None
+
+ # ── Phase 3: Validate ──
+ await self._bus.emit(evt.status_change("reviewing", "Validating changes..."))
+ await self._run_validation()
+
+ # ── Done ──
+ await self._bus.emit(evt.status_change("done"))
+ await self._bus.emit(evt.agent_done(summary=answer[:500]))
+
+ return plan if isinstance(plan, dict) else {"goal": goal, "summary": summary}
+
+ except Exception as e:
+ logger.error("StreamingAgentExecutor error: %s", e, exc_info=True)
+ await self._bus.emit(evt.agent_error(str(e)))
+ return None
+
+ # ── Internal helpers ──
+
+ @staticmethod
+ def _is_valid_repo_full_name(name: object) -> bool:
+ """True iff `name` is a non-empty 'owner/repo' string with both parts.
+
+ Rejects: None, empty strings, strings without '/', strings with
+ empty owner or repo, and strings containing more than one '/'.
+ """
+ if not isinstance(name, str) or not name.strip():
+ return False
+ parts = name.strip().split("/")
+ return len(parts) == 2 and all(p.strip() for p in parts)
+
+ async def _generate_plan(self, goal, repo_full_name, token, branch):
+ """Wrap existing plan generators with event streaming."""
+ try:
+ from .agentic import generate_plan_lite
+ return await generate_plan_lite(
+ goal=goal,
+ repo_full_name=repo_full_name,
+ token=token,
+ branch_name=branch,
+ )
+ except ImportError:
+ logger.warning("generate_plan_lite not available, using fallback")
+ except Exception as e:
+ logger.error("Plan generation error: %s", e)
+ await self._bus.emit(evt.agent_error(f"Planning failed: {e}"))
+ return None
+
+ async def _execute_plan(self, plan, repo_full_name, token, branch):
+ """Wrap existing plan executors with event streaming."""
+ try:
+ from .agentic import execute_plan_lite
+ return await execute_plan_lite(
+ plan=plan,
+ repo_full_name=repo_full_name,
+ token=token,
+ branch_name=branch,
+ )
+ except ImportError:
+ logger.warning("execute_plan_lite not available, using fallback")
+ except Exception as e:
+ logger.error("Plan execution error: %s", e)
+ await self._bus.emit(evt.agent_error(f"Execution failed: {e}"))
+ return None
+
+ async def _run_validation(self) -> None:
+ """Run linter and/or tests if the workspace supports them."""
+ if not self._workspace:
+ return
+
+ ws_path = self._workspace.path if hasattr(self._workspace, "path") else Path(str(self._workspace))
+
+ # ── Lint check ──
+ try:
+ lint_result = await run_linter(ws_path, self._terminal, timeout=60)
+ if lint_result:
+ entries = parse_diagnostics(lint_result.stdout + lint_result.stderr)
+ errors = [e for e in entries if e.get("severity") == "error"]
+ warnings = [e for e in entries if e.get("severity") == "warning"]
+ await self._bus.emit(evt.diagnostics(
+ errors=len(errors),
+ warnings=len(warnings),
+ entries=entries,
+ ))
+ except Exception as e:
+ logger.debug("Linter not available: %s", e)
+
+ # ── Test check ──
+ try:
+ test_cmd = await detect_test_command(ws_path)
+ if test_cmd:
+ framework = await detect_framework_name(ws_path) or "unknown"
+ session = TerminalSession(workspace_path=ws_path)
+
+ await self._bus.emit(evt.tool_start(
+ uuid.uuid4().hex[:8], "run_tests", {"command": test_cmd}
+ ))
+
+ # Stream test output
+ output_chunks = []
+ exit_code = -1
+ duration_ms = 0
+
+ async for chunk in self._terminal.execute_streaming(session, test_cmd, timeout=120):
+ if chunk.get("type") == "stdout":
+ output_chunks.append(chunk["data"])
+ await self._bus.emit(evt.terminal_output("stdout", chunk["data"]))
+ elif chunk.get("type") == "error":
+ output_chunks.append(chunk.get("data", ""))
+ await self._bus.emit(evt.terminal_output("stderr", chunk.get("data", "")))
+ elif chunk.get("type") == "exit":
+ exit_code = chunk.get("exit_code", -1)
+ duration_ms = chunk.get("duration_ms", 0)
+
+ await self._bus.emit(evt.terminal_exit(test_cmd, exit_code, duration_ms))
+
+ # Parse test results
+ full_output = "".join(output_chunks)
+ passed, failed, skipped = self._parse_test_counts(full_output)
+
+ await self._bus.emit(evt.test_result(
+ framework=framework,
+ passed=passed,
+ failed=failed,
+ skipped=skipped,
+ output=full_output,
+ exit_code=exit_code,
+ ))
+ except Exception as e:
+ logger.debug("Test runner not available: %s", e)
+
+ @staticmethod
+ def _extract_answer(result) -> str:
+ """Extract the response text from various result formats."""
+ if result is None:
+ return "Task completed."
+ if isinstance(result, str):
+ return result
+ if isinstance(result, dict):
+ return (
+ result.get("result")
+ or result.get("answer")
+ or result.get("summary")
+ or result.get("message")
+ or str(result)
+ )
+ if hasattr(result, "raw"):
+ return str(result.raw)
+ return str(result)
+
+ @staticmethod
+ def _parse_test_counts(output: str) -> tuple[int, int, int]:
+ """Best-effort extraction of pass/fail/skip counts from test output."""
+ passed = failed = skipped = 0
+
+ # Jest / Vitest: "Tests: 3 passed, 1 failed, 4 total"
+ m = re.search(r"(\d+)\s+passed", output)
+ if m:
+ passed = int(m.group(1))
+ m = re.search(r"(\d+)\s+failed", output)
+ if m:
+ failed = int(m.group(1))
+ m = re.search(r"(\d+)\s+skipped", output)
+ if m:
+ skipped = int(m.group(1))
+
+ # pytest: "3 passed, 1 failed, 2 skipped"
+ m = re.search(r"(\d+)\s+passed", output)
+ if m:
+ passed = int(m.group(1))
+ m = re.search(r"(\d+)\s+failed", output)
+ if m:
+ failed = int(m.group(1))
+
+ # go test: "ok" or "FAIL"
+ if "FAIL" in output and failed == 0:
+ failed = output.count("FAIL")
+ if "ok" in output and passed == 0:
+ passed = output.count("\nok")
+
+ return passed, failed, skipped
diff --git a/gitpilot/agent_router.py b/gitpilot/agent_router.py
new file mode 100644
index 0000000000000000000000000000000000000000..4639bf2c47a9fb100c9b296e093e0fbcf45daade
--- /dev/null
+++ b/gitpilot/agent_router.py
@@ -0,0 +1,284 @@
+# gitpilot/agent_router.py
+"""Intelligent Agent Router for GitPilot.
+
+Classifies user requests and delegates them to the appropriate specialised
+agent (or a pipeline of agents). The router itself does **not** use an LLM;
+it relies on lightweight keyword / pattern matching so that routing is
+instantaneous and deterministic.
+
+The router returns a *WorkflowPlan* describing which agents should run and
+in what order. The actual agent execution is handled by the orchestrator
+in ``agentic.py``.
+"""
+from __future__ import annotations
+
+import re
+from dataclasses import dataclass, field
+from enum import Enum
+from typing import List, Optional
+
+
+class AgentType(str, Enum):
+    """Available specialised agents.
+
+    Values are the stable string ids used in WorkflowPlan.agents; the
+    orchestrator in agentic.py maps them to concrete agent instances.
+    """
+
+    EXPLORER = "explorer"
+    PLANNER = "planner"
+    CODE_WRITER = "code_writer"
+    CODE_REVIEWER = "code_reviewer"
+    ISSUE_MANAGER = "issue_manager"
+    PR_MANAGER = "pr_manager"
+    SEARCH = "search"
+    LEARNING = "learning"
+    LOCAL_EDITOR = "local_editor"  # Phase 1: local file editing + shell
+    TERMINAL = "terminal"  # Phase 1: dedicated terminal agent
+
+
+class RequestCategory(str, Enum):
+    """High-level intent category inferred from the user request.
+
+    PLAN_EXECUTE is the catch-all default returned by route() when no
+    specific pattern matches.
+    """
+
+    PLAN_EXECUTE = "plan_execute"  # Existing explore -> plan -> execute workflow
+    ISSUE_MANAGEMENT = "issue_management"
+    PR_MANAGEMENT = "pr_management"
+    CODE_SEARCH = "code_search"
+    CODE_REVIEW = "code_review"
+    LEARNING = "learning"
+    CONVERSATIONAL = "conversational"  # Free-form chat / Q&A about the repo
+    LOCAL_EDIT = "local_edit"  # Phase 1: direct file editing with verification
+    TERMINAL = "terminal"  # Phase 1: shell command execution
+
+
+@dataclass
+class WorkflowPlan:
+    """Describes which agents to invoke and in what order."""
+
+    category: RequestCategory
+    # Agents run in list order (e.g. reviewer before pr_manager).
+    agents: List[AgentType]
+    # Human-readable one-liner explaining the routing decision.
+    description: str
+    requires_repo_context: bool = True
+    # If the request mentions a specific issue/PR number, capture it.
+    entity_number: Optional[int] = None
+    # Additional metadata extracted from the request.
+    metadata: dict = field(default_factory=dict)
+
+
+# ---------------------------------------------------------------------------
+# Pattern definitions (order matters -- first match wins)
+# ---------------------------------------------------------------------------
+
+_ISSUE_CREATE_RE = re.compile(
+    r"\b(create|open|new|file|add)\b.*\bissue\b", re.IGNORECASE
+)
+_ISSUE_UPDATE_RE = re.compile(
+    r"\b(update|modify|edit|change|close|reopen|label|assign|milestone)\b.*\bissue\b",
+    re.IGNORECASE,
+)
+_ISSUE_LIST_RE = re.compile(
+    r"\b(list|show|get|find|search)\b.*\bissues?\b", re.IGNORECASE
+)
+_ISSUE_COMMENT_RE = re.compile(
+    r"\b(comment|reply|respond)\b.*\bissue\b", re.IGNORECASE
+)
+# Bare "#42" references; shared by issue extraction in _extract_issue_number.
+_ISSUE_NUMBER_RE = re.compile(r"#(\d+)")
+
+_PR_CREATE_RE = re.compile(
+    r"\b(create|open|new|make)\b.*\b(pull request|pr|pull)\b", re.IGNORECASE
+)
+_PR_MERGE_RE = re.compile(
+    r"\b(merge|squash|rebase)\b.*\b(pull request|pr|pull)\b", re.IGNORECASE
+)
+_PR_REVIEW_RE = re.compile(
+    r"\b(review|approve|request changes)\b.*\b(pull request|pr|pull)\b",
+    re.IGNORECASE,
+)
+_PR_LIST_RE = re.compile(
+    r"\b(list|show|get|find)\b.*\b(pull requests?|prs?|pulls?)\b", re.IGNORECASE
+)
+
+_SEARCH_CODE_RE = re.compile(
+    r"\b(search|find|locate|grep|look for)\b.*\b(code|function|class|symbol|pattern|file)\b",
+    re.IGNORECASE,
+)
+_SEARCH_USER_RE = re.compile(
+    r"\b(search|find|who)\b.*\b(user|developer|org|organization|contributor)\b",
+    re.IGNORECASE,
+)
+_SEARCH_REPO_RE = re.compile(
+    r"\b(search|find|discover)\b.*\b(repo|repository|project)\b", re.IGNORECASE
+)
+
+# Phase 1 agents: shell execution and direct local file editing.
+_TERMINAL_RE = re.compile(
+    r"\b(run|execute|launch)\b.*\b(command|test|tests|script|build|lint|npm|pip|make|docker|pytest|cargo|go)\b",
+    re.IGNORECASE,
+)
+_LOCAL_EDIT_RE = re.compile(
+    r"\b(edit|modify|change|update|fix|write|rewrite|patch)\b.*\b(file|code|function|class|method|module|line|lines)\b",
+    re.IGNORECASE,
+)
+
+_REVIEW_RE = re.compile(
+    r"\b(review|analyze|audit|check|inspect)\b.*\b(code|quality|security|performance)\b",
+    re.IGNORECASE,
+)
+
+_LEARNING_RE = re.compile(
+    r"\b(how (do|can|to)|explain|what is|guide|tutorial|best practice|help with)\b",
+    re.IGNORECASE,
+)
+# Broad GitHub-feature vocabulary; route() treats a match as a learning query.
+_GITHUB_TOPICS_RE = re.compile(
+    r"\b(actions?|workflow|ci/?cd|pages?|packages?|discussions?|authentication|deploy|release)\b",
+    re.IGNORECASE,
+)
+
+
+def _extract_issue_number(text: str) -> Optional[int]:
+ m = _ISSUE_NUMBER_RE.search(text)
+ if m:
+ return int(m.group(1))
+ # Also try "issue 42" / "issue number 42"
+ m2 = re.search(r"\bissue\s*(?:number\s*)?(\d+)\b", text, re.IGNORECASE)
+ return int(m2.group(1)) if m2 else None
+
+
+def _extract_pr_number(text: str) -> Optional[int]:
+ m = re.search(r"\b(?:pr|pull request|pull)\s*#?(\d+)\b", text, re.IGNORECASE)
+ return int(m.group(1)) if m else None
+
+
+# ---------------------------------------------------------------------------
+# Public API
+# ---------------------------------------------------------------------------
+
+def route(user_request: str) -> WorkflowPlan:
+    """Classify *user_request* and return a ``WorkflowPlan``.
+
+    First match wins, so ordering below is load-bearing: specific
+    issue/PR/search patterns must stay ahead of the broad terminal,
+    edit, review and learning patterns, with plan+execute as default.
+    """
+    text = user_request.strip()
+
+    # --- Issue management ------------------------------------------------
+    if _ISSUE_CREATE_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.ISSUE_MANAGEMENT,
+            agents=[AgentType.ISSUE_MANAGER],
+            description="Create a new GitHub issue",
+            entity_number=_extract_issue_number(text),
+            metadata={"action": "create"},
+        )
+    if _ISSUE_COMMENT_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.ISSUE_MANAGEMENT,
+            agents=[AgentType.ISSUE_MANAGER],
+            description="Comment on an issue",
+            entity_number=_extract_issue_number(text),
+            metadata={"action": "comment"},
+        )
+    if _ISSUE_UPDATE_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.ISSUE_MANAGEMENT,
+            agents=[AgentType.ISSUE_MANAGER],
+            description="Update an existing issue",
+            entity_number=_extract_issue_number(text),
+            metadata={"action": "update"},
+        )
+    if _ISSUE_LIST_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.ISSUE_MANAGEMENT,
+            agents=[AgentType.ISSUE_MANAGER],
+            description="List or search issues",
+            metadata={"action": "list"},
+        )
+
+    # --- PR management ---------------------------------------------------
+    if _PR_CREATE_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.PR_MANAGEMENT,
+            agents=[AgentType.PR_MANAGER],
+            description="Create a pull request",
+            metadata={"action": "create"},
+        )
+    if _PR_MERGE_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.PR_MANAGEMENT,
+            agents=[AgentType.PR_MANAGER],
+            description="Merge a pull request",
+            entity_number=_extract_pr_number(text),
+            metadata={"action": "merge"},
+        )
+    if _PR_REVIEW_RE.search(text):
+        # Reviewer runs before the PR manager so review output can inform it.
+        return WorkflowPlan(
+            category=RequestCategory.PR_MANAGEMENT,
+            agents=[AgentType.CODE_REVIEWER, AgentType.PR_MANAGER],
+            description="Review a pull request",
+            entity_number=_extract_pr_number(text),
+            metadata={"action": "review"},
+        )
+    if _PR_LIST_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.PR_MANAGEMENT,
+            agents=[AgentType.PR_MANAGER],
+            description="List pull requests",
+            metadata={"action": "list"},
+        )
+
+    # --- Code search -----------------------------------------------------
+    if _SEARCH_USER_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.CODE_SEARCH,
+            agents=[AgentType.SEARCH],
+            description="Search for GitHub users or organisations",
+            requires_repo_context=False,
+            metadata={"search_type": "users"},
+        )
+    if _SEARCH_REPO_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.CODE_SEARCH,
+            agents=[AgentType.SEARCH],
+            description="Search for repositories",
+            requires_repo_context=False,
+            metadata={"search_type": "repositories"},
+        )
+    if _SEARCH_CODE_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.CODE_SEARCH,
+            agents=[AgentType.SEARCH],
+            description="Search for code in the repository",
+            metadata={"search_type": "code"},
+        )
+
+    # --- Terminal / shell commands ----------------------------------------
+    if _TERMINAL_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.TERMINAL,
+            agents=[AgentType.TERMINAL],
+            description="Run shell commands in the workspace",
+            metadata={"action": "execute"},
+        )
+
+    # --- Local file editing -----------------------------------------------
+    if _LOCAL_EDIT_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.LOCAL_EDIT,
+            agents=[AgentType.LOCAL_EDITOR],
+            description="Edit files directly in the local workspace",
+        )
+
+    # --- Code review -----------------------------------------------------
+    if _REVIEW_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.CODE_REVIEW,
+            agents=[AgentType.EXPLORER, AgentType.CODE_REVIEWER],
+            description="Analyse code quality and suggest improvements",
+        )
+
+    # --- Learning & guidance ---------------------------------------------
+    if _LEARNING_RE.search(text) or _GITHUB_TOPICS_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.LEARNING,
+            agents=[AgentType.LEARNING],
+            description="Provide guidance on GitHub features or best practices",
+            requires_repo_context=False,
+        )
+
+    # --- Default: existing plan+execute workflow -------------------------
+    return WorkflowPlan(
+        category=RequestCategory.PLAN_EXECUTE,
+        agents=[AgentType.EXPLORER, AgentType.PLANNER, AgentType.CODE_WRITER],
+        description="Explore repository, create plan, and execute changes",
+    )
diff --git a/gitpilot/agent_teams.py b/gitpilot/agent_teams.py
new file mode 100644
index 0000000000000000000000000000000000000000..354e3a0578e8d398299d716efe8442db1dc593e6
--- /dev/null
+++ b/gitpilot/agent_teams.py
@@ -0,0 +1,263 @@
+# gitpilot/agent_teams.py
+"""Parallel multi-agent execution on git worktrees.
+
+Coordinates multiple agents working on independent subtasks simultaneously.
+Each agent operates on its own git worktree to avoid conflicts, and a lead
+agent reviews and merges the results.
+
+Architecture inspired by the MapReduce pattern and the *divide-and-conquer*
+approach from distributed systems research (Dean & Ghemawat, 2004).
+
+Workflow::
+
+ User: "Add authentication to the API"
+ Lead agent splits → 4 subtasks
+ ┌────────────┐ ┌────────────┐ ┌────────────┐ ┌────────────┐
+ │ Agent A: │ │ Agent B: │ │ Agent C: │ │ Agent D: │
+ │ User model │ │ Middleware │ │ Endpoints │ │ Tests │
+ │ worktree/a │ │ worktree/b │ │ worktree/c │ │ worktree/d │
+ └─────┬──────┘ └─────┬──────┘ └─────┬──────┘ └─────┬──────┘
+ └───────────┴───────────┴───────────┘
+ │
+ Lead reviews & merges
+"""
+from __future__ import annotations
+
+import asyncio
+import logging
+import uuid
+from dataclasses import dataclass, field
+from datetime import datetime, timezone
+from enum import Enum
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+logger = logging.getLogger(__name__)
+
+
class SubTaskStatus(str, Enum):
    """Lifecycle state of a :class:`SubTask`.

    Inherits ``str`` so ``.value`` comparisons and JSON serialization work
    without explicit conversion.
    """

    PENDING = "pending"      # created but not yet started
    RUNNING = "running"      # currently being executed by an agent
    COMPLETED = "completed"  # executor finished without raising
    FAILED = "failed"        # executor raised; details in SubTask.error
+
+
@dataclass
class SubTask:
    """A single subtask to be executed by one agent."""

    # Short random identifier (8 hex chars) — also used for worktree/branch names.
    id: str = field(default_factory=lambda: uuid.uuid4().hex[:8])
    title: str = ""
    description: str = ""
    # Logical agent name, e.g. "agent_0" (see AgentTeam.plan_and_split).
    assigned_agent: str = ""
    # Files this subtask intends to touch; used for overlap/conflict detection.
    files: List[str] = field(default_factory=list)
    status: SubTaskStatus = SubTaskStatus.PENDING
    # Free-text output of the executor on success.
    result: str = ""
    # Exception message when status == FAILED, else None.
    error: Optional[str] = None
    # Path of this subtask's git worktree (set by AgentTeam.setup_worktrees).
    worktree_path: Optional[Path] = None
    # UTC ISO-8601 timestamps set by AgentTeam.execute_parallel.
    started_at: Optional[str] = None
    completed_at: Optional[str] = None

    def to_dict(self) -> Dict[str, Any]:
        """Return a JSON-serializable view of this subtask.

        NOTE(review): ``worktree_path`` is not included — presumably because it
        is a server-local ``Path``; confirm callers never need it in the payload.
        """
        return {
            "id": self.id,
            "title": self.title,
            "description": self.description,
            "assigned_agent": self.assigned_agent,
            "files": self.files,
            "status": self.status.value,
            "result": self.result,
            "error": self.error,
            "started_at": self.started_at,
            "completed_at": self.completed_at,
        }
+
+
@dataclass
class TeamResult:
    """Aggregated result from parallel agent execution."""

    # Overall task label (execute_parallel fills in "parallel_execution").
    task: str
    subtasks: List[SubTask] = field(default_factory=list)
    merge_status: str = "pending"  # pending | merged | conflict | failed
    # Paths modified by more than one agent (filled by merge_results).
    conflicts: List[str] = field(default_factory=list)
    # Human-readable outcome description (filled by merge_results).
    summary: str = ""

    @property
    def all_completed(self) -> bool:
        """True when every subtask is COMPLETED (vacuously true for no subtasks)."""
        return all(s.status == SubTaskStatus.COMPLETED for s in self.subtasks)

    @property
    def any_failed(self) -> bool:
        """True when at least one subtask is FAILED."""
        return any(s.status == SubTaskStatus.FAILED for s in self.subtasks)

    def to_dict(self) -> Dict[str, Any]:
        """Return a JSON-serializable view including the derived properties."""
        return {
            "task": self.task,
            "subtasks": [s.to_dict() for s in self.subtasks],
            "merge_status": self.merge_status,
            "conflicts": self.conflicts,
            "summary": self.summary,
            "all_completed": self.all_completed,
            "any_failed": self.any_failed,
        }
+
+
class AgentTeam:
    """Coordinate multiple agents working in parallel.

    Usage::

        team = AgentTeam(workspace_path=Path("/repo"))
        subtasks = team.plan_and_split("Add auth system", num_agents=4)
        result = await team.execute_parallel(subtasks, executor_fn=my_agent_fn)
        merge = await team.merge_results(result)
    """

    def __init__(self, workspace_path: Optional[Path] = None) -> None:
        # Root of the git repository; None disables all worktree operations.
        self.workspace_path = workspace_path
        # Worktrees successfully created by setup_worktrees (used for cleanup).
        self._worktrees: List[Path] = []

    def plan_and_split(
        self,
        task: str,
        num_agents: int = 4,
        subtask_descriptions: Optional[List[Dict[str, str]]] = None,
    ) -> List[SubTask]:
        """Split a task into independent subtasks.

        If ``subtask_descriptions`` is provided, use those directly.
        Otherwise, create generic subtasks from the task description.

        Args:
            task: Natural-language description of the overall task.
            num_agents: How many generic subtasks to create (capped at 8).
            subtask_descriptions: Optional explicit subtask specs with keys
                "title", "description", "agent", "files".
        """
        subtasks: List[SubTask] = []

        if subtask_descriptions:
            for i, desc in enumerate(subtask_descriptions):
                subtasks.append(SubTask(
                    title=desc.get("title", f"Subtask {i + 1}"),
                    description=desc.get("description", ""),
                    assigned_agent=desc.get("agent", f"agent_{i}"),
                    files=desc.get("files", []),
                ))
        else:
            # Generic split — the LLM would normally do this.
            # Cap at 8 to bound parallelism regardless of caller input.
            for i in range(min(num_agents, 8)):
                subtasks.append(SubTask(
                    title=f"Part {i + 1} of {task}",
                    description=f"Handle part {i + 1} of the task: {task}",
                    assigned_agent=f"agent_{i}",
                ))

        return subtasks

    async def execute_parallel(
        self,
        subtasks: List[SubTask],
        executor_fn: Optional[Any] = None,
    ) -> TeamResult:
        """Execute subtasks in parallel.

        ``executor_fn`` is an async callable(SubTask) -> str that runs the
        agent logic for each subtask. If not provided, subtasks are marked
        as completed with a placeholder result.

        Exceptions raised by ``executor_fn`` are captured on the subtask
        (status FAILED + error message) and never propagate out of gather.
        """
        result = TeamResult(task="parallel_execution", subtasks=subtasks)

        async def _run_subtask(subtask: SubTask) -> None:
            subtask.status = SubTaskStatus.RUNNING
            subtask.started_at = datetime.now(timezone.utc).isoformat()
            try:
                if executor_fn:
                    subtask.result = await executor_fn(subtask)
                else:
                    subtask.result = f"Completed: {subtask.title}"
                subtask.status = SubTaskStatus.COMPLETED
            except Exception as e:
                subtask.status = SubTaskStatus.FAILED
                subtask.error = str(e)
                logger.error("Subtask %s failed: %s", subtask.id, e)
            finally:
                # Always stamp completion time, even on failure.
                subtask.completed_at = datetime.now(timezone.utc).isoformat()

        # Run all subtasks concurrently.
        await asyncio.gather(*[_run_subtask(st) for st in subtasks])

        return result

    async def merge_results(self, team_result: TeamResult) -> TeamResult:
        """Merge results from parallel execution.

        In a full implementation, this would:
          1. Check for file conflicts between subtask outputs
          2. Use git merge-tree for conflict detection
          3. Have a lead agent resolve conflicts

        For now, it aggregates results and detects file overlaps.
        """
        if team_result.any_failed:
            team_result.merge_status = "failed"
            failed = [s for s in team_result.subtasks if s.status == SubTaskStatus.FAILED]
            team_result.summary = (
                f"{len(failed)} subtask(s) failed: "
                + ", ".join(f"{s.title} ({s.error})" for s in failed)
            )
            return team_result

        # Detect file conflicts (same file claimed by multiple agents).
        file_owners: Dict[str, List[str]] = {}
        for st in team_result.subtasks:
            for f in st.files:
                file_owners.setdefault(f, []).append(st.assigned_agent)

        conflicts = [f for f, owners in file_owners.items() if len(owners) > 1]
        team_result.conflicts = conflicts

        if conflicts:
            team_result.merge_status = "conflict"
            team_result.summary = (
                f"File conflicts detected in: {', '.join(conflicts)}. "
                "Manual review required."
            )
        else:
            team_result.merge_status = "merged"
            completed = [s for s in team_result.subtasks if s.status == SubTaskStatus.COMPLETED]
            team_result.summary = (
                f"All {len(completed)} subtasks completed successfully. "
                "No file conflicts detected."
            )

        return team_result

    async def setup_worktrees(self, subtasks: List[SubTask], base_branch: str = "main") -> None:
        """Create git worktrees for each subtask (requires workspace_path).

        FIX: the previous version ignored the git exit code, so a failed
        ``git worktree add`` still recorded a nonexistent worktree path on the
        subtask. Now the worktree is registered only when git succeeds; a
        failure is logged with git's stderr and the subtask keeps
        ``worktree_path=None``.
        """
        if not self.workspace_path:
            return
        for st in subtasks:
            worktree_name = f"worktree-{st.id}"
            worktree_path = self.workspace_path / ".worktrees" / worktree_name
            branch_name = f"team/{st.id}"

            proc = await asyncio.create_subprocess_exec(
                "git", "worktree", "add", "-b", branch_name,
                str(worktree_path), base_branch,
                cwd=str(self.workspace_path),
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
            )
            _stdout, stderr = await proc.communicate()
            if proc.returncode != 0:
                logger.error(
                    "git worktree add failed for subtask %s (%s): %s",
                    st.id, worktree_path,
                    stderr.decode(errors="replace").strip(),
                )
                continue
            st.worktree_path = worktree_path
            self._worktrees.append(worktree_path)

    async def cleanup_worktrees(self) -> None:
        """Remove all worktrees created by this team.

        Failures are logged (not raised) so cleanup always attempts every
        worktree; the tracking list is cleared afterwards either way.
        """
        if not self.workspace_path:
            return
        for wt in self._worktrees:
            proc = await asyncio.create_subprocess_exec(
                "git", "worktree", "remove", "--force", str(wt),
                cwd=str(self.workspace_path),
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
            )
            _stdout, stderr = await proc.communicate()
            if proc.returncode != 0:
                logger.warning(
                    "git worktree remove failed for %s: %s",
                    wt, stderr.decode(errors="replace").strip(),
                )
        self._worktrees.clear()
diff --git a/gitpilot/agent_tools.py b/gitpilot/agent_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..71572afb6ae17a8ca4a5b09fab4b264e1fb5c77c
--- /dev/null
+++ b/gitpilot/agent_tools.py
@@ -0,0 +1,293 @@
+"""
+Agent Tools for GitPilot Multi-Agent System
+Provides CrewAI-compatible tools for agents to explore and analyze repositories.
+"""
+import asyncio
+import threading
+from typing import Any, Dict, List, Optional, Tuple
+
+from crewai.tools import tool
+
+from .github_api import get_repo_tree, get_file
+
+
+def _sanitize_tool_arg(value: Any, fallback_key: str = "description") -> str:
+ """Fix CrewAI tool argument format bug.
+
+ Smaller LLMs (deepseek-r1, qwen, phi) sometimes send tool arguments
+ as a dict copying the schema definition instead of the actual value:
+ {"description": "README.md", "type": "str"}
+ instead of:
+ "README.md"
+
+ This helper unwraps the dict and returns a plain string.
+ """
+ if isinstance(value, str):
+ return value
+ if isinstance(value, dict):
+ # Try common keys the LLM might stuff the value into
+ for key in (fallback_key, "description", "value", "default", "title"):
+ if key in value and isinstance(value[key], str) and value[key]:
+ return value[key]
+ # Last resort: stringify
+ return str(next(iter(value.values()), ""))
+ return str(value)
+
+# Global context for current repository
+# Now includes 'token' to ensure tools can authenticate even in threads
+# AND includes 'branch' to ensure tools operate on the correct ref (not default HEAD/main)
+_current_repo_context: Dict[str, Any] = {}
+_context_lock = threading.RLock()
+
+
def set_repo_context(
    owner: str,
    repo: str,
    token: Optional[str] = None,
    branch: Optional[str] = None,
):
    """Set the current repository context for tools.

    Stores owner/repo together with the auth token and the active branch so
    tool functions can recover them later via :func:`get_repo_context`.
    A missing branch falls back to "HEAD".
    """
    global _current_repo_context
    # Build the replacement mapping up front; only the swap needs the lock.
    fresh_context: Dict[str, Any] = {
        "owner": owner,
        "repo": repo,
        "token": token,
        "branch": branch or "HEAD",
    }
    with _context_lock:
        _current_repo_context = fresh_context
+
+
def get_repo_context() -> Tuple[str, str, Optional[str], str]:
    """Get the current repository context including token and branch.

    Returns:
        ``(owner, repo, token, branch)`` — ``token`` may be None; ``branch``
        defaults to "HEAD" when none was stored.

    Raises:
        ValueError: if :func:`set_repo_context` was never called (owner or
            repo missing/empty).
    """
    # Snapshot all four values under the lock so a concurrent
    # set_repo_context cannot interleave between individual reads.
    with _context_lock:
        owner = _current_repo_context.get("owner", "")
        repo = _current_repo_context.get("repo", "")
        token = _current_repo_context.get("token")
        branch = _current_repo_context.get("branch", "HEAD")

    if not owner or not repo:
        raise ValueError("Repository context not set. Call set_repo_context first.")
    return owner, repo, token, branch
+
+
async def get_repository_context_summary(
    owner: str,
    repo: str,
    token: Optional[str] = None,
    branch: str = "HEAD",
) -> Dict[str, Any]:
    """Programmatically gather repository context.

    Fetches the full file tree for ``owner/repo`` at ``branch`` and derives:
    the flat file list, per-extension counts, top-level directories, and
    well-known "key files" (readme, package.json, requirements.txt,
    dockerfile, makefile — matched case-insensitively as substrings).

    On any failure, returns ``{"error": <msg>, "total_files": 0}`` instead of
    raising. NOTE(review): this error shape lacks the other keys — confirm
    callers guard for that.
    """
    try:
        # Pass token + ref explicitly
        tree = await get_repo_tree(owner, repo, token=token, ref=branch)

        if not tree:
            return {
                "all_files": [],
                "total_files": 0,
                "extensions": {},
                "directories": set(),
                "key_files": [],
            }

        all_files = [item["path"] for item in tree]
        extensions: Dict[str, int] = {}
        directories: set = set()
        key_files: List[str] = []

        for item in tree:
            path = item["path"]
            if "." in path:
                # rsplit keeps only the last segment; dotfiles like
                # ".gitignore" end up counted as extension ".gitignore".
                ext = "." + path.rsplit(".", 1)[1]
                extensions[ext] = extensions.get(ext, 0) + 1
            if "/" in path:
                # Only the first path component — top-level directories.
                directories.add(path.split("/")[0])

            path_lower = path.lower()
            if any(
                k in path_lower
                for k in ["readme", "package.json", "requirements.txt", "dockerfile", "makefile"]
            ):
                key_files.append(path)

        return {
            "all_files": all_files,
            "total_files": len(all_files),
            "extensions": extensions,
            # NOTE(review): "directories" is a set while every other field is
            # a list/dict — not JSON-serializable; confirm no caller dumps it.
            "directories": directories,
            "key_files": key_files,
        }

    except Exception as e:
        print(f"[Error] Failed to get repository context: {str(e)}")
        return {"error": str(e), "total_files": 0}
+
+
@tool("List all files in repository")
def list_repository_files() -> str:
    """Lists all files in the current repository."""
    # CrewAI tools are sync; a private event loop bridges to the async GitHub
    # API (module note above says tools may run on worker threads).
    try:
        owner, repo, token, branch = get_repo_context()

        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            # Pass token + ref explicitly
            tree = loop.run_until_complete(get_repo_tree(owner, repo, token=token, ref=branch))
        finally:
            loop.close()

        if not tree:
            return f"Repository is empty - no files found. (Branch: {branch})"

        # Sorted, one path per line — stable output for the LLM to read.
        result = f"Repository: {owner}/{repo} (Branch: {branch})\nFiles:\n"
        for item in sorted(tree, key=lambda x: x["path"]):
            result += f" - {item['path']}\n"
        return result
    except Exception as e:
        # Errors are returned as strings: tool output must be text for the LLM.
        return f"Error listing files: {str(e)}"
+
+
@tool("Get directory structure")
def get_directory_structure() -> str:
    """Gets the hierarchical directory structure."""
    try:
        owner, repo, token, branch = get_repo_context()

        # Private event loop: sync tool wrapping the async GitHub API call.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            # Pass token + ref explicitly
            tree = loop.run_until_complete(get_repo_tree(owner, repo, token=token, ref=branch))
        finally:
            loop.close()

        if not tree:
            return f"No files. (Branch: {branch})"

        # Simple structure generation: a sorted flat list of paths (the
        # slash-separated paths convey the hierarchy).
        paths = [t["path"] for t in tree]
        return f"Structure for {owner}/{repo} (Branch: {branch}):\n" + "\n".join(sorted(paths))
    except Exception as e:
        # Tool output must be a string, so errors are reported as text.
        return f"Error: {str(e)}"
+
+
@tool("Read file content")
def read_file(file_path: str) -> str:
    """Reads the content of a specific file."""
    # Unwrap schema-shaped dict arguments some small LLMs produce.
    file_path = _sanitize_tool_arg(file_path)
    try:
        owner, repo, token, branch = get_repo_context()

        # Private event loop: sync tool wrapping the async GitHub API call.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            # Pass token + ref explicitly
            content = loop.run_until_complete(get_file(owner, repo, file_path, token=token, ref=branch))
        finally:
            loop.close()

        # Delimit with --- so the LLM can tell content apart from the wrapper.
        return f"Content of {file_path}:\n---\n{content}\n---"
    except Exception as e:
        # Tool output must be a string, so errors are reported as text.
        return f"Error reading file {file_path}: {str(e)}"
+
+
@tool("Get repository summary")
def get_repository_summary() -> str:
    """Provides a comprehensive summary of the repository."""
    try:
        owner, repo, token, branch = get_repo_context()

        # Private event loop: sync tool wrapping the async GitHub API call.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            # Pass token + ref explicitly
            tree = loop.run_until_complete(get_repo_tree(owner, repo, token=token, ref=branch))
        finally:
            loop.close()

        # NOTE(review): unlike the sibling tools there is no `if not tree`
        # guard here — if get_repo_tree returned None, len() would raise and
        # be reported via the except branch. Confirm tree is always a list.
        return f"Summary for {owner}/{repo} (Branch: {branch}): {len(tree)} files found."
    except Exception as e:
        # Tool output must be a string, so errors are reported as text.
        return f"Error: {str(e)}"
+
+
+# ---------------------------------------------------------------------------
+# Write tools — allow agents to create, update, and delete files via GitHub API
+# ---------------------------------------------------------------------------
+
@tool("Write or update a file in the repository")
def write_file(file_path: str, content: str, commit_message: str) -> str:
    """Creates or updates a file in the repository. Provide the full file content."""
    # Unwrap schema-shaped dict arguments some small LLMs produce.
    file_path = _sanitize_tool_arg(file_path)
    content = _sanitize_tool_arg(content, fallback_key="value")
    commit_message = _sanitize_tool_arg(commit_message, fallback_key="value")
    try:
        owner, repo, token, branch = get_repo_context()
        # Deferred import keeps module import light.
        from .github_api import put_file

        # Private event loop: sync tool wrapping the async GitHub API call.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            result = loop.run_until_complete(
                put_file(owner, repo, file_path, content, commit_message, token=token, branch=branch)
            )
        finally:
            loop.close()

        # Abbreviated SHA; "" if the API response had no commit_sha.
        sha = result.get("commit_sha", "")
        return f"File '{file_path}' written successfully. Commit: {sha[:8]}"
    except Exception as e:
        # Tool output must be a string, so errors are reported as text.
        return f"Error writing file {file_path}: {str(e)}"
+
+
@tool("Delete a file from the repository")
def delete_repo_file(file_path: str, commit_message: str) -> str:
    """Deletes a file from the repository."""
    # Unwrap schema-shaped dict arguments some small LLMs produce.
    file_path = _sanitize_tool_arg(file_path)
    commit_message = _sanitize_tool_arg(commit_message, fallback_key="value")
    try:
        owner, repo, token, branch = get_repo_context()
        # Deferred import keeps module import light.
        from .github_api import delete_file

        # Private event loop: sync tool wrapping the async GitHub API call.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            result = loop.run_until_complete(
                delete_file(owner, repo, file_path, commit_message, token=token, branch=branch)
            )
        finally:
            loop.close()

        # Abbreviated SHA; "" if the API response had no commit_sha.
        sha = result.get("commit_sha", "")
        return f"File '{file_path}' deleted. Commit: {sha[:8]}"
    except Exception as e:
        # Tool output must be a string, so errors are reported as text.
        return f"Error deleting file {file_path}: {str(e)}"
+
+
@tool("Create a new branch in the repository")
def create_repo_branch(branch_name: str) -> str:
    """Creates a new branch from the current HEAD."""
    # Unwrap schema-shaped dict arguments some small LLMs produce.
    branch_name = _sanitize_tool_arg(branch_name)
    try:
        # NOTE(review): the context branch is deliberately ignored (_branch)
        # and the new branch is cut from "HEAD" — confirm this is intended
        # rather than branching from the active context ref.
        owner, repo, token, _branch = get_repo_context()
        # Deferred import keeps module import light.
        from .github_api import create_branch

        # Private event loop: sync tool wrapping the async GitHub API call.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            loop.run_until_complete(
                create_branch(owner, repo, branch_name, from_ref="HEAD", token=token)
            )
        finally:
            loop.close()

        return f"Branch '{branch_name}' created successfully."
    except Exception as e:
        # GitHub returns 422 for an existing ref — treat that as success.
        if "already exists" in str(e).lower() or "422" in str(e):
            return f"Branch '{branch_name}' already exists (OK to use)."
        return f"Error creating branch: {str(e)}"
+
+
# Export tools
# Read-only exploration tools (list/structure/read/summary).
REPOSITORY_TOOLS = [list_repository_files, get_directory_structure, read_file, get_repository_summary]
# Tools that mutate the repository via the GitHub API (write/delete/branch).
WRITE_TOOLS = [write_file, delete_repo_file, create_repo_branch]
diff --git a/gitpilot/agentic.py b/gitpilot/agentic.py
new file mode 100644
index 0000000000000000000000000000000000000000..2aed66ba4791fd06552ffa10d024d1e9230e63ab
--- /dev/null
+++ b/gitpilot/agentic.py
@@ -0,0 +1,2054 @@
+from __future__ import annotations
+
+import asyncio
+import contextvars
+import logging
+from textwrap import dedent
+from typing import Any, Dict, List, Literal, Optional
+
+from pydantic import BaseModel, Field
+from .agent_router import AgentType, RequestCategory, WorkflowPlan, route as route_request
+from .context_pack import build_context_pack
+from .topology_registry import (
+ get_topology,
+ get_topology_graph,
+ classify_message,
+ get_saved_topology_preference,
+ ExecutionStyle,
+ RoutingStrategy,
+)
+from fastapi import HTTPException
+
+logger = logging.getLogger(__name__)
+
+
+# ---------------------------------------------------------------------------
+# Incompatible model detection
+# ---------------------------------------------------------------------------
+# Models that struggle with CrewAI's multi-agent ReAct format.
+# Two categories:
+# 1. REASONING models (deepseek-r1, qwq, marco-o1) — produce tokens
+# that break CrewAI's parser regardless of model size
+# 2. SMALL models (<7B params) — return empty responses when they can't
+# follow "Thought: Action: Action Input:" format
+#
+# All of these are auto-routed to Lite Mode for reliability.
# Matched by SUBSTRING against the active model name (see
# _is_incompatible_model), so "deepseek-r1" covers every size/tag variant.
_INCOMPATIBLE_MODEL_PATTERNS = (
    # Reasoning models (ALL sizes fail — the reasoning tag breaks ReAct parser)
    "deepseek-r1",
    "qwq",
    "marco-o1",
    "o1-",
    # Small models (<7B)
    "qwen2.5:0.5b", "qwen2.5:1.5b", "qwen2.5:3b",
    "qwen2:0.5b", "qwen2:1.5b",
    "llama3.2:1b", "llama3.2:3b",
    "phi3:mini", "phi-3-mini", "phi3.5:mini", "phi3:3.8b",
    "gemma:2b", "gemma2:2b",
    "deepseek-coder:1.3b", "deepseek-coder:6.7b",
    "tinyllama", "tinydolphin",
    "stablelm2", "smollm", "granite3",
)
+
+
def _is_incompatible_model(settings) -> bool:
    """Check if the active model is incompatible with multi-agent ReAct.

    Uses substring matching so "deepseek-r1" catches all variants
    (deepseek-r1:1.5b, deepseek-r1:7b, deepseek-r1:14b, deepseek-r1:latest).
    Any attribute error while probing settings is treated as "compatible".
    """
    try:
        provider = str(getattr(settings, "provider", "")).lower()
        # Only applies to local Ollama/OllaBridge providers — cloud APIs
        # (OpenAI, Claude) have native tool-calling that handles this.
        if provider not in ("ollama", "ollabridge"):
            return False

        source = settings.ollama if provider == "ollama" else settings.ollabridge
        model = str(getattr(source, "model", "")).lower()
        return any(pattern in model for pattern in _INCOMPATIBLE_MODEL_PATTERNS)
    except Exception:
        return False
+
+
+def _split_repo_full_name(repo_full_name: str) -> tuple[str, str]:
+ """Safely split 'owner/repo' into (owner, repo).
+
+ Raises a clear ValueError if the input is missing, empty, or malformed.
+ This replaces `owner, repo = _split_repo_full_name(repo_full_name)` which produces
+ a cryptic "not enough values to unpack" error on folder/local-git
+ sessions that have no GitHub repository.
+ """
+ if not isinstance(repo_full_name, str) or not repo_full_name.strip():
+ raise ValueError(
+ "repo_full_name is required but was empty. "
+ "This session is not connected to a GitHub repository — "
+ "the multi-agent planner needs a repo in 'owner/repo' format. "
+ "Open the Workspace tab and add a repository before chatting."
+ )
+ parts = repo_full_name.strip().split("/")
+ if len(parts) != 2 or not all(p.strip() for p in parts):
+ raise ValueError(
+ f"repo_full_name must be in 'owner/repo' format, got: {repo_full_name!r}. "
+ "Example: 'octocat/hello-world'"
+ )
+ return parts[0].strip(), parts[1].strip()
+
+
+# ---------------------------------------------------------------------------
+# Resilient agent execution: timeout + circuit breaker
+# ---------------------------------------------------------------------------
async def _guarded_agent_call(ctx, func, *, label: str = "agent"):
    """Run a CrewAI kickoff in a thread with timeout and circuit breaker.

    - Checks circuit breaker before starting.
    - Applies a hard timeout (default 5 min, configurable via GITPILOT_AGENT_TIMEOUT).
    - Records success/failure in the circuit breaker.

    Args:
        ctx: a ``contextvars.Context`` whose ``run`` executes ``func`` with
            the caller's context propagated into the worker thread.
        func: zero-argument callable performing the (blocking) kickoff.
        label: diagnostic label forwarded to ``run_with_timeout``.

    Raises:
        RuntimeError: immediately, when the circuit breaker is open.
        Whatever ``run_with_timeout``/``func`` raise, re-raised unchanged
        after recording the failure.
    """
    from .resilience import llm_circuit, run_with_timeout

    if not llm_circuit.allow_request():
        raise RuntimeError(
            f"LLM provider circuit breaker is OPEN after repeated failures. "
            f"Requests are temporarily rejected. Try again in "
            f"{int(llm_circuit.recovery_timeout)}s."
        )

    try:
        result = await run_with_timeout(
            asyncio.to_thread(ctx.run, func),
            label=label,
        )
    except Exception:
        # FIX: the previous version had two except clauses with identical
        # bodies (`except (TimeoutError, RuntimeError)` then `except
        # Exception`) — the first was redundant dead duplication. One clause
        # covers both: record the failure and re-raise unchanged.
        llm_circuit.record_failure()
        raise
    llm_circuit.record_success()
    return result
+
+
+# ---------------------------------------------------------------------------
+# Lazy-load heavy dependencies (CrewAI, tool modules, LLM provider)
+# so that importing this module does NOT block FastAPI startup on HF Spaces.
+# The actual import happens on first call to any agent function.
+# ---------------------------------------------------------------------------
# Populated on first _crewai() call; maps class name -> CrewAI class.
_crewai_cache: dict = {}


def _crewai():
    """Return cached CrewAI classes (Agent, Crew, Process, Task).

    The import happens only on first use, keeping module import (and thus
    FastAPI startup) free of the heavy CrewAI dependency.
    """
    if not _crewai_cache:
        from crewai import Agent, Crew, Process, Task  # noqa: F811
        _crewai_cache.update(Agent=Agent, Crew=Crew, Process=Process, Task=Task)
    return _crewai_cache
+
+
# Populated on first _tools() call; maps collection name -> tool list/helper.
_tools_cache: dict = {}


def _tools():
    """Return cached tool collections (lazy-loaded on first use).

    Keys: REPOSITORY_TOOLS, WRITE_TOOLS, set_repo_context,
    get_repository_context_summary, ISSUE_TOOLS, PR_TOOLS, SEARCH_TOOLS,
    LOCAL_TOOLS, LOCAL_FILE_TOOLS, LOCAL_GIT_TOOLS, LOCAL_SHELL_TOOLS.
    Deferring these imports keeps module import fast (they pull in CrewAI
    tool machinery).
    """
    if not _tools_cache:
        from .agent_tools import REPOSITORY_TOOLS, WRITE_TOOLS, set_repo_context, get_repository_context_summary
        from .issue_tools import ISSUE_TOOLS
        from .pr_tools import PR_TOOLS
        from .search_tools import SEARCH_TOOLS
        from .local_tools import LOCAL_TOOLS, LOCAL_FILE_TOOLS, LOCAL_GIT_TOOLS, LOCAL_SHELL_TOOLS
        _tools_cache.update(
            REPOSITORY_TOOLS=REPOSITORY_TOOLS,
            WRITE_TOOLS=WRITE_TOOLS,
            set_repo_context=set_repo_context,
            get_repository_context_summary=get_repository_context_summary,
            ISSUE_TOOLS=ISSUE_TOOLS,
            PR_TOOLS=PR_TOOLS,
            SEARCH_TOOLS=SEARCH_TOOLS,
            LOCAL_TOOLS=LOCAL_TOOLS,
            LOCAL_FILE_TOOLS=LOCAL_FILE_TOOLS,
            LOCAL_GIT_TOOLS=LOCAL_GIT_TOOLS,
            LOCAL_SHELL_TOOLS=LOCAL_SHELL_TOOLS,
        )
    return _tools_cache
+
+
def _build_llm():
    """Lazy-import and call build_llm.

    Deferred import keeps the LLM-provider dependency out of module import
    time; returns whatever the provider's build_llm() constructs.
    """
    from .llm_provider import build_llm as _build
    return _build()
+
+
class PlanFile(BaseModel):
    """Represents a file operation in a plan step."""

    # Repository-relative file path, e.g. "src/app.py".
    path: str
    # Operation the step performs on this file; defaults to "MODIFY".
    action: Literal["CREATE", "MODIFY", "DELETE", "READ"] = "MODIFY"
+
+
class PlanStep(BaseModel):
    """A single step in the execution plan."""

    # 1-based position in the plan.
    step_number: int
    title: str
    description: str
    # Important: avoid mutable default list
    files: List[PlanFile] = Field(default_factory=list)
    # Optional free-text risk description (null in the LLM's JSON output).
    risks: str | None = None
+
class PlanResult(BaseModel):
    """The complete execution plan."""

    # Restatement of the user's goal.
    goal: str
    # Overall plan summary.
    summary: str
    # Ordered steps; may be empty for purely informational goals.
    steps: List[PlanStep]
+
+
+async def generate_plan(
+ goal: str,
+ repo_full_name: str,
+ token: str | None = None,
+ branch_name: str | None = None,
+) -> PlanResult:
+ """Agentic planning: create a structured plan but DO NOT modify the repo.
+
+ Two-phase approach:
+ 1) Explore and understand the repository (on the correct branch)
+ 2) Create a plan based on actual repository state
+ """
+ llm = _build_llm()
+
+ owner, repo = _split_repo_full_name(repo_full_name)
+
+ # CRITICAL: Set context INCLUDING branch so tools never fall back to HEAD/main
+ active_ref = branch_name or "HEAD"
+ _tools()["set_repo_context"](owner, repo, token=token, branch=active_ref)
+
+ # CONTEXT PACK: Load project context (conventions, active use case, asset chunks)
+ # This is additive — if nothing exists, context_pack is empty and agents behave as before.
+ from pathlib import Path as _P
+ workspace_path = _P.home() / ".gitpilot" / "workspaces" / owner / repo
+ context_pack = build_context_pack(workspace_path, query=goal)
+ if context_pack:
+ logger.info("[GitPilot] Context pack loaded (%d chars)", len(context_pack))
+
+ # PHASE 1: Explore repository (correct branch)
+ logger.info("[GitPilot] Phase 1: Exploring repository %s (ref=%s)...", repo_full_name, active_ref)
+
+ repo_context_data = await _tools()["get_repository_context_summary"](owner, repo, token=token, branch=active_ref)
+ logger.info(
+ "[GitPilot] Repository context gathered: %s files found (ref=%s)",
+ repo_context_data.get("total_files", 0),
+ active_ref,
+ )
+
+ explorer = _crewai()["Agent"](
+ role="Repository Explorer",
+ goal="Thoroughly explore and document the current state of the repository",
+ backstory=(
+ "You are a meticulous code archaeologist who explores repositories "
+ "to understand their complete structure before any changes are made. "
+ "You use all available tools to build a comprehensive picture."
+ ),
+ llm=llm,
+ tools=_tools()["REPOSITORY_TOOLS"],
+ verbose=True,
+ allow_delegation=False,
+ )
+
+ explore_task = _crewai()["Task"](
+ description=dedent(f"""
+ Repository: {repo_full_name}
+ Active Ref (branch/tag/SHA): {active_ref}
+
+ Your mission is to THOROUGHLY explore this repository and document its current state.
+ You MUST use your tools to gather the following information:
+
+ 1. Call "Get repository summary" - to get overall statistics
+ 2. Call "List all files in repository" - to see EVERY file that exists
+ 3. Call "Get directory structure" - to understand the organization
+ 4. If there are key files (README.md, package.json, etc.), read them
+
+ CRITICAL: You must ACTUALLY CALL these tools. Do not make assumptions.
+
+ After exploring, provide a detailed report in this EXACT format:
+
+ REPOSITORY EXPLORATION REPORT
+ =============================
+
+ Files Found: [list all file paths you discovered]
+
+ Key Files: [list important files like README.md, .gitignore, etc.]
+
+ Directory Structure: [describe the folder organization]
+
+ File Types: [count files by extension]
+
+ Your report MUST be based on ACTUAL tool calls, not assumptions.
+ """),
+ expected_output="A detailed exploration report listing ALL files found in the repository",
+ agent=explorer,
+ )
+
+ explore_crew = _crewai()["Crew"](
+ agents=[explorer],
+ tasks=[explore_task],
+ process=_crewai()["Process"].sequential,
+ verbose=True,
+ )
+
+ def _explore():
+ return explore_crew.kickoff()
+
+ # Propagate context to thread for CrewAI execution
+ ctx = contextvars.copy_context()
+ exploration_result = await _guarded_agent_call(ctx, _explore, label="explore_repo")
+
+ exploration_report = exploration_result.raw if hasattr(exploration_result, "raw") else str(exploration_result)
+ logger.info("[GitPilot] Exploration complete. Report length: %s chars", len(exploration_report))
+
+ # PHASE 2: Plan creation based on exploration
+ logger.info("[GitPilot] Phase 2: Creating plan based on repository exploration (ref=%s)...", active_ref)
+
+ # Build planner backstory with optional context pack injection
+ _planner_backstory = (
+ "You are an experienced staff engineer who creates plans based on FACTS, not assumptions. "
+ "You have received a complete exploration report of the repository. "
+ "You ONLY create plans for files that actually exist in the exploration report. "
+ "You are extremely careful with DELETE actions - you verify the file exists "
+ "and that it's not on the 'keep' list before marking it for deletion. "
+ "When users ask to delete files, you delete individual FILES, not directory names. "
+ "When users ask to ANALYZE files and GENERATE new content (code, docs, examples), "
+ "you create plans that READ existing files and CREATE new files with generated content. "
+ "You understand that 'analyze X and create Y' means: use tools to read X, then plan to CREATE Y. "
+ "You never make changes yourself, only create detailed plans."
+ )
+ if context_pack:
+ _planner_backstory += "\n\n" + context_pack
+
+ planner = _crewai()["Agent"](
+ role="Repository Refactor Planner",
+ goal=(
+ "Design safe, step-by-step refactor plans based on ACTUAL repository state "
+ "discovered during exploration"
+ ),
+ backstory=_planner_backstory,
+ llm=llm,
+ tools=_tools()["REPOSITORY_TOOLS"],
+ verbose=True,
+ allow_delegation=False,
+ )
+
+ plan_task = _crewai()["Task"](
+ description=dedent(f"""
+ User goal: {{goal}}
+ Repository: {repo_full_name}
+ Active Ref (branch/tag/SHA): {active_ref}
+
+ REPOSITORY EXPLORATION REPORT (CRITICAL CONTEXT):
+ ==================================================
+ {exploration_report}
+ ==================================================
+
+ Based on the ACTUAL files listed in the exploration report above, create a plan.
+
+ CRITICAL RULES FOR ANALYSIS AND GENERATION TASKS:
+ - If the goal mentions "analyze" or "generate" or "create examples/demos", you MUST create NEW files
+ - When the user asks to "analyze X and create Y":
+ * Step 1: Use "Read file content" tool to analyze existing files (if needed)
+ * Step 2: Plan CREATE actions for new files (e.g., demo.py, example.py, tutorial.md)
+ - NEW files can include: Python scripts, examples, demos, tutorials, documentation
+ - Examples of analysis tasks that should CREATE files:
+ * "analyze README and generate Python code" → CREATE: demo.py, example.py
+ * "create demo based on documentation" → CREATE: demo.py, test_example.py
+ * "generate tutorial from existing code" → CREATE: tutorial.md, examples/
+ - IMPORTANT: Empty plans (steps: []) are ONLY acceptable if the goal is purely informational
+ - If the user wants something generated/created, you MUST include CREATE actions
+
+ CRITICAL RULES FOR DELETION SCENARIOS:
+ - If the goal mentions "delete files" or "keep only", you MUST identify which files to DELETE
+ - For EACH file in the exploration report:
+ * If it should be KEPT (e.g., README.md if goal says "keep README.md"), do NOT include it in the plan
+ * If it should be DELETED (e.g., all other files), mark it with action "DELETE"
+ - ONLY delete files that actually exist (check the exploration report)
+ - NEVER delete files that the user wants to keep
+ - Be explicit: if the goal is "delete all files except README.md", then:
+ * README.md should NOT appear in your plan (it's being kept)
+ * ALL other files from the exploration report should have action "DELETE"
+
+ CRITICAL RULES FOR VERIFICATION:
+ - ONLY include files that appear in the exploration report
+ - For "CREATE" actions: file must NOT be in the exploration report
+ - For "MODIFY" or "DELETE" actions: file MUST be in the exploration report
+ - If you're unsure, you can still call your tools to double-check
+
+ Your FINAL ANSWER must be a single JSON object that matches exactly this schema:
+
+ {{
+ "goal": "string describing the goal",
+ "summary": "string with overall plan summary",
+ "steps": [
+ {{
+ "step_number": 1,
+ "title": "Step title",
+ "description": "What this step does",
+ "files": [
+ {{"path": "file/path.py", "action": "CREATE"}},
+ {{"path": "another/file.py", "action": "MODIFY"}},
+ {{"path": "old/file.py", "action": "DELETE"}},
+ {{"path": "README.md", "action": "READ"}}
+ ],
+ "risks": "Optional risk description or null"
+ }}
+ ]
+ }}
+
+ CRITICAL JSON RULES:
+ - Output MUST be valid JSON.
+ - STRICTLY NO COMMENTS allowed (no // or #).
+ - Double quotes around all keys and string values.
+ - No trailing commas.
+ - "action" MUST be exactly one of: "CREATE", "MODIFY", "DELETE", "READ"
+ - "step_number" MUST be an integer starting from 1
+ - "risks" can be either a string or null (the JSON null value, without quotes)
+ - Do NOT wrap the JSON in markdown code fences
+ - Do NOT add any explanation before or after the JSON
+ - The ENTIRE response MUST be ONLY the JSON object, starting with '{{' and ending with '}}'
+ """),
+ expected_output=dedent("""
+ A single valid JSON object matching the PlanResult schema:
+ - goal: string
+ - summary: string
+ - steps: array of objects, each with:
+ - step_number: integer
+ - title: string
+ - description: string
+ - files: array of { "path": string, "action": "CREATE" | "MODIFY" | "DELETE" | "READ" }
+ - risks: string or null
+ The response must contain ONLY pure JSON (no markdown, no prose, no code fences, NO COMMENTS).
+ """),
+ agent=planner,
+ output_pydantic=PlanResult,
+ )
+
+ plan_crew = _crewai()["Crew"](
+ agents=[planner],
+ tasks=[plan_task],
+ process=_crewai()["Process"].sequential,
+ verbose=True,
+ )
+
+ def _plan():
+ return plan_crew.kickoff(inputs={"goal": goal})
+
+ ctx = contextvars.copy_context()
+ result = await _guarded_agent_call(ctx, _plan, label="generate_plan")
+
+ if hasattr(result, "pydantic") and result.pydantic:
+ plan = result.pydantic
+ logger.info("[GitPilot] Plan created with %s steps (ref=%s)", len(plan.steps), active_ref)
+ return plan
+
+ logger.warning("[GitPilot] Unexpected planning result type: %r", type(result))
+ return result
+
+
+# ============================================================================
+# Lite Mode — Simplified single-agent for small LLMs (< 7B parameters)
+# ============================================================================
+
+# Regex-based intent classifier — no LLM needed, runs instantly.
+_QUESTION_PATTERNS = [
+ r"\b(what|which|where|how|why|who|when|does|is|are|can|could|tell|show|list|describe|explain|summarize|overview)\b",
+ r"\?$",
+]
+_ACTION_PATTERNS = [
+ r"\b(create|add|delete|remove|modify|change|update|rename|fix|write|implement|refactor|move|generate code)\b",
+]
+
+
+def _classify_lite_intent(goal: str) -> str:
+ """Classify user intent as 'question' or 'action' using regex only."""
+ import re as _re
+ goal_lower = goal.strip().lower()
+
+ action_score = sum(1 for p in _ACTION_PATTERNS if _re.search(p, goal_lower))
+ question_score = sum(1 for p in _QUESTION_PATTERNS if _re.search(p, goal_lower))
+
+ # Action words dominate — user wants to change something
+ if action_score > 0 and action_score >= question_score:
+ return "action"
+ return "question"
+
+
async def _lite_prefetch_context(
    owner: str,
    repo: str,
    token: str | None,
    branch: str,
    key_file_limit: int = 3,
) -> str:
    """Pre-fetch repo context programmatically and format as plain text.

    Returns a string ready to inject into the LLM prompt. No LLM
    tool-calling involved — everything comes from the GitHub API.

    Args:
        owner: Repository owner/organisation login.
        repo: Repository name.
        token: Optional GitHub token for the API calls.
        branch: Git ref to read from.
        key_file_limit: Max number of key files (README, etc.) whose content
            is inlined into the prompt.
    """
    from .github_api import get_file as _get_file

    # Caps chosen to keep the prompt small enough for ~1.5B-parameter models.
    max_listed_files = 80
    snippet_chars = 1500

    ctx = await _tools()["get_repository_context_summary"](owner, repo, token=token, branch=branch)

    all_files = ctx.get("all_files", [])
    extensions = ctx.get("extensions", {})
    directories = ctx.get("directories", set())
    key_files = ctx.get("key_files", [])

    parts = []

    # File listing (capped to stay within small-model context)
    if all_files:
        shown = all_files[:max_listed_files]
        file_lines = "\n".join(f" {f}" for f in shown)
        parts.append(f"Files ({len(all_files)} total):\n{file_lines}")
        if len(all_files) > max_listed_files:
            parts.append(f" ... and {len(all_files) - max_listed_files} more")
    else:
        parts.append("Files: (none found)")

    # Extensions summary (most common first)
    if extensions:
        ext_str = ", ".join(f"{ext} ({n})" for ext, n in sorted(extensions.items(), key=lambda x: -x[1])[:10])
        parts.append(f"File types: {ext_str}")

    # Top-level directories
    if directories:
        dir_list = sorted(directories)[:15]
        parts.append(f"Top directories: {', '.join(dir_list)}")

    # Read content of key files (README, etc.) — give LLM real context.
    for kf in key_files[:key_file_limit]:
        try:
            content = await _get_file(owner, repo, kf, token=token, ref=branch)
        except Exception:
            continue  # File unreadable — skip silently
        # Guard against None/empty content: the original took len(content)
        # unconditionally, which raised TypeError for None (silently
        # swallowed by the broad except around the whole body).
        if not content:
            continue
        # Truncate to keep the prompt small for 1.5B models.
        parts.append(f"--- {kf} ---\n{content[:snippet_chars]}")
        if len(content) > snippet_chars:
            parts.append(f" [truncated, {len(content)} chars total]")

    return "\n\n".join(parts)
+
+
async def generate_plan_lite(
    goal: str,
    repo_full_name: str,
    token: str | None = None,
    branch_name: str | None = None,
) -> PlanResult:
    """Lite Mode planning: smart intent detection + single agent + pre-fetched context.

    The topology is:
    1. Classify intent (regex — instant, no LLM)
    2. Pre-fetch repo context from GitHub API (no LLM tool-calling)
    3. Build a short, focused prompt based on intent type
    4. Single LLM call → parse response

    For QUESTION intents: LLM answers directly, plan has 0 file actions.
    For ACTION intents: LLM lists file changes, plan has file actions.

    Args:
        goal: Free-form user request.
        repo_full_name: Target repository as ``"owner/repo"``.
        token: Optional GitHub token for API calls.
        branch_name: Git ref to plan against; defaults to ``"HEAD"``.

    Returns:
        A ``PlanResult``; for questions (or unparseable answers) its steps
        carry no file actions.
    """
    llm = _build_llm()

    owner, repo = _split_repo_full_name(repo_full_name)
    active_ref = branch_name or "HEAD"
    _tools()["set_repo_context"](owner, repo, token=token, branch=active_ref)

    intent = _classify_lite_intent(goal)
    logger.info("[GitPilot Lite] Intent: %s | Goal: %s", intent, goal[:80])

    # PRE-FETCH: real data from GitHub API
    logger.info("[GitPilot Lite] Pre-fetching context for %s (ref=%s)...", repo_full_name, active_ref)
    context_text = await _lite_prefetch_context(owner, repo, token, active_ref)

    # BUILD PROMPT based on intent
    if intent == "question":
        lite_prompt = (
            f"Repository: {repo_full_name} (branch: {active_ref})\n\n"
            f"{context_text}\n\n"
            f"Question: {goal}\n\n"
            f"Answer the question based on the repository information above. "
            f"Be specific — mention actual file names and directories you can see."
        )
        expected = "A direct answer to the user's question about the repository"
    else:
        lite_prompt = (
            f"Repository: {repo_full_name} (branch: {active_ref})\n\n"
            f"{context_text}\n\n"
            f"Task: {goal}\n\n"
            f"You MUST respond with ONLY a list of file actions. One per line.\n"
            f"Format: ACTION filepath\n"
            f"ACTION is one of: CREATE, MODIFY, DELETE\n\n"
            f"Examples:\n"
            f"DELETE demo.py\n"
            f"DELETE example.py\n"
            f"CREATE src/main.py\n"
            f"MODIFY README.md\n\n"
            f"Rules:\n"
            f"- Only use MODIFY or DELETE for files that EXIST in the repository.\n"
            f"- Only use CREATE for NEW files that do not exist yet.\n"
            f"- Do NOT add explanations. ONLY output ACTION lines.\n"
            f"- Output NOTHING else — no comments, no code, no explanations."
        )
        expected = "ONLY action lines like: DELETE demo.py"

    lite_agent = _crewai()["Agent"](
        role="GitPilot Lite",
        goal="Help the user with their repository",
        backstory="You are a helpful coding assistant. Be concise.",
        llm=llm,
        tools=[],
        verbose=True,
        allow_delegation=False,
    )

    lite_task = _crewai()["Task"](
        description=lite_prompt,
        expected_output=expected,
        agent=lite_agent,
    )

    lite_crew = _crewai()["Crew"](
        agents=[lite_agent],
        tasks=[lite_task],
        process=_crewai()["Process"].sequential,
        verbose=True,
    )

    def _run_lite():
        return lite_crew.kickoff()

    ctx = contextvars.copy_context()
    result = await _guarded_agent_call(ctx, _run_lite, label="lite_mode")

    raw_text = result.raw if hasattr(result, "raw") else str(result)
    logger.info("[GitPilot Lite] Response (%d chars, intent=%s)", len(raw_text), intent)

    # PARSE RESPONSE based on intent
    if intent == "question":
        # Pure Q&A — no file actions, just wrap the answer.
        # summary = full answer text (shown in the "Answer" section of the chat)
        return PlanResult(
            goal=goal,
            summary=raw_text,
            steps=[PlanStep(
                step_number=1,
                title="Answer",
                description=raw_text,
                files=[],
                risks=None,
            )],
        )

    # Action intent — parse ACTION lines
    import re as _re
    action_pattern = _re.compile(r"^(CREATE|MODIFY|DELETE)\s+(\S+)", _re.MULTILINE)
    matches = action_pattern.findall(raw_text)

    # Strip raw ACTION lines from description to get the human-readable parts
    clean_description = _re.sub(
        r"^(CREATE|MODIFY|DELETE)\s+\S+.*$", "", raw_text, flags=_re.MULTILINE,
    ).strip()

    # Get actual repo files for validation
    repo_ctx = await _tools()["get_repository_context_summary"](owner, repo, token=token, branch=active_ref)
    real_files = set(repo_ctx.get("all_files", []))

    # ── Fuzzy fallback: if the LLM didn't use ACTION format, try to infer ──
    if not matches and real_files:
        logger.info("[GitPilot Lite] No ACTION lines found — trying fuzzy extraction")
        goal_lower = goal.strip().lower()
        response_lower = raw_text.lower()

        # Pattern: "delete all files except X"
        except_match = _re.search(
            r"(?:delete|remove)\s+(?:all\s+)?(?:files?\s+)?(?:except|but|besides|other\s+than)\s+(.+)",
            goal_lower,
        )
        if except_match:
            keep_raw = except_match.group(1).strip()
            keep_files = {f.strip().rstrip(",.") for f in _re.split(r"[,\s]+and\s+|,\s*|\s+", keep_raw) if f.strip()}
            for f in real_files:
                fname = f.rsplit("/", 1)[-1]
                # keep_files comes from the LOWERCASED goal, while repo
                # paths keep original case — compare case-insensitively so
                # "keep README.md" actually protects README.md.
                if f.lower() not in keep_files and fname.lower() not in keep_files:
                    matches.append(("DELETE", f))
            if matches:
                logger.info("[GitPilot Lite] Fuzzy: keep=%s, delete=%d files", keep_files, len(matches))

        # Pattern: LLM mentions specific filenames with delete/remove verbs
        if not matches:
            # Word-boundary match for "rm" — a bare substring check would
            # false-positive on words like "format" or "confirm".
            wants_delete = any(
                v in response_lower or v in goal_lower
                for v in ("delete", "remove", "git rm")
            ) or bool(_re.search(r"\brm\b", response_lower)) or bool(_re.search(r"\brm\b", goal_lower))
            if wants_delete:
                # Single pass over files (the old per-verb loop could append
                # the same DELETE up to four times).
                for f in real_files:
                    if f.lower() in response_lower or f.lower() in goal_lower:
                        matches.append(("DELETE", f))

        # Pattern: LLM mentions files with create/add verbs
        if not matches:
            create_match = _re.findall(r"(?:create|add|write|generate)\s+(\S+\.(?:py|js|ts|md|txt|yaml|json|sh))", goal_lower)
            for path in create_match:
                if path not in real_files:
                    matches.append(("CREATE", path))

    # Validate each parsed action against the actual repository contents,
    # deduplicating (action, path) pairs from the fuzzy fallback.
    valid_files = []
    seen: set[tuple[str, str]] = set()
    for action, path in matches:
        path = path.strip().rstrip(",-:")
        if (action, path) in seen:
            continue
        seen.add((action, path))
        if action in ("MODIFY", "DELETE"):
            if path in real_files:
                valid_files.append(PlanFile(path=path, action=action))
            else:
                logger.warning("[GitPilot Lite] Skipping %s %s — file not in repo", action, path)
        elif action == "CREATE":
            if path not in real_files:
                valid_files.append(PlanFile(path=path, action=action))

    steps = []
    if valid_files:
        # Build a clean summary: "Create 2 files, modify 1 file"
        counts = {}
        for f in valid_files:
            counts[f.action] = counts.get(f.action, 0) + 1
        action_labels = {"CREATE": "create", "MODIFY": "modify", "DELETE": "delete"}
        summary_parts = []
        for act in ("CREATE", "MODIFY", "DELETE"):
            n = counts.get(act, 0)
            if n > 0:
                label = action_labels[act]
                summary_parts.append(f"{label} {n} file{'s' if n > 1 else ''}")
        clean_summary = "Plan: " + ", ".join(summary_parts) + "."

        # Use the clean description if available, otherwise a generic one
        step_desc = clean_description if clean_description else f"Apply changes to {len(valid_files)} file(s) in {repo_full_name}."

        steps.append(PlanStep(
            step_number=1,
            title="Execute changes",
            description=step_desc,
            files=valid_files,
            risks=None,
        ))
        return PlanResult(goal=goal, summary=clean_summary, steps=steps)

    # No valid files after validation — the LLM hallucinated paths.
    # Return as a Q&A-style answer (no Action Plan section shown in UI).
    fallback_text = clean_description if clean_description else raw_text
    # Strip any remaining ACTION-like artifacts
    fallback_text = _re.sub(r"\bACTION\b", "", fallback_text).strip()
    if not fallback_text:
        fallback_text = (
            f"I analyzed {repo_full_name} but couldn't determine specific file "
            f"changes for your request. The repository has {len(real_files)} file(s). "
            f"Try being more specific about what you'd like to create or modify."
        )

    steps.append(PlanStep(
        step_number=1,
        title="Analysis",
        description=fallback_text,
        files=[],
        risks=None,
    ))
    return PlanResult(goal=goal, summary=fallback_text, steps=steps)
+
+
async def execute_plan_lite(
    plan: PlanResult,
    repo_full_name: str,
    token: str | None = None,
    branch_name: str | None = None,
) -> dict:
    """Lite Mode execution: single agent generates file content with simplified prompts.

    Unlike the standard execute_plan, the Lite version:
    - Uses a single short prompt per file (no CRITICAL INSTRUCTIONS blocks)
    - Does not require the LLM to call tools
    - Pre-reads existing file content and injects it into the prompt

    Args:
        plan: Approved ``PlanResult`` whose file actions are applied.
        repo_full_name: Target repository as ``"owner/repo"``.
        token: Optional GitHub token for API calls.
        branch_name: Branch to commit to; auto-generated from the goal plus
            a timestamp when ``None``.

    Returns:
        Execution summary dict: status, branch, per-step log, ``lite_mode=True``.
    """
    # NOTE: unused `get_repo` import removed; `delete_file` hoisted out of
    # the per-file loop.
    from .github_api import get_file, put_file, create_branch, delete_file
    import re
    import time

    owner, repo = _split_repo_full_name(repo_full_name)
    execution_steps: list[dict] = []
    llm = _build_llm()

    if branch_name is None:
        # Derive a readable, unique branch name from the goal.
        sanitized = re.sub(r"[^a-z0-9-]+", "-", plan.goal.lower())
        sanitized = sanitized[:40].strip("-")
        timestamp = str(int(time.time()))[-6:]
        branch_name = f"gitpilot-{sanitized}-{timestamp}"

    try:
        await create_branch(owner, repo, branch_name, from_ref="HEAD", token=token)
    except HTTPException:
        pass  # Branch may already exist

    _tools()["set_repo_context"](owner, repo, token=token, branch=branch_name)

    def _strip_fences(text: str) -> str:
        """Drop a wrapping markdown ``` code fence the LLM may have added."""
        text = text.strip()
        if not text.startswith("```"):
            return text
        lines = text.split("\n")
        if lines[-1].strip() == "```":
            return "\n".join(lines[1:-1])
        return "\n".join(lines[1:])

    async def _generate(prompt: str, expected: str, agent_goal: str, label: str) -> str:
        """Run one tool-free single-agent task and return fence-stripped output."""
        writer = _crewai()["Agent"](
            role="Code Writer",
            goal=agent_goal,
            backstory="You write clean, working code.",
            llm=llm, tools=[], verbose=False, allow_delegation=False,
        )
        task = _crewai()["Task"](description=prompt, expected_output=expected, agent=writer)
        crew = _crewai()["Crew"](
            agents=[writer], tasks=[task],
            process=_crewai()["Process"].sequential, verbose=False,
        )

        def _kick():
            r = crew.kickoff()
            return r.raw if hasattr(r, "raw") else str(r)

        ctx = contextvars.copy_context()
        raw = await _guarded_agent_call(ctx, _kick, label=label)
        return _strip_fences(raw)

    for step in plan.steps:
        step_summary = f"Step {step.step_number}: {step.title}"

        for file in step.files:
            try:
                if file.action == "CREATE":
                    # SIMPLIFIED PROMPT for small LLMs
                    create_prompt = (
                        f"Write the content for a new file: {file.path}\n"
                        f"Goal: {plan.goal}\n"
                        f"Context: {step.description[:300]}\n\n"
                        f"Return ONLY the file content, nothing else."
                    )
                    content = await _generate(
                        create_prompt, f"Content for {file.path}",
                        "Write file content", "create_file",
                    )
                    await put_file(owner, repo, file.path, content,
                                   f"GitPilot Lite: Create {file.path}", token=token, branch=branch_name)
                    step_summary += f"\n + Created {file.path}"

                elif file.action == "MODIFY":
                    try:
                        existing = await get_file(owner, repo, file.path, token=token, ref=branch_name)
                        # Inject (truncated) current content so the model
                        # does not need tool-calling to see the file.
                        modify_prompt = (
                            f"Modify this file: {file.path}\n"
                            f"Goal: {plan.goal}\n"
                            f"What to change: {step.description[:300]}\n\n"
                            f"Current content:\n{existing[:2000]}\n\n"
                            f"Return the complete modified file content, nothing else."
                        )
                        modified = await _generate(
                            modify_prompt, f"Modified {file.path}",
                            "Modify file content", "modify_file",
                        )
                        await put_file(owner, repo, file.path, modified,
                                       f"GitPilot Lite: Modify {file.path}", token=token, branch=branch_name)
                        step_summary += f"\n ~ Modified {file.path}"
                    except Exception as e:
                        logger.exception("Lite: Failed to modify %s: %s", file.path, e)
                        step_summary += f"\n ! Failed to modify {file.path}: {e}"

                elif file.action == "DELETE":
                    try:
                        await delete_file(owner, repo, file.path,
                                          f"GitPilot Lite: Delete {file.path}", token=token, branch=branch_name)
                        step_summary += f"\n - Deleted {file.path}"
                    except Exception as e:
                        logger.exception("Lite: Failed to delete %s: %s", file.path, e)
                        step_summary += f"\n ! Failed to delete {file.path}: {e}"

                elif file.action == "READ":
                    step_summary += f"\n i Inspected {file.path}"

            except Exception as e:
                # Per-file failures are logged but never abort the plan.
                logger.exception("Lite: Error processing %s: %s", file.path, e)
                step_summary += f"\n ! Error: {file.path}: {e}"

        execution_steps.append({"step_number": step.step_number, "summary": step_summary})

    return {
        "status": "completed",
        "message": f"Lite Mode: executed {len(plan.steps)} steps on {repo_full_name} (branch '{branch_name}')",
        "branch": branch_name,
        "branch_url": f"https://github.com/{repo_full_name}/tree/{branch_name}",
        "executionLog": {"steps": execution_steps},
        "lite_mode": True,
    }
+
+
async def execute_plan(
    plan: PlanResult,
    repo_full_name: str,
    token: str | None = None,
    branch_name: str | None = None,
) -> dict:
    """Execute the approved plan by applying changes to the GitHub repository.

    Every file action in each plan step is applied on a feature branch:
    CREATE/MODIFY content is produced by a tool-equipped CrewAI agent,
    DELETE goes straight to the GitHub API, and READ is only logged.
    Per-file failures are recorded in the execution log without aborting
    the remaining actions.

    Args:
        plan: Approved ``PlanResult`` with steps and file actions.
        repo_full_name: Target repository as ``"owner/repo"``.
        token: Optional GitHub token used for all API calls.
        branch_name: Branch to commit to; auto-generated from the goal and
            a timestamp when ``None``.

    Returns:
        dict with ``status``, ``message``, ``branch``, ``branch_url`` and
        ``executionLog`` (one summary entry per step).
    """
    # NOTE(review): `get_repo` is imported but never used below — confirm before removing.
    from .github_api import get_file, put_file, create_branch, get_repo
    import re
    import time

    owner, repo = _split_repo_full_name(repo_full_name)
    execution_steps: list[dict] = []
    llm = _build_llm()

    if branch_name is None:
        # Derive a readable branch name from the goal; the timestamp suffix
        # keeps repeated runs of the same goal from colliding.
        sanitized = re.sub(r"[^a-z0-9-]+", "-", plan.goal.lower())
        sanitized = sanitized[:40].strip("-")
        timestamp = str(int(time.time()))[-6:]
        branch_name = f"gitpilot-{sanitized}-{timestamp}"

    try:
        logger.info("[GitPilot] Creating feature branch: %s", branch_name)
        await create_branch(owner, repo, branch_name, from_ref="HEAD", token=token)
        logger.info("[GitPilot] Branch created successfully: %s", branch_name)
    except HTTPException as e:
        # Non-fatal: the branch may already exist from a previous run, in
        # which case we simply commit onto it.
        logger.warning(
            "[GitPilot] Branch %s already exists or creation failed: %s. Attempting to use existing branch.",
            branch_name,
            e.detail,
        )

    # CRITICAL: ensure tools read from the ACTIVE execution branch
    _tools()["set_repo_context"](owner, repo, token=token, branch=branch_name)

    # Single shared agent for all CREATE/MODIFY tasks in this plan; it can
    # explore the repository via REPOSITORY_TOOLS before writing content.
    code_writer = _crewai()["Agent"](
        role="Expert Code Writer",
        goal="Generate high-quality, production-ready code and documentation based on requirements.",
        backstory=(
            "You are a senior software engineer with expertise in multiple programming languages. "
            "You write clean, well-documented, and functional code. "
            "You understand context and generate appropriate content for each file type. "
            "For documentation files (README.md, docs, etc.), you write clear, comprehensive content. "
            "For code files, you follow best practices and include proper comments. "
            "IMPORTANT: You ALWAYS use repository exploration tools before creating new content. "
            "When asked to create demos/examples/tutorials, you first READ the existing files to understand "
            "the project, then generate content that is relevant and accurate. "
            "You never create generic examples - you create content specific to THIS repository."
        ),
        llm=llm,
        tools=_tools()["REPOSITORY_TOOLS"],
        verbose=True,
        allow_delegation=False,
    )

    for step in plan.steps:
        # One human-readable summary line per step, extended per file below.
        step_summary = f"Step {step.step_number}: {step.title}"

        for file in step.files:
            try:
                if file.action == "CREATE":
                    create_task = _crewai()["Task"](
                        description=(
                            f"Generate complete content for a new file: {file.path}\n\n"
                            f"Overall Goal: {plan.goal}\n"
                            f"Step Context: {step.description}\n\n"
                            "CRITICAL INSTRUCTIONS:\n"
                            "- You have access to repository exploration tools - USE THEM!\n"
                            "- If the goal mentions 'analyze' or 'based on', first read the relevant files:\n"
                            " * Use 'Read file content' to read existing files (README.md, source code, etc.)\n"
                            " * Use 'List all files in repository' to see what files exist\n"
                            "- Generate content that is INFORMED by the actual repository content\n"
                            "- If creating a demo/example, make it relevant to the actual project\n"
                            "- If creating documentation, reference actual files and code in the repository\n\n"
                            "Requirements:\n"
                            f"- Create production-ready content appropriate for {file.path}\n"
                            "- If it's a documentation file (.md, .txt, .rst), write comprehensive, well-structured documentation\n"
                            "- If it's a code file, include proper imports, comments, and follow best practices\n"
                            "- If it's a configuration file, include sensible defaults and comments\n"
                            "- Make the content complete and ready to use\n"
                            "- Do NOT include placeholder comments like 'TODO' or 'IMPLEMENT THIS'\n"
                            "- The content should be fully functional and informative\n\n"
                            "Return ONLY the file content, no explanations or markdown code blocks."
                        ),
                        expected_output=f"Complete, production-ready content for {file.path}",
                        agent=code_writer,
                    )

                    def _create():
                        # Runs synchronously inside _guarded_agent_call's context.
                        crew = _crewai()["Crew"](
                            agents=[code_writer],
                            tasks=[create_task],
                            process=_crewai()["Process"].sequential,
                            verbose=False,
                        )
                        result = crew.kickoff()
                        if hasattr(result, "raw"):
                            return result.raw
                        return str(result)

                    ctx = contextvars.copy_context()
                    content = await _guarded_agent_call(ctx, _create, label="exec_create_file")

                    # Strip a wrapping markdown code fence if the model added
                    # one despite the instructions.
                    content = content.strip()
                    if content.startswith("```"):
                        lines = content.split("\n")
                        if lines[-1].strip() == "```":
                            content = "\n".join(lines[1:-1])
                        else:
                            content = "\n".join(lines[1:])

                    await put_file(
                        owner,
                        repo,
                        file.path,
                        content,
                        f"GitPilot: Create {file.path} - {step.title}",
                        token=token,
                        branch=branch_name,
                    )
                    step_summary += f"\n ✓ Created {file.path}"

                elif file.action == "MODIFY":
                    try:
                        # Read the current content from the execution branch so
                        # the agent modifies the latest version.
                        existing_content = await get_file(
                            owner, repo, file.path, token=token, ref=branch_name
                        )

                        modify_task = _crewai()["Task"](
                            description=(
                                f"Modify the existing file: {file.path}\n\n"
                                f"Overall Goal: {plan.goal}\n"
                                f"Step Context: {step.description}\n\n"
                                f"Current File Content:\n"
                                f"---\n{existing_content}\n---\n\n"
                                "Requirements:\n"
                                "- Make the changes described in the step context\n"
                                "- Preserve the existing structure and format\n"
                                "- For documentation: update or add relevant sections\n"
                                "- For code: add/modify functions, imports, or logic as needed\n"
                                "- Ensure the result is complete and functional\n"
                                "- Do NOT just add comments - make real, substantive changes\n\n"
                                "Return ONLY the complete modified file content, no explanations."
                            ),
                            expected_output=f"Complete, modified content for {file.path}",
                            agent=code_writer,
                        )

                        def _modify():
                            # Runs synchronously inside _guarded_agent_call's context.
                            crew = _crewai()["Crew"](
                                agents=[code_writer],
                                tasks=[modify_task],
                                process=_crewai()["Process"].sequential,
                                verbose=False,
                            )
                            result = crew.kickoff()
                            if hasattr(result, "raw"):
                                return result.raw
                            return str(result)

                        ctx = contextvars.copy_context()
                        modified_content = await _guarded_agent_call(ctx, _modify, label="exec_modify_file")

                        # Strip a wrapping markdown code fence if present.
                        modified_content = modified_content.strip()
                        if modified_content.startswith("```"):
                            lines = modified_content.split("\n")
                            if lines[-1].strip() == "```":
                                modified_content = "\n".join(lines[1:-1])
                            else:
                                modified_content = "\n".join(lines[1:])

                        await put_file(
                            owner,
                            repo,
                            file.path,
                            modified_content,
                            f"GitPilot: Modify {file.path} - {step.title}",
                            token=token,
                            branch=branch_name,
                        )
                        step_summary += f"\n ✓ Modified {file.path}"
                    except Exception as e:  # noqa: BLE001
                        logger.exception(
                            "Failed to modify file %s in step %s: %s",
                            file.path,
                            step.step_number,
                            e,
                        )
                        step_summary += f"\n ✗ Failed to modify {file.path}: {str(e)}"

                elif file.action == "DELETE":
                    from .github_api import delete_file

                    try:
                        await delete_file(
                            owner,
                            repo,
                            file.path,
                            f"GitPilot: Delete {file.path} - {step.title}",
                            token=token,
                            branch=branch_name,
                        )
                        step_summary += f"\n ✓ Deleted {file.path}"
                    except Exception as e:  # noqa: BLE001
                        logger.exception(
                            "Failed to delete file %s in step %s: %s",
                            file.path,
                            step.step_number,
                            e,
                        )
                        step_summary += f"\n ✗ Failed to delete {file.path}: {str(e)}"

                elif file.action == "READ":
                    # READ actions have no side effects — only note the inspection.
                    step_summary += f"\n ℹ️ READ-only: inspected {file.path}"

            except Exception as e:  # noqa: BLE001
                # Per-file failures are logged but never abort the plan.
                logger.exception(
                    "Error processing file %s in step %s: %s",
                    file.path,
                    step.step_number,
                    e,
                )
                step_summary += f"\n ✗ Error processing {file.path}: {str(e)}"

        execution_steps.append({"step_number": step.step_number, "summary": step_summary})

    return {
        "status": "completed",
        "message": f"Successfully executed {len(plan.steps)} steps on {repo_full_name} in branch '{branch_name}'",
        "branch": branch_name,
        "branch_url": f"https://github.com/{repo_full_name}/tree/{branch_name}",
        "executionLog": {"steps": execution_steps},
    }
+
+
+# ============================================================================
+# New Agent Builders (v2 upgrade)
+# ============================================================================
+
def _build_issue_agent(llm) -> Agent:
    """Build the CrewAI agent that manages GitHub issues (uses ISSUE_TOOLS)."""
    return _crewai()["Agent"](
        role="GitHub Issue Management Specialist",
        goal="Create, modify, and manage GitHub issues with proper metadata and relationships",
        backstory=(
            "You are an expert in GitHub issue management. You can create new issues "
            "with detailed descriptions, modify existing issues and their metadata, "
            "manage labels, milestones, and assignees, and add comments. "
            "You ensure issues are well-organised and provide clear status updates. "
            "When creating issues you always include a concise title and a structured body."
        ),
        llm=llm,
        tools=_tools()["ISSUE_TOOLS"],
        verbose=True,
        allow_delegation=False,
    )
+
+
def _build_pr_agent(llm) -> Agent:
    """Build the CrewAI agent for branch/PR workflows (PR_TOOLS + WRITE_TOOLS)."""
    return _crewai()["Agent"](
        role="Pull Request Management Specialist",
        goal="Create branches, commit changes, and manage pull requests",
        backstory=(
            "You are skilled in pull request workflows. You can create branches, "
            "create PRs from feature branches, list open PRs, inspect changed files, "
            "add reviews, and merge PRs using the appropriate strategy. "
            "You always verify the source and target branches before acting."
        ),
        llm=llm,
        tools=_tools()["PR_TOOLS"] + _tools()["WRITE_TOOLS"],
        verbose=True,
        allow_delegation=False,
    )
+
+
def _build_search_agent(llm) -> Agent:
    """Build the CrewAI agent for GitHub search (SEARCH_TOOLS + REPOSITORY_TOOLS)."""
    return _crewai()["Agent"](
        role="Search & Discovery Specialist",
        goal="Find code, repositories, issues, and users across GitHub",
        backstory=(
            "You are an expert at finding resources on GitHub. You can search for "
            "code by keywords, symbols, or patterns within a repository or globally. "
            "You can find users and organisations, discover repositories by topic, "
            "and locate issues or PRs matching specific criteria. "
            "You present results in a clear, structured format."
        ),
        llm=llm,
        tools=_tools()["SEARCH_TOOLS"] + _tools()["REPOSITORY_TOOLS"],
        verbose=True,
        allow_delegation=False,
    )
+
+
def _build_code_review_agent(llm) -> Agent:
    """Build the CrewAI agent for code review (repo + PR + search tools)."""
    return _crewai()["Agent"](
        role="Code Review & Analysis Specialist",
        goal="Review code quality, identify patterns, and suggest improvements",
        backstory=(
            "You are an experienced code reviewer who analyses code for quality, "
            "security issues, and performance problems. You inspect files in the "
            "repository, read their contents, and provide constructive feedback. "
            "For pull requests you examine the changed files and produce a detailed "
            "review with actionable suggestions."
        ),
        llm=llm,
        tools=_tools()["REPOSITORY_TOOLS"] + _tools()["PR_TOOLS"] + _tools()["SEARCH_TOOLS"],
        verbose=True,
        allow_delegation=False,
    )
+
+
def _build_learning_agent(llm) -> Agent:
    """Build the CrewAI agent that answers GitHub how-to/best-practice questions."""
    return _crewai()["Agent"](
        role="GitHub Learning & Guidance Specialist",
        goal="Provide expert guidance on GitHub features, best practices, and workflows",
        backstory=(
            "You are a GitHub expert who helps users understand GitHub Actions, "
            "CI/CD workflows, authentication, pull request best practices, "
            "repository maintenance, GitHub Pages, Packages, Discussions, "
            "and security best practices. You provide clear, actionable guidance "
            "with examples. You can also read the repository to give contextualised advice."
        ),
        llm=llm,
        tools=_tools()["REPOSITORY_TOOLS"] + _tools()["SEARCH_TOOLS"],
        verbose=True,
        allow_delegation=False,
    )
+
+
def _build_local_editor_agent(llm) -> Agent:
    """Phase 1: Agent for direct local file editing with verification.

    Uses LOCAL_FILE_TOOLS + LOCAL_GIT_TOOLS, so it operates on the local
    workspace rather than through the GitHub API.
    """
    return _crewai()["Agent"](
        role="Local File Editor",
        goal="Read, write, and modify files in the local workspace with verification",
        backstory=(
            "You are an expert code editor that operates directly on the local "
            "filesystem. You read files, make precise edits, write new files, "
            "and verify changes using git diff. You always check file contents "
            "before editing and confirm results after. You follow project "
            "conventions and never introduce breaking changes."
        ),
        llm=llm,
        tools=_tools()["LOCAL_FILE_TOOLS"] + _tools()["LOCAL_GIT_TOOLS"],
        verbose=True,
        allow_delegation=False,
    )
+
+
def _build_terminal_agent(llm) -> Agent:
    """Phase 1: Agent for sandboxed shell command execution.

    Uses LOCAL_SHELL_TOOLS + LOCAL_GIT_TOOLS; the refusal of destructive
    commands is delegated to the agent's prompt/tools, not enforced here.
    """
    return _crewai()["Agent"](
        role="Terminal & Shell Executor",
        goal="Execute shell commands safely in the workspace and report results",
        backstory=(
            "You are a terminal expert that runs shell commands in a sandboxed "
            "environment. You can run tests, linters, build tools, and other "
            "development commands. You always report exit codes and output. "
            "You refuse to run destructive commands like rm -rf / or format disks. "
            "You explain command output clearly to the user."
        ),
        llm=llm,
        tools=_tools()["LOCAL_SHELL_TOOLS"] + _tools()["LOCAL_GIT_TOOLS"],
        verbose=True,
        allow_delegation=False,
    )
+
+
+# ============================================================================
+# Unified Dispatcher (v2 upgrade)
+# ============================================================================
+
+async def dispatch_request(
+ user_request: Optional[str] = None,
+ repo_full_name: Optional[str] = None,
+ token: Optional[str] = None,
+ branch_name: Optional[str] = None,
+ topology_id: Optional[str] = None,
+ # -----------------------------------------------------------------
+ # Backwards-compatible keyword arguments.
+ # Older callers (notably early WebSocket and A2A adapters) used:
+ # dispatch_request(repo_owner=..., repo_name=..., message=...)
+ # Keeping these kwargs prevents crashes when frontend/backend drift.
+ # -----------------------------------------------------------------
+ repo_owner: Optional[str] = None,
+ repo_name: Optional[str] = None,
+ message: Optional[str] = None,
+ **_ignored_kwargs: Any,
+) -> Dict[str, Any]:
+ """Route a free-form user request to the appropriate agent(s) and return the result.
+
+ This is the single entry-point for the new conversational mode. For backwards
+ compatibility the original ``generate_plan`` / ``execute_plan`` pair is still
+ available and untouched.
+
+ If *topology_id* is supplied, topology-aware routing is used:
+ - ``classify_and_dispatch`` → falls through to the existing agent_router
+ - ``always_main_agent`` → all requests go to the primary agent (T2)
+ - ``fixed_sequence`` → a CrewAI sequential crew is built from the
+ topology's agent sequence (T3-T7)
+
+ When *topology_id* is ``None``, behaviour is identical to the original v2
+ dispatcher.
+ """
+ # ---- Input normalization / compat layer ----
+ if user_request is None and message is not None:
+ user_request = message
+ if repo_full_name is None and repo_owner and repo_name:
+ repo_full_name = f"{repo_owner}/{repo_name}"
+
+ if not user_request:
+ raise ValueError("dispatch_request: missing user_request (or legacy 'message')")
+ if not repo_full_name:
+ raise ValueError("dispatch_request: missing repo_full_name (or legacy repo_owner/repo_name)")
+
+ # ---------- Lite Mode check (additive, non-destructive) ----------
+ # Lite mode activates if ANY of:
+ # - the explicit setting is on
+ # - the topology is "lite_mode"
+ # - the active model is incompatible with multi-agent ReAct prompts
+ # (deepseek-r1, qwq, small Ollama models)
+ from .settings import get_settings as _get_settings
+
+ _current_settings = _get_settings()
+ _saved_topology = get_saved_topology_preference()
+
+ # Explicit topology_id must always win.
+ if topology_id:
+ _resolved_tid = topology_id
+ else:
+ _resolved_tid = _saved_topology
+
+ # Auto-detect models that can't handle multi-agent ReAct format
+ # (deepseek-r1, qwq, small local models) — route them to Lite Mode
+ # regardless of explicit settings.
+ _auto_lite = _is_incompatible_model(_current_settings)
+
+ # Lite mode only applies when explicitly selected or globally enabled,
+ # and it must not override an explicit non-lite topology choice.
+ _lite_active = (
+ _current_settings.lite_mode
+ or _resolved_tid == "lite_mode"
+ or _auto_lite
+ )
+
+ # Do not force lite mode when the caller explicitly requested another topology.
+ if topology_id and topology_id != "lite_mode":
+ _lite_active = False
+
+ if _auto_lite and _lite_active:
+ logger.info(
+ "[GitPilot] Auto-routed to Lite Mode: active model is incompatible "
+ "with multi-agent ReAct (deepseek-r1, qwq, or small local model)"
+ )
+
+ if _lite_active:
+ logger.info("[GitPilot Lite] Lite Mode active — using simplified single-agent path")
+ plan = await generate_plan_lite(
+ user_request,
+ repo_full_name,
+ token=token,
+ branch_name=branch_name,
+ )
+ return {
+ "category": "plan_execute",
+ "workflow": "plan_execute",
+ "plan": plan.model_dump() if hasattr(plan, "model_dump") else plan,
+ "message": "Lite Mode: Plan generated. Review and approve to execute.",
+ "lite_mode": True,
+ }
+
+ _active_topology = None
+ if _resolved_tid:
+ _active_topology = get_topology(_resolved_tid)
+
+ # ---------- Topology-aware routing (additive) ----------
+ _active_topology = None
+ _resolved_tid = topology_id or get_saved_topology_preference()
+ if _resolved_tid:
+ _active_topology = get_topology(_resolved_tid)
+
+ if _active_topology and _active_topology.routing_policy.strategy == RoutingStrategy.fixed_sequence:
+ # Pipeline topologies (T3-T7): build a multi-task sequential crew
+ return await _dispatch_pipeline(
+ _active_topology, user_request, repo_full_name,
+ token=token, branch_name=branch_name,
+ )
+
+ # For ``classify_and_dispatch`` (T1/default) and ``always_main_agent`` (T2)
+ # we fall through to the existing routing. T2's react_loop execution will
+ # be wired in a future phase; for now it uses the same single-task path
+ # but the *visualization* already shows the correct graph.
+
+ workflow = route_request(user_request)
+ logger.info(
+ "[GitPilot] Router: category=%s agents=%s desc=%s",
+ workflow.category.value,
+ [a.value for a in workflow.agents],
+ workflow.description,
+ )
+
+ # Phase 2: Smart model routing
+ try:
+ from .smart_model_router import ModelRouter
+ _router = ModelRouter()
+ selection = _router.select(user_request, category=workflow.category.value)
+ logger.info(
+ "[GitPilot] ModelRouter: model=%s tier=%s complexity=%s reason=%s",
+ selection.model, selection.tier.value, selection.complexity.value, selection.reason,
+ )
+ except Exception:
+ pass # Model routing is optional; fall through to default LLM
+
+ # Set repo context if needed
+ if workflow.requires_repo_context and repo_full_name:
+ owner, repo = _split_repo_full_name(repo_full_name)
+ active_ref = branch_name or "HEAD"
+ _tools()["set_repo_context"](owner, repo, token=token, branch=active_ref)
+
+ llm = _build_llm()
+
+ # If it's the existing plan+execute workflow, delegate there
+ if workflow.category == RequestCategory.PLAN_EXECUTE:
+ plan = await generate_plan(user_request, repo_full_name, token=token, branch_name=branch_name)
+ return {
+ "category": workflow.category.value,
+ "workflow": "plan_execute",
+ "plan": plan.model_dump() if hasattr(plan, "model_dump") else plan,
+ "message": "Plan generated. Review and approve to execute.",
+ }
+
+ # CONTEXT PACK: Load project context for non-plan agents too (additive)
+ _dispatch_ctx_pack = ""
+ if repo_full_name:
+ try:
+ _d_owner, _d_repo = repo_full_name.split("/")
+ from pathlib import Path as _P
+ _d_ws = _P.home() / ".gitpilot" / "workspaces" / _d_owner / _d_repo
+ _dispatch_ctx_pack = build_context_pack(_d_ws, query=user_request)
+ except Exception:
+ pass
+
+ # Build the task description
+ task_description = _build_task_description(workflow, user_request, repo_full_name, branch_name)
+ if _dispatch_ctx_pack:
+ task_description += "\n\n" + _dispatch_ctx_pack
+
+ # Build agent(s) for this workflow
+ agents = []
+ for agent_type in workflow.agents:
+ agents.append(_get_agent(agent_type, llm))
+
+ # Use the first agent as the primary executor
+ primary_agent = agents[0]
+ task = _crewai()["Task"](
+ description=task_description,
+ expected_output="A clear, structured response addressing the user request",
+ agent=primary_agent,
+ )
+
+ crew = _crewai()["Crew"](
+ agents=agents,
+ tasks=[task],
+ process=_crewai()["Process"].sequential,
+ verbose=True,
+ )
+
+ def _run():
+ result = crew.kickoff()
+ if hasattr(result, "raw"):
+ return result.raw
+ return str(result)
+
+ ctx = contextvars.copy_context()
+ result_text = await _guarded_agent_call(ctx, _run, label="dispatch")
+
+ return {
+ "category": workflow.category.value,
+ "agents_used": [a.value for a in workflow.agents],
+ "result": result_text,
+ "entity_number": workflow.entity_number,
+ }
+
+
+# ============================================================================
+# Topology Pipeline Dispatcher (additive — T3-T7)
+# ============================================================================
+
+# Maps topology agent IDs to AgentType enum + task descriptions.
+# This bridge lets the topology registry reference agents by string ID while
+# reusing the existing _get_agent() builders.
+_TOPO_AGENT_MAP = {
+    # Phase 1 — read-only reconnaissance of the repository.
+    "explorer": (AgentType.EXPLORER, "Explore the codebase: map project structure, discover relevant files, "
+                                     "identify patterns, dependencies, and test conventions. "
+                                     "Return a structured analysis with file paths and key findings."),
+    # Phase 2 — turns the exploration output into an ordered plan.
+    "planner": (AgentType.PLANNER, "Based on the exploration results, create a detailed implementation plan. "
+                                   "Include: files to modify, files to create, step-by-step order, "
+                                   "and test strategy. Consider trade-offs and alternatives."),
+    # Phase 3 — write-capable: applies the plan (listed in _write_agents in
+    # _dispatch_pipeline, which triggers working-branch creation).
+    "developer": (AgentType.CODE_WRITER, "Execute the implementation plan step by step. For each step: "
+                                         "make the code change, then run tests. If tests fail, fix the issue "
+                                         "before moving to the next step. Follow project coding standards."),
+    # Phase 4 — read-only quality gate over the produced changes.
+    "reviewer": (AgentType.CODE_REVIEWER, "Review all code changes. Check for: security vulnerabilities, "
+                                          "code quality, test coverage, performance issues. "
+                                          "Organise findings by severity: Critical, Warning, Suggestion."),
+    # Phase 5 — write-capable: commits, pushes, and opens the PR.
+    "git_agent": (AgentType.PR_MANAGER, "Create a branch, commit all changes with a descriptive message, "
+                                        "push the branch, and create a GitHub PR. PR should summarise "
+                                        "the changes clearly with a test plan."),
+}
+
+
+async def _dispatch_pipeline(
+    topology,
+    user_request: str,
+    repo_full_name: str,
+    token: Optional[str] = None,
+    branch_name: Optional[str] = None,
+) -> Dict[str, Any]:
+    """Run a topology's fixed-sequence pipeline as a multi-task CrewAI crew.
+
+    Each agent in the sequence gets its own Task. Tasks are linked via
+    CrewAI's ``context`` parameter so the output of step N feeds step N+1.
+
+    Args:
+        topology: Topology object exposing ``routing_policy.sequence``, ``id``,
+            ``name`` and ``execution_style`` — presumably from the topology
+            registry (see ``get_topology``); confirm at the caller.
+        user_request: Free-form request text from the user.
+        repo_full_name: "owner/repo" identifier of the target repository.
+        token: Optional GitHub token forwarded to branch/context helpers.
+        branch_name: Optional existing branch; when absent and the pipeline
+            contains write-capable agents, a fresh working branch is created.
+
+    Returns:
+        Result dict with topology metadata and the crew's final text, or an
+        ``{"error": ...}`` dict when the sequence is empty / has no known agents.
+
+    Side effects: may create a branch on GitHub and mutates the process-wide
+    repo context via ``set_repo_context`` before the crew runs.
+    """
+    sequence = topology.routing_policy.sequence or []
+    if not sequence:
+        return {"error": "Topology has no agent sequence defined"}
+
+    # Determine if this pipeline has write-capable agents
+    _write_agents = {"developer", "git_agent"}
+    _has_writers = bool(set(sequence) & _write_agents)
+
+    # Create a working branch for pipelines that modify files
+    pipeline_branch = branch_name
+    if repo_full_name and _has_writers and not branch_name:
+        import re as _re
+        import time as _time
+        owner, repo = _split_repo_full_name(repo_full_name)
+        # Branch name "gitpilot-<topology>-<slug>-<stamp>": slug is the request
+        # lowercased with non-[a-z0-9-] runs collapsed to "-", capped at 35
+        # chars; stamp is the last 6 digits of epoch seconds for uniqueness.
+        sanitized = _re.sub(r"[^a-z0-9-]+", "-", user_request.lower())[:35].strip("-")
+        timestamp = str(int(_time.time()))[-6:]
+        pipeline_branch = f"gitpilot-{topology.id}-{sanitized}-{timestamp}"
+        try:
+            from .github_api import create_branch
+            await create_branch(owner, repo, pipeline_branch, from_ref="HEAD", token=token)
+            logger.info("[Pipeline] Created branch: %s", pipeline_branch)
+        except Exception:
+            pass  # branch may already exist
+
+    # Set repo context (on the working branch)
+    if repo_full_name:
+        owner, repo = _split_repo_full_name(repo_full_name)
+        active_ref = pipeline_branch or "HEAD"
+        _tools()["set_repo_context"](owner, repo, token=token, branch=active_ref)
+
+    llm = _build_llm()
+
+    # Build agents and tasks
+    agents = []
+    tasks = []
+    for i, agent_id in enumerate(sequence):
+        # Unknown agent IDs are logged and skipped rather than aborting the
+        # whole pipeline.
+        mapping = _TOPO_AGENT_MAP.get(agent_id)
+        if not mapping:
+            logger.warning("[GitPilot] Unknown topology agent ID: %s — skipping", agent_id)
+            continue
+        agent_type, base_description = mapping
+        agent = _get_agent(agent_type, llm)
+        agents.append(agent)
+
+        # Build task description: combine base description with user request
+        task_desc = (
+            f"User request: {user_request}\n"
+            f"Repository: {repo_full_name}\n"
+        )
+        if pipeline_branch:
+            task_desc += f"Branch: {pipeline_branch}\n"
+        task_desc += f"\nYour role in this pipeline: {base_description}"
+
+        # Tell write-capable agents to actually use their tools
+        if agent_id in _write_agents and pipeline_branch:
+            task_desc += (
+                f"\n\nIMPORTANT: You have tools to write and delete files. "
+                f"USE THEM to make real changes on branch '{pipeline_branch}'. "
+                f"Do NOT just describe changes — actually write/delete files using your tools."
+            )
+
+        # Context chaining: each task after the first receives prior tasks
+        # (a copy, so later appends don't retroactively grow earlier contexts).
+        context = tasks[:] if tasks else []
+
+        task = _crewai()["Task"](
+            description=task_desc,
+            expected_output=f"Structured output from the {agent_id} phase",
+            agent=agent,
+            context=context if context else None,
+        )
+        tasks.append(task)
+
+    if not agents:
+        return {"error": "No valid agents could be built for this topology"}
+
+    # Load optional context pack
+    _ctx_pack = ""
+    if repo_full_name:
+        try:
+            from pathlib import Path as _P
+            _owner, _repo = repo_full_name.split("/")
+            _ws = _P.home() / ".gitpilot" / "workspaces" / _owner / _repo
+            _ctx_pack = build_context_pack(_ws, query=user_request)
+        except Exception:
+            pass  # context pack is best-effort; pipeline still runs without it
+    if _ctx_pack:
+        # Append context pack to the first task's description
+        tasks[0].description += "\n\n" + _ctx_pack
+
+    crew = _crewai()["Crew"](
+        agents=agents,
+        tasks=tasks,
+        process=_crewai()["Process"].sequential,
+        verbose=True,
+    )
+
+    def _run():
+        # kickoff() may return a CrewOutput (with ``.raw``) or a plain object.
+        result = crew.kickoff()
+        if hasattr(result, "raw"):
+            return result.raw
+        return str(result)
+
+    # Copy the current context so contextvars propagate into the guarded
+    # worker that runs the blocking crew.
+    ctx = contextvars.copy_context()
+    result_text = await _guarded_agent_call(ctx, _run, label="topology_pipeline")
+
+    response = {
+        "category": "topology_pipeline",
+        "topology_id": topology.id,
+        "topology_name": topology.name,
+        "execution_style": topology.execution_style.value,
+        "agents_used": sequence,
+        "result": result_text,
+    }
+
+    # Add branch info for pipelines that created a working branch
+    if pipeline_branch and _has_writers:
+        response["branch"] = pipeline_branch
+        response["branch_url"] = f"https://github.com/{repo_full_name}/tree/{pipeline_branch}"
+
+    return response
+
+
+def _get_agent(agent_type: AgentType, llm) -> Agent:
+ """Instantiate an agent by type."""
+ builders = {
+ AgentType.EXPLORER: lambda: _crewai()["Agent"](
+ role="Repository Explorer",
+ goal="Thoroughly explore and document the current state of the repository",
+ backstory="You are a meticulous code archaeologist who explores repositories.",
+ llm=llm,
+ tools=_tools()["REPOSITORY_TOOLS"],
+ verbose=True,
+ allow_delegation=False,
+ ),
+ AgentType.PLANNER: lambda: _crewai()["Agent"](
+ role="Repository Refactor Planner",
+ goal="Design safe, step-by-step refactor plans",
+ backstory="You are an experienced staff engineer who creates plans based on facts.",
+ llm=llm,
+ tools=_tools()["REPOSITORY_TOOLS"],
+ verbose=True,
+ allow_delegation=False,
+ ),
+ AgentType.CODE_WRITER: lambda: _crewai()["Agent"](
+ role="Expert Code Writer",
+ goal="Generate high-quality, production-ready code and write it to the repository",
+ backstory=(
+ "You are a senior software engineer with multi-language expertise. "
+ "You read existing files, write new code, and update files directly "
+ "in the repository using your tools. Always read a file before modifying it."
+ ),
+ llm=llm,
+ tools=_tools()["REPOSITORY_TOOLS"] + _tools()["WRITE_TOOLS"],
+ verbose=True,
+ allow_delegation=False,
+ ),
+ AgentType.CODE_REVIEWER: lambda: _build_code_review_agent(llm),
+ AgentType.ISSUE_MANAGER: lambda: _build_issue_agent(llm),
+ AgentType.PR_MANAGER: lambda: _build_pr_agent(llm),
+ AgentType.SEARCH: lambda: _build_search_agent(llm),
+ AgentType.LEARNING: lambda: _build_learning_agent(llm),
+ AgentType.LOCAL_EDITOR: lambda: _build_local_editor_agent(llm),
+ AgentType.TERMINAL: lambda: _build_terminal_agent(llm),
+ }
+ builder = builders.get(agent_type)
+ if not builder:
+ raise ValueError(f"Unknown agent type: {agent_type}")
+ return builder()
+
+
+def _build_task_description(
+    workflow: WorkflowPlan,
+    user_request: str,
+    repo_full_name: str,
+    branch_name: Optional[str],
+) -> str:
+    """Build a detailed task description for the agent based on the workflow.
+
+    The returned string is an LLM-facing prompt: a context header (request,
+    repository, optional branch and entity number) followed by instructions
+    specific to ``workflow.category``. Categories without a branch below
+    (e.g. PLAN_EXECUTE, which is handled before this is called) get only the
+    header. Lines are joined with newlines.
+    """
+    # Common header lines shared by every category.
+    parts = [
+        f"User request: {user_request}",
+        f"Repository: {repo_full_name}",
+    ]
+    if branch_name:
+        parts.append(f"Branch: {branch_name}")
+    if workflow.entity_number:
+        # Issue/PR number the router extracted from the request, if any.
+        parts.append(f"Entity number: #{workflow.entity_number}")
+
+    # Category-specific instructions
+    if workflow.category == RequestCategory.ISSUE_MANAGEMENT:
+        # Router may hint the concrete action (create/list/update, ...).
+        action = workflow.metadata.get("action", "")
+        parts.append(
+            "\nYou are handling an ISSUE MANAGEMENT request. "
+            f"Action hint: {action}. "
+            "Use your issue tools to fulfill the request. "
+            "If creating an issue, extract title and body from the user request. "
+            "If listing issues, present results in a clear table. "
+            "If updating, identify the issue number and fields to change. "
+            "Always confirm what you did with the issue URL."
+        )
+
+    elif workflow.category == RequestCategory.PR_MANAGEMENT:
+        action = workflow.metadata.get("action", "")
+        parts.append(
+            "\nYou are handling a PULL REQUEST request. "
+            f"Action hint: {action}. "
+            "Use your PR tools to fulfill the request. "
+            "If creating a PR, determine the head and base branches. "
+            "If merging, confirm the PR number and merge method. "
+            "Always confirm with the PR URL."
+        )
+
+    elif workflow.category == RequestCategory.CODE_SEARCH:
+        # Search type defaults to "code" when the router provided no hint.
+        search_type = workflow.metadata.get("search_type", "code")
+        parts.append(
+            f"\nYou are handling a SEARCH request (type: {search_type}). "
+            "Use your search tools to find what the user is looking for. "
+            "Present results clearly with paths, URLs, and context snippets."
+        )
+
+    elif workflow.category == RequestCategory.CODE_REVIEW:
+        parts.append(
+            "\nYou are handling a CODE REVIEW request. "
+            "First explore the repository to understand the codebase, "
+            "then analyse code quality, identify potential issues "
+            "(security, performance, maintainability), and provide "
+            "constructive suggestions with specific file references."
+        )
+
+    elif workflow.category == RequestCategory.LEARNING:
+        parts.append(
+            "\nYou are handling a LEARNING / GUIDANCE request. "
+            "Provide clear, actionable guidance about GitHub features. "
+            "Include examples and best practices. "
+            "If relevant, reference the current repository for context."
+        )
+
+    elif workflow.category == RequestCategory.LOCAL_EDIT:
+        parts.append(
+            "\nYou are handling a LOCAL FILE EDITING request. "
+            "Use your local file tools to read, write, and modify files. "
+            "Always read the file before editing to understand current content. "
+            "After editing, use git_diff or git_status to verify your changes. "
+            "Report exactly what was changed."
+        )
+
+    elif workflow.category == RequestCategory.TERMINAL:
+        parts.append(
+            "\nYou are handling a TERMINAL / SHELL COMMAND request. "
+            "Use the run_command tool to execute the requested command. "
+            "Report the exit code and output. If tests fail, summarise "
+            "which tests failed and why. Never run destructive commands."
+        )
+
+    elif workflow.category == RequestCategory.CONVERSATIONAL:
+        parts.append(
+            "\nYou are handling a general question about the repository. "
+            "Use repository tools to explore and answer the question. "
+            "Be concise and helpful."
+        )
+
+    return "\n".join(parts)
+
+
+# ============================================================================
+# Auto PR Creation (v2 upgrade)
+# ============================================================================
+
+async def create_pr_after_execution(
+ repo_full_name: str,
+ branch_name: str,
+ goal: str,
+ execution_log: Dict[str, Any],
+ token: Optional[str] = None,
+) -> Optional[Dict[str, Any]]:
+ """Automatically create a PR after plan execution completes.
+
+ Returns the PR data dict or None if creation fails.
+ """
+ from .github_pulls import create_pull_request
+ from .github_api import get_repo
+
+ owner, repo = _split_repo_full_name(repo_full_name)
+
+ try:
+ repo_info = await get_repo(owner, repo, token=token)
+ default_branch = repo_info.get("default_branch", "main")
+ except Exception:
+ default_branch = "main"
+
+ # Build PR body from execution log
+ steps = execution_log.get("steps", [])
+ body_lines = [f"## GitPilot Auto-PR\n\n**Goal:** {goal}\n"]
+ for step in steps:
+ body_lines.append(f"- {step.get('summary', '')}")
+ body_lines.append(f"\n---\n*Created by GitPilot*")
+ body = "\n".join(body_lines)
+
+ # Truncate title to stay within GitHub limits
+ title = f"GitPilot: {goal}"
+ if len(title) > 256:
+ title = title[:253] + "..."
+
+ try:
+ pr = await create_pull_request(
+ owner,
+ repo,
+ title=title,
+ head=branch_name,
+ base=default_branch,
+ body=body,
+ token=token,
+ )
+ logger.info("[GitPilot] Auto-PR created: %s", pr.get("html_url", ""))
+ return pr
+ except Exception as e:
+ logger.warning("[GitPilot] Failed to create auto-PR: %s", e)
+ return None
+
+
+# ============================================================================
+# Flow Definition (v3 -- topology-aware with legacy fallback)
+# ============================================================================
+
+async def get_flow_definition(topology_id: Optional[str] = None) -> dict:
+ """Return the agent workflow as a visual graph.
+
+ When *topology_id* is provided (or a saved preference exists), the graph
+ is served from the topology registry. Otherwise the original hardcoded
+ graph is returned for backward compatibility.
+ """
+ tid = topology_id or get_saved_topology_preference()
+ if tid:
+ return get_topology_graph(tid)
+
+ # Legacy hardcoded graph (unchanged from v2)
+ return {
+ "nodes": [
+ {
+ "id": "router",
+ "label": "Request Router",
+ "type": "router",
+ "description": "Analyses user intent and delegates to the right agent(s)",
+ },
+ {
+ "id": "repo_explorer",
+ "label": "Repository Explorer",
+ "type": "agent",
+ "description": "Explores repository to gather current state",
+ },
+ {
+ "id": "planner",
+ "label": "Refactor Planner",
+ "type": "agent",
+ "description": "Creates safe, step-by-step refactor plans based on exploration",
+ },
+ {
+ "id": "code_writer",
+ "label": "Code Writer",
+ "type": "agent",
+ "description": "Implements approved changes to codebase",
+ },
+ {
+ "id": "reviewer",
+ "label": "Code Reviewer",
+ "type": "agent",
+ "description": "Reviews code quality, security, and performance",
+ },
+ {
+ "id": "issue_manager",
+ "label": "Issue Manager",
+ "type": "agent",
+ "description": "Creates, updates, and manages GitHub issues",
+ },
+ {
+ "id": "pr_manager",
+ "label": "PR Manager",
+ "type": "agent",
+ "description": "Creates, reviews, and merges pull requests",
+ },
+ {
+ "id": "search_agent",
+ "label": "Search & Discovery",
+ "type": "agent",
+ "description": "Searches code, repos, issues, and users",
+ },
+ {
+ "id": "learning_agent",
+ "label": "Learning & Guidance",
+ "type": "agent",
+ "description": "Provides GitHub feature guidance and best practices",
+ },
+ {
+ "id": "local_editor",
+ "label": "Local Editor",
+ "type": "agent",
+ "description": "Reads and writes files directly in the local workspace",
+ },
+ {
+ "id": "terminal_agent",
+ "label": "Terminal",
+ "type": "agent",
+ "description": "Executes shell commands in a sandboxed environment",
+ },
+ {
+ "id": "github_tools",
+ "label": "GitHub API",
+ "type": "tool",
+ "description": "Read/write/delete files, issues, PRs, search",
+ },
+ {
+ "id": "local_tools",
+ "label": "Local Tools",
+ "type": "tool",
+ "description": "File I/O, git operations, shell commands on local workspace",
+ },
+ ],
+ "edges": [
+ {
+ "id": "e0",
+ "source": "router",
+ "target": "repo_explorer",
+ "label": "Plan & Execute workflow",
+ },
+ {
+ "id": "e0b",
+ "source": "router",
+ "target": "issue_manager",
+ "label": "Issue management requests",
+ },
+ {
+ "id": "e0c",
+ "source": "router",
+ "target": "pr_manager",
+ "label": "PR management requests",
+ },
+ {
+ "id": "e0d",
+ "source": "router",
+ "target": "search_agent",
+ "label": "Search requests",
+ },
+ {
+ "id": "e0e",
+ "source": "router",
+ "target": "reviewer",
+ "label": "Code review requests",
+ },
+ {
+ "id": "e0f",
+ "source": "router",
+ "target": "learning_agent",
+ "label": "Learning & guidance requests",
+ },
+ {
+ "id": "e1",
+ "source": "repo_explorer",
+ "target": "planner",
+ "label": "Complete repository state & file listing",
+ },
+ {
+ "id": "e2",
+ "source": "planner",
+ "target": "code_writer",
+ "label": "Approved plan with verified file actions",
+ },
+ {
+ "id": "e3",
+ "source": "code_writer",
+ "target": "pr_manager",
+ "label": "Auto-create PR after execution",
+ },
+ {
+ "id": "e4",
+ "source": "reviewer",
+ "target": "pr_manager",
+ "label": "Review results",
+ },
+ {
+ "id": "e5",
+ "source": "issue_manager",
+ "target": "github_tools",
+ "label": "Issue operations",
+ },
+ {
+ "id": "e6",
+ "source": "pr_manager",
+ "target": "github_tools",
+ "label": "PR operations",
+ },
+ {
+ "id": "e7",
+ "source": "search_agent",
+ "target": "github_tools",
+ "label": "Search queries",
+ },
+ {
+ "id": "e8",
+ "source": "router",
+ "target": "local_editor",
+ "label": "Local file editing requests",
+ },
+ {
+ "id": "e9",
+ "source": "router",
+ "target": "terminal_agent",
+ "label": "Shell command requests",
+ },
+ {
+ "id": "e10",
+ "source": "local_editor",
+ "target": "local_tools",
+ "label": "File and git operations",
+ },
+ {
+ "id": "e11",
+ "source": "terminal_agent",
+ "target": "local_tools",
+ "label": "Command execution",
+ },
+ ],
+ }
diff --git a/gitpilot/api.py b/gitpilot/api.py
new file mode 100644
index 0000000000000000000000000000000000000000..da84e369b4e52faa302167278712fe407be4b08b
--- /dev/null
+++ b/gitpilot/api.py
@@ -0,0 +1,3753 @@
+# gitpilot/api.py
+
+from pathlib import Path
+from typing import List, Optional
+
+from fastapi import FastAPI, Query, Path as FPath, Header, HTTPException, UploadFile, File
+from fastapi.responses import FileResponse, JSONResponse
+from fastapi.staticfiles import StaticFiles
+from fastapi.middleware.cors import CORSMiddleware
+from pydantic import BaseModel, Field
+
+from .version import __version__
+from .github_api import (
+ list_user_repos,
+ list_user_repos_paginated, # Pagination support
+ search_user_repos, # Search across all repos
+ get_repo_tree,
+ get_file,
+ put_file,
+ execution_context,
+ github_request,
+)
+from .github_app import check_repo_write_access
+from .settings import AppSettings, get_settings, set_provider, update_settings, autoconfigure_local_provider, LLMProvider
+from .agentic import (
+ generate_plan,
+ execute_plan,
+ generate_plan_lite,
+ execute_plan_lite,
+ PlanResult,
+ get_flow_definition,
+ dispatch_request,
+ create_pr_after_execution,
+)
+from .agent_router import route as route_request
+from . import github_issues
+from . import github_pulls
+from . import github_search
+from .session import SessionManager, Session
+from .hooks import HookManager, HookEvent
+from .permissions import PermissionManager, PermissionMode
+from .memory import MemoryManager
+from .context_vault import ContextVault
+from .use_case import UseCaseManager
+from .mcp_client import MCPClient
+from .plugins import PluginManager
+from .skills import SkillManager
+from .smart_model_router import ModelRouter, ModelRouterConfig
+from .topology_registry import (
+ list_topologies as _list_topologies,
+ get_topology_graph as _get_topology_graph,
+ classify_message as _classify_message,
+ get_saved_topology_preference,
+ save_topology_preference,
+)
+import httpx
+import logging
+from fastapi import HTTPException
+
+logger = logging.getLogger(__name__)
+
def _is_small_local_model() -> bool:
    """Return True when the active provider is an Ollama/OllaBridge model
    too small to drive multi-agent CrewAI prompts reliably.

    The real detection lives in agentic._is_incompatible_model (single
    source of truth) so /api/chat/plan, /api/chat/execute, and
    /ws/sessions/ all agree; any failure is treated as "not small".
    """
    try:
        from .agentic import _is_incompatible_model
        settings_snapshot = autoconfigure_local_provider()
        return _is_incompatible_model(settings_snapshot)
    except Exception as exc:
        logger.debug("[GitPilot] _is_small_local_model check failed: %s", exc)
        return False
+
+
def _is_lite_mode_active() -> bool:
    """Decide whether Lite Mode should drive planning/execution.

    Lite Mode is active when any of the following holds:
    - the explicit settings.lite_mode toggle is on,
    - the saved topology preference is "lite_mode" (chosen in the flow viewer),
    - the active provider is a small local model that cannot handle
      multi-agent CrewAI prompts (auto-detected for reliability).
    """
    current = autoconfigure_local_provider()
    if current.lite_mode:
        return True
    if get_saved_topology_preference() == "lite_mode":
        return True
    if _is_small_local_model():
        logger.info("[GitPilot] Auto-enabling Lite Mode for small local model")
        return True
    return False
+# ═════════════════════════════════════════════════════════════════════
+# LAZY IMPORT STRATEGY — Phase 3 heavy modules
+# ═════════════════════════════════════════════════════════════════════
+# agent_teams, learning, cross_repo, predictions, security, nl_database
+# are deferred until first access via _LazyProxy. This saves 200-500ms
+# on WSL cold start (each import triggers disk I/O + pydantic compilation).
+# The proxy pattern means NO code changes are needed at call sites —
+# _agent_team.plan_and_split(...) works identically to the original.
+# NL database types are imported lazily at call site (see /api/nl-db endpoint)
+from .github_oauth import (
+ generate_authorization_url,
+ exchange_code_for_token,
+ validate_token,
+ initiate_device_flow,
+ poll_device_token,
+ AuthSession,
+ GitHubUser,
+)
+import os
+import logging
+from .model_catalog import list_models_for_provider
+
+# Optional A2A adapter (MCP ContextForge)
+from .a2a_adapter import router as a2a_router
+
+logger = logging.getLogger(__name__)
+
+
class _LazyProxy:
    """Lazy singleton proxy — builds the wrapped class instance on first use.

    Defers heavy imports (agent_teams, learning, cross_repo, ...) until an
    endpoint actually touches them, trimming backend startup time on slow
    filesystems (WSL, HF Spaces cold start). Attribute reads, writes, and
    calls are all forwarded to the underlying instance, so call sites such
    as `_agent_team.plan_and_split(...)` need no changes.
    """

    def __init__(self, module_path: str, class_name: str) -> None:
        # Bypass our own __setattr__ so these bookkeeping fields never get
        # forwarded to the (not yet existing) wrapped instance.
        object.__setattr__(self, "_module_path", module_path)
        object.__setattr__(self, "_class_name", class_name)
        object.__setattr__(self, "_instance", None)

    def _get_instance(self) -> object:
        # Fetch via object.__getattribute__ to avoid re-entering __getattr__.
        instance = object.__getattribute__(self, "_instance")
        if instance is None:
            import importlib

            module = importlib.import_module(self._module_path, package=__package__)
            instance = getattr(module, self._class_name)()
            object.__setattr__(self, "_instance", instance)
            logger.debug("[LazyProxy] Instantiated %s.%s on first access",
                         self._module_path, self._class_name)
        return instance

    def __getattr__(self, name: str):
        # Only called when normal lookup fails, i.e. for wrapped attributes.
        return getattr(self._get_instance(), name)

    def __setattr__(self, name: str, value):
        setattr(self._get_instance(), name, value)

    def __call__(self, *args, **kwargs):
        return self._get_instance()(*args, **kwargs)

    def __repr__(self) -> str:
        instance = object.__getattribute__(self, "_instance")
        if instance is None:
            return f"<_LazyProxy {self._module_path}.{self._class_name} (not yet loaded)>"
        return repr(instance)
+
+
# --- Phase 1 singletons (lightweight, instantiate eagerly) ---
_session_mgr = SessionManager()
_hook_mgr = HookManager()
_perm_mgr = PermissionManager()

# --- Phase 2 singletons (lightweight, instantiate eagerly) ---
_mcp_client = MCPClient()
_plugin_mgr = PluginManager()
_skill_mgr = SkillManager()
_model_router = ModelRouter()

# --- Phase 3 singletons (HEAVY, lazy-loaded) ---
# Each of these pulls in several MB of Python code and takes 50-200ms on WSL.
# Deferred via _LazyProxy until first endpoint call that actually uses them.
_agent_team = _LazyProxy(".agent_teams", "AgentTeam")
_learning_engine = _LazyProxy(".learning", "LearningEngine")
_cross_repo = _LazyProxy(".cross_repo", "CrossRepoAnalyzer")
_predictive_engine = _LazyProxy(".predictions", "PredictiveEngine")
_security_scanner = _LazyProxy(".security", "SecurityScanner")
_nl_engine = _LazyProxy(".nl_database", "NLQueryEngine")

import asyncio as _asyncio
import signal
from contextlib import asynccontextmanager

# Set by the lifespan signal handlers (SIGTERM/SIGINT) and again on lifespan
# exit; lets long-running tasks observe that shutdown has begun.
_shutdown_event = _asyncio.Event()
+
+
@asynccontextmanager
async def _lifespan(application: FastAPI):
    """Manage startup (pre-warm) and graceful shutdown.

    Startup schedules a background task that waits briefly (so the health
    endpoint can answer first), then imports CrewAI and loads the agent
    tools. Shutdown sets ``_shutdown_event`` and cancels the warmup task
    if it is still in flight.
    """
    import time as _time

    _startup_start = _time.monotonic()
    logger.info("═══════════════════════════════════════════════════")
    logger.info("🚀 [STARTUP] GitPilot backend initializing...")
    logger.info("═══════════════════════════════════════════════════")

    # -- Startup: pre-warm CrewAI in background ---
    async def _warmup():
        logger.info("[STARTUP] ⏳ Phase 1/3: Waiting 2s for health endpoint...")
        await _asyncio.sleep(2)

        _t1 = _time.monotonic()
        logger.info("[STARTUP] ⏳ Phase 2/3: Importing CrewAI modules...")
        try:
            from .agentic import _crewai, _tools  # noqa: F811
            _crewai()
            _t_crewai = _time.monotonic() - _t1
            logger.info("[STARTUP] ✅ CrewAI imports complete in %.2fs", _t_crewai)

            _t2 = _time.monotonic()
            logger.info("[STARTUP] ⏳ Phase 3/3: Loading agent tools...")
            _tools()
            _t_tools = _time.monotonic() - _t2
            logger.info("[STARTUP] ✅ Agent tools loaded in %.2fs", _t_tools)

            _total = _time.monotonic() - _startup_start
            logger.info("═══════════════════════════════════════════════════")
            logger.info("[STARTUP] 🎉 Backend fully ready in %.2fs total", _total)
            logger.info("═══════════════════════════════════════════════════")
        except Exception as exc:
            # ImportError included — warmup is best-effort; the first real
            # request will retry the import.
            _t_fail = _time.monotonic() - _t1
            logger.warning(
                "[STARTUP] ⚠️ CrewAI pre-warm failed after %.2fs (will retry on first request): %s",
                _t_fail, exc,
            )

        # Log memory usage after warmup (resource is POSIX-only; best-effort)
        try:
            import resource
            rss_mb = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024
            logger.info("[STARTUP] 📊 Memory after warmup: %.1f MB RSS", rss_mb)
        except Exception:
            pass

    # BUGFIX: keep a strong reference to the task. The event loop holds only
    # weak references to tasks, so the result of a bare create_task() call
    # can be garbage-collected before the warmup coroutine finishes.
    warmup_task = _asyncio.create_task(_warmup())

    # -- Graceful shutdown handler ---
    def _handle_signal(sig, _frame):
        logger.info("Received %s — initiating graceful shutdown", signal.Signals(sig).name)
        _shutdown_event.set()

    for sig in (signal.SIGTERM, signal.SIGINT):
        try:
            signal.signal(sig, _handle_signal)
        except (OSError, ValueError):
            pass  # not main thread or unsupported

    _ready_time = _time.monotonic() - _startup_start
    logger.info(
        "[STARTUP] ✅ FastAPI ready to accept requests after %.2fs "
        "(CrewAI warmup continues in background)",
        _ready_time,
    )

    yield

    # Cleanup on shutdown
    logger.info("[SHUTDOWN] GitPilot shutting down gracefully")
    _shutdown_event.set()
    # Don't leave the warmup task running past application shutdown.
    if not warmup_task.done():
        warmup_task.cancel()
+
+
# FastAPI application instance. The _lifespan handler schedules CrewAI
# warmup in the background and wires graceful-shutdown signal handling.
app = FastAPI(
    title="GitPilot API",
    version=__version__,
    description="Agentic AI assistant for GitHub repositories.",
    lifespan=_lifespan,
)
+
+# ==========================================================================
+# Optional A2A Adapter (MCP ContextForge)
+# ==========================================================================
+# This is feature-flagged and does not affect the existing UI/REST API unless
+# explicitly enabled.
+def _env_bool(name: str, default: bool) -> bool:
+ raw = os.getenv(name)
+ if raw is None:
+ return default
+ return raw.strip().lower() in {"1", "true", "yes", "y", "on"}
+
+
# Mount the A2A adapter only when explicitly enabled via environment flag.
if _env_bool("GITPILOT_ENABLE_A2A", False):
    logger.info("A2A adapter enabled (mounting /a2a/* endpoints)")
    app.include_router(a2a_router)
else:
    logger.info("A2A adapter disabled (set GITPILOT_ENABLE_A2A=true to enable)")

# ============================================================================
# CORS Configuration
# ============================================================================
# Enable CORS to allow frontend (local dev or Vercel) to connect to backend
# CORS_ORIGINS is a comma-separated origin list; defaults to the Vite dev server.
allowed_origins_str = os.getenv("CORS_ORIGINS", "http://localhost:5173")
allowed_origins = [origin.strip() for origin in allowed_origins_str.split(",")]

logger.info(f"CORS enabled for origins: {allowed_origins}")

# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# restricted by the CORS spec (a wildcard origin cannot be credentialed) —
# confirm deployments needing credentials set explicit origins instead.
app.add_middleware(
    CORSMiddleware,
    allow_origins=allowed_origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
+
+
+# ──────────────────────────────────────────────────────────────────
+# Request timing middleware (logs slow startup requests for debugging)
+# ──────────────────────────────────────────────────────────────────
@app.middleware("http")
async def _log_slow_requests(request, call_next):
    """Log any request that takes noticeably long, with method/path/duration.

    Thresholds: >1s logs a warning, >0.5s logs at info, failures log at
    error (and re-raise). Helps diagnose first-load slowness — e.g. when
    /api/status is slow only on the first call due to lazy initialization.
    """
    import time as _t

    started = _t.monotonic()
    try:
        response = await call_next(request)
    except Exception:
        logger.error(
            "[HTTP] ❌ %s %s failed after %.2fs",
            request.method, request.url.path, _t.monotonic() - started,
        )
        raise

    elapsed = _t.monotonic() - started
    if elapsed > 1.0:
        logger.warning(
            "[HTTP] 🐢 %s %s took %.2fs (status=%s)",
            request.method, request.url.path, elapsed, response.status_code,
        )
    elif elapsed > 0.5:
        logger.info(
            "[HTTP] ⚠️ %s %s took %.2fs (status=%s)",
            request.method, request.url.path, elapsed, response.status_code,
        )

    return response
+
+
+def _project_context_to_text(project_context) -> str:
+ if not project_context:
+ return ""
+
+ parts = []
+ mode = getattr(project_context, "mode", None)
+ repo_name = getattr(project_context, "repoName", None)
+ branch = getattr(project_context, "branch", None)
+ languages = getattr(project_context, "languages", []) or []
+ manifests = getattr(project_context, "manifests", []) or []
+ key_files = getattr(project_context, "keyFiles", []) or []
+ readme_preview = getattr(project_context, "readmePreview", None)
+ tree_summary = getattr(project_context, "treeSummary", []) or []
+
+ if mode:
+ parts.append(f"Mode: {mode}")
+ if repo_name:
+ parts.append(f"Repo: {repo_name}")
+ if branch:
+ parts.append(f"Branch: {branch}")
+ if languages:
+ parts.append("Languages: " + ", ".join(languages[:20]))
+ if manifests:
+ parts.append("Manifests: " + ", ".join(manifests[:20]))
+ if key_files:
+ parts.append("Key files: " + ", ".join(key_files[:30]))
+
+ if tree_summary:
+ rendered = []
+ for entry in tree_summary[:200]:
+ if isinstance(entry, dict):
+ rendered.append(f"- {entry.get('type', 'file')}: {entry.get('path', '')}")
+ if rendered:
+ parts.append("Project tree:\n" + "\n".join(rendered))
+
+ if readme_preview:
+ parts.append("README preview:\n" + readme_preview)
+
+ return "\n".join(parts)
+
+
+def _working_set_to_text(working_set) -> str:
+ if not working_set:
+ return ""
+
+ parts = []
+ current_file = getattr(working_set, "currentFile", None)
+ language_id = getattr(working_set, "languageId", None)
+ current_selection = getattr(working_set, "currentSelection", None)
+ open_tabs = getattr(working_set, "openTabs", []) or []
+ related_files = getattr(working_set, "relatedFiles", []) or []
+
+ if current_file:
+ parts.append(f"Current file: {current_file}")
+ if language_id:
+ parts.append(f"Language: {language_id}")
+ if open_tabs:
+ parts.append("Open tabs: " + ", ".join(open_tabs[:12]))
+ if related_files:
+ parts.append("Related files: " + ", ".join(related_files[:12]))
+ if current_selection:
+ parts.append("Selected code:\n```\n" + current_selection + "\n```")
+
+ return "\n".join(parts)
+
+
+def _sanitize_relative_path(p: str) -> str | None:
+ """Reject absolute paths, .. traversal, drive letters, and empty strings.
+
+ Also strips LLM artifacts like "three_backticks_space" that some models
+ produce instead of actual backtick characters.
+ """
+ import os
+ import re as _re
+ p = p.strip().strip("`\"'").strip()
+ # Strip common LLM artifacts
+ # Strip literal descriptions LLMs produce instead of actual backtick chars
+ p = _re.sub(r"(?i)three[\s_+]*backtick[s]?[\s_+]*space[\s_+]*", "", p)
+ p = _re.sub(r"(?i)three[\s_+]*\+[\s_+]*markdown[\s_+]*\+[\s_+]*space[\s_+]*\+?\s*", "", p)
+ p = _re.sub(r"(?i)backtick[s]?[\s_+]*", "", p)
+ p = _re.sub(r"(?i)triple[\s_+]*backtick[s]?[\s_+]*", "", p)
+ p = _re.sub(r"(?i)fenced?[\s_+]*code[\s_+]*block[\s_+]*", "", p)
+ p = p.strip()
+ if not p:
+ return None
+ # Reject absolute / drive / UNC paths
+ if os.path.isabs(p) or p.startswith("\\\\") or (len(p) >= 2 and p[1] == ":"):
+ return None
+ # Reject parent traversal
+ parts = p.replace("\\", "/").split("/")
+ if ".." in parts:
+ return None
+ # Normalise to forward slashes
+ return "/".join(parts)
+
+
def _extract_edits_from_answer(answer: str) -> list[dict]:
    """Pull structured ProposedEdit dicts out of an LLM markdown answer.

    Tries three formats in order, returning as soon as one yields edits:
    1. fenced blocks with the filepath on the opening fence line
       (```python hello.py ... ```) — the format the prompt requests;
    2. a non-standard "lang filepath" / ----delimited layout some models emit;
    3. "save as <name>" / "create file <name>" mentions paired positionally
       with plain fenced code blocks.

    Duplicate or unsafe paths (see _sanitize_relative_path) are dropped.
    Result items match the ProposedEdit schema:
    [{"file": "hello.py", "kind": "create", "content": "...", "summary": "..."}]
    """
    import re

    if not answer:
        return []

    edits: list[dict] = []
    seen: set[str] = set()

    def _record(raw_path: str, content: str) -> None:
        path = _sanitize_relative_path(raw_path)
        if not path or path in seen:
            return
        seen.add(path)
        edits.append({
            "file": path,
            "kind": "create",
            "content": content.rstrip(),
            "summary": f"Create {path}",
        })

    # Format 1 (preferred): ```lang filepath\n...code...\n```
    for filepath, content in re.findall(
        r"```(?:\w+)?\s+([^\n`]+?\.\w+)\s*\n(.*?)```",
        answer,
        re.DOTALL,
    ):
        _record(filepath, content)
    if edits:
        return edits

    # Format 2: non-standard "lang filepath" followed by ----delimited code,
    # optionally wrapped in a bare fence.
    for _lang, filepath, content in re.findall(
        r"(?:```\n?)?(\w+)\s+([^\n]+?\.\w+)\s*\n-{3,}\n(.*?)\n-{3,}",
        answer,
        re.DOTALL,
    ):
        _record(filepath, content)
    if edits:
        return edits

    # Format 3: "save this as `file`" / "create a file called `file`" mentions,
    # zipped positionally against the plain fenced code blocks.
    mentions = re.findall(
        r"(?:save\s+(?:this\s+)?(?:as|to|in)|create\s+(?:a\s+)?(?:file\s+)?(?:called|named)?)\s+[`\"']?([^\s`\"']+\.\w+)[`\"']?",
        answer,
        re.IGNORECASE,
    )
    blocks = re.findall(r"```\w*\n(.*?)```", answer, re.DOTALL)
    for filename, content in zip(mentions, blocks):
        _record(filename, content)

    return edits
+
+
def _build_local_repo_aware_prompt(req, session) -> str:
    """Assemble the full prompt for local (VS Code) chat requests.

    Joins with "---" separators, in order: the system instructions
    (including the strict fenced-file output format the LLM must follow),
    session metadata, project context, working-set context, an optional
    task summary, and finally the user message.

    ``req`` is a request model exposing message/project_context/working_set/
    task_context; ``session`` carries mode/folder_path/repo_root/branch —
    assumed shapes, read defensively via getattr.
    """
    task_summary = getattr(getattr(req, "task_context", None), "summary", None)

    # System instructions — the file-output format uses triple-backtick
    # fences with the filepath on the opening line. We use a raw block
    # to avoid confusion when the prompt is joined with --- separators.
    system_block = (
        "You are GitPilot, a multi-agent AI coding assistant running in VS Code.\n"
        "Use the supplied repository metadata, working-set context, and user request to answer precisely.\n"
        "\n"
        "IMPORTANT FILE OUTPUT FORMAT:\n"
        "When you create or edit files, you MUST use triple-backtick fenced code blocks\n"
        "with the language AND the file path on the SAME opening line.\n"
        "\n"
        "Correct format (you MUST follow this exactly):\n"
        "\n"
        "    ```python hello.py\n"
        "    print('Hello, World!')\n"
        "    ```\n"
        "\n"
        "    ```typescript src/utils/validate.ts\n"
        "    export function validate(input: string): boolean {\n"
        "        return input.length > 0;\n"
        "    }\n"
        "    ```\n"
        "\n"
        "Rules:\n"
        "- The opening fence MUST be triple backticks followed by the language then the filepath.\n"
        "- The closing fence MUST be triple backticks on their own line.\n"
        "- Do NOT use --- separators or any other format.\n"
        "- Output the COMPLETE file content, not just a snippet.\n"
        "- For edits to existing files, output the full updated file.\n"
        "- Be explicit about which files to create or modify and why.\n"
        "- Prefer incremental, production-safe changes over large rewrites."
    )

    sections = [system_block]

    session_lines = [
        f"Session mode: {getattr(session, 'mode', None)}",
        f"Folder path: {getattr(session, 'folder_path', None)}",
        f"Repo root: {getattr(session, 'repo_root', None)}",
        f"Branch: {getattr(session, 'branch', None)}",
    ]

    # Drop lines whose value rendered as None (attribute missing/unset).
    valid_session_lines = [
        line for line in session_lines if line and not line.endswith(": None")
    ]
    if valid_session_lines:
        sections.append("Session context:\n" + "\n".join(valid_session_lines))

    project_txt = _project_context_to_text(getattr(req, "project_context", None))
    if project_txt:
        sections.append("Project context:\n" + project_txt)

    working_txt = _working_set_to_text(getattr(req, "working_set", None))
    if working_txt:
        sections.append("Working set:\n" + working_txt)

    if task_summary:
        sections.append("Task context:\n" + task_summary)

    sections.append("User request:\n" + req.message)

    return "\n\n---\n\n".join(sections)
+
def get_github_token(authorization: Optional[str] = Header(None)) -> Optional[str]:
    """
    Extract a GitHub token from an Authorization header value.

    Accepted formats:
    - "Bearer <token>"
    - "token <token>"
    - "<token>" (raw value, no scheme prefix)

    Returns None when no header was supplied.
    """
    if not authorization:
        return None

    for prefix in ("Bearer ", "token "):
        if authorization.startswith(prefix):
            return authorization[len(prefix):]
    return authorization
+
+
# --- FIXED: Added default_branch to model ---
class RepoSummary(BaseModel):
    """Minimal repository descriptor returned by the repo-listing endpoints."""
    id: int
    name: str
    full_name: str
    private: bool
    owner: str
    default_branch: str = "main" # <--- CRITICAL FIX: Defaults to main, but can be master/dev


class PaginatedReposResponse(BaseModel):
    """Response model for paginated repository listing."""
    repositories: List[RepoSummary]
    page: int
    per_page: int
    total_count: Optional[int] = None  # None when the total is unknown
    has_more: bool
    query: Optional[str] = None  # echo of the search query, if any


class FileEntry(BaseModel):
    """One entry of a repository tree (path plus entry type)."""
    path: str
    type: str


class FileTreeResponse(BaseModel):
    """Flat file listing for /api/repos/{owner}/{repo}/tree."""
    files: List[FileEntry] = Field(default_factory=list)


class FileContent(BaseModel):
    """Decoded file contents for /api/repos/{owner}/{repo}/file."""
    path: str
    encoding: str = "utf-8"
    content: str


class CommitRequest(BaseModel):
    """Payload for committing a single file change."""
    path: str
    content: str
    message: str


class CommitResponse(BaseModel):
    """Result of a file commit (SHA plus optional web URL)."""
    path: str
    commit_sha: str
    commit_url: Optional[str] = None
+
+
class SettingsResponse(BaseModel):
    """Snapshot of LLM settings returned to the Admin UI."""
    provider: LLMProvider  # currently active provider
    providers: List[LLMProvider]  # all selectable providers
    openai: dict
    claude: dict
    watsonx: dict
    ollama: dict
    ollabridge: dict
    langflow_url: str
    has_langflow_plan_flow: bool


class ProviderModelsResponse(BaseModel):
    """Model list for one provider; `error` is set when discovery failed."""
    provider: LLMProvider
    models: List[str] = Field(default_factory=list)
    error: Optional[str] = None


class ProviderUpdate(BaseModel):
    """Request body for switching the active LLM provider."""
    provider: LLMProvider


class ChatPlanRequest(BaseModel):
    """Request to generate an execution plan for a repository goal."""
    repo_owner: str
    repo_name: str
    goal: str
    branch_name: Optional[str] = None


class ExecutePlanRequest(BaseModel):
    """Request to execute a previously generated plan."""
    repo_owner: str
    repo_name: str
    plan: PlanResult
    branch_name: Optional[str] = None


class AuthUrlResponse(BaseModel):
    """OAuth authorization URL plus CSRF state token."""
    authorization_url: str
    state: str


class AuthCallbackRequest(BaseModel):
    """OAuth callback payload (authorization code + state)."""
    code: str
    state: str


class TokenValidationRequest(BaseModel):
    """Request to validate a GitHub access token."""
    access_token: str


class UserInfoResponse(BaseModel):
    """Authenticated GitHub user info."""
    user: GitHubUser
    authenticated: bool


class RepoAccessResponse(BaseModel):
    """Write-access probe result for a repository."""
    can_write: bool
    app_installed: bool
    auth_type: str
+
+
# --- v2 Request/Response models ---

class ChatRequest(BaseModel):
    """Unified chat request for the conversational dispatcher."""
    repo_owner: str
    repo_name: str
    message: str
    branch_name: Optional[str] = None
    auto_pr: bool = False  # create a PR automatically after execution
    topology_id: Optional[str] = None # Override topology for this request


class IssueCreateRequest(BaseModel):
    """Payload for creating a GitHub issue."""
    title: str
    body: Optional[str] = None
    labels: Optional[List[str]] = None
    assignees: Optional[List[str]] = None
    milestone: Optional[int] = None


class IssueUpdateRequest(BaseModel):
    """Partial update for an issue; unset fields are left unchanged."""
    title: Optional[str] = None
    body: Optional[str] = None
    state: Optional[str] = None
    labels: Optional[List[str]] = None
    assignees: Optional[List[str]] = None
    milestone: Optional[int] = None


class IssueCommentRequest(BaseModel):
    """Payload for adding a comment to an issue."""
    body: str


class PRCreateRequest(BaseModel):
    """Payload for opening a pull request (head branch into base branch)."""
    title: str
    head: str
    base: str
    body: Optional[str] = None
    draft: bool = False


class PRMergeRequest(BaseModel):
    """Payload for merging a pull request."""
    # merge strategy; presumably "merge" | "squash" | "rebase" — confirm
    # against the GitHub merge API before relying on other values.
    merge_method: str = "merge"
    commit_title: Optional[str] = None
    commit_message: Optional[str] = None


class SearchRequest(BaseModel):
    """Paginated search query payload."""
    query: str
    per_page: int = 30
    page: int = 1
+
+
+# ============================================================================
+# Repository Endpoints - Enterprise Grade with Pagination & Search
+# ============================================================================
+
+@app.get("/api/repos", response_model=PaginatedReposResponse)
+async def api_list_repos(
+ query: str | None = Query(None, description="Search query"),
+ page: int = Query(1, ge=1),
+ per_page: int = Query(100, ge=1, le=100),
+ authorization: str | None = Header(None),
+):
+ token = get_github_token(authorization)
+
+ try:
+ if query:
+ result = await search_user_repos(
+ query=query,
+ page=page,
+ per_page=per_page,
+ token=token,
+ )
+ else:
+ result = await list_user_repos_paginated(
+ page=page,
+ per_page=per_page,
+ token=token,
+ )
+
+ repos = [
+ RepoSummary(
+ id=r["id"],
+ name=r["name"],
+ full_name=r["full_name"],
+ private=r["private"],
+ owner=r["owner"],
+ default_branch=r.get("default_branch", "main"),
+ )
+ for r in result["repositories"]
+ ]
+
+ return PaginatedReposResponse(
+ repositories=repos,
+ page=result["page"],
+ per_page=result["per_page"],
+ total_count=result.get("total_count"),
+ has_more=result["has_more"],
+ query=query,
+ )
+
+ except httpx.ConnectTimeout:
+ logger.exception("GitHub connection timed out while fetching repositories")
+ raise HTTPException(
+ status_code=504,
+ detail="Timed out while connecting to GitHub. Please try again."
+ )
+
+ except httpx.TimeoutException:
+ logger.exception("GitHub request timed out while fetching repositories")
+ raise HTTPException(
+ status_code=504,
+ detail="GitHub request timed out. Please try again."
+ )
+
+ except httpx.HTTPError as e:
+ logger.exception("GitHub HTTP error while fetching repositories")
+ raise HTTPException(
+ status_code=502,
+ detail=f"Failed to contact GitHub: {str(e)}"
+ )
+
+ except Exception as e:
+ logger.exception("Error fetching repositories")
+ raise HTTPException(
+ status_code=500,
+ detail=f"Unexpected error fetching repositories: {str(e)}"
+ )
+
+@app.get("/api/repos/all")
+async def api_list_all_repos(
+ query: Optional[str] = Query(None, description="Search query"),
+ authorization: Optional[str] = Header(None),
+):
+ """
+ Fetch ALL user repositories at once (no pagination).
+ Useful for quick searches, but paginated endpoint is preferred.
+ """
+ token = get_github_token(authorization)
+
+ try:
+ # Fetch all repositories (this will make multiple API calls)
+ all_repos = []
+ page = 1
+ max_pages = 15 # Safety limit: 1500 repos max (15 * 100)
+
+ while page <= max_pages:
+ result = await list_user_repos_paginated(
+ page=page,
+ per_page=100,
+ token=token
+ )
+
+ all_repos.extend(result["repositories"])
+
+ if not result["has_more"]:
+ break
+
+ page += 1
+
+ # Filter by query if provided
+ if query:
+ query_lower = query.lower()
+ all_repos = [
+ r for r in all_repos
+ if query_lower in r["name"].lower() or query_lower in r["full_name"].lower()
+ ]
+
+ # --- FIXED: Mapping default_branch ---
+ repos = [
+ RepoSummary(
+ id=r["id"],
+ name=r["name"],
+ full_name=r["full_name"],
+ private=r["private"],
+ owner=r["owner"],
+ default_branch=r.get("default_branch", "main"), # <--- CRITICAL FIX
+ )
+ for r in all_repos
+ ]
+
+ return {
+ "repositories": repos,
+ "total_count": len(repos),
+ "query": query,
+ }
+
+ except Exception as e:
+ logging.exception("Error fetching all repositories")
+ return JSONResponse(
+ content={"error": f"Failed to fetch repositories: {str(e)}"},
+ status_code=500
+ )
+
+
+@app.get("/api/repos/{owner}/{repo}/tree", response_model=FileTreeResponse)
+async def api_repo_tree(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ ref: Optional[str] = Query(
+ None,
+ description="Git reference (branch, tag, or commit SHA). If omitted, defaults to HEAD.",
+ ),
+ authorization: Optional[str] = Header(None),
+):
+ """
+ Get the file tree for a repository.
+ Handles 'main' vs 'master' discrepancies and empty repositories gracefully.
+ """
+ token = get_github_token(authorization)
+
+ # Keep legacy behavior: missing/empty ref behaves like HEAD.
+ ref_value = (ref or "").strip() or "HEAD"
+
+ try:
+ tree = await get_repo_tree(owner, repo, token=token, ref=ref_value)
+ return FileTreeResponse(files=[FileEntry(**f) for f in tree])
+
+ except HTTPException as e:
+ if e.status_code == 409:
+ return FileTreeResponse(files=[])
+
+ if e.status_code == 404:
+ return JSONResponse(
+ status_code=404,
+ content={
+ "detail": f"Ref '{ref_value}' not found. The repository might be using a different default branch (e.g., 'master')."
+ }
+ )
+
+ raise e
+
+
+@app.get("/api/repos/{owner}/{repo}/file", response_model=FileContent)
+async def api_get_file(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ path: str = Query(...),
+ authorization: Optional[str] = Header(None),
+):
+ token = get_github_token(authorization)
+ content = await get_file(owner, repo, path, token=token)
+ return FileContent(path=path, content=content)
+
+
+@app.post("/api/repos/{owner}/{repo}/file", response_model=CommitResponse)
+async def api_put_file(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ payload: CommitRequest = ...,
+ authorization: Optional[str] = Header(None),
+):
+ token = get_github_token(authorization)
+ result = await put_file(
+ owner, repo, payload.path, payload.content, payload.message, token=token
+ )
+ return CommitResponse(**result)
+
+
+# ============================================================================
+# Settings Endpoints
+# ============================================================================
+
def settings_response_from(s: AppSettings) -> SettingsResponse:
    """Build the API settings payload from an AppSettings snapshot."""
    all_providers = [
        LLMProvider.openai,
        LLMProvider.claude,
        LLMProvider.watsonx,
        LLMProvider.ollama,
        LLMProvider.ollabridge,
    ]
    return SettingsResponse(
        provider=s.provider,
        providers=all_providers,
        openai=s.openai.model_dump(),
        claude=s.claude.model_dump(),
        watsonx=s.watsonx.model_dump(),
        ollama=s.ollama.model_dump(),
        ollabridge=s.ollabridge.model_dump(),
        langflow_url=s.langflow_url,
        has_langflow_plan_flow=bool(s.langflow_plan_flow_id),
    )
+
+
+@app.get("/api/settings", response_model=SettingsResponse)
+async def api_get_settings():
+ """
+ Fast path:
+ Return persisted settings immediately without probing providers/models.
+
+ This keeps the Admin / LLM Settings page fast on first render.
+ """
+ s: AppSettings = get_settings()
+ return settings_response_from(s)
+
+
+@app.post("/api/settings/bootstrap", response_model=SettingsResponse)
+async def api_bootstrap_settings():
+ """
+ Slow path:
+ Perform local provider/model auto-configuration explicitly.
+
+ This can be called after the page renders, or on startup, without blocking
+ the first settings paint.
+ """
+ s: AppSettings = autoconfigure_local_provider()
+ return settings_response_from(s)
+
+
+@app.get("/api/settings/models", response_model=ProviderModelsResponse)
+async def api_list_models(provider: Optional[LLMProvider] = Query(None)):
+ """
+ Return the list of LLM models available for a provider.
+
+ If 'provider' is not given, use the currently active provider from settings.
+ """
+ s: AppSettings = get_settings()
+ effective_provider = provider or s.provider
+
+ models, error = list_models_for_provider(effective_provider, s)
+
+ return ProviderModelsResponse(
+ provider=effective_provider,
+ models=models,
+ error=error,
+ )
+
+
+@app.post("/api/settings/provider", response_model=SettingsResponse)
+async def api_set_provider(update: ProviderUpdate):
+ """
+ Provider changes may legitimately trigger local bootstrap, but only when
+ switching to local providers.
+ """
+ s = set_provider(update.provider)
+
+ if s.provider in (LLMProvider.ollama, LLMProvider.ollabridge):
+ s = autoconfigure_local_provider(force=True)
+
+ return settings_response_from(s)
+
+
@app.put("/api/settings/llm", response_model=SettingsResponse)
async def api_update_llm_settings(updates: dict):
    """
    Update full LLM settings including provider-specific configs.

    Important:
        - Do NOT auto-probe providers here on every save.
        - Saving should be fast and deterministic.

    Fix: an older implementation (auto-probing local providers and building
    the SettingsResponse by hand) sat *after* the return statement and was
    unreachable dead code; it has been removed.
    """
    s = update_settings(updates)
    return settings_response_from(s)
+
+
+# ============================================================================
+# Chat Endpoints
+# ============================================================================
+
@app.post("/api/chat/plan")
async def api_chat_plan(req: ChatPlanRequest, authorization: Optional[str] = Header(None)):
    """Generate an execution plan for a goal against a repository.

    Chooses the lite planner when Lite Mode is active, and translates common
    LLM failure modes into actionable HTTP errors, checked in this order:
      * quota / rate-limit errors        -> 429
      * empty / invalid LLM responses    -> 502
      * malformed structured plan output -> silent fallback to lite planner
    Anything else is re-raised unchanged.
    """
    token = get_github_token(authorization)

    logger.info(
        "PLAN REQUEST: %s/%s | branch_name=%r",
        req.repo_owner,
        req.repo_name,
        req.branch_name,
    )

    with execution_context(token, ref=req.branch_name):
        full_name = f"{req.repo_owner}/{req.repo_name}"

        # Use lite planner when Lite Mode is active (setting OR topology)
        planner = generate_plan_lite if _is_lite_mode_active() else generate_plan

        try:
            plan = await planner(req.goal, full_name, token=token, branch_name=req.branch_name)
            return plan
        except Exception as exc:
            error_msg = str(exc)

            # ── Quota / rate-limit detection ────────────────
            # Keywords are matched case-insensitively against the provider's
            # error text.
            _quota_keywords = [
                "insufficient_quota", "exceeded your current quota",
                "rate_limit_exceeded", "429",
                "billing", "plan and billing",
            ]
            _is_quota = any(kw in error_msg.lower() for kw in _quota_keywords)
            if _is_quota:
                logger.warning("[GitPilot] LLM quota/rate-limit error: %s", error_msg)
                raise HTTPException(
                    status_code=429,
                    detail=(
                        "Your LLM provider credits have been exhausted or you've hit "
                        "a rate limit. Please check your plan and billing details at "
                        "your provider's dashboard, or switch to a different provider "
                        "in Settings (e.g. Ollama or OllaBridge for free local models)."
                    ),
                ) from exc

            # ── Empty/invalid LLM response (small model can't follow ReAct) ─
            # NOTE: these markers are matched case-sensitively, unlike the
            # quota keywords above.
            _empty_llm_errors = (
                "No valid task outputs",
                "Invalid response from LLM call",
                "None or empty",
            )
            if any(kw in error_msg for kw in _empty_llm_errors):
                logger.warning(
                    "[GitPilot] LLM returned empty/invalid response — "
                    "model may be too small for multi-agent CrewAI prompts: %s",
                    error_msg,
                )
                raise HTTPException(
                    status_code=502,
                    detail=(
                        "The LLM could not complete the multi-agent reasoning. "
                        "This usually happens with small local models "
                        "(qwen2.5:0.5b, tinyllama, phi3:mini, etc.) that struggle "
                        "with the ReAct format. Solutions:\n"
                        "• Switch to a larger model (llama3, qwen2.5:7b, mistral)\n"
                        "• Enable Lite Mode in Settings for simpler prompts\n"
                        "• Use a cloud provider (OpenAI, Claude) for complex tasks"
                    ),
                ) from exc

            # ── Structured-output parse failure (common with small models) ─
            # On a parse failure we retry once with the lite planner instead
            # of surfacing an error to the client.
            _plan_parse_markers = (
                "validation error for planresult",
                "json_invalid",
                "invalid json: key must be a string",
            )
            if any(marker in error_msg.lower() for marker in _plan_parse_markers):
                logger.warning(
                    "[GitPilot] Planner returned malformed structured output. "
                    "Falling back to Lite planner. Error: %s",
                    error_msg,
                )
                try:
                    return await generate_plan_lite(
                        req.goal,
                        full_name,
                        token=token,
                        branch_name=req.branch_name,
                    )
                except Exception as lite_exc:
                    # Even the lite fallback failed — log and surface it.
                    logger.exception(
                        "[GitPilot] Lite planner fallback also failed after parse error: %s",
                        lite_exc,
                    )
                    raise

            # Re-raise anything else
            raise
+
+
@app.post("/api/chat/execute")
async def api_chat_execute(
    req: ExecutePlanRequest,
    authorization: Optional[str] = Header(None)
):
    """Execute a previously generated plan against a repository.

    Uses the lite executor when Lite Mode is active and maps common LLM
    failure modes to HTTP errors, checked in this order:
    quota/rate-limit -> 429, empty/invalid response -> 502,
    timeout -> 504, circuit breaker open -> 503.
    All other exceptions propagate unchanged.
    """
    token = get_github_token(authorization)

    with execution_context(token, ref=req.branch_name):
        full_name = f"{req.repo_owner}/{req.repo_name}"
        executor = execute_plan_lite if _is_lite_mode_active() else execute_plan
        try:
            result = await executor(
                req.plan, full_name, token=token, branch_name=req.branch_name
            )
        except Exception as exc:
            error_msg = str(exc)
            # Quota / rate-limit markers, matched case-insensitively.
            _quota_keywords = [
                "insufficient_quota", "exceeded your current quota",
                "rate_limit_exceeded", "429", "billing",
            ]
            if any(kw in error_msg.lower() for kw in _quota_keywords):
                raise HTTPException(
                    status_code=429,
                    detail=(
                        "Your LLM provider credits have been exhausted or you've hit "
                        "a rate limit. Please check your plan and billing details, "
                        "or switch to a free local provider in Settings."
                    ),
                ) from exc
            # Empty/invalid LLM output markers (matched case-sensitively).
            _empty_llm_errors = (
                "No valid task outputs",
                "Invalid response from LLM call",
                "None or empty",
            )
            if any(kw in error_msg for kw in _empty_llm_errors):
                raise HTTPException(
                    status_code=502,
                    detail=(
                        "The LLM could not complete the task. This usually happens "
                        "with small local models (qwen2.5:0.5b, tinyllama, phi3:mini). "
                        "Try a larger model (llama3, qwen2.5:7b), enable Lite Mode "
                        "in Settings, or use a cloud provider."
                    ),
                ) from exc
            if isinstance(exc, TimeoutError) or "timed out" in error_msg.lower():
                raise HTTPException(
                    status_code=504,
                    detail=(
                        "The agent operation timed out. The LLM provider may be "
                        "overloaded. Try again or switch to a faster provider."
                    ),
                ) from exc
            if "circuit breaker" in error_msg.lower():
                raise HTTPException(
                    status_code=503,
                    detail=(
                        "The LLM provider is temporarily unavailable after repeated "
                        "failures. Please wait and try again shortly."
                    ),
                ) from exc
            raise
        # Tag any dict result so the client knows which branch-handling
        # mode ran: "sticky" (explicit branch) vs "hard-switch".
        if isinstance(result, dict):
            result.setdefault(
                "mode",
                "sticky" if req.branch_name else "hard-switch",
            )
        return result
+
+
@app.get("/api/flow/current")
async def api_get_flow(topology: Optional[str] = Query(None)):
    """Return the agent flow definition as a graph.

    Resolution order: the explicit ``topology`` query parameter first, then
    the user's saved preference, and finally the legacy
    ``get_flow_definition()`` output for full backward compatibility.
    """
    chosen = topology or get_saved_topology_preference()
    if chosen:
        return _get_topology_graph(chosen)
    # Legacy path — returns the original hardcoded graph
    return await get_flow_definition()
+
+
+# ============================================================================
+# Topology Registry Endpoints (additive — no existing behaviour changed)
+# ============================================================================
+
@app.get("/api/flow/topologies")
async def api_list_topologies():
    """Return lightweight summaries of every available topology preset."""
    summaries = _list_topologies()
    return summaries
+
+
@app.get("/api/flow/topology/{topology_id}")
async def api_get_topology(topology_id: str):
    """Return the complete flow graph for one topology preset."""
    graph = _get_topology_graph(topology_id)
    return graph
+
+
class ClassifyRequest(BaseModel):
    # Free-form user message to classify into a topology recommendation.
    message: str
+
+
@app.post("/api/flow/classify")
async def api_classify_message(req: ClassifyRequest):
    """Auto-detect the best topology for a given user message.

    The response contains the recommended topology, a confidence score,
    and up to 4 alternatives ranked by relevance.
    """
    classification = _classify_message(req.message)
    return classification.to_dict()
+
+
class TopologyPrefRequest(BaseModel):
    # Identifier of the topology preset to save as the user's preference.
    topology: str
+
+
@app.get("/api/settings/topology")
async def api_get_topology_pref():
    """Return the user's saved topology preference (null when unset)."""
    return {"topology": get_saved_topology_preference()}
+
+
@app.post("/api/settings/topology")
async def api_set_topology_pref(req: TopologyPrefRequest):
    """Persist the user's preferred topology."""
    choice = req.topology
    save_topology_preference(choice)
    return {"status": "ok", "topology": choice}
+
+
+# ============================================================================
+# Conversational Chat Endpoint (v2 upgrade)
+# ============================================================================
+
@app.post("/api/chat/message")
async def api_chat_message(req: ChatRequest, authorization: Optional[str] = Header(None)):
    """
    Unified conversational endpoint. The router analyses the message and
    dispatches to the appropriate agent (issue, PR, search, review, learning,
    or the existing plan+execute pipeline).

    Common LLM failure modes are translated to HTTP errors, checked in this
    order: quota/rate-limit -> 429, empty/invalid response -> 502,
    timeout -> 504, circuit breaker open -> 503; anything else propagates.
    """
    token = get_github_token(authorization)

    logger.info(
        "CHAT MESSAGE: %s/%s | message=%r | branch=%r",
        req.repo_owner,
        req.repo_name,
        req.message[:80],
        req.branch_name,
    )

    with execution_context(token, ref=req.branch_name):
        full_name = f"{req.repo_owner}/{req.repo_name}"
        try:
            result = await dispatch_request(
                req.message, full_name, token=token, branch_name=req.branch_name,
                topology_id=req.topology_id,
            )
        except Exception as exc:
            error_msg = str(exc)
            # Quota / rate-limit markers, matched case-insensitively.
            _quota_keywords = [
                "insufficient_quota", "exceeded your current quota",
                "rate_limit_exceeded", "429", "billing",
            ]
            if any(kw in error_msg.lower() for kw in _quota_keywords):
                raise HTTPException(
                    status_code=429,
                    detail=(
                        "Your LLM provider credits have been exhausted or you've hit "
                        "a rate limit. Please check your plan and billing details, "
                        "or switch to a free local provider in Settings."
                    ),
                ) from exc
            # Empty/invalid LLM output markers (matched case-sensitively).
            _empty_llm_errors = (
                "No valid task outputs",
                "Invalid response from LLM call",
                "None or empty",
            )
            if any(kw in error_msg for kw in _empty_llm_errors):
                raise HTTPException(
                    status_code=502,
                    detail=(
                        "The LLM could not complete the task. This usually happens "
                        "with small local models (qwen2.5:0.5b, tinyllama, phi3:mini). "
                        "Try a larger model (llama3, qwen2.5:7b), enable Lite Mode "
                        "in Settings, or use a cloud provider."
                    ),
                ) from exc
            if isinstance(exc, TimeoutError) or "timed out" in error_msg.lower():
                raise HTTPException(
                    status_code=504,
                    detail=(
                        "The agent operation timed out. The LLM provider may be "
                        "overloaded. Try again or switch to a faster provider."
                    ),
                ) from exc
            if "circuit breaker" in error_msg.lower():
                raise HTTPException(
                    status_code=503,
                    detail=(
                        "The LLM provider is temporarily unavailable after repeated "
                        "failures. Please wait and try again shortly."
                    ),
                ) from exc
            raise

    # If auto_pr is requested and execution completed, create PR
    # (planning only returns a hint — the PR is created after execution).
    if (
        req.auto_pr
        and isinstance(result, dict)
        and result.get("category") == "plan_execute"
        and result.get("plan")
    ):
        result["auto_pr_hint"] = (
            "Plan generated. Execute it first, then auto-PR will be created."
        )

    return result
+
+
@app.post("/api/chat/execute-with-pr")
async def api_chat_execute_with_pr(
    req: ExecutePlanRequest,
    authorization: Optional[str] = Header(None),
):
    """Execute a plan AND automatically create a pull request afterwards.

    Mirrors /api/chat/execute (same executor selection, quota -> 429 and
    empty-response -> 502 translation) and, when execution reports
    ``status == "completed"`` with a branch, opens a PR from that branch and
    attaches it to the result under "pull_request".
    """
    token = get_github_token(authorization)

    with execution_context(token, ref=req.branch_name):
        full_name = f"{req.repo_owner}/{req.repo_name}"
        executor = execute_plan_lite if _is_lite_mode_active() else execute_plan
        try:
            result = await executor(
                req.plan, full_name, token=token, branch_name=req.branch_name,
            )
        except Exception as exc:
            error_msg = str(exc)
            # Quota / rate-limit markers, matched case-insensitively.
            _quota_keywords = [
                "insufficient_quota", "exceeded your current quota",
                "rate_limit_exceeded", "429", "billing",
            ]
            if any(kw in error_msg.lower() for kw in _quota_keywords):
                raise HTTPException(
                    status_code=429,
                    detail=(
                        "Your LLM provider credits have been exhausted. "
                        "Check billing or switch to a free local provider."
                    ),
                ) from exc
            if "No valid task outputs" in error_msg:
                raise HTTPException(
                    status_code=502,
                    detail=(
                        "The LLM returned an empty response. Try enabling "
                        "Lite Mode for better results with small models."
                    ),
                ) from exc
            raise

        if isinstance(result, dict):
            if result.get("status") == "completed":
                branch = result.get("branch", req.branch_name)
                if branch:
                    pr = await create_pr_after_execution(
                        full_name,
                        branch,
                        req.plan.goal,
                        result.get("executionLog", {}),
                        token=token,
                    )
                    if pr:
                        result["pull_request"] = {
                            "number": pr.get("number"),
                            "url": pr.get("html_url"),
                            "title": pr.get("title"),
                        }

            # Consistency fix: /api/chat/execute tags EVERY dict result with
            # "mode"; previously this endpoint only did so for completed runs.
            result.setdefault(
                "mode",
                "sticky" if req.branch_name else "hard-switch",
            )

        return result
+
+
+# ============================================================================
+# Issue Endpoints (v2 upgrade)
+# ============================================================================
+
@app.get("/api/repos/{owner}/{repo}/issues")
async def api_list_issues(
    owner: str = FPath(...),
    repo: str = FPath(...),
    state: str = Query("open"),
    labels: Optional[str] = Query(None),
    per_page: int = Query(30, ge=1, le=100),
    page: int = Query(1, ge=1),
    authorization: Optional[str] = Header(None),
):
    """List a repository's issues, forwarding filters and pagination."""
    gh_token = get_github_token(authorization)
    fetched = await github_issues.list_issues(
        owner,
        repo,
        state=state,
        labels=labels,
        per_page=per_page,
        page=page,
        token=gh_token,
    )
    return {"issues": fetched, "page": page, "per_page": per_page}
+
+
@app.get("/api/repos/{owner}/{repo}/issues/{issue_number}")
async def api_get_issue(
    owner: str = FPath(...),
    repo: str = FPath(...),
    issue_number: int = FPath(...),
    authorization: Optional[str] = Header(None),
):
    """Fetch a single issue by its number."""
    gh_token = get_github_token(authorization)
    issue = await github_issues.get_issue(owner, repo, issue_number, token=gh_token)
    return issue
+
+
@app.post("/api/repos/{owner}/{repo}/issues")
async def api_create_issue(
    owner: str = FPath(...),
    repo: str = FPath(...),
    payload: IssueCreateRequest = ...,
    authorization: Optional[str] = Header(None),
):
    """Open a new issue on the repository."""
    gh_token = get_github_token(authorization)
    return await github_issues.create_issue(
        owner,
        repo,
        payload.title,
        body=payload.body,
        labels=payload.labels,
        assignees=payload.assignees,
        milestone=payload.milestone,
        token=gh_token,
    )
+
+
@app.patch("/api/repos/{owner}/{repo}/issues/{issue_number}")
async def api_update_issue(
    owner: str = FPath(...),
    repo: str = FPath(...),
    issue_number: int = FPath(...),
    payload: IssueUpdateRequest = ...,
    authorization: Optional[str] = Header(None),
):
    """Update an existing issue (title, body, state, labels, …)."""
    gh_token = get_github_token(authorization)
    return await github_issues.update_issue(
        owner,
        repo,
        issue_number,
        title=payload.title,
        body=payload.body,
        state=payload.state,
        labels=payload.labels,
        assignees=payload.assignees,
        milestone=payload.milestone,
        token=gh_token,
    )
+
+
@app.get("/api/repos/{owner}/{repo}/issues/{issue_number}/comments")
async def api_list_issue_comments(
    owner: str = FPath(...),
    repo: str = FPath(...),
    issue_number: int = FPath(...),
    authorization: Optional[str] = Header(None),
):
    """Return all comments on an issue."""
    gh_token = get_github_token(authorization)
    comments = await github_issues.list_issue_comments(owner, repo, issue_number, token=gh_token)
    return comments
+
+
@app.post("/api/repos/{owner}/{repo}/issues/{issue_number}/comments")
async def api_add_issue_comment(
    owner: str = FPath(...),
    repo: str = FPath(...),
    issue_number: int = FPath(...),
    payload: IssueCommentRequest = ...,
    authorization: Optional[str] = Header(None),
):
    """Post a new comment on an issue."""
    gh_token = get_github_token(authorization)
    return await github_issues.add_issue_comment(
        owner,
        repo,
        issue_number,
        payload.body,
        token=gh_token,
    )
+
+
+# ============================================================================
+# Pull Request Endpoints (v2 upgrade)
+# ============================================================================
+
@app.get("/api/repos/{owner}/{repo}/pulls")
async def api_list_pulls(
    owner: str = FPath(...),
    repo: str = FPath(...),
    state: str = Query("open"),
    per_page: int = Query(30, ge=1, le=100),
    page: int = Query(1, ge=1),
    authorization: Optional[str] = Header(None),
):
    """List a repository's pull requests with pagination."""
    gh_token = get_github_token(authorization)
    fetched = await github_pulls.list_pull_requests(
        owner,
        repo,
        state=state,
        per_page=per_page,
        page=page,
        token=gh_token,
    )
    return {"pull_requests": fetched, "page": page, "per_page": per_page}
+
+
@app.get("/api/repos/{owner}/{repo}/pulls/{pull_number}")
async def api_get_pull(
    owner: str = FPath(...),
    repo: str = FPath(...),
    pull_number: int = FPath(...),
    authorization: Optional[str] = Header(None),
):
    """Fetch a single pull request by its number."""
    gh_token = get_github_token(authorization)
    pull = await github_pulls.get_pull_request(owner, repo, pull_number, token=gh_token)
    return pull
+
+
@app.post("/api/repos/{owner}/{repo}/pulls")
async def api_create_pull(
    owner: str = FPath(...),
    repo: str = FPath(...),
    payload: PRCreateRequest = ...,
    authorization: Optional[str] = Header(None),
):
    """Open a new pull request from head into base."""
    gh_token = get_github_token(authorization)
    return await github_pulls.create_pull_request(
        owner,
        repo,
        title=payload.title,
        head=payload.head,
        base=payload.base,
        body=payload.body,
        draft=payload.draft,
        token=gh_token,
    )
+
+
@app.put("/api/repos/{owner}/{repo}/pulls/{pull_number}/merge")
async def api_merge_pull(
    owner: str = FPath(...),
    repo: str = FPath(...),
    pull_number: int = FPath(...),
    payload: PRMergeRequest = ...,
    authorization: Optional[str] = Header(None),
):
    """Merge a pull request using the requested merge method."""
    gh_token = get_github_token(authorization)
    return await github_pulls.merge_pull_request(
        owner,
        repo,
        pull_number,
        merge_method=payload.merge_method,
        commit_title=payload.commit_title,
        commit_message=payload.commit_message,
        token=gh_token,
    )
+
+
@app.get("/api/repos/{owner}/{repo}/pulls/{pull_number}/files")
async def api_list_pr_files(
    owner: str = FPath(...),
    repo: str = FPath(...),
    pull_number: int = FPath(...),
    authorization: Optional[str] = Header(None),
):
    """Return the files changed in a pull request."""
    gh_token = get_github_token(authorization)
    changed = await github_pulls.list_pr_files(owner, repo, pull_number, token=gh_token)
    return changed
+
+
+# ============================================================================
+# Search Endpoints (v2 upgrade)
+# ============================================================================
+
@app.get("/api/search/code")
async def api_search_code(
    q: str = Query(..., description="Search query"),
    owner: Optional[str] = Query(None),
    repo: Optional[str] = Query(None),
    language: Optional[str] = Query(None),
    per_page: int = Query(30, ge=1, le=100),
    page: int = Query(1, ge=1),
    authorization: Optional[str] = Header(None),
):
    """Search for code across GitHub with optional owner/repo/language filters."""
    gh_token = get_github_token(authorization)
    return await github_search.search_code(
        q,
        owner=owner,
        repo=repo,
        language=language,
        per_page=per_page,
        page=page,
        token=gh_token,
    )
+
+
@app.get("/api/search/issues")
async def api_search_issues(
    q: str = Query(..., description="Search query"),
    owner: Optional[str] = Query(None),
    repo: Optional[str] = Query(None),
    state: Optional[str] = Query(None),
    label: Optional[str] = Query(None),
    per_page: int = Query(30, ge=1, le=100),
    page: int = Query(1, ge=1),
    authorization: Optional[str] = Header(None),
):
    """Search issues and pull requests with optional filters."""
    gh_token = get_github_token(authorization)
    return await github_search.search_issues(
        q,
        owner=owner,
        repo=repo,
        state=state,
        label=label,
        per_page=per_page,
        page=page,
        token=gh_token,
    )
+
+
@app.get("/api/search/repositories")
async def api_search_repositories(
    q: str = Query(..., description="Search query"),
    language: Optional[str] = Query(None),
    sort: Optional[str] = Query(None),
    per_page: int = Query(30, ge=1, le=100),
    page: int = Query(1, ge=1),
    authorization: Optional[str] = Header(None),
):
    """Search GitHub repositories, optionally filtered by language and sorted."""
    gh_token = get_github_token(authorization)
    return await github_search.search_repositories(
        q,
        language=language,
        sort=sort,
        per_page=per_page,
        page=page,
        token=gh_token,
    )
+
+
@app.get("/api/search/users")
async def api_search_users(
    q: str = Query(..., description="Search query"),
    type_filter: Optional[str] = Query(None, alias="type"),
    location: Optional[str] = Query(None),
    language: Optional[str] = Query(None),
    per_page: int = Query(30, ge=1, le=100),
    page: int = Query(1, ge=1),
    authorization: Optional[str] = Header(None),
):
    """Search GitHub users and organizations with optional filters."""
    gh_token = get_github_token(authorization)
    return await github_search.search_users(
        q,
        type_filter=type_filter,
        location=location,
        language=language,
        per_page=per_page,
        page=page,
        token=gh_token,
    )
+
+
+# ============================================================================
+# Route Analysis Endpoint (v2 upgrade)
+# ============================================================================
+
@app.post("/api/chat/route")
async def api_chat_route(payload: dict):
    """Preview how a message would be routed without executing it.

    Lets the frontend display which agent(s) would handle the request.
    """
    text = payload.get("message", "")
    if not text:
        return JSONResponse({"error": "message is required"}, status_code=400)

    workflow = route_request(text)
    return {
        "category": workflow.category.value,
        "agents": [agent.value for agent in workflow.agents],
        "description": workflow.description,
        "requires_repo_context": workflow.requires_repo_context,
        "entity_number": workflow.entity_number,
        "metadata": workflow.metadata,
    }
+
+
+# ============================================================================
+# Authentication Endpoints (Web Flow + Device Flow)
+# ============================================================================
+
@app.get("/api/auth/url", response_model=AuthUrlResponse)
async def api_get_auth_url():
    """
    Generate a GitHub OAuth authorization URL (Web Flow).
    Requires the Client Secret to be configured.
    """
    url, state = generate_authorization_url()
    return AuthUrlResponse(authorization_url=url, state=state)
+
+
@app.post("/api/auth/callback", response_model=AuthSession)
async def api_auth_callback(request: AuthCallbackRequest):
    """
    Handle the GitHub OAuth callback (Web Flow) by exchanging the
    authorization code for an access token. Exchange errors map to 400.
    """
    try:
        return await exchange_code_for_token(request.code, request.state)
    except ValueError as e:
        return JSONResponse(
            {"error": str(e)},
            status_code=400,
        )
+
+
@app.post("/api/auth/validate", response_model=UserInfoResponse)
async def api_validate_token(request: TokenValidationRequest):
    """Validate a GitHub access token and return the associated user."""
    user = await validate_token(request.access_token)
    if not user:
        # Anonymous placeholder keeps the response model satisfied.
        return UserInfoResponse(
            user=GitHubUser(login="", id=0, avatar_url=""),
            authenticated=False,
        )
    return UserInfoResponse(user=user, authenticated=True)
+
+
@app.post("/api/auth/device/code")
async def api_device_code():
    """
    Start the device login flow (Step 1). No client secret is required.
    """
    try:
        return await initiate_device_flow()
    except Exception as e:
        return JSONResponse({"error": str(e)}, status_code=500)
+
+
@app.post("/api/auth/device/poll")
async def api_device_poll(payload: dict):
    """
    Poll GitHub to check whether the user authorized the device (Step 2).
    """
    device_code = payload.get("device_code")
    if not device_code:
        return JSONResponse({"error": "Missing device_code"}, status_code=400)

    try:
        session = await poll_device_token(device_code)
    except ValueError as e:
        return JSONResponse({"error": str(e)}, status_code=400)

    if session:
        return session
    # Not authorized yet — tell the client to keep polling.
    return JSONResponse({"status": "pending"}, status_code=202)
+
+
@app.get("/api/auth/status")
async def api_auth_status():
    """
    Smart check: Do we have a secret (Web Flow) or just ID (Device Flow)?
    This tells the frontend which UI to render.
    """
    has_secret = bool(os.getenv("GITHUB_CLIENT_SECRET"))
    # NOTE(review): the non-empty fallback makes this always True. If the
    # baked-in client ID is intentional, "configured" is correct; otherwise
    # the default should be "" so a missing env var reports unconfigured.
    has_id = bool(os.getenv("GITHUB_CLIENT_ID", "Iv23litmRp80Z6wmlyRn"))

    return {
        "mode": "web" if has_secret else "device",
        "configured": has_id,
        "oauth_configured": has_secret,
        # True when a personal access token is supplied via environment.
        "pat_configured": bool(os.getenv("GITPILOT_GITHUB_TOKEN") or os.getenv("GITHUB_TOKEN")),
    }
+
+
@app.get("/api/auth/app-url")
async def api_get_app_url():
    """Return the public installation URL of the GitHub App."""
    slug = os.getenv("GITHUB_APP_SLUG", "gitpilota")
    return {
        "app_url": f"https://github.com/apps/{slug}",
        "app_slug": slug,
    }
+
+
@app.get("/api/auth/installation-status")
async def api_check_installation_status():
    """Check if GitHub App is installed for the current user.

    Resolution order:
      1. A PAT from the environment, validated against GitHub -> installed.
      2. Otherwise report not-installed, with auth_type hinting what is
         configured.
    """
    # Prefer an explicit personal access token if one is configured.
    pat_token = os.getenv("GITPILOT_GITHUB_TOKEN") or os.getenv("GITHUB_TOKEN")

    if pat_token:
        user = await validate_token(pat_token)
        if user:
            return {
                "installed": True,
                "access_token": pat_token,
                "user": user,
                "auth_type": "pat",
            }

    # NOTE(review): the non-empty default means the branch below is
    # unreachable; if the baked-in app id is intentional the check is
    # redundant — confirm before removing.
    github_app_id = os.getenv("GITHUB_APP_ID", "2313985")
    if not github_app_id:
        return {
            "installed": False,
            "message": "GitHub authentication not configured.",
            "auth_type": "none",
        }

    return {
        "installed": False,
        "message": "GitHub App not installed.",
        "auth_type": "github_app",
    }
+
+
@app.get("/api/auth/repo-access", response_model=RepoAccessResponse)
async def api_check_repo_access(
    owner: str = Query(...),
    repo: str = Query(...),
    authorization: Optional[str] = Header(None),
):
    """
    Check whether we can write to a repository via a user token or the
    GitHub App, so the frontend knows whether to show installation prompts
    or whether the user already has sufficient permissions.
    """
    user_token = get_github_token(authorization)
    info = await check_repo_write_access(owner, repo, user_token=user_token)

    return RepoAccessResponse(
        can_write=info["can_write"],
        app_installed=info["app_installed"],
        auth_type=info["auth_type"],
    )
+
+
+# ============================================================================
+# Session Endpoints (Phase 1)
+# ============================================================================
+
@app.get("/api/sessions")
async def api_list_sessions():
    """Return summaries of every saved session."""
    saved = _session_mgr.list_sessions()
    return {"sessions": saved}
+
+
@app.post("/api/sessions")
async def api_create_session(payload: dict):
    """Create a new session.

    Accepts either the legacy single-repo shape or the multi-repo shape:
        Legacy: {"repo_full_name": "owner/repo", "branch": "main"}
        Multi:  {"repos": [{full_name, branch, mode}], "active_repo": "owner/repo"}
    """
    repo = payload.get("repo_full_name", "")
    branch = payload.get("branch")
    name = payload.get("name")  # optional — derived from first user prompt
    session = _session_mgr.create(repo_full_name=repo, branch=branch, name=name)

    # Multi-repo context support
    repo_entries = payload.get("repos")
    if repo_entries:
        session.repos = repo_entries
        session.active_repo = payload.get("active_repo", repo)
    elif repo:
        session.repos = [{"full_name": repo, "branch": branch or "main", "mode": "write"}]
        session.active_repo = repo

    _session_mgr.save(session)
    return {"session_id": session.id, "status": session.status}
+
+
@app.get("/api/sessions/{session_id}")
async def api_get_session(session_id: str):
    """Return the details of a single session; 404 when unknown."""
    session = _session_mgr.load(session_id)
    if not session:
        raise HTTPException(status_code=404, detail="Session not found")
    return dict(
        id=session.id,
        status=session.status,
        repo_full_name=session.repo_full_name,
        branch=session.branch,
        created_at=session.created_at,
        message_count=len(session.messages),
        checkpoint_count=len(session.checkpoints),
        repos=session.repos,
        active_repo=session.active_repo,
    )
+
+
@app.delete("/api/sessions/{session_id}")
async def api_delete_session(session_id: str):
    """Delete a session; 404 when it does not exist."""
    if not _session_mgr.delete(session_id):
        raise HTTPException(status_code=404, detail="Session not found")
    return {"deleted": True}
+
+
@app.patch("/api/sessions/{session_id}/context")
async def api_update_session_context(session_id: str, payload: dict):
    """Add, remove, or activate repos in a session's multi-repo context.

    Supported payloads:
        {"action": "add", "repo_full_name": "owner/repo", "branch": "main"}
        {"action": "remove", "repo_full_name": "owner/repo"}
        {"action": "set_active", "repo_full_name": "owner/repo"}
    """
    session = _session_mgr.load(session_id)
    if not session:
        raise HTTPException(status_code=404, detail="Session not found")

    action = payload.get("action")
    repo_name = payload.get("repo_full_name")
    if not (action and repo_name):
        raise HTTPException(status_code=400, detail="action and repo_full_name required")

    known = {entry.get("full_name") for entry in session.repos}

    if action == "add":
        if repo_name not in known:
            session.repos.append({
                "full_name": repo_name,
                "branch": payload.get("branch", "main"),
                # New repos join as read-only until explicitly activated.
                "mode": "read",
            })
        if not session.active_repo:
            session.active_repo = repo_name
    elif action == "remove":
        session.repos = [
            entry for entry in session.repos
            if entry.get("full_name") != repo_name
        ]
        if session.active_repo == repo_name:
            session.active_repo = (
                session.repos[0]["full_name"] if session.repos else None
            )
    elif action == "set_active":
        if repo_name not in known:
            raise HTTPException(status_code=400, detail="Repo not in session context")
        # Exactly one repo holds write mode at a time.
        for entry in session.repos:
            entry["mode"] = "write" if entry.get("full_name") == repo_name else "read"
        session.active_repo = repo_name
    else:
        raise HTTPException(status_code=400, detail=f"Unknown action: {action}")

    _session_mgr.save(session)
    return {
        "repos": session.repos,
        "active_repo": session.active_repo,
    }
+
+
@app.post("/api/sessions/{session_id}/checkpoint")
async def api_create_checkpoint(session_id: str, payload: dict):
    """Create a labelled checkpoint for an existing session; 404 if unknown."""
    session = _session_mgr.load(session_id)
    if not session:
        raise HTTPException(status_code=404, detail="Session not found")
    checkpoint = _session_mgr.create_checkpoint(
        session, label=payload.get("label", "checkpoint")
    )
    return {
        "checkpoint_id": checkpoint.id,
        "label": checkpoint.label,
        "created_at": checkpoint.created_at,
    }
+
+
+# ============================================================================
+# Hooks Endpoints (Phase 1)
+# ============================================================================
+
@app.get("/api/hooks")
async def api_list_hooks():
    """Return every registered hook."""
    registered = _hook_mgr.list_hooks()
    return {"hooks": registered}
+
+
@app.post("/api/hooks")
async def api_register_hook(payload: dict):
    """Register a new hook from a JSON payload.

    Required keys: "event", "name". Optional: "command", "blocking",
    "timeout". Malformed payloads (missing keys, unknown event) yield 400.
    """
    from .hooks import HookDefinition
    try:
        definition = HookDefinition(
            event=HookEvent(payload["event"]),
            name=payload["name"],
            command=payload.get("command"),
            blocking=payload.get("blocking", False),
            timeout=payload.get("timeout", 30),
        )
        _hook_mgr.register(definition)
    except (KeyError, ValueError) as e:
        raise HTTPException(status_code=400, detail=str(e))
    return {"registered": True, "name": definition.name, "event": definition.event.value}
+
+
@app.delete("/api/hooks/{event}/{name}")
async def api_unregister_hook(event: str, name: str):
    """Unregister a hook identified by its event and name; 400 on failure."""
    try:
        _hook_mgr.unregister(HookEvent(event), name)
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    return {"unregistered": True}
+
+
+# ============================================================================
+# Permissions Endpoints (Phase 1)
+# ============================================================================
+
@app.get("/api/permissions")
async def api_get_permissions():
    """Return the current permission policy as a plain dict."""
    policy = _perm_mgr.to_dict()
    return policy
+
+
@app.put("/api/permissions/mode")
async def api_set_permission_mode(payload: dict):
    """Set the permission mode (normal, plan, auto); 400 on unknown modes."""
    mode_str = payload.get("mode", "normal")
    try:
        new_mode = PermissionMode(mode_str)
    except ValueError:
        raise HTTPException(status_code=400, detail=f"Invalid mode: {mode_str}")
    _perm_mgr.policy.mode = new_mode
    return {"mode": new_mode.value}
+
+
+# ============================================================================
+# Project Context / Memory Endpoints (Phase 1)
+# ============================================================================
+
@app.get("/api/repos/{owner}/{repo}/context")
async def api_get_project_context(
    owner: str = FPath(...),
    repo: str = FPath(...),
):
    """Get project conventions and memory for a repository workspace."""
    from pathlib import Path as StdPath
    workspace = StdPath.home() / ".gitpilot" / "workspaces" / owner / repo
    if not workspace.exists():
        # No workspace yet — return an empty context rather than erroring.
        return {"conventions": "", "rules": [], "auto_memory": {}, "system_prompt": ""}
    ctx = MemoryManager(workspace).load_context()
    return {
        "conventions": ctx.conventions,
        "rules": ctx.rules,
        "auto_memory": ctx.auto_memory,
        "system_prompt": ctx.to_system_prompt(),
    }
+
+
@app.post("/api/repos/{owner}/{repo}/context/init")
async def api_init_project_context(
    owner: str = FPath(...),
    repo: str = FPath(...),
):
    """Initialize .gitpilot/ directory with template GITPILOT.md.

    Creates the workspace directory if it does not exist yet.
    """
    # Consistency: use the shared _workspace_path helper rather than
    # rebuilding the ~/.gitpilot/workspaces path inline.
    workspace_path = _workspace_path(owner, repo)
    workspace_path.mkdir(parents=True, exist_ok=True)
    mgr = MemoryManager(workspace_path)
    md_path = mgr.init_project()
    return {"initialized": True, "path": str(md_path)}
+
+
@app.post("/api/repos/{owner}/{repo}/context/pattern")
async def api_add_learned_pattern(
    owner: str = FPath(...),
    repo: str = FPath(...),
    payload: dict = ...,
):
    """Add a learned pattern to auto-memory.

    Returns 400 when the body is missing a non-empty ``pattern``.
    """
    pattern = payload.get("pattern", "")
    if not pattern:
        raise HTTPException(status_code=400, detail="pattern is required")
    # Consistency: use the shared _workspace_path helper rather than
    # rebuilding the ~/.gitpilot/workspaces path inline.
    workspace_path = _workspace_path(owner, repo)
    workspace_path.mkdir(parents=True, exist_ok=True)
    mgr = MemoryManager(workspace_path)
    mgr.add_learned_pattern(pattern)
    return {"added": True, "pattern": pattern}
+
+
+# ============================================================================
+# Context Vault Endpoints (additive — Context + Use Case system)
+# ============================================================================
+
+def _workspace_path(owner: str, repo: str) -> Path:
+ """Resolve the local workspace path for a repo."""
+ return Path.home() / ".gitpilot" / "workspaces" / owner / repo
+
+
@app.get("/api/repos/{owner}/{repo}/context/assets")
async def api_list_context_assets(
    owner: str = FPath(...),
    repo: str = FPath(...),
):
    """Return metadata for every asset stored in the repo's context vault."""
    vault = ContextVault(_workspace_path(owner, repo))
    return {"assets": [asset.to_dict() for asset in vault.list_assets()]}
+
+
@app.post("/api/repos/{owner}/{repo}/context/assets/upload")
async def api_upload_context_asset(
    owner: str = FPath(...),
    repo: str = FPath(...),
    file: UploadFile = File(...),
):
    """Store an uploaded file in the project context vault; 400 on rejection."""
    vault = ContextVault(_workspace_path(owner, repo))
    raw_bytes = await file.read()
    try:
        meta = vault.upload_asset(
            file.filename or "upload",
            raw_bytes,
            mime=file.content_type or "",
        )
        return {"asset": meta.to_dict()}
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
+
+
@app.delete("/api/repos/{owner}/{repo}/context/assets/{asset_id}")
async def api_delete_context_asset(
    owner: str = FPath(...),
    repo: str = FPath(...),
    asset_id: str = FPath(...),
):
    """Delete a context asset."""
    ContextVault(_workspace_path(owner, repo)).delete_asset(asset_id)
    return {"deleted": True, "asset_id": asset_id}
+
+
@app.get("/api/repos/{owner}/{repo}/context/assets/{asset_id}/download")
async def api_download_context_asset(
    owner: str = FPath(...),
    repo: str = FPath(...),
    asset_id: str = FPath(...),
):
    """Stream the raw bytes of a stored context asset; 404 when unknown."""
    vault = ContextVault(_workspace_path(owner, repo))
    asset_path = vault.get_asset_path(asset_id)
    if not asset_path:
        raise HTTPException(status_code=404, detail="Asset not found")
    return FileResponse(asset_path, filename=vault.get_asset_filename(asset_id))
+
+
+# ============================================================================
+# Use Case Endpoints (additive — guided requirement clarification)
+# ============================================================================
+
@app.get("/api/repos/{owner}/{repo}/use-cases")
async def api_list_use_cases(
    owner: str = FPath(...),
    repo: str = FPath(...),
):
    """Return every use case recorded for this repository."""
    manager = UseCaseManager(_workspace_path(owner, repo))
    return {"use_cases": manager.list_use_cases()}
+
+
@app.post("/api/repos/{owner}/{repo}/use-cases")
async def api_create_use_case(
    owner: str = FPath(...),
    repo: str = FPath(...),
    payload: dict = ...,
):
    """Create a new use case from an optional title and initial notes."""
    manager = UseCaseManager(_workspace_path(owner, repo))
    created = manager.create_use_case(
        title=payload.get("title", "New Use Case"),
        initial_notes=payload.get("initial_notes", ""),
    )
    return {"use_case": created.to_dict()}
+
+
@app.get("/api/repos/{owner}/{repo}/use-cases/{use_case_id}")
async def api_get_use_case(
    owner: str = FPath(...),
    repo: str = FPath(...),
    use_case_id: str = FPath(...),
):
    """Fetch one use case, including its messages and spec; 404 when unknown."""
    manager = UseCaseManager(_workspace_path(owner, repo))
    found = manager.get_use_case(use_case_id)
    if not found:
        raise HTTPException(status_code=404, detail="Use case not found")
    return {"use_case": found.to_dict()}
+
+
@app.post("/api/repos/{owner}/{repo}/use-cases/{use_case_id}/chat")
async def api_use_case_chat(
    owner: str = FPath(...),
    repo: str = FPath(...),
    use_case_id: str = FPath(...),
    payload: dict = ...,
):
    """Append a guided chat message; returns the assistant reply + updated spec."""
    message = payload.get("message", "")
    if not message:
        raise HTTPException(status_code=400, detail="message is required")
    manager = UseCaseManager(_workspace_path(owner, repo))
    updated = manager.chat(use_case_id, message)
    if not updated:
        raise HTTPException(status_code=404, detail="Use case not found")
    return {"use_case": updated.to_dict()}
+
+
@app.post("/api/repos/{owner}/{repo}/use-cases/{use_case_id}/finalize")
async def api_finalize_use_case(
    owner: str = FPath(...),
    repo: str = FPath(...),
    use_case_id: str = FPath(...),
):
    """Finalize a use case: mark active and export the markdown spec."""
    manager = UseCaseManager(_workspace_path(owner, repo))
    finalized = manager.finalize(use_case_id)
    if not finalized:
        raise HTTPException(status_code=404, detail="Use case not found")
    return {"use_case": finalized.to_dict()}
+
+
+# ============================================================================
+# MCP Endpoints (Phase 2)
+# ============================================================================
+
@app.get("/api/mcp/servers")
async def api_mcp_list_servers():
    """Report configured MCP servers along with their connection status."""
    snapshot = _mcp_client.to_dict()
    return snapshot
+
+
@app.post("/api/mcp/connect/{server_name}")
async def api_mcp_connect(server_name: str):
    """Connect to a named MCP server.

    Returns the server's advertised tools on success; any failure is
    surfaced as a 400 carrying the underlying error message.
    """
    try:
        conn = await _mcp_client.connect(server_name)
        return {
            "connected": True,
            "server": server_name,
            "tools": [{"name": t.name, "description": t.description} for t in conn.tools],
        }
    except Exception as e:
        # Chain the cause so the real connection error is preserved in logs (B904).
        raise HTTPException(status_code=400, detail=str(e)) from e
+
+
@app.post("/api/mcp/disconnect/{server_name}")
async def api_mcp_disconnect(server_name: str):
    """Drop the connection to a named MCP server."""
    await _mcp_client.disconnect(server_name)
    return {"disconnected": True, "server": server_name}
+
+
@app.post("/api/mcp/call")
async def api_mcp_call_tool(payload: dict):
    """Call a tool on a connected MCP server.

    Body keys: ``server`` and ``tool`` (required), ``params`` (optional dict).
    400 when either required key is missing, 404 when not connected, 500 on
    tool failure.
    """
    server = payload.get("server", "")
    tool_name = payload.get("tool", "")
    params = payload.get("params", {})
    if not server or not tool_name:
        raise HTTPException(status_code=400, detail="server and tool are required")
    # NOTE(review): reaches into the client's private _connections mapping;
    # a public lookup accessor on the MCP client would be cleaner.
    conn = _mcp_client._connections.get(server)
    if not conn:
        raise HTTPException(status_code=404, detail=f"Not connected to server: {server}")
    try:
        result = await _mcp_client.call_tool(conn, tool_name, params)
        return {"result": result}
    except Exception as e:
        # Chain the cause so the tool failure keeps its traceback (B904).
        raise HTTPException(status_code=500, detail=str(e)) from e
+
+
+# ============================================================================
+# Plugin Endpoints (Phase 2)
+# ============================================================================
+
@app.get("/api/plugins")
async def api_list_plugins():
    """Enumerate the installed plugins."""
    return {"plugins": [plugin.to_dict() for plugin in _plugin_mgr.list_installed()]}
+
+
@app.post("/api/plugins/install")
async def api_install_plugin(payload: dict):
    """Install a plugin from a git URL or local path.

    Returns 400 when ``source`` is missing or the installer fails.
    """
    source = payload.get("source", "")
    if not source:
        raise HTTPException(status_code=400, detail="source is required")
    try:
        info = _plugin_mgr.install(source)
        return {"installed": True, "plugin": info.to_dict()}
    except Exception as e:
        # Chain the cause so installation failures keep their traceback (B904).
        raise HTTPException(status_code=400, detail=str(e)) from e
+
+
@app.delete("/api/plugins/{name}")
async def api_uninstall_plugin(name: str):
    """Uninstall a plugin by name; 404 when it is not installed."""
    if not _plugin_mgr.uninstall(name):
        raise HTTPException(status_code=404, detail=f"Plugin not found: {name}")
    return {"uninstalled": True, "name": name}
+
+
+# ============================================================================
+# Skills Endpoints (Phase 2)
+# ============================================================================
+
@app.get("/api/skills")
async def api_list_skills():
    """Enumerate every skill currently known to the skill manager."""
    available = _skill_mgr.list_skills()
    return {"skills": available}
+
+
@app.post("/api/skills/invoke")
async def api_invoke_skill(payload: dict):
    """Render a skill's prompt by name with the supplied context."""
    name = payload.get("name", "")
    if not name:
        raise HTTPException(status_code=400, detail="name is required")
    context = payload.get("context", {})
    rendered = _skill_mgr.invoke(name, context)
    if rendered is None:
        raise HTTPException(status_code=404, detail=f"Skill not found: {name}")
    return {"skill": name, "rendered_prompt": rendered}
+
+
@app.post("/api/skills/reload")
async def api_reload_skills():
    """Reload skills from every configured source."""
    loaded = _skill_mgr.load_all()
    return {"reloaded": True, "count": loaded}
+
+
+# ============================================================================
+# Vision Endpoints (Phase 2)
+# ============================================================================
+
@app.post("/api/vision/analyze")
async def api_vision_analyze(payload: dict):
    """Analyze an image with a text prompt.

    Body keys: ``image_path`` (required, local file path), ``prompt`` and
    ``provider`` (optional; provider defaults to "openai").  Analyzer
    failures are reported as 400s.
    """
    from .vision import VisionAnalyzer
    image_path = payload.get("image_path", "")
    prompt = payload.get("prompt", "Describe this image.")
    provider = payload.get("provider", "openai")
    if not image_path:
        raise HTTPException(status_code=400, detail="image_path is required")
    try:
        analyzer = VisionAnalyzer(provider=provider)
        result = await analyzer.analyze_image(Path(image_path), prompt)
        return result.to_dict()
    except Exception as e:
        # Chain the cause so the underlying analyzer error is preserved (B904).
        raise HTTPException(status_code=400, detail=str(e)) from e
+
+
+# ============================================================================
+# Model Router Endpoints (Phase 2)
+# ============================================================================
+
@app.post("/api/model-router/select")
async def api_model_select(payload: dict):
    """Preview which model the router would pick for a request."""
    request = payload.get("request", "")
    if not request:
        raise HTTPException(status_code=400, detail="request is required")
    category = payload.get("category")
    chosen = _model_router.select(request, category)
    return {
        "model": chosen.model,
        "tier": chosen.tier.value,
        "complexity": chosen.complexity.value,
        "provider": chosen.provider,
        "reason": chosen.reason,
    }
+
+
@app.get("/api/model-router/usage")
async def api_model_usage():
    """Report model usage totals and budget status."""
    summary = _model_router.get_usage_summary()
    return summary
+
+
+# ============================================================================
+# Agent Teams Endpoints (Phase 3)
+# ============================================================================
+
@app.post("/api/agent-teams/plan")
async def api_team_plan(payload: dict):
    """Split a complex task into parallel subtasks."""
    task = payload.get("task", "")
    if not task:
        raise HTTPException(status_code=400, detail="task is required")
    plan = _agent_team.plan_and_split(task)
    return {
        "subtasks": [
            {"id": sub.id, "title": sub.title, "description": sub.description}
            for sub in plan
        ]
    }
+
+
@app.post("/api/agent-teams/execute")
async def api_team_execute(payload: dict):
    """Plan a task, run the subtasks in parallel, and merge the results."""
    task = payload.get("task", "")
    if not task:
        raise HTTPException(status_code=400, detail="task is required")
    plan = _agent_team.plan_and_split(task)
    merged = await _agent_team.execute_parallel(plan)
    return merged.to_dict()
+
+
+# ============================================================================
+# Learning Engine Endpoints (Phase 3)
+# ============================================================================
+
@app.post("/api/learning/evaluate")
async def api_learning_evaluate(payload: dict):
    """Evaluate an action outcome for learning; 400 when ``action`` is absent."""
    action = payload.get("action", "")
    if not action:
        raise HTTPException(status_code=400, detail="action is required")
    outcome = payload.get("outcome", {})
    repo = payload.get("repo", "")
    evaluation = _learning_engine.evaluate_outcome(action, outcome, repo=repo)
    return {
        "action": evaluation.action,
        "success": evaluation.success,
        "score": evaluation.score,
        "feedback": evaluation.feedback,
    }
+
+
@app.get("/api/learning/insights/{owner}/{repo}")
async def api_learning_insights(owner: str = FPath(...), repo: str = FPath(...)):
    """Return learned insights for a repository."""
    full_name = f"{owner}/{repo}"
    ins = _learning_engine.get_repo_insights(full_name)
    return {
        "repo": full_name,
        "patterns": ins.patterns,
        "preferred_style": ins.preferred_style,
        "success_rate": ins.success_rate,
        "total_evaluations": ins.total_evaluations,
    }
+
+
@app.post("/api/learning/style")
async def api_learning_set_style(payload: dict):
    """Record the preferred coding style for a repository."""
    repo = payload.get("repo", "")
    if not repo:
        raise HTTPException(status_code=400, detail="repo is required")
    style = payload.get("style", {})
    _learning_engine.set_preferred_style(repo, style)
    return {"repo": repo, "style": style}
+
+
+# ============================================================================
+# Cross-Repo Intelligence Endpoints (Phase 3)
+# ============================================================================
+
@app.post("/api/cross-repo/dependencies")
async def api_cross_repo_dependencies(payload: dict):
    """Build a dependency graph from the supplied filename -> content mapping."""
    files = payload.get("files", {})
    if not files:
        raise HTTPException(status_code=400, detail="files dict is required (filename -> content)")
    return _cross_repo.analyze_dependencies_from_files(files).to_dict()
+
+
@app.post("/api/cross-repo/impact")
async def api_cross_repo_impact(payload: dict):
    """Assess the impact of updating a package across the provided files."""
    package_name = payload.get("package", "")
    if not package_name:
        raise HTTPException(status_code=400, detail="package is required")
    files = payload.get("files", {})
    new_version = payload.get("new_version")
    graph = _cross_repo.analyze_dependencies_from_files(files)
    return _cross_repo.impact_analysis(graph, package_name, new_version).to_dict()
+
+
+# ============================================================================
+# Predictions Endpoints (Phase 3)
+# ============================================================================
+
@app.post("/api/predictions/suggest")
async def api_predictions_suggest(payload: dict):
    """Produce proactive suggestions for the given context string."""
    context = payload.get("context", "")
    if not context:
        raise HTTPException(status_code=400, detail="context is required")
    predicted = _predictive_engine.predict(context)
    return {"suggestions": [item.to_dict() for item in predicted]}
+
+
@app.get("/api/predictions/rules")
async def api_predictions_rules():
    """Enumerate every prediction rule known to the engine."""
    rules = _predictive_engine.list_rules()
    return {"rules": rules}
+
+
+# ============================================================================
+# Security Scanner Endpoints (Phase 3)
+# ============================================================================
+
@app.post("/api/security/scan-file")
async def api_security_scan_file(payload: dict):
    """Scan one file for security findings."""
    file_path = payload.get("file_path", "")
    if not file_path:
        raise HTTPException(status_code=400, detail="file_path is required")
    found = _security_scanner.scan_file(file_path)
    return {"findings": [item.to_dict() for item in found], "count": len(found)}
+
+
@app.post("/api/security/scan-directory")
async def api_security_scan_directory(payload: dict):
    """Recursively scan a directory tree for security findings."""
    directory = payload.get("directory", "")
    if not directory:
        raise HTTPException(status_code=400, detail="directory is required")
    return _security_scanner.scan_directory(directory).to_dict()
+
+
@app.post("/api/security/scan-diff")
async def api_security_scan_diff(payload: dict):
    """Scan the added lines of a git diff for security findings."""
    diff_text = payload.get("diff", "")
    if not diff_text:
        raise HTTPException(status_code=400, detail="diff is required")
    found = _security_scanner.scan_diff(diff_text)
    return {"findings": [item.to_dict() for item in found], "count": len(found)}
+
+
+# ============================================================================
+# Natural Language Database Endpoints (Phase 3)
+# ============================================================================
+
@app.post("/api/nl-database/translate")
async def api_nl_translate(payload: dict):
    """Translate natural language to SQL.

    Body keys: ``question`` (required), ``dialect`` (default "postgresql"),
    ``tables`` (optional list of {name, columns, primary_key}).  Malformed
    input is rejected with 400 instead of surfacing as a 500.
    """
    question = payload.get("question", "")
    dialect = payload.get("dialect", "postgresql")
    tables = payload.get("tables", [])
    if not question:
        raise HTTPException(status_code=400, detail="question is required")
    # Lazy import — nl_database pulls in SQL parsing libraries
    from .nl_database import NLQueryEngine, QueryDialect, TableSchema
    try:
        engine = NLQueryEngine(dialect=QueryDialect(dialect))
    except ValueError as e:
        # Unknown dialect used to escape as an unhandled ValueError (500).
        raise HTTPException(status_code=400, detail=f"Invalid dialect: {dialect}") from e
    for t in tables:
        if "name" not in t:
            # A table entry without "name" used to raise KeyError (500).
            raise HTTPException(status_code=400, detail="each table requires a name")
        engine.add_table(TableSchema(
            name=t["name"],
            columns=t.get("columns", []),
            primary_key=t.get("primary_key"),
        ))
    sql = engine.translate(question)
    error = engine.validate_query(sql)
    return {"question": question, "sql": sql, "valid": error is None, "error": error}
+
+
@app.post("/api/nl-database/explain")
async def api_nl_explain(payload: dict):
    """Explain what a SQL query does in plain English."""
    sql = payload.get("sql", "")
    if not sql:
        raise HTTPException(status_code=400, detail="sql is required")
    return {"sql": sql, "explanation": _nl_engine.explain(sql)}
+
+
+# ============================================================================
+# Branch Listing Endpoint (Claude-Code-on-Web Parity)
+# ============================================================================
+
class BranchInfo(BaseModel):
    # A single branch entry in the branch-listing response.
    name: str                         # branch name, e.g. "main"
    is_default: bool = False          # True for the repository's default branch
    protected: bool = False           # GitHub branch-protection flag
    commit_sha: Optional[str] = None  # SHA of the branch tip, when available
+
+
class BranchListResponse(BaseModel):
    # Envelope returned by GET /api/repos/{owner}/{repo}/branches.
    repository: str       # "owner/repo"
    default_branch: str   # repository's default branch name
    page: int             # requested starting page (1-based)
    per_page: int         # requested page size (GitHub caps at 100)
    has_more: bool        # pagination flag (see api_list_branches)
    branches: List[BranchInfo]
+
+
@app.get("/api/repos/{owner}/{repo}/branches", response_model=BranchListResponse)
async def api_list_branches(
    owner: str = FPath(...),
    repo: str = FPath(...),
    page: int = Query(1, ge=1),
    per_page: int = Query(100, ge=1, le=100),
    query: Optional[str] = Query(None, description="Substring filter"),
    authorization: Optional[str] = Header(None),
):
    """List branches for a repository with optional search filtering.

    Fetches up to 10 pages (1000 branches) from the GitHub API starting at
    ``page``; ``has_more`` is True when that safety cap was hit while GitHub
    still advertised a next page.  Requires a GitHub token (401 otherwise).
    """
    import httpx as _httpx

    token = get_github_token(authorization)
    if not token:
        raise HTTPException(status_code=401, detail="GitHub token required")

    headers = {
        "Authorization": f"Bearer {token}",
        "Accept": "application/vnd.github+json",
        "User-Agent": "gitpilot",
    }
    timeout = _httpx.Timeout(connect=10.0, read=30.0, write=30.0, pool=10.0)

    async with _httpx.AsyncClient(
        base_url="https://api.github.com", headers=headers, timeout=timeout
    ) as client:
        # Fetch repo info for default_branch
        repo_resp = await client.get(f"/repos/{owner}/{repo}")
        if repo_resp.status_code >= 400:
            logging.warning(
                "branches: repo lookup failed %s/%s → %s %s",
                owner, repo, repo_resp.status_code, repo_resp.text[:200],
            )
            raise HTTPException(
                status_code=repo_resp.status_code,
                detail=f"Cannot access repository: {repo_resp.status_code}",
            )

        repo_data = repo_resp.json()
        default_branch_name = repo_data.get("default_branch", "main")

        # Fetch ALL branch pages (GitHub caps at 100 per page)
        all_raw = []
        current_page = page
        truncated = False
        while True:
            branch_resp = await client.get(
                f"/repos/{owner}/{repo}/branches",
                params={"page": current_page, "per_page": per_page},
            )
            if branch_resp.status_code >= 400:
                logging.warning(
                    "branches: list failed %s/%s page=%s → %s %s",
                    owner, repo, current_page, branch_resp.status_code, branch_resp.text[:200],
                )
                raise HTTPException(
                    status_code=branch_resp.status_code,
                    detail=f"Failed to list branches: {branch_resp.status_code}",
                )

            # Fix: parse the response body once instead of calling .json() twice.
            body = branch_resp.json()
            page_data = body if isinstance(body, list) else []
            all_raw.extend(page_data)

            # Check if there are more pages
            link_header = branch_resp.headers.get("Link", "") or ""
            if 'rel="next"' not in link_header or len(page_data) < per_page:
                break
            current_page += 1
            # Safety: cap at 10 pages (1000 branches)
            if current_page - page >= 10:
                # Fix: remember that results were truncated so has_more is
                # reported truthfully instead of being hardcoded to False.
                truncated = True
                break

    q = (query or "").strip().lower()

    branches = []
    for b in all_raw:
        name = (b.get("name") or "").strip()
        if not name:
            continue
        if q and q not in name.lower():
            continue
        branches.append(BranchInfo(
            name=name,
            is_default=(name == default_branch_name),
            protected=bool(b.get("protected", False)),
            commit_sha=(b.get("commit") or {}).get("sha"),
        ))

    # Sort: default branch first, then alphabetical
    branches.sort(key=lambda x: (0 if x.is_default else 1, x.name.lower()))

    return BranchListResponse(
        repository=f"{owner}/{repo}",
        default_branch=default_branch_name,
        page=page,
        per_page=per_page,
        has_more=truncated,
        branches=branches,
    )
+
+
+# ============================================================================
+# Environment Configuration Endpoints (Claude-Code-on-Web Parity)
+# ============================================================================
+
# Environment configurations are persisted as one JSON file per config
# under ~/.gitpilot/environments (filename = "<id>.json").
import json as _json
_ENV_ROOT = Path.home() / ".gitpilot" / "environments"
+
+
class EnvironmentConfig(BaseModel):
    # One environment profile, persisted as JSON under _ENV_ROOT.
    id: Optional[str] = None   # generated on create when omitted
    name: str = "Default"
    network_access: str = Field("limited", description="limited | full | none")
    env_vars: dict = Field(default_factory=dict)  # arbitrary key/value environment variables
+
+
class EnvironmentListResponse(BaseModel):
    # Envelope for GET /api/environments.
    environments: List[EnvironmentConfig]
+
+
@app.get("/api/environments", response_model=EnvironmentListResponse)
async def api_list_environments():
    """List stored environment configurations, falling back to a default."""
    _ENV_ROOT.mkdir(parents=True, exist_ok=True)
    configs = []
    for candidate in sorted(_ENV_ROOT.glob("*.json")):
        try:
            configs.append(EnvironmentConfig(**_json.loads(candidate.read_text())))
        except Exception:
            # Skip unreadable/corrupt entries rather than failing the listing.
            continue
    if not configs:
        configs.append(EnvironmentConfig(id="default", name="Default", network_access="limited"))
    return EnvironmentListResponse(environments=configs)
+
+
@app.post("/api/environments")
async def api_create_environment(config: EnvironmentConfig):
    """Persist a new environment configuration, minting an id when absent."""
    import uuid
    _ENV_ROOT.mkdir(parents=True, exist_ok=True)
    if not config.id:
        config.id = uuid.uuid4().hex[:12]
    target = _ENV_ROOT / f"{config.id}.json"
    target.write_text(_json.dumps(config.model_dump(), indent=2))
    return config.model_dump()
+
+
@app.put("/api/environments/{env_id}")
async def api_update_environment(env_id: str, config: EnvironmentConfig):
    """Write (upsert) an environment configuration under the given id."""
    _ENV_ROOT.mkdir(parents=True, exist_ok=True)
    config.id = env_id
    target = _ENV_ROOT / f"{env_id}.json"
    target.write_text(_json.dumps(config.model_dump(), indent=2))
    return config.model_dump()
+
+
@app.delete("/api/environments/{env_id}")
async def api_delete_environment(env_id: str):
    """Delete an environment configuration; 404 when it does not exist."""
    target = _ENV_ROOT / f"{env_id}.json"
    if not target.exists():
        raise HTTPException(status_code=404, detail="Environment not found")
    target.unlink()
    return {"deleted": True}
+
+
+# ============================================================================
+# Session Messages + Diff Endpoints (Claude-Code-on-Web Parity)
+# ============================================================================
+
@app.post("/api/sessions/{session_id}/message")
async def api_add_session_message(session_id: str, payload: dict):
    """Append a message to a session's conversation history."""
    try:
        session = _session_mgr.load(session_id)
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail="Session not found")
    session.add_message(
        payload.get("role", "user"),
        payload.get("content", ""),
        **payload.get("metadata", {}),
    )
    _session_mgr.save(session)
    return {"message_count": len(session.messages)}
+
+
@app.get("/api/sessions/{session_id}/messages")
async def api_get_session_messages(session_id: str):
    """Return the full message history for a session."""
    try:
        session = _session_mgr.load(session_id)
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail="Session not found")
    history = [
        {
            "role": msg.role,
            "content": msg.content,
            "timestamp": msg.timestamp,
            "metadata": msg.metadata,
        }
        for msg in session.messages
    ]
    return {"session_id": session.id, "messages": history}
+
+
@app.get("/api/sessions/{session_id}/diff")
async def api_get_session_diff(session_id: str):
    """Get diff stats for a session (placeholder for sandbox integration)."""
    try:
        session = _session_mgr.load(session_id)
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail="Session not found")
    empty_diff = {"files_changed": 0, "additions": 0, "deletions": 0, "files": []}
    return {"session_id": session.id, "diff": session.metadata.get("diff", empty_diff)}
+
+
@app.post("/api/sessions/{session_id}/status")
async def api_update_session_status(session_id: str, payload: dict):
    """Update session status (active, paused, completed, failed, waiting)."""
    try:
        session = _session_mgr.load(session_id)
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail="Session not found")
    new_status = payload.get("status", "active")
    allowed = ("active", "paused", "completed", "failed", "waiting")
    if new_status not in allowed:
        raise HTTPException(status_code=400, detail="Invalid status")
    session.status = new_status
    _session_mgr.save(session)
    return {"session_id": session.id, "status": session.status}
+
+
+# ============================================================================
+# WebSocket Streaming Endpoint (Claude-Code-on-Web Parity)
+# ============================================================================
+
+from fastapi import WebSocket, WebSocketDisconnect
+
+
async def _safe_ws_send_json(websocket: WebSocket, data: dict) -> bool:
    """Send JSON over a WebSocket, swallowing disconnect errors.

    Returns True when the payload was delivered and False when the peer is
    already gone.  Transport-level disconnect exceptions — including
    uvicorn's ClientDisconnected, matched by class name so uvicorn internals
    need not be imported — are absorbed; anything else propagates so genuine
    bugs still surface in logs.

    Pattern recommended by the Starlette docs:
    https://www.starlette.io/websockets/#disconnect
    """
    disconnect_names = (
        "ClientDisconnected",
        "ConnectionClosedError",
        "ConnectionClosedOK",
        "WebSocketDisconnect",
    )
    try:
        await websocket.send_json(data)
    except WebSocketDisconnect:
        return False
    except Exception as exc:
        if type(exc).__name__ in disconnect_names:
            return False
        # Re-raise unexpected errors so they show up in logs
        raise
    return True
+
+
@app.websocket("/ws/sessions/{session_id}")
async def session_websocket(websocket: WebSocket, session_id: str):
    """
    Real-time bidirectional communication for a coding session.

    Server events:
        { type: "agent_message", content: "..." }
        { type: "tool_use", tool: "bash", input: "npm test" }
        { type: "tool_result", tool: "bash", output: "All tests passed" }
        { type: "diff_update", stats: { additions: N, deletions: N, files: N } }
        { type: "status_change", status: "completed" }
        { type: "error", message: "..." }

    Client events:
        { type: "user_message", content: "..." }
        { type: "cancel" }
    """
    await websocket.accept()

    # Verify session exists — refuse the connection with an error frame and
    # a best-effort close when the id is unknown.
    try:
        session = _session_mgr.load(session_id)
    except FileNotFoundError:
        await _safe_ws_send_json(websocket, {"type": "error", "message": "Session not found"})
        try:
            await websocket.close()
        except Exception:
            pass
        return

    # Send session history on connect (may fail if client already gone)
    if not await _safe_ws_send_json(websocket, {
        "type": "session_restored",
        "session_id": session.id,
        "status": session.status,
        "message_count": len(session.messages),
    }):
        logger.info(f"WebSocket disconnected before handshake for session {session_id}")
        return

    # Main receive loop — one iteration per client event; every send goes
    # through _safe_ws_send_json and a False return means the peer is gone.
    try:
        while True:
            try:
                data = await websocket.receive_json()
            except WebSocketDisconnect:
                break

            event_type = data.get("type", "")

            if event_type == "user_message":
                content = data.get("content", "")
                # Persist the user turn before attempting the agent call so
                # history survives an agent failure or disconnect.
                session.add_message("user", content)
                _session_mgr.save(session)

                # Acknowledge receipt
                if not await _safe_ws_send_json(websocket, {
                    "type": "message_received",
                    "message_index": len(session.messages) - 1,
                }):
                    break

                # Stream agent response (integration point for agentic.py)
                if not await _safe_ws_send_json(websocket, {
                    "type": "status_change",
                    "status": "active",
                }):
                    break

                # Agent processing hook — when the agent orchestrator is wired,
                # replace this with actual streaming from agentic.py
                try:
                    repo_full = session.repo_full_name or ""
                    parts = repo_full.split("/", 1)
                    if len(parts) == 2 and content.strip():
                        # Use canonical dispatcher signature
                        result = await dispatch_request(
                            user_request=content,
                            repo_full_name=f"{parts[0]}/{parts[1]}",
                            branch_name=session.branch,
                        )
                        # Pull a displayable answer out of whichever key the
                        # dispatcher populated, falling back to str(result).
                        answer = ""
                        if isinstance(result, dict):
                            answer = (
                                result.get("result")
                                or result.get("answer")
                                or result.get("message")
                                or result.get("summary")
                                or (result.get("plan", {}) or {}).get("summary")
                                or str(result)
                            )
                        else:
                            answer = str(result)

                        # Stream the response
                        if not await _safe_ws_send_json(websocket, {
                            "type": "agent_message",
                            "content": answer,
                        }):
                            # Client disconnected — still persist the answer for session history
                            session.add_message("assistant", answer)
                            _session_mgr.save(session)
                            break

                        session.add_message("assistant", answer)
                        _session_mgr.save(session)
                    else:
                        if not await _safe_ws_send_json(websocket, {
                            "type": "agent_message",
                            "content": "Session is not connected to a repository.",
                        }):
                            break
                except Exception as agent_err:
                    logger.error(f"Agent error in WS session {session_id}: {agent_err}")
                    err_str = str(agent_err)
                    # Friendly messages for common LLM errors
                    _q_kw = ["insufficient_quota", "exceeded your current quota", "rate_limit_exceeded", "429"]
                    if any(kw in err_str.lower() for kw in _q_kw):
                        err_str = (
                            "Your LLM provider credits have been exhausted or you've "
                            "hit a rate limit. Please check your billing details or "
                            "switch to a free local provider (Ollama / OllaBridge) in Settings."
                        )
                    elif "No valid task outputs" in err_str or "Invalid response from LLM call" in err_str:
                        err_str = (
                            "The LLM returned an empty response. This often happens "
                            "with small/reasoning models. Try a larger model or enable Lite Mode."
                        )
                    if not await _safe_ws_send_json(websocket, {
                        "type": "error",
                        "message": err_str,
                    }):
                        break

                # Whether the agent succeeded or failed, tell the client the
                # session is idle again.
                if not await _safe_ws_send_json(websocket, {
                    "type": "status_change",
                    "status": "waiting",
                }):
                    break

            elif event_type == "cancel":
                if not await _safe_ws_send_json(websocket, {
                    "type": "status_change",
                    "status": "waiting",
                }):
                    break

            elif event_type == "ping":
                # Lightweight keepalive for clients behind proxies.
                if not await _safe_ws_send_json(websocket, {"type": "pong"}):
                    break

    except WebSocketDisconnect:
        logger.info(f"WebSocket disconnected for session {session_id}")
    except Exception as e:
        # Don't log as error if it's a disconnect-related exception
        exc_name = type(e).__name__
        if exc_name in ("ClientDisconnected", "ConnectionClosedError", "ConnectionClosedOK"):
            logger.info(f"WebSocket client disconnected for session {session_id}")
        else:
            logger.error(f"WebSocket error for session {session_id}: {e}")
            await _safe_ws_send_json(websocket, {"type": "error", "message": str(e)})
+
+
+# ─── Redesigned API Endpoints (Phase 1–4) ────────────────────────────────
+
+from gitpilot.models import (
+ ProviderTestRequest as _ProviderTestRequest,
+ StartSessionRequest as _StartSessionRequest,
+ ChatMessageRequest as _ChatMessageRequest,
+)
+
+
+@app.get("/api/status")
+async def api_status():
+    """Normalized status endpoint for the redesigned extension/UI.
+
+    Aggregates three sub-statuses into one StatusResponse: LLM provider,
+    workspace capabilities, and GitHub connectivity. The GitHub probe is
+    bounded by a 3-second timeout so a slow network cannot stall the
+    UI's first load.
+    """
+    # Imports are deferred to request time so importing this module does not
+    # pull in the full settings/GitHub stack.
+    from gitpilot.models import (
+        StatusResponse, ProviderStatusResponse, ProviderName,
+        WorkspaceCapabilitySummary, GithubStatusSummary, ProviderHealth,
+    )
+    from gitpilot.settings import autoconfigure_local_provider
+    from gitpilot.github_api import get_github_status_summary
+
+    s = autoconfigure_local_provider()
+    provider_summary = s.get_provider_summary()
+
+    # Build provider status. `name` may be an enum (has .value) or a plain
+    # string depending on where the summary came from — normalize either way.
+    provider = ProviderStatusResponse(
+        configured=provider_summary.configured,
+        name=ProviderName(provider_summary.name.value if hasattr(provider_summary.name, 'value') else str(provider_summary.name)),
+        source=provider_summary.source,
+        model=provider_summary.model,
+        base_url=provider_summary.base_url,
+        connection_type=provider_summary.connection_type,
+        has_api_key=provider_summary.has_api_key,
+        health=provider_summary.health,
+        models_available=provider_summary.models_available,
+        warning=provider_summary.warning,
+    )
+
+    # Workspace capabilities — folder and local-git modes are always offered;
+    # github mode is flipped on below only if the GitHub probe reports connected.
+    workspace = WorkspaceCapabilitySummary(
+        folder_mode_available=True,
+        local_git_available=True,
+        github_mode_available=False,
+    )
+
+    # GitHub status — wrap with timeout to prevent slow first-load
+    # (GitHub API calls over WSL/slow networks can take 5-10s first time).
+    # On timeout/error we keep the default (disconnected) summary.
+    github = GithubStatusSummary()
+    try:
+        github = await _asyncio.wait_for(get_github_status_summary(), timeout=3.0)
+        workspace.github_mode_available = github.connected
+    except _asyncio.TimeoutError:
+        logger.warning("[api/status] GitHub status check timed out after 3s, returning cached/default")
+    except Exception as exc:
+        logger.debug("[api/status] GitHub status check failed: %s", exc)
+
+    return StatusResponse(
+        server_ready=True,
+        provider=provider,
+        workspace=workspace,
+        github=github,
+    )
+
+
+@app.get("/api/providers/status")
+async def api_providers_status():
+ """Get detailed status for the active provider."""
+ from gitpilot.settings import autoconfigure_local_provider
+ from gitpilot.llm_provider import test_provider_connection
+
+ s = autoconfigure_local_provider()
+ summary = await test_provider_connection(s)
+ return summary
+
+
+@app.post("/api/providers/test")
+async def api_providers_test(req: _ProviderTestRequest):
+ """Test a specific provider configuration."""
+ from gitpilot.models import (
+ ProviderTestRequest, ProviderTestResponse, ProviderName,
+ ProviderHealth,
+ )
+ from gitpilot.settings import get_settings, AppSettings
+ from gitpilot.llm_provider import test_provider_connection
+ import copy
+
+ s = autoconfigure_local_provider()
+ # Apply test overrides temporarily
+ test_settings = copy.deepcopy(s)
+
+ provider = req.provider
+ if provider == ProviderName.openai and req.openai:
+ if req.openai.api_key:
+ test_settings.openai.api_key = req.openai.api_key
+ if req.openai.base_url:
+ test_settings.openai.base_url = req.openai.base_url
+ if req.openai.model:
+ test_settings.openai.model = req.openai.model
+ test_settings.provider = test_settings.provider.__class__("openai")
+ elif provider == ProviderName.claude and req.claude:
+ if req.claude.api_key:
+ test_settings.claude.api_key = req.claude.api_key
+ if req.claude.base_url:
+ test_settings.claude.base_url = req.claude.base_url
+ if req.claude.model:
+ test_settings.claude.model = req.claude.model
+ test_settings.provider = test_settings.provider.__class__("claude")
+ elif provider == ProviderName.watsonx and req.watsonx:
+ if req.watsonx.api_key:
+ test_settings.watsonx.api_key = req.watsonx.api_key
+ if req.watsonx.project_id:
+ test_settings.watsonx.project_id = req.watsonx.project_id
+ if req.watsonx.base_url:
+ test_settings.watsonx.base_url = req.watsonx.base_url
+ if req.watsonx.model_id:
+ test_settings.watsonx.model_id = req.watsonx.model_id
+ test_settings.provider = test_settings.provider.__class__("watsonx")
+ elif provider == ProviderName.ollama and req.ollama:
+ if req.ollama.base_url:
+ test_settings.ollama.base_url = req.ollama.base_url
+ if req.ollama.model:
+ test_settings.ollama.model = req.ollama.model
+ test_settings.provider = test_settings.provider.__class__("ollama")
+ elif provider == ProviderName.ollabridge and req.ollabridge:
+ if req.ollabridge.base_url:
+ test_settings.ollabridge.base_url = req.ollabridge.base_url
+ if req.ollabridge.model:
+ test_settings.ollabridge.model = req.ollabridge.model
+ if req.ollabridge.api_key:
+ test_settings.ollabridge.api_key = req.ollabridge.api_key
+ test_settings.provider = test_settings.provider.__class__("ollabridge")
+
+ summary = await test_provider_connection(test_settings)
+ return ProviderTestResponse(
+ configured=summary.configured,
+ name=summary.name,
+ source=summary.source,
+ model=summary.model,
+ base_url=summary.base_url,
+ connection_type=summary.connection_type,
+ has_api_key=summary.has_api_key,
+ health=summary.health,
+ models_available=summary.models_available,
+ warning=summary.warning,
+ details=f"Provider {provider.value} test completed",
+ )
+
+
+@app.post("/api/session/start")
+async def api_session_start(req: _StartSessionRequest):
+ """Start a new session by mode (folder, local_git, github)."""
+ from gitpilot.models import (
+ StartSessionRequest, StartSessionResponse, WorkspaceMode,
+ )
+ from gitpilot.session import SessionManager
+
+ mgr = SessionManager()
+
+ if req.mode == WorkspaceMode.folder:
+ if not req.folder_path:
+ raise HTTPException(status_code=422, detail="folder_path is required for folder mode")
+ session = mgr.create_folder_session(req.folder_path)
+ elif req.mode == WorkspaceMode.local_git:
+ repo_root = req.repo_root or req.folder_path
+ if not repo_root:
+ raise HTTPException(status_code=422, detail="repo_root is required for local_git mode")
+ session = mgr.create_local_git_session(repo_root, req.branch)
+ elif req.mode == WorkspaceMode.github:
+ if not req.repo_full_name:
+ raise HTTPException(status_code=422, detail="repo_full_name is required for github mode")
+ session = mgr.create_github_session(req.repo_full_name, req.branch)
+ else:
+ raise HTTPException(status_code=400, detail=f"Unknown mode: {req.mode}")
+
+ return StartSessionResponse(
+ session_id=session.id,
+ mode=req.mode,
+ title=session.name,
+ folder_path=session.folder_path,
+ repo_root=session.repo_root,
+ repo_full_name=session.repo_full_name,
+ branch=session.branch,
+ )
+
+
+@app.post("/api/chat/send")
+async def api_chat_message_v2(req: _ChatMessageRequest):
+    """Normalized chat message endpoint for the redesigned extension.
+
+    Dispatches to the agent pipeline when the session is bound to a GitHub
+    repository, otherwise falls back to a direct LLM call for folder-mode
+    chat. Provider failures are converted into friendly assistant messages
+    rather than HTTP errors, so both the user and assistant turns are
+    always persisted to the session.
+    """
+    from gitpilot.models import ChatMessageRequest, ChatMessageResponse
+    from gitpilot.session import SessionManager
+    import uuid
+
+    mgr = SessionManager()
+
+    # Load session
+    try:
+        session = mgr.load(req.session_id)
+    except Exception:
+        raise HTTPException(status_code=404, detail=f"Session {req.session_id} not found")
+
+    # Use the canonical dispatcher for chat
+    answer = ""
+    plan = None
+    references = []
+
+    repo_full = session.repo_full_name or ""
+    try:
+        if repo_full:
+            result = await dispatch_request(
+                user_request=req.message,
+                repo_full_name=repo_full,
+                branch_name=session.branch,
+            )
+            if isinstance(result, dict):
+                # Dispatcher result shape varies; take the first non-empty
+                # candidate field, falling back to the raw dict repr.
+                answer = (
+                    result.get("result")
+                    or result.get("answer")
+                    or result.get("message")
+                    or result.get("summary")
+                    or str(result)
+                )
+                plan = result.get("plan")
+                references = result.get("references", [])
+            else:
+                answer = str(result)
+        else:
+            # Folder-mode: use LLM directly for simple chat
+            from gitpilot.llm_provider import build_llm
+            llm = build_llm()
+            local_prompt = _build_local_repo_aware_prompt(req, session)
+            answer = llm.call(
+                [{"role": "user", "content": local_prompt}]
+            )
+    except Exception as e:
+        # Map common provider failures onto actionable guidance instead of
+        # surfacing a raw traceback string to the user.
+        err_str = str(e)
+        _q_kw = ["insufficient_quota", "exceeded your current quota", "rate_limit_exceeded", "429"]
+        if any(kw in err_str.lower() for kw in _q_kw):
+            answer = (
+                "Your LLM provider credits have been exhausted or you've hit a "
+                "rate limit. Please check your billing details or switch to a "
+                "free local provider (Ollama / OllaBridge) in Settings."
+            )
+        elif "No valid task outputs" in err_str:
+            answer = (
+                "The LLM returned an empty response. This often happens with "
+                "small models. Try enabling Lite Mode in Settings."
+            )
+        else:
+            answer = f"Error processing message: {err_str}"
+
+    # Store message in session (user turn first, then the assistant reply)
+    from gitpilot.session import Message
+    session.messages.append(Message(role="user", content=req.message))
+    session.messages.append(Message(role="assistant", content=answer))
+    mgr.save(session)
+
+    # Extract structured edits from the LLM answer so the VS Code
+    # extension can offer an "Apply Patch" button for file creation.
+    edits = _extract_edits_from_answer(answer) if answer else []
+
+    return ChatMessageResponse(
+        session_id=req.session_id,
+        answer=answer,
+        message_id=str(uuid.uuid4()),
+        plan=plan,
+        edits=edits,
+        references=references,
+    )
+
+
+@app.get("/api/workspace/summary")
+async def api_workspace_summary(folder_path: str = Query(default=".")):
+ """Get workspace summary for UI display."""
+ from gitpilot.workspace import summarize_workspace
+ return await summarize_workspace(folder_path)
+
+
+@app.get("/api/security/scan-workspace")
+async def api_security_scan_workspace(path: str = Query(default=".")):
+ """Quick action security scan for workspace."""
+ from gitpilot.security import scan_current_workspace
+ return scan_current_workspace(path)
+
+
+# ============================================================================
+# Static Files & Frontend Serving (SPA Support)
+# ============================================================================
+
+# Location of the built React bundle (copied next to this module as "web/")
+# and its hashed JS/CSS assets.
+STATIC_DIR = Path(__file__).resolve().parent / "web"
+ASSETS_DIR = STATIC_DIR / "assets"
+
+# Mount the static directories only when a frontend build is actually
+# present, so a backend-only install still starts cleanly.
+if ASSETS_DIR.exists():
+    app.mount("/assets", StaticFiles(directory=ASSETS_DIR), name="assets")
+
+if STATIC_DIR.exists():
+    app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
+
+
+@app.get("/api/ping")
+async def ping():
+ """Zero-dependency ping — used by frontend initApp() to detect when
+ the backend is accepting requests. Returns immediately without touching
+ any modules, settings, or external APIs. Always fast even during
+ CrewAI warmup or GitHub API outages.
+ """
+ return {"ok": True, "service": "gitpilot", "version": __version__}
+
+
+@app.get("/api/health")
+async def health_check():
+ """Lightweight health check — always fast, used by HF Spaces HEALTHCHECK."""
+ return {"status": "healthy", "service": "gitpilot-backend"}
+
+
+@app.get("/api/health/deep")
+async def deep_health():
+ """Deep health check — verifies LLM provider connectivity and system status."""
+ from .resilience import deep_health_check
+ result = await deep_health_check()
+ status_code = 200 if result["status"] == "healthy" else 503
+ return JSONResponse(content=result, status_code=status_code)
+
+
+@app.get("/healthz")
+async def healthz():
+ """Health check endpoint (Render/Kubernetes standard)."""
+ return {"status": "healthy", "service": "gitpilot-backend"}
+
+
+@app.get("/", include_in_schema=False)
+async def index():
+ """Serve the React App entry point."""
+ index_file = STATIC_DIR / "index.html"
+ if index_file.exists():
+ return FileResponse(index_file)
+ return JSONResponse(
+ {"message": "GitPilot UI not built. The static files directory is missing."},
+ status_code=500,
+ )
+
+
+@app.get("/{full_path:path}", include_in_schema=False)
+async def catch_all_spa_routes(full_path: str):
+ """
+ Catch-all route to serve index.html for frontend routing.
+ Excludes '/api' paths to ensure genuine API 404s are returned as JSON.
+ """
+ if full_path.startswith("api/"):
+ return JSONResponse({"detail": "Not Found"}, status_code=404)
+
+ index_file = STATIC_DIR / "index.html"
+ if index_file.exists():
+ return FileResponse(index_file)
+
+ return JSONResponse(
+ {"message": "GitPilot UI not built. The static files directory is missing."},
+ status_code=500,
+ )
+
+# ---------------------------------------------------------------------------
+# OllaBridge Cloud Extension (additive, non-destructive)
+# ---------------------------------------------------------------------------
+try:
+    # Patch the app with OllaBridge provider endpoints when the optional
+    # extension module is importable; `del` keeps the module namespace clean.
+    from .api_ollabridge_ext import apply_ollabridge_extension as _apply_ob
+    _apply_ob(app)
+    del _apply_ob
+except ImportError:
+    pass  # Extension not available, skip gracefully
+
+
+# ============================================================================
+# V2 Streaming Agent Endpoints (additive, non-destructive)
+#
+# These endpoints use the unified AgentEventBus protocol so every client
+# (VS Code, React web, HF Spaces) receives the same JSON event shapes.
+#
+# Existing endpoints are NOT modified. These are /api/v2/ prefixed.
+# ============================================================================
+
+import asyncio as _asyncio
+from fastapi import Request as _Request
+from fastapi.responses import StreamingResponse as _StreamingResponse
+from gitpilot.agent_events import get_bus as _get_bus, remove_bus as _remove_bus, EventType as _EvType
+from gitpilot.agent_executor import StreamingAgentExecutor as _StreamingExecutor
+from gitpilot.approval_protocol import ApprovalGate as _ApprovalGate
+from gitpilot.workspace import WorkspaceManager as _V2WorkspaceManager
+
+# Track active executors for cancellation. Keyed by session id ("ephemeral"
+# for sessionless streams); entries are removed when a stream ends.
+_active_executors: dict[str, _StreamingExecutor] = {}
+
+
+@app.post("/api/v2/chat/stream", tags=["v2-streaming"])
+async def v2_chat_stream(request: _Request):
+    """
+    Server-Sent Events endpoint for agent execution.
+
+    Returns text/event-stream. Each line is:
+    data: {"type": "text_delta", "text": "..."}\n\n
+    data: {"type": "tool_start", "name": "read_file", ...}\n\n
+    data: {"type": "done", ...}\n\n
+
+    This is the PREFERRED endpoint for:
+    - Hugging Face Spaces (SSE works through nginx/proxies)
+    - VS Code extension (can consume SSE via fetch ReadableStream)
+    - Any HTTP client that supports streaming
+    """
+    body = await request.json()
+    user_message = body.get("message", "")
+    session_id = body.get("session_id", "")
+    permission_mode = body.get("permission_mode", "normal")
+
+    if not user_message:
+        return JSONResponse({"error": "message is required"}, status_code=400)
+
+    # Load session (reuse existing session manager)
+    session = None
+    repo_full_name = ""
+    branch = None
+    token = body.get("token")
+
+    if session_id:
+        try:
+            session = _session_mgr.load(session_id)
+            repo_full_name = session.repo_full_name or ""
+            branch = session.branch
+        except FileNotFoundError:
+            return JSONResponse({"error": "Session not found"}, status_code=404)
+
+    # Sessionless requests share the "ephemeral" bus/executor slot.
+    bus = _get_bus(session_id or "ephemeral")
+    gate = _ApprovalGate(bus, mode=permission_mode)
+
+    # Resolve workspace (if session has a local workspace); best-effort —
+    # the agent can still run without a local checkout.
+    workspace = None
+    if session and repo_full_name:
+        try:
+            parts = repo_full_name.split("/", 1)
+            if len(parts) == 2:
+                ws_mgr = _V2WorkspaceManager()
+                workspace = await ws_mgr.ensure_workspace(
+                    owner=parts[0], repo=parts[1],
+                    token=token, branch=branch,
+                )
+        except Exception as ws_err:
+            logger.warning("Could not resolve workspace: %s", ws_err)
+
+    executor = _StreamingExecutor(
+        bus=bus, gate=gate, workspace=workspace,
+        ws_manager=_V2WorkspaceManager(),
+    )
+    _active_executors[session_id or "ephemeral"] = executor
+
+    # Subscribe BEFORE starting execution so no early events are missed.
+    sub_id, _queue = bus.subscribe()
+
+    async def event_generator():
+        """Run agent in background, yield events as SSE."""
+        # Start execution as a background task
+        exec_task = _asyncio.create_task(
+            executor.execute(
+                user_message=user_message,
+                repo_full_name=repo_full_name,
+                branch=branch,
+                token=token,
+            )
+        )
+
+        try:
+            async for event in bus.stream(sub_id):
+                yield event.to_sse()
+                # Terminal events end the stream.
+                if event.type in (_EvType.DONE, _EvType.ERROR):
+                    break
+        finally:
+            # Cleanup runs even if the client disconnects mid-stream.
+            bus.unsubscribe(sub_id)
+            _active_executors.pop(session_id or "ephemeral", None)
+
+            # Ensure the task completes
+            if not exec_task.done():
+                exec_task.cancel()
+                try:
+                    await exec_task
+                except (_asyncio.CancelledError, Exception):
+                    pass
+
+            # Save assistant message to session (truncated to keep session
+            # files bounded)
+            if session and exec_task.done() and not exec_task.cancelled():
+                try:
+                    result = exec_task.result()
+                    if result:
+                        summary = result.get("summary", "") if isinstance(result, dict) else str(result)
+                        session.add_message("assistant", summary[:5000])
+                        _session_mgr.save(session)
+                except Exception:
+                    pass
+
+            _remove_bus(session_id or "ephemeral")
+
+    return _StreamingResponse(
+        event_generator(),
+        media_type="text/event-stream",
+        headers={
+            "Cache-Control": "no-cache",
+            "Connection": "keep-alive",
+            # Disable proxy buffering so events flush immediately (nginx).
+            "X-Accel-Buffering": "no",
+        },
+    )
+
+
+@app.post("/api/v2/approval/respond", tags=["v2-streaming"])
+async def v2_approval_respond(request: _Request):
+ """
+ Client sends approval/denial for a tool execution.
+ Used by all clients (web, VS Code, HF Spaces).
+ """
+ body = await request.json()
+ session_id = body.get("session_id", "ephemeral")
+ request_id = body.get("request_id", "")
+ approved = body.get("approved", False)
+ scope = body.get("scope", "once")
+
+ if not request_id:
+ return JSONResponse({"error": "request_id is required"}, status_code=400)
+
+ # The approval gate is created per-stream, so we emit an event
+ # that the gate's listener will pick up
+ bus = _get_bus(session_id)
+ from gitpilot.agent_events import approval_resolved
+ await bus.emit(approval_resolved(request_id, approved))
+
+ return {"status": "resolved", "request_id": request_id, "approved": approved}
+
+
+@app.post("/api/v2/agent/cancel", tags=["v2-streaming"])
+async def v2_agent_cancel(request: _Request):
+ """Cancel the running agent stream for a session."""
+ body = await request.json()
+ session_id = body.get("session_id", "ephemeral")
+
+ executor = _active_executors.get(session_id)
+ if executor:
+ executor.cancel()
+ return {"status": "cancelled", "session_id": session_id}
+
+ return JSONResponse({"error": "No active executor for this session"}, status_code=404)
+
+
+@app.websocket("/ws/v2/sessions/{session_id}")
+async def v2_session_websocket(websocket: WebSocket, session_id: str):
+    """
+    V2 WebSocket with full agent streaming protocol.
+
+    Same event types as SSE endpoint. Client can also send:
+    { type: "user_message", content: "..." }
+    { type: "approval_response", request_id: "...", approved: true, scope: "session" }
+    { type: "cancel" }
+    { type: "ping" }
+    """
+    await websocket.accept()
+
+    # The session must exist before the protocol handshake; otherwise
+    # report the error and close.
+    try:
+        session = _session_mgr.load(session_id)
+    except FileNotFoundError:
+        await _safe_ws_send_json(websocket, {"type": "error", "message": "Session not found"})
+        try:
+            await websocket.close()
+        except Exception:
+            pass
+        return
+
+    # Handshake: confirm the restored session before entering the loop.
+    if not await _safe_ws_send_json(websocket, {
+        "type": "session_restored",
+        "session_id": session.id,
+        "status": session.status,
+        "protocol": "v2",
+    }):
+        logger.info("V2 WebSocket disconnected before handshake for session %s", session_id)
+        return
+
+    bus = _get_bus(session_id)
+    gate = _ApprovalGate(bus)
+    sub_id, _queue = bus.subscribe()
+
+    # Forward bus events -> WebSocket
+    async def forward_events():
+        try:
+            async for event in bus.stream(sub_id):
+                if not await _safe_ws_send_json(websocket, event.to_dict()):
+                    break
+        except Exception:
+            pass
+
+    forwarder = _asyncio.create_task(forward_events())
+
+    try:
+        while True:
+            try:
+                data = await websocket.receive_json()
+            except WebSocketDisconnect:
+                break
+            event_type = data.get("type", "")
+
+            if event_type == "user_message":
+                content = data.get("content", "")
+                if not content:
+                    continue
+
+                session.add_message("user", content)
+                _session_mgr.save(session)
+
+                # Resolve workspace (best-effort — agent can run without it)
+                workspace = None
+                repo_full = session.repo_full_name or ""
+                parts = repo_full.split("/", 1)
+                if len(parts) == 2:
+                    try:
+                        ws_mgr = _V2WorkspaceManager()
+                        workspace = await ws_mgr.ensure_workspace(
+                            owner=parts[0], repo=parts[1],
+                            token=data.get("token"),
+                            branch=session.branch,
+                        )
+                    except Exception:
+                        pass
+
+                executor = _StreamingExecutor(
+                    bus=bus, gate=gate, workspace=workspace,
+                    ws_manager=_V2WorkspaceManager(),
+                )
+                _active_executors[session_id] = executor
+
+                # Run agent (non-blocking); output reaches the client via
+                # the forward_events task above.
+                _asyncio.create_task(executor.execute(
+                    user_message=content,
+                    repo_full_name=repo_full,
+                    branch=session.branch,
+                    token=data.get("token"),
+                ))
+
+            elif event_type == "approval_response":
+                gate.resolve(
+                    request_id=data.get("request_id", ""),
+                    approved=data.get("approved", False),
+                    scope=data.get("scope", "once"),
+                )
+
+            elif event_type == "cancel":
+                executor = _active_executors.get(session_id)
+                if executor:
+                    executor.cancel()
+
+            elif event_type == "ping":
+                if not await _safe_ws_send_json(websocket, {"type": "pong"}):
+                    break
+
+    except WebSocketDisconnect:
+        logger.info("V2 WebSocket disconnected for session %s", session_id)
+    except Exception as e:
+        logger.error("V2 WebSocket error for session %s: %s", session_id, e)
+    finally:
+        # Tear down in reverse order of setup; deny any approvals still
+        # blocked so the executor is not left waiting forever.
+        forwarder.cancel()
+        bus.unsubscribe(sub_id)
+        _active_executors.pop(session_id, None)
+        gate.cancel_all()
+        _remove_bus(session_id)
diff --git a/gitpilot/api_ollabridge_ext.py b/gitpilot/api_ollabridge_ext.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0803a1fa5dd7007e3248796aadd6a12e2f82ee3
--- /dev/null
+++ b/gitpilot/api_ollabridge_ext.py
@@ -0,0 +1,106 @@
+"""OllaBridge Cloud integration extension for GitPilot API.
+
+This module patches the FastAPI app at import time to add:
+- OllaBridge as a first-class LLM provider in settings
+- /api/ollabridge/* proxy endpoints (pairing, models, health)
+
+Completely additive - does not modify api.py.
+Imported automatically via __init__.py or cli.py startup.
+"""
+from __future__ import annotations
+
+import logging
+
+from pydantic import BaseModel
+
+from .settings import (
+ AppSettings,
+ LLMProvider,
+ get_settings,
+ set_provider,
+ update_settings,
+)
+
+logger = logging.getLogger(__name__)
+
+
+# Extended SettingsResponse that includes ollabridge
+class SettingsResponseExt(BaseModel):
+ provider: LLMProvider
+ providers: list[LLMProvider]
+ openai: dict
+ claude: dict
+ watsonx: dict
+ ollama: dict
+ ollabridge: dict = {}
+ ollabridge_connection_type: str | None = None
+ langflow_url: str
+ has_langflow_plan_flow: bool
+
+
+# Providers advertised to the UI; ollabridge is listed first so it appears
+# at the top of the provider picker.
+ALL_PROVIDERS = [
+    LLMProvider.ollabridge,
+    LLMProvider.openai,
+    LLMProvider.claude,
+    LLMProvider.watsonx,
+    LLMProvider.ollama,
+]
+
+
+def _build_settings_response(s: AppSettings) -> SettingsResponseExt:
+ ollabridge_connection_type = "local"
+ if s.ollabridge.api_key:
+ ollabridge_connection_type = "api_key"
+
+ # Warn if user included /v1 in base_url
+ ob_base = s.ollabridge.base_url or ""
+ if ob_base.rstrip("/").endswith("/v1"):
+ # The response should carry a warning; we'll handle this in the settings response
+ pass
+
+ return SettingsResponseExt(
+ provider=s.provider,
+ providers=ALL_PROVIDERS,
+ openai=s.openai.model_dump(),
+ claude=s.claude.model_dump(),
+ watsonx=s.watsonx.model_dump(),
+ ollama=s.ollama.model_dump(),
+ ollabridge=s.ollabridge.model_dump(),
+ ollabridge_connection_type=ollabridge_connection_type,
+ langflow_url=s.langflow_url,
+ has_langflow_plan_flow=bool(s.langflow_plan_flow_id),
+ )
+
+
+def apply_ollabridge_extension(app):
+    """Apply OllaBridge integration to the FastAPI app.
+
+    Call this after the app is created but before it starts serving.
+    Adds/overrides the settings endpoints to include ollabridge,
+    and mounts the ollabridge proxy router.
+
+    NOTE(review): FastAPI matches routes in registration order, so these
+    /api/settings handlers only take precedence if the originals have not
+    been registered yet — confirm the startup call ordering.
+    """
+    from .ollabridge_proxy import router as ollabridge_router
+
+    # Mount proxy routes
+    app.include_router(ollabridge_router)
+    logger.info("OllaBridge proxy mounted at /api/ollabridge/*")
+
+    # Override settings endpoints to include ollabridge
+    @app.get("/api/settings", response_model=SettingsResponseExt)
+    async def api_get_settings_ext():
+        # Read-only view of the current settings, extended with ollabridge.
+        return _build_settings_response(get_settings())
+
+    # Request body for switching the active provider.
+    class ProviderUpdate(BaseModel):
+        provider: LLMProvider
+
+    @app.post("/api/settings/provider", response_model=SettingsResponseExt)
+    async def api_set_provider_ext(update: ProviderUpdate):
+        s = set_provider(update.provider)
+        return _build_settings_response(s)
+
+    @app.put("/api/settings/llm", response_model=SettingsResponseExt)
+    async def api_update_llm_settings_ext(updates: dict):
+        s = update_settings(updates)
+        return _build_settings_response(s)
+
+    logger.info("OllaBridge settings endpoints registered (overrides original)")
diff --git a/gitpilot/approval_protocol.py b/gitpilot/approval_protocol.py
new file mode 100644
index 0000000000000000000000000000000000000000..703922f14120590f824c10658c8b5928f8288176
--- /dev/null
+++ b/gitpilot/approval_protocol.py
@@ -0,0 +1,142 @@
+# gitpilot/approval_protocol.py
+"""
+Tool approval protocol for agent execution.
+
+When an agent wants to run a dangerous tool (write_file, run_command,
+git_commit), the ApprovalGate pauses execution and waits for the user
+to approve via WebSocket, SSE callback, or VS Code postMessage.
+
+Permission modes:
+ - "normal" Ask user before dangerous tools (default)
+ - "auto" Approve everything automatically
+ - "plan" Block all writes and commands (read-only)
+"""
+from __future__ import annotations
+
+import asyncio
+import logging
+from typing import Dict
+
+from .agent_events import AgentEventBus, approval_needed, approval_resolved
+
+logger = logging.getLogger(__name__)
+
+# Tool names that require user approval before execution. Both the raw
+# VS Code tool ids and the CrewAI display names are listed because the
+# gate matches on whichever form the caller passes.
+DANGEROUS_TOOLS = frozenset({
+    # VS Code local agent tool names
+    "write_file",
+    "edit_file",
+    "run_command",
+    "git_commit",
+    # CrewAI tool display names
+    "Write local file",
+    "Delete local file",
+    "Run shell command",
+    "Git commit",
+})
+
+
+class ApprovalGate:
+ """
+ Async approval gate between the agent and tool execution.
+
+ Flow:
+ 1. Agent calls gate.check(tool, args, summary)
+ 2. Gate emits APPROVAL_NEEDED event to the bus
+ 3. Gate creates an asyncio.Future and waits
+ 4. Client sends approval response
+ 5. resolve() sets the Future result, agent proceeds or skips
+ """
+
+ def __init__(self, bus: AgentEventBus, mode: str = "normal") -> None:
+ self._bus = bus
+ self._mode = mode
+ self._pending: Dict[str, asyncio.Future] = {}
+ self._session_allowed: set[str] = set()
+
+ @property
+ def mode(self) -> str:
+ return self._mode
+
+ @mode.setter
+ def mode(self, value: str) -> None:
+ self._mode = value
+
+ async def check(
+ self,
+ tool_name: str,
+ tool_args: dict,
+ summary: str = "",
+ diff_preview: str | None = None,
+ ) -> bool:
+ """
+ Returns True if the tool may proceed. Blocks until user responds.
+ """
+ if tool_name not in DANGEROUS_TOOLS:
+ return True
+
+ if self._mode == "auto":
+ return True
+
+ if self._mode == "plan":
+ await self._bus.emit(
+ approval_resolved(f"denied-plan-{id(self)}", approved=False)
+ )
+ return False
+
+ if tool_name in self._session_allowed:
+ return True
+
+ # Normal mode: ask user
+ request_id = f"approval-{id(self)}-{len(self._pending)}"
+ future: asyncio.Future = asyncio.get_event_loop().create_future()
+ self._pending[request_id] = future
+
+ risk = "high" if tool_name in ("run_command", "Run shell command") else "medium"
+
+ await self._bus.emit(
+ approval_needed(
+ request_id=request_id,
+ tool=tool_name,
+ args=tool_args,
+ summary=summary
+ or f"{tool_name}({', '.join(f'{k}={v!r}' for k, v in list(tool_args.items())[:3])})",
+ diff_preview=diff_preview,
+ risk=risk,
+ )
+ )
+
+ try:
+ result = await asyncio.wait_for(future, timeout=120.0)
+ except asyncio.TimeoutError:
+ logger.warning(
+ "Approval timed out for %s (request %s)", tool_name, request_id
+ )
+ self._pending.pop(request_id, None)
+ return False
+
+ self._pending.pop(request_id, None)
+
+ approved = result.get("approved", False)
+ scope = result.get("scope", "once")
+
+ if approved and scope == "session":
+ self._session_allowed.add(tool_name)
+
+ await self._bus.emit(approval_resolved(request_id, approved))
+ return approved
+
+ def resolve(
+ self, request_id: str, approved: bool, scope: str = "once"
+ ) -> None:
+ """Called by the transport layer when the user responds."""
+ future = self._pending.get(request_id)
+ if future and not future.done():
+ future.set_result({"approved": approved, "scope": scope})
+
+ def cancel_all(self) -> None:
+ """Deny all pending approvals (e.g., on session close)."""
+ for future in self._pending.values():
+ if not future.done():
+ future.set_result({"approved": False, "scope": "once"})
+ self._pending.clear()
+ self._session_allowed.clear()
diff --git a/gitpilot/cli.py b/gitpilot/cli.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce0e569dc5c5089a685eb79d7123fd3130b5d3b7
--- /dev/null
+++ b/gitpilot/cli.py
@@ -0,0 +1,662 @@
+from __future__ import annotations
+
+import os
+import sys
+import threading
+import time
+import webbrowser
+from pathlib import Path
+
+import typer
+import uvicorn
+from rich.console import Console
+from rich.panel import Panel
+from rich.table import Table
+
+from .version import __version__
+from .settings import get_settings, LLMProvider
+from .model_catalog import list_models_for_provider
+
+
# Typer application that groups every GitPilot subcommand; shell completion
# is disabled because the tool is normally launched as a plain `gitpilot`.
cli = typer.Typer(add_completion=False, help="GitPilot - Agentic AI assistant for GitHub")
# Shared Rich console used by all commands for styled terminal output.
console = Console()
+
+
def _check_configuration():
    """Check configuration and collect human-readable problems.

    Returns:
        Tuple ``(has_env, has_github, provider_configured, issues, warnings)``
        where ``issues`` are headline problems and ``warnings`` are the
        indented follow-up hints printed beneath them.
    """
    issues = []
    warnings = []

    # Check for .env file in the current working directory
    env_file = Path.cwd() / ".env"
    has_env = env_file.exists()

    # Check GitHub token (either env var spelling is accepted)
    github_token = os.getenv("GITPILOT_GITHUB_TOKEN") or os.getenv("GITHUB_TOKEN")
    if not github_token:
        issues.append("❌ GitHub token not found")
        warnings.append(" Set GITPILOT_GITHUB_TOKEN or GITHUB_TOKEN in .env")
        warnings.append(" Get token at: https://github.com/settings/tokens")

    # Check LLM provider configuration
    settings = get_settings()
    provider = settings.provider

    provider_configured = False
    if provider == LLMProvider.openai:
        provider_configured = bool(settings.openai.api_key or os.getenv("OPENAI_API_KEY"))
    elif provider == LLMProvider.claude:
        provider_configured = bool(settings.claude.api_key or os.getenv("ANTHROPIC_API_KEY"))
    elif provider == LLMProvider.watsonx:
        provider_configured = bool(settings.watsonx.api_key or os.getenv("WATSONX_API_KEY"))
    elif provider.value in ("ollama", "ollabridge"):
        # Keyless providers: no API key required, the server just needs to be
        # reachable. Compared by value so this works even if the enum gains
        # members (ollabridge is the default provider in the Docker image).
        provider_configured = True

    if not provider_configured:
        issues.append(f"❌ {provider.value.upper()} API key not configured")
        # Plain string: the original used an f-string with no placeholders.
        warnings.append(" Configure in Admin UI or set environment variable")

    # bool() keeps has_github consistent with the issues check above
    # (an empty-string token counts as "not configured").
    return has_env, bool(github_token), provider_configured, issues, warnings
+
+
def _display_startup_banner(host: str, port: int):
    """Display a professional startup banner with configuration status.

    Prints, in order: a header panel, a borderless key/value status table,
    any issues reported by ``_check_configuration``, and either quick-setup
    instructions or a "ready" next-steps section.
    """
    console.print()

    # Header panel: app name, version, tagline
    console.print(Panel.fit(
        f"[bold cyan]GitPilot[/bold cyan] [dim]v{__version__}[/dim]\n"
        "[white]Agentic AI Assistant for GitHub Repositories[/white]",
        border_style="cyan"
    ))

    # Check configuration
    has_env, has_github, has_llm, issues, warnings = _check_configuration()
    settings = get_settings()

    # Configuration table (headerless, borderless two-column layout)
    table = Table(show_header=False, box=None, padding=(0, 2))
    table.add_column("Key", style="cyan")
    table.add_column("Value", style="white")

    # Environment file status
    env_status = "✅ Found" if has_env else "⚠️ Not found (using defaults)"
    table.add_row("Environment File", env_status)

    # GitHub token status
    github_status = "✅ Configured" if has_github else "❌ Not configured"
    table.add_row("GitHub Token", github_status)

    # LLM Provider status
    provider_name = settings.provider.value.upper()
    llm_status = f"✅ {provider_name}" if has_llm else f"⚠️ {provider_name} (not configured)"
    table.add_row("LLM Provider", llm_status)

    # Server info
    table.add_row("Server", f"http://{host}:{port}")

    console.print(table)
    console.print()

    # Display issues and their indented follow-up warnings
    if issues:
        console.print("[bold yellow]⚠️ Configuration Issues:[/bold yellow]")
        for issue in issues:
            console.print(f" {issue}")
        for warning in warnings:
            console.print(f" [dim]{warning}[/dim]")
        console.print()

    # Setup instructions only when there is no .env AND something essential
    # (GitHub token or LLM key) is missing; otherwise show next steps.
    if not has_env and (not has_github or not has_llm):
        console.print(Panel(
            "[bold]Quick Setup:[/bold]\n\n"
            "1. Copy .env.template to .env:\n"
            " [cyan]cp .env.template .env[/cyan]\n\n"
            "2. Edit .env and add your credentials\n\n"
            "3. Or configure via Admin UI in your browser\n\n"
            "[dim]See README.md for detailed setup instructions[/dim]",
            title="[yellow]Setup Required[/yellow]",
            border_style="yellow"
        ))
    else:
        console.print("[bold green]✓[/bold green] GitPilot is ready!")
        console.print()
        console.print("[bold]Next Steps:[/bold]")
        console.print(" • Open the Admin UI to configure LLM providers")
        console.print(" • Select a repository in the Workspace tab")
        console.print(" • Start chatting with your AI coding assistant")

    console.print()
    console.print("[dim]Press Ctrl+C to stop the server[/dim]")
    console.print()
+
+
def _run_server(host: str, port: int, reload: bool = False):
    """Run the FastAPI server (blocks until uvicorn exits).

    Thin wrapper so every entry point launches the app string
    ``gitpilot.api:app`` with identical options.
    """
    options = {
        "host": host,
        "port": port,
        "reload": reload,
        "log_level": "info",
    }
    uvicorn.run("gitpilot.api:app", **options)
+
+
@cli.command()
def serve(
    host: str = typer.Option("127.0.0.1", "--host", "-h", help="Host to bind"),
    port: int = typer.Option(8000, "--port", "-p", help="Port to bind"),
    reload: bool = typer.Option(False, "--reload", help="Enable auto-reload"),
    open_browser: bool = typer.Option(True, "--open/--no-open", help="Open browser"),
):
    """Start the GitPilot server with web UI.

    The server now runs in the MAIN thread (uvicorn installs its signal
    handlers there, which `--reload` requires and which makes Ctrl+C work);
    the browser is opened from a short-lived daemon timer instead. The
    previous non-daemon background server thread could never be stopped by
    Ctrl+C because `sys.exit()` does not terminate non-daemon threads.
    """
    # Check if port is already in use (prevent double-start)
    import socket
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        if s.connect_ex((host, port)) == 0:
            console.print(
                f"[yellow]⚠[/yellow] Port {port} is already in use. "
                f"GitPilot may already be running."
            )
            console.print(
                f"[dim]Run 'make stop' or kill the process on port {port} first.[/dim]"
            )
            sys.exit(1)

    # Display startup banner
    _display_startup_banner(host, port)

    # Open the browser after a short delay, from a daemon timer so it never
    # blocks interpreter shutdown.
    if open_browser:
        def _open_browser() -> None:
            try:
                webbrowser.open(f"http://{host}:{port}")
                console.print(f"[green]✓[/green] Browser opened at http://{host}:{port}")
            except Exception:
                console.print(f"[yellow]![/yellow] Please open http://{host}:{port} in your browser")

        timer = threading.Timer(1.5, _open_browser)
        timer.daemon = True
        timer.start()

    # Run the server in the foreground; Ctrl+C propagates directly.
    try:
        _run_server(host, port, reload=reload)
    except KeyboardInterrupt:
        console.print("\n[yellow]Shutting down GitPilot...[/yellow]")
        sys.exit(0)
+
+
@cli.command()
def config():
    """Show current configuration.

    Renders a three-column table (Setting / Value / Source) covering the
    active provider, GitHub token, and the provider-specific credentials.
    """
    console.print()
    console.print(Panel.fit(
        "[bold cyan]GitPilot Configuration[/bold cyan]",
        border_style="cyan"
    ))

    settings = get_settings()

    # Configuration details
    table = Table(title="Settings", show_header=True, header_style="bold cyan")
    table.add_column("Setting", style="cyan")
    table.add_column("Value", style="white")
    table.add_column("Source", style="dim")

    def _key_info(env_var: str, settings_key):
        """Return (status, source) for an API key from env and/or settings.

        Mirrors the per-provider logic previously triplicated: key is
        "Configured" if either source has it; source prefers Environment.
        """
        api_key = settings_key or os.getenv(env_var)
        status = "Configured" if api_key else "Not set"
        source = "Environment" if os.getenv(env_var) else ("Settings" if settings_key else "N/A")
        return status, source

    # Provider
    env_provider = os.getenv("GITPILOT_PROVIDER")
    provider_source = "Environment" if env_provider else "Settings file"
    table.add_row("Active Provider", settings.provider.value, provider_source)

    # GitHub token
    github_token = os.getenv("GITPILOT_GITHUB_TOKEN") or os.getenv("GITHUB_TOKEN")
    github_status = "Configured" if github_token else "Not set"
    github_source = "Environment" if github_token else "N/A"
    table.add_row("GitHub Token", github_status, github_source)

    # Provider-specific config
    if settings.provider == LLMProvider.openai:
        key_status, key_source = _key_info("OPENAI_API_KEY", settings.openai.api_key)
        table.add_row("OpenAI API Key", key_status, key_source)
        table.add_row("OpenAI Model", settings.openai.model or "gpt-4o-mini", "Settings")

    elif settings.provider == LLMProvider.claude:
        key_status, key_source = _key_info("ANTHROPIC_API_KEY", settings.claude.api_key)
        table.add_row("Claude API Key", key_status, key_source)
        table.add_row("Claude Model", settings.claude.model, "Settings")

    elif settings.provider == LLMProvider.watsonx:
        key_status, key_source = _key_info("WATSONX_API_KEY", settings.watsonx.api_key)
        table.add_row("Watsonx API Key", key_status, key_source)
        table.add_row("Watsonx Model", settings.watsonx.model_id, "Settings")

    elif settings.provider == LLMProvider.ollama:
        table.add_row("Ollama URL", settings.ollama.base_url, "Settings")
        table.add_row("Ollama Model", settings.ollama.model, "Settings")

    console.print(table)
    console.print()
    # Plain string: the original used an f-string with no placeholders.
    console.print("[dim]Settings file: ~/.gitpilot/settings.json[/dim]")
    console.print()
+
+
@cli.command()
def version():
    """Show GitPilot version."""
    banner = "GitPilot [cyan]v{}[/cyan]".format(__version__)
    console.print(banner)
+
+
def main():
    """Main entry point — dispatch to the CLI, or run the server by default.

    With no CLI arguments, performs a port-in-use probe and starts the
    server on 127.0.0.1:8000; otherwise defers to the Typer app.
    """
    if len(sys.argv) > 1:
        # Explicit subcommand given: let Typer handle it.
        cli()
        return

    # No arguments: refuse to double-start if 8000 is already bound.
    import socket
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        already_running = probe.connect_ex(("127.0.0.1", 8000)) == 0
    finally:
        probe.close()
    if already_running:
        console.print(
            "[yellow]⚠[/yellow] Port 8000 is already in use. "
            "GitPilot may already be running."
        )
        sys.exit(1)

    _display_startup_banner("127.0.0.1", 8000)
    try:
        _run_server("127.0.0.1", 8000, reload=False)
    except KeyboardInterrupt:
        console.print("\n[yellow]Shutting down GitPilot...[/yellow]")
        sys.exit(0)
+
+
def serve_only():
    """Entry point for gitpilot-api command (server only, no banner/browser)."""
    host, port = "127.0.0.1", 8000
    console.print("[cyan]GitPilot API Server[/cyan]")
    console.print(f"[dim]Starting on http://{host}:{port}[/dim]\n")
    try:
        _run_server(host, port, reload=False)
    except KeyboardInterrupt:
        console.print("\n[yellow]Shutting down...[/yellow]")
        sys.exit(0)
@cli.command()
def run(
    repo: str = typer.Option(..., "--repo", "-r", help="Repository as owner/repo"),
    message: str = typer.Option("", "--message", "-m", help="User request message"),
    branch: str = typer.Option(None, "--branch", "-b", help="Target branch"),
    auto_pr: bool = typer.Option(False, "--auto-pr", help="Create PR after execution"),
    from_pr: int = typer.Option(None, "--from-pr", help="Fetch context from PR number"),
    headless: bool = typer.Option(False, "--headless", help="Non-interactive JSON output"),
):
    """Run GitPilot non-interactively (headless mode for CI/CD).

    Exits 0 on success and 1 on failure. With ``--headless`` the raw JSON
    result is printed so pipelines can parse it. The message may also be
    piped via stdin instead of ``--message``.
    """
    import asyncio

    # NOTE: the redundant local `import sys` was removed — sys is already
    # imported at module level and the local import shadowed it needlessly.
    if not message and not sys.stdin.isatty():
        message = sys.stdin.read().strip()

    if not message:
        console.print("[red]Error:[/red] --message is required (or pipe via stdin)")
        raise typer.Exit(code=1)

    token = os.getenv("GITPILOT_GITHUB_TOKEN") or os.getenv("GITHUB_TOKEN")
    if not token:
        console.print("[red]Error:[/red] GITPILOT_GITHUB_TOKEN or GITHUB_TOKEN must be set")
        raise typer.Exit(code=1)

    from .headless import run_headless

    result = asyncio.run(run_headless(
        repo_full_name=repo,
        message=message,
        token=token,
        branch=branch,
        auto_pr=auto_pr,
        from_pr=from_pr,
    ))

    if headless:
        # Pure JSON for CI/CD consumption
        console.print(result.to_json())
    else:
        if result.success:
            console.print(f"[green]Success:[/green] {result.output[:500]}")
        else:
            console.print(f"[red]Failed:[/red] {result.error}")
        if result.pr_url:
            console.print(f"[cyan]PR:[/cyan] {result.pr_url}")

    raise typer.Exit(code=0 if result.success else 1)
+
+
+@cli.command("init")
+def init_project(
+ path: str = typer.Argument(".", help="Project directory to initialise"),
+):
+ """Initialize .gitpilot/ directory with template GITPILOT.md."""
+ from pathlib import Path as StdPath
+ from .memory import MemoryManager
+
+ workspace = StdPath(path).resolve()
+ mgr = MemoryManager(workspace)
+ md_path = mgr.init_project()
+ console.print(f"[green]Initialized:[/green] {md_path}")
+ console.print("[dim]Edit .gitpilot/GITPILOT.md to add your project conventions.[/dim]")
+
+
+@cli.command("plugin")
+def plugin_cmd(
+ action: str = typer.Argument(..., help="install | uninstall | list"),
+ source: str = typer.Argument(None, help="Git URL, local path, or plugin name"),
+):
+ """Manage GitPilot plugins."""
+ from .plugins import PluginManager
+
+ mgr = PluginManager()
+
+ if action == "list":
+ plugins = mgr.list_installed()
+ if not plugins:
+ console.print("[dim]No plugins installed.[/dim]")
+ return
+ table = Table(show_header=True, header_style="bold cyan")
+ table.add_column("Name")
+ table.add_column("Version")
+ table.add_column("Description")
+ for p in plugins:
+ table.add_row(p.name, p.version, p.description)
+ console.print(table)
+
+ elif action == "install":
+ if not source:
+ console.print("[red]Error:[/red] source is required for install")
+ raise typer.Exit(code=1)
+ try:
+ info = mgr.install(source)
+ console.print(f"[green]Installed:[/green] {info.name} v{info.version}")
+ except Exception as e:
+ console.print(f"[red]Error:[/red] {e}")
+ raise typer.Exit(code=1)
+
+ elif action == "uninstall":
+ if not source:
+ console.print("[red]Error:[/red] plugin name is required")
+ raise typer.Exit(code=1)
+ if mgr.uninstall(source):
+ console.print(f"[green]Uninstalled:[/green] {source}")
+ else:
+ console.print(f"[yellow]Not found:[/yellow] {source}")
+
+ else:
+ console.print(f"[red]Unknown action:[/red] {action}. Use: install, uninstall, list")
+ raise typer.Exit(code=1)
+
+
+@cli.command("skill")
+def skill_cmd(
+ name: str = typer.Argument(None, help="Skill name to invoke (or 'list')"),
+):
+ """List or invoke skills."""
+ from .skills import SkillManager
+
+ mgr = SkillManager(workspace_path=Path.cwd())
+ mgr.load_all()
+
+ if not name or name == "list":
+ skills = mgr.list_skills()
+ if not skills:
+ console.print("[dim]No skills found.[/dim]")
+ console.print("[dim]Create .gitpilot/skills/*.md to add skills.[/dim]")
+ return
+ table = Table(show_header=True, header_style="bold cyan")
+ table.add_column("Name")
+ table.add_column("Description")
+ table.add_column("Auto")
+ for s in skills:
+ table.add_row(s["name"], s["description"], str(s.get("auto_trigger", False)))
+ console.print(table)
+ else:
+ prompt = mgr.invoke(name)
+ if prompt is None:
+ console.print(f"[red]Skill not found:[/red] {name}")
+ raise typer.Exit(code=1)
+ console.print(f"[cyan]/{name}[/cyan]")
+ console.print(prompt)
+
+
+@cli.command("scan")
+def scan_cmd(
+ path: str = typer.Argument(".", help="Directory or file to scan"),
+ min_confidence: float = typer.Option(0.5, "--min-confidence", help="Minimum confidence threshold"),
+):
+ """Run AI-powered security scan on a directory or file."""
+ from .security import SecurityScanner
+
+ scanner = SecurityScanner(min_confidence=min_confidence)
+ target = Path(path).resolve()
+
+ if target.is_file():
+ findings = scanner.scan_file(str(target))
+ if not findings:
+ console.print("[green]No security issues found.[/green]")
+ return
+ table = Table(show_header=True, header_style="bold red")
+ table.add_column("Severity")
+ table.add_column("Rule")
+ table.add_column("Title")
+ table.add_column("Line")
+ table.add_column("File")
+ for f in findings:
+ table.add_row(f.severity.value, f.rule_id, f.title, str(f.line_number), f.file_path)
+ console.print(table)
+ else:
+ result = scanner.scan_directory(str(target))
+ console.print(f"[cyan]Scanned:[/cyan] {result.files_scanned} files in {result.scan_duration_ms:.0f}ms")
+ if not result.findings:
+ console.print("[green]No security issues found.[/green]")
+ return
+ console.print(f"[yellow]Found {len(result.findings)} issues:[/yellow]")
+ for sev, count in sorted(result.summary.items()):
+ color = "red" if sev in ("critical", "high") else "yellow" if sev == "medium" else "dim"
+ console.print(f" [{color}]{sev}: {count}[/{color}]")
+ console.print()
+ table = Table(show_header=True, header_style="bold red")
+ table.add_column("Severity")
+ table.add_column("Rule")
+ table.add_column("Title")
+ table.add_column("Line")
+ table.add_column("File")
+ for f in result.findings[:50]:
+ table.add_row(f.severity.value, f.rule_id, f.title, str(f.line_number), f.file_path)
+ console.print(table)
+ if len(result.findings) > 50:
+ console.print(f"[dim]... and {len(result.findings) - 50} more[/dim]")
+
+
+@cli.command("predict")
+def predict_cmd(
+ context: str = typer.Argument(..., help="Context string to get predictions for"),
+):
+ """Get proactive suggestions based on context."""
+ from .predictions import PredictiveEngine
+
+ engine = PredictiveEngine()
+ suggestions = engine.predict(context)
+
+ if not suggestions:
+ console.print("[dim]No suggestions for this context.[/dim]")
+ return
+
+ for s in suggestions:
+ score_color = "green" if s.relevance_score >= 0.8 else "yellow" if s.relevance_score >= 0.6 else "dim"
+ console.print(f" [{score_color}][{s.relevance_score:.0%}][/{score_color}] [bold]{s.title}[/bold]")
+ console.print(f" {s.description}")
+ console.print(f" [cyan]Prompt:[/cyan] {s.prompt}")
+ console.print()
+
+
+@cli.command("list-models")
+def list_models_cmd(
+ provider: str = typer.Option(
+ None,
+ "--provider",
+ "-p",
+ help="LLM provider (openai, claude, watsonx, ollama). Defaults to active provider.",
+ )
+):
+ """List LLM models available for the configured provider."""
+ settings = get_settings()
+
+ if provider is None:
+ target = settings.provider
+ else:
+ # Normalize to enum
+ try:
+ target = LLMProvider(provider)
+ except ValueError:
+ console.print(f"[red]Unknown provider:[/red] {provider}")
+ raise typer.Exit(code=1)
+
+ models, error = list_models_for_provider(target, settings)
+
+ console.print()
+ console.print(
+ Panel.fit(
+ f"[bold cyan]Models for provider[/bold cyan] [white]{target.value}[/white]",
+ border_style="cyan",
+ )
+ )
+
+ if error:
+ console.print(f"[yellow]Warning:[/yellow] {error}")
+
+ if not models:
+ console.print("No models found.")
+ return
+
+ table = Table(show_header=True, header_style="bold cyan")
+ table.add_column("#", style="dim", justify="right")
+ table.add_column("Model ID", style="white")
+
+ for i, m in enumerate(models, start=1):
+ table.add_row(str(i), m)
+
+ console.print(table)
+ console.print()
+
+
+@cli.command("generate")
+def generate_cmd(
+ message: str = typer.Option(..., "--message", "-m", help="What to generate (e.g. 'Flask hello world app')"),
+ output_dir: str = typer.Option(".", "--output", "-o", help="Directory to write generated files into"),
+ dry_run: bool = typer.Option(False, "--dry-run", help="Show what would be generated without writing files"),
+):
+ """Generate code locally using the configured LLM.
+
+ Creates files on disk from a natural-language prompt. No GitHub
+ required — works with any LLM provider (Ollama, OpenAI, Claude).
+
+ Examples::
+
+ gitpilot generate -m "Create a Flask hello world app"
+ gitpilot generate -m "Python CLI with click" -o my-project
+ gitpilot generate -m "React component for a todo list" --dry-run
+ """
+ import re
+
+ settings = get_settings()
+ provider_name = settings.provider.value
+ model_name = "default"
+ provider_settings = getattr(settings, provider_name, None)
+ if provider_settings:
+ model_name = getattr(provider_settings, "model", None) or "default"
+
+ console.print(f"[dim]Provider:[/dim] {provider_name} · [dim]Model:[/dim] {model_name}")
+ console.print(f"[dim]Output:[/dim] {os.path.abspath(output_dir)}")
+ console.print()
+
+ prompt = (
+ "You are GitPilot, a code generation assistant.\n"
+ "Generate the requested project files. For EACH file, output it in this EXACT format:\n"
+ "\n"
+ "```language filepath\n"
+ "...complete file content...\n"
+ "```\n"
+ "\n"
+ "Example:\n"
+ "```python app.py\n"
+ "from flask import Flask\n"
+ "app = Flask(__name__)\n"
+ "```\n"
+ "\n"
+ "Rules:\n"
+ "- Opening fence: triple backticks + language + space + relative filepath\n"
+ "- Output COMPLETE file content, not snippets\n"
+ "- Generate ALL files needed for a working project\n"
+ "- Include a README.md if appropriate\n"
+ "\n"
+ f"User request: {message}\n"
+ )
+
+ console.print("[bold]Generating...[/bold]", end="")
+
+ try:
+ from .llm_provider import build_llm
+ llm = build_llm()
+ answer = llm.call([{"role": "user", "content": prompt}])
+ except Exception as e:
+ console.print(f"\n[red]LLM error:[/red] {e}")
+ raise typer.Exit(code=1)
+
+ console.print(" [green]done[/green]\n")
+
+ # Extract structured edits using the same extractor as the API
+ from .api import _extract_edits_from_answer
+
+ edits = _extract_edits_from_answer(answer)
+
+ if not edits:
+ console.print("[yellow]No files extracted from LLM response.[/yellow]")
+ console.print("[dim]Raw response (first 2000 chars):[/dim]")
+ console.print(answer[:2000])
+ raise typer.Exit(code=1)
+
+ files_written = []
+ for edit in edits:
+ safe_path = edit["file"]
+ if not safe_path:
+ continue
+
+ content = edit.get("content", "").rstrip()
+
+ if dry_run:
+ console.print(f" [cyan]Would create:[/cyan] {safe_path} ({len(content)} bytes)")
+ else:
+ full_path = os.path.join(output_dir, safe_path)
+ os.makedirs(os.path.dirname(full_path) or ".", exist_ok=True)
+ with open(full_path, "w", encoding="utf-8") as f:
+ f.write(content + "\n")
+ files_written.append(safe_path)
+ console.print(f" [green]Created:[/green] {safe_path} ({len(content)} bytes)")
+
+ console.print()
+ if dry_run:
+ console.print(f"[dim]Dry run: {len(edits)} file(s) would be created.[/dim]")
+ else:
+ console.print(f"[green]Generated {len(files_written)} file(s) in {os.path.abspath(output_dir)}[/green]")
+
diff --git a/gitpilot/context_pack.py b/gitpilot/context_pack.py
new file mode 100644
index 0000000000000000000000000000000000000000..86dc9cc361f12ec704e50acb1765cad146e02633
--- /dev/null
+++ b/gitpilot/context_pack.py
@@ -0,0 +1,151 @@
+# gitpilot/context_pack.py
+"""Context Pack — compose a bounded, token-safe context injection for agents.
+
+Non-destructive, additive feature. If no context assets or use cases exist
+the pack is empty and agents behave exactly as before.
+
+Usage in agentic.py / agent builders:
+ from .context_pack import build_context_pack
+ pack = build_context_pack(workspace_path, query=goal)
+ # Prepend ``pack`` to agent backstory or system prompt.
+"""
+from __future__ import annotations
+
+import logging
+from pathlib import Path
+from typing import Optional
+
+logger = logging.getLogger(__name__)
+
+# ---------------------------------------------------------------------------
+# Limits (keep total injection well under 8 K chars to avoid token blowups)
+# ---------------------------------------------------------------------------
MAX_CONVENTIONS_CHARS = 2_000  # cap on the conventions section
MAX_USE_CASE_CHARS = 2_000  # cap on the active-use-case section
MAX_CHUNKS_CHARS = 3_000  # cap on the retrieved-assets section
MAX_CHUNKS = 8  # max number of vault chunks retrieved per query
+
+
def build_context_pack(
    workspace_path: Path,
    query: str = "",
    *,
    include_conventions: bool = True,
    include_use_case: bool = True,
    include_assets: bool = True,
    max_total_chars: int = 7_000,
) -> str:
    """Build a markdown context pack for agent prompt injection.

    Sections (conventions, active use case, asset chunks) are appended in
    order while they fit within ``max_total_chars``. Returns an empty
    string when nothing is available (zero overhead).
    """
    sections: list[str] = []
    used = 0

    def _append_if_fits(text: str) -> None:
        # Add a section only when it is non-empty and stays within budget.
        nonlocal used
        if text and used + len(text) <= max_total_chars:
            sections.append(text)
            used += len(text)

    # 1) Conventions / Rules (existing MemoryManager)
    if include_conventions:
        _append_if_fits(_conventions_section(workspace_path))

    # 2) Active Use Case
    if include_use_case:
        _append_if_fits(_use_case_section(workspace_path))

    # 3) Relevant context chunks from uploaded assets (pre-truncated to the
    #    remaining budget, so it is appended directly).
    if include_assets and query:
        budget = min(max_total_chars - used, MAX_CHUNKS_CHARS)
        chunk_text = _assets_section(workspace_path, query, max_chars=budget)
        if chunk_text:
            sections.append(chunk_text)

    if not sections:
        return ""

    return "## Project Context Pack (auto)\n\n" + "\n\n".join(sections)
+
+
+# ---------------------------------------------------------------------------
+# Section builders
+# ---------------------------------------------------------------------------
def _conventions_section(workspace_path: Path) -> str:
    """Render project conventions from MemoryManager, or "" if unavailable."""
    try:
        from .memory import MemoryManager

        context = MemoryManager(workspace_path).load_context()
        rendered = context.to_system_prompt()
        if not rendered:
            return ""
        return "### Conventions\n\n" + rendered[:MAX_CONVENTIONS_CHARS]
    except Exception:
        logger.debug("Could not load conventions for context pack", exc_info=True)
        return ""
+
+
def _use_case_section(workspace_path: Path) -> str:
    """Render the active use case as a markdown bullet list, or ""."""
    try:
        from .use_case import UseCaseManager

        active = UseCaseManager(workspace_path).get_active_use_case()
        if not active:
            return ""

        spec = active.spec
        out = ["### Active Use Case"]

        # Scalar fields: one bullet each, skipped when empty.
        for label, value in (
            ("Title", spec.title),
            ("Summary", spec.summary),
            ("Problem", spec.problem),
            ("Users", spec.users),
        ):
            if value:
                out.append(f"- **{label}:** {value}")

        # List fields: bullet header plus capped sub-bullets.
        for label, items, cap in (
            ("Requirements", spec.requirements, 10),
            ("Acceptance Criteria", spec.acceptance_criteria, 10),
            ("Constraints", spec.constraints, 5),
        ):
            if items:
                out.append(f"- **{label}:**")
                out.extend(f"  - {item}" for item in items[:cap])

        return "\n".join(out)[:MAX_USE_CASE_CHARS]
    except Exception:
        logger.debug("Could not load active use case for context pack", exc_info=True)
        return ""
+
+
def _assets_section(
    workspace_path: Path,
    query: str,
    max_chars: int = MAX_CHUNKS_CHARS,
) -> str:
    """Render the most relevant uploaded-asset chunks for *query*, or ""."""
    try:
        from .context_vault import ContextVault

        hits = ContextVault(workspace_path).search_chunks(
            query, max_chunks=MAX_CHUNKS, max_chars=max_chars
        )
        if not hits:
            return ""

        parts = [f"### Relevant References (Top {len(hits)})"]
        parts.extend(
            f"[Asset: {hit.filename} | chunk {hit.chunk_index}]\n{hit.text}"
            for hit in hits
        )
        return "\n\n".join(parts)[:max_chars]
    except Exception:
        logger.debug("Could not search context vault for context pack", exc_info=True)
        return ""
diff --git a/gitpilot/context_vault.py b/gitpilot/context_vault.py
new file mode 100644
index 0000000000000000000000000000000000000000..1fb08f450dc9f393bb3d12985e07a2747e8123e9
--- /dev/null
+++ b/gitpilot/context_vault.py
@@ -0,0 +1,532 @@
+# gitpilot/context_vault.py
+"""Context Vault — upload, extract, index, and retrieve project context assets.
+
+Non-destructive, additive feature. Stores everything under:
+ ~/.gitpilot/workspaces/{owner}/{repo}/.gitpilot/context/
+
+Directory layout:
+ context/
+ assets/ raw uploaded files
+ extracted/ extracted text + metadata JSON
+ index/ SQLite metadata + chunk index
+ use_cases/ structured use-case JSON + markdown exports
+
+This module handles asset lifecycle (upload, extract, index, delete)
+and chunk retrieval for context-pack injection into agent prompts.
+"""
+from __future__ import annotations
+
+import json
+import logging
+import os
+import re
+import shutil
+import sqlite3
+import time
+import uuid
+from dataclasses import asdict, dataclass, field
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+logger = logging.getLogger(__name__)
+
+# ---------------------------------------------------------------------------
+# Limits
+# ---------------------------------------------------------------------------
+MAX_UPLOAD_BYTES = 200 * 1024 * 1024 # 200 MB default
+MAX_EXTRACT_CHARS = 500_000
+CHUNK_SIZE = 800 # chars per chunk (approx)
+CHUNK_OVERLAP = 100
+MAX_RETRIEVAL_CHUNKS = 8
+MAX_RETRIEVAL_CHARS = 6_000
+
+
+# ---------------------------------------------------------------------------
+# Data classes
+# ---------------------------------------------------------------------------
+@dataclass
+class AssetMeta:
+    """Summary metadata for one uploaded asset, as returned by listings."""
+
+    asset_id: str        # short hex id (uuid4 prefix) assigned at upload
+    filename: str        # original filename as supplied by the uploader
+    mime: str            # MIME type (guessed from the extension when absent)
+    size_bytes: int      # raw upload size in bytes
+    created_at: str      # UTC timestamp, ISO-8601 with trailing "Z"
+    extracted_chars: int = 0   # length of the extracted plain text
+    indexed_chunks: int = 0    # number of chunks written to the SQLite index
+    notes: str = ""            # free-form notes
+
+    def to_dict(self) -> dict:
+        # Plain-dict form for JSON serialization.
+        return asdict(self)
+
+
+@dataclass
+class ExtractedAsset:
+    """Full extraction record for one asset (extracted text included)."""
+
+    asset_id: str
+    filename: str
+    mime: str
+    extracted_text: str          # plain text pulled from the file ("" for binaries)
+    pages: Optional[int] = None  # page count, when an extractor reports one
+    created_at: str = ""         # UTC ISO-8601 timestamp
+    notes: str = ""              # free-form notes
+
+
+@dataclass
+class ChunkResult:
+    """One indexed chunk returned by ContextVault.search_chunks."""
+
+    asset_id: str       # owning asset id
+    filename: str       # original filename of the owning asset
+    chunk_index: int    # 0-based position of this chunk within the asset
+    text: str           # chunk contents
+    score: float = 0.0  # keyword-match relevance (higher is better)
+
+
+# ---------------------------------------------------------------------------
+# Vault manager
+# ---------------------------------------------------------------------------
+class ContextVault:
+ """Manages per-repo context vault under .gitpilot/context/."""
+
+ def __init__(self, workspace_path: Path):
+ self.workspace_path = workspace_path
+ self.vault_dir = workspace_path / ".gitpilot" / "context"
+ self.assets_dir = self.vault_dir / "assets"
+ self.extracted_dir = self.vault_dir / "extracted"
+ self.index_dir = self.vault_dir / "index"
+ self.use_cases_dir = self.vault_dir / "use_cases"
+
+ # ------------------------------------------------------------------
+ # Init & safety
+ # ------------------------------------------------------------------
+ def _ensure_dirs(self):
+ for d in (self.assets_dir, self.extracted_dir, self.index_dir, self.use_cases_dir):
+ d.mkdir(parents=True, exist_ok=True)
+
+ def _safe_resolve(self, base: Path, name: str) -> Path:
+ """Prevent path traversal attacks."""
+ full = (base / name).resolve()
+ if not str(full).startswith(str(base.resolve())):
+ raise PermissionError(f"Path traversal blocked: {name}")
+ return full
+
+ # ------------------------------------------------------------------
+ # Asset CRUD
+ # ------------------------------------------------------------------
+ def list_assets(self) -> List[AssetMeta]:
+ """Return metadata for all uploaded assets."""
+ self._ensure_dirs()
+ results: List[AssetMeta] = []
+ for ext_file in sorted(self.extracted_dir.glob("*.json")):
+ try:
+ data = json.loads(ext_file.read_text(encoding="utf-8"))
+ results.append(AssetMeta(
+ asset_id=data.get("asset_id", ext_file.stem),
+ filename=data.get("filename", ""),
+ mime=data.get("mime", ""),
+ size_bytes=data.get("size_bytes", 0),
+ created_at=data.get("created_at", ""),
+ extracted_chars=len(data.get("extracted_text", "")),
+ indexed_chunks=data.get("indexed_chunks", 0),
+ notes=data.get("notes", ""),
+ ))
+ except Exception:
+ logger.warning("Skipping corrupt metadata: %s", ext_file)
+ return results
+
+ def upload_asset(self, filename: str, content: bytes, mime: str = "") -> AssetMeta:
+ """Store a raw asset and run extraction + indexing."""
+ self._ensure_dirs()
+
+ if len(content) > MAX_UPLOAD_BYTES:
+ raise ValueError(
+ f"File too large ({len(content)} bytes). Max is {MAX_UPLOAD_BYTES}."
+ )
+
+ asset_id = uuid.uuid4().hex[:12]
+ safe_name = re.sub(r"[^\w.\-]", "_", filename)
+ stored_name = f"{asset_id}_{safe_name}"
+
+ asset_path = self._safe_resolve(self.assets_dir, stored_name)
+ asset_path.write_bytes(content)
+
+ if not mime:
+ mime = _guess_mime(filename)
+
+ now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
+
+ # Extract text
+ extracted_text = _extract_text(asset_path, mime)
+
+ # Chunk + index
+ chunks = _chunk_text(extracted_text, CHUNK_SIZE, CHUNK_OVERLAP)
+ indexed_count = self._index_chunks(asset_id, filename, chunks)
+
+ # Save extracted metadata
+ meta_data = {
+ "asset_id": asset_id,
+ "filename": filename,
+ "stored_name": stored_name,
+ "mime": mime,
+ "size_bytes": len(content),
+ "extracted_text": extracted_text[:MAX_EXTRACT_CHARS],
+ "pages": None,
+ "created_at": now,
+ "indexed_chunks": indexed_count,
+ "notes": "",
+ }
+ meta_path = self._safe_resolve(self.extracted_dir, f"{asset_id}.json")
+ meta_path.write_text(json.dumps(meta_data, indent=2), encoding="utf-8")
+
+ return AssetMeta(
+ asset_id=asset_id,
+ filename=filename,
+ mime=mime,
+ size_bytes=len(content),
+ created_at=now,
+ extracted_chars=len(extracted_text),
+ indexed_chunks=indexed_count,
+ )
+
+ def delete_asset(self, asset_id: str) -> bool:
+ """Remove asset, extracted data, and index entries."""
+ self._ensure_dirs()
+
+ # Remove extracted metadata
+ meta_path = self.extracted_dir / f"{asset_id}.json"
+ stored_name = None
+ if meta_path.exists():
+ try:
+ data = json.loads(meta_path.read_text(encoding="utf-8"))
+ stored_name = data.get("stored_name")
+ except Exception:
+ pass
+ meta_path.unlink()
+
+ # Remove raw asset
+ if stored_name:
+ asset_path = self.assets_dir / stored_name
+ if asset_path.exists():
+ asset_path.unlink()
+ else:
+ # fallback: find by prefix
+ for f in self.assets_dir.iterdir():
+ if f.name.startswith(asset_id):
+ f.unlink()
+ break
+
+ # Remove from index
+ self._remove_from_index(asset_id)
+
+ return True
+
+ def get_asset_path(self, asset_id: str) -> Optional[Path]:
+ """Return the raw asset path for download."""
+ self._ensure_dirs()
+ meta_path = self.extracted_dir / f"{asset_id}.json"
+ if meta_path.exists():
+ try:
+ data = json.loads(meta_path.read_text(encoding="utf-8"))
+ stored_name = data.get("stored_name", "")
+ if stored_name:
+ p = self.assets_dir / stored_name
+ if p.exists():
+ return p
+ except Exception:
+ pass
+
+ # fallback
+ for f in self.assets_dir.iterdir():
+ if f.name.startswith(asset_id):
+ return f
+ return None
+
+ def get_asset_filename(self, asset_id: str) -> str:
+ """Return original filename for an asset."""
+ meta_path = self.extracted_dir / f"{asset_id}.json"
+ if meta_path.exists():
+ try:
+ data = json.loads(meta_path.read_text(encoding="utf-8"))
+ return data.get("filename", "unknown")
+ except Exception:
+ pass
+ return "unknown"
+
+ # ------------------------------------------------------------------
+ # Indexing (SQLite-backed)
+ # ------------------------------------------------------------------
+ def _get_db(self) -> sqlite3.Connection:
+ self._ensure_dirs()
+ db_path = self.index_dir / "context.sqlite"
+ conn = sqlite3.connect(str(db_path))
+ conn.execute("""
+ CREATE TABLE IF NOT EXISTS chunks (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ asset_id TEXT NOT NULL,
+ filename TEXT NOT NULL,
+ chunk_index INTEGER NOT NULL,
+ text TEXT NOT NULL
+ )
+ """)
+ conn.execute("""
+ CREATE INDEX IF NOT EXISTS idx_chunks_asset ON chunks(asset_id)
+ """)
+ conn.commit()
+ return conn
+
+ def _index_chunks(self, asset_id: str, filename: str, chunks: List[str]) -> int:
+ conn = self._get_db()
+ try:
+ # Remove old entries for this asset (re-index)
+ conn.execute("DELETE FROM chunks WHERE asset_id = ?", (asset_id,))
+ for i, chunk_text in enumerate(chunks):
+ conn.execute(
+ "INSERT INTO chunks (asset_id, filename, chunk_index, text) VALUES (?, ?, ?, ?)",
+ (asset_id, filename, i, chunk_text),
+ )
+ conn.commit()
+ return len(chunks)
+ finally:
+ conn.close()
+
+ def _remove_from_index(self, asset_id: str):
+ try:
+ conn = self._get_db()
+ conn.execute("DELETE FROM chunks WHERE asset_id = ?", (asset_id,))
+ conn.commit()
+ conn.close()
+ except Exception:
+ pass
+
+ # ------------------------------------------------------------------
+ # Retrieval
+ # ------------------------------------------------------------------
+ def search_chunks(
+ self,
+ query: str,
+ max_chunks: int = MAX_RETRIEVAL_CHUNKS,
+ max_chars: int = MAX_RETRIEVAL_CHARS,
+ ) -> List[ChunkResult]:
+ """Simple keyword-based retrieval (BM25-like scoring).
+
+ Phase 1: naive keyword matching. Phase 2 can add embeddings.
+ """
+ if not query.strip():
+ return []
+
+ keywords = _extract_keywords(query)
+ if not keywords:
+ return []
+
+ try:
+ conn = self._get_db()
+ except Exception:
+ return []
+
+ try:
+ rows = conn.execute(
+ "SELECT asset_id, filename, chunk_index, text FROM chunks"
+ ).fetchall()
+ finally:
+ conn.close()
+
+ scored: List[ChunkResult] = []
+ for asset_id, filename, chunk_index, text in rows:
+ text_lower = text.lower()
+ score = 0.0
+ for kw in keywords:
+ count = text_lower.count(kw.lower())
+ if count > 0:
+ # simple TF score
+ score += count * (1.0 + len(kw) * 0.1)
+ if score > 0:
+ scored.append(ChunkResult(
+ asset_id=asset_id,
+ filename=filename,
+ chunk_index=chunk_index,
+ text=text,
+ score=score,
+ ))
+
+ scored.sort(key=lambda c: c.score, reverse=True)
+
+ # Enforce limits
+ results: List[ChunkResult] = []
+ total_chars = 0
+ for chunk in scored[:max_chunks * 2]: # over-fetch then trim
+ if len(results) >= max_chunks:
+ break
+ if total_chars + len(chunk.text) > max_chars:
+ break
+ results.append(chunk)
+ total_chars += len(chunk.text)
+
+ return results
+
+
+# ---------------------------------------------------------------------------
+# Text extraction helpers
+# ---------------------------------------------------------------------------
+def _guess_mime(filename: str) -> str:
+    """Guess a MIME type from the file extension.
+
+    A fixed table is used instead of the stdlib ``mimetypes`` module so
+    results are platform-independent (mimetypes can vary with the OS
+    registry) and cover types we care about (markdown, vtt/srt, source
+    code).  Unknown extensions fall back to application/octet-stream.
+    """
+    ext = Path(filename).suffix.lower()
+    mime_map = {
+        # documents & data
+        ".txt": "text/plain",
+        ".md": "text/markdown",
+        ".csv": "text/csv",
+        ".json": "application/json",
+        ".pdf": "application/pdf",
+        ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+        ".doc": "application/msword",
+        # images
+        ".png": "image/png",
+        ".jpg": "image/jpeg",
+        ".jpeg": "image/jpeg",
+        ".gif": "image/gif",
+        ".svg": "image/svg+xml",
+        # media & subtitles
+        ".mp4": "video/mp4",
+        ".mov": "video/quicktime",
+        ".mp3": "audio/mpeg",
+        ".wav": "audio/wav",
+        ".vtt": "text/vtt",
+        ".srt": "text/srt",
+        # source code & config
+        ".py": "text/x-python",
+        ".js": "text/javascript",
+        ".ts": "text/typescript",
+        ".jsx": "text/jsx",
+        ".tsx": "text/tsx",
+        ".html": "text/html",
+        ".css": "text/css",
+        ".yaml": "text/yaml",
+        ".yml": "text/yaml",
+        ".toml": "text/toml",
+        ".xml": "text/xml",
+        ".sh": "text/x-shellscript",
+        ".go": "text/x-go",
+        ".rs": "text/x-rust",
+        ".java": "text/x-java",
+        ".rb": "text/x-ruby",
+    }
+    return mime_map.get(ext, "application/octet-stream")
+
+
+def _extract_text(path: Path, mime: str) -> str:
+ """Best-effort text extraction from a file."""
+ # Text-based files
+ if mime.startswith("text/") or mime in (
+ "application/json",
+ "text/markdown",
+ "text/csv",
+ "text/vtt",
+ "text/srt",
+ "text/toml",
+ "text/yaml",
+ ):
+ try:
+ return path.read_text(encoding="utf-8", errors="replace")[:MAX_EXTRACT_CHARS]
+ except Exception:
+ return ""
+
+ # PDF
+ if mime == "application/pdf":
+ return _extract_pdf(path)
+
+ # DOCX
+ if "wordprocessingml" in mime or mime == "application/msword":
+ return _extract_docx(path)
+
+ # Binary/media — no extraction, store only
+ return ""
+
+
+def _extract_pdf(path: Path) -> str:
+ """Extract text from PDF. Tries pypdf/PyPDF2 first, falls back gracefully."""
+ try:
+ import pypdf
+ try:
+ reader = pypdf.PdfReader(str(path))
+ pages = []
+ for page in reader.pages:
+ text = page.extract_text()
+ if text:
+ pages.append(text)
+ return "\n\n".join(pages)[:MAX_EXTRACT_CHARS]
+ except Exception as e:
+ logger.warning("PDF extraction failed with pypdf: %s", e)
+ return ""
+ except ImportError:
+ pass
+
+ try:
+ import PyPDF2 # noqa: N813
+ try:
+ reader = PyPDF2.PdfReader(str(path))
+ pages = []
+ for page in reader.pages:
+ text = page.extract_text()
+ if text:
+ pages.append(text)
+ return "\n\n".join(pages)[:MAX_EXTRACT_CHARS]
+ except Exception as e:
+ logger.warning("PDF extraction failed with PyPDF2: %s", e)
+ return ""
+ except ImportError:
+ pass
+
+ logger.info("PDF extraction unavailable (install pypdf or PyPDF2). Storing PDF without text.")
+ return ""
+
+def _extract_docx(path: Path) -> str:
+    """Extract text from DOCX.
+
+    Joins all non-blank paragraphs with blank lines, capped at
+    MAX_EXTRACT_CHARS.  Requires the optional python-docx package;
+    returns "" if it is missing or extraction fails.
+    """
+    try:
+        import docx
+        doc = docx.Document(str(path))
+        paragraphs = [p.text for p in doc.paragraphs if p.text.strip()]
+        return "\n\n".join(paragraphs)[:MAX_EXTRACT_CHARS]
+    except ImportError:
+        logger.info("DOCX extraction unavailable (install python-docx). Storing without text.")
+        return ""
+    except Exception as e:
+        logger.warning("DOCX extraction failed: %s", e)
+        return ""
+
+
+# ---------------------------------------------------------------------------
+# Chunking
+# ---------------------------------------------------------------------------
+def _chunk_text(text: str, chunk_size: int = CHUNK_SIZE, overlap: int = CHUNK_OVERLAP) -> List[str]:
+ """Split text into overlapping chunks."""
+ if not text:
+ return []
+
+ chunks: List[str] = []
+ start = 0
+ while start < len(text):
+ end = start + chunk_size
+ chunk = text[start:end]
+ if chunk.strip():
+ chunks.append(chunk.strip())
+ start = end - overlap
+ if start >= len(text):
+ break
+
+ return chunks
+
+
+def _extract_keywords(query: str) -> List[str]:
+ """Extract meaningful keywords from a query string."""
+ # Remove common stop words
+ stop_words = {
+ "the", "a", "an", "is", "are", "was", "were", "be", "been", "being",
+ "have", "has", "had", "do", "does", "did", "will", "would", "could",
+ "should", "may", "might", "shall", "can", "need", "dare", "ought",
+ "used", "to", "of", "in", "for", "on", "with", "at", "by", "from",
+ "as", "into", "through", "during", "before", "after", "above",
+ "below", "between", "out", "off", "over", "under", "again",
+ "further", "then", "once", "here", "there", "when", "where", "why",
+ "how", "all", "both", "each", "few", "more", "most", "other",
+ "some", "such", "no", "nor", "not", "only", "own", "same", "so",
+ "than", "too", "very", "just", "because", "but", "and", "or", "if",
+ "while", "what", "which", "who", "whom", "this", "that", "these",
+ "those", "i", "me", "my", "we", "our", "you", "your", "he", "him",
+ "she", "her", "it", "its", "they", "them", "their",
+ }
+
+ words = re.findall(r"\w+", query.lower())
+ keywords = [w for w in words if w not in stop_words and len(w) > 1]
+ return keywords
diff --git a/gitpilot/cross_repo.py b/gitpilot/cross_repo.py
new file mode 100644
index 0000000000000000000000000000000000000000..45ccb790864d0f87e1dcc39ea9ce6894e0147988
--- /dev/null
+++ b/gitpilot/cross_repo.py
@@ -0,0 +1,351 @@
+# gitpilot/cross_repo.py
+"""Cross-repository intelligence — dependency graphs and impact analysis.
+
+Analyses patterns across multiple repositories to provide:
+- Dependency graphs (repo A depends on repo B)
+- Impact analysis (change in lib affects services)
+- Shared convention detection
+- Migration planning across repos
+
+Draws on the concept of *software ecosystems analysis* from research
+on large-scale dependency management (Decan et al., 2019).
+"""
+from __future__ import annotations
+
+import json
+import logging
+import re
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any, Dict, List, Optional, Set
+
+logger = logging.getLogger(__name__)
+
+# Common dependency file patterns
+_DEP_FILES = {
+ "package.json": "npm",
+ "requirements.txt": "pip",
+ "Pipfile": "pipenv",
+ "pyproject.toml": "pyproject",
+ "Cargo.toml": "cargo",
+ "go.mod": "go",
+ "Gemfile": "bundler",
+ "pom.xml": "maven",
+ "build.gradle": "gradle",
+ "composer.json": "composer",
+}
+
+
+@dataclass
+class Dependency:
+    """A dependency relationship between two entities (edge in the graph)."""
+
+    source: str  # depending repo, e.g., "owner/repo-a" (stamped by the analyzer)
+    target: str  # e.g., "owner/repo-b" or "package-name"
+    dep_type: str = "runtime"  # runtime | dev | peer | optional
+    version: str = ""          # raw version constraint as written in the manifest
+    ecosystem: str = ""        # npm, pip, cargo, etc.
+
+    def to_dict(self) -> Dict[str, Any]:
+        # Plain-dict form for JSON serialization.
+        return {
+            "source": self.source,
+            "target": self.target,
+            "dep_type": self.dep_type,
+            "version": self.version,
+            "ecosystem": self.ecosystem,
+        }
+
+
+@dataclass
+class DependencyGraph:
+    """A graph of dependencies across repositories."""
+
+    repos: List[str] = field(default_factory=list)                # repos that were scanned
+    dependencies: List[Dependency] = field(default_factory=list)  # edges (source -> target)
+    ecosystems: List[str] = field(default_factory=list)           # sorted ecosystems seen
+
+    @property
+    def node_count(self) -> int:
+        # Unique endpoints (repos and external packages); recomputed per access.
+        nodes: Set[str] = set()
+        for d in self.dependencies:
+            nodes.add(d.source)
+            nodes.add(d.target)
+        return len(nodes)
+
+    @property
+    def edge_count(self) -> int:
+        # One edge per declared dependency.
+        return len(self.dependencies)
+
+    def to_dict(self) -> Dict[str, Any]:
+        # JSON-ready form, including the derived counts.
+        return {
+            "repos": self.repos,
+            "dependencies": [d.to_dict() for d in self.dependencies],
+            "ecosystems": self.ecosystems,
+            "node_count": self.node_count,
+            "edge_count": self.edge_count,
+        }
+
+
+@dataclass
+class ImpactReport:
+    """Impact analysis report for a change in a repository."""
+
+    source_repo: str         # repo where the change originates
+    change_description: str  # human description of the change
+    affected_repos: List[str] = field(default_factory=list)  # direct + transitive dependents
+    risk_level: str = "low"  # low | medium | high | critical (by count of affected repos)
+    details: List[str] = field(default_factory=list)  # one line per direct dependency edge
+
+    def to_dict(self) -> Dict[str, Any]:
+        # JSON-ready form.
+        return {
+            "source_repo": self.source_repo,
+            "change_description": self.change_description,
+            "affected_repos": self.affected_repos,
+            "risk_level": self.risk_level,
+            "details": self.details,
+        }
+
+
+@dataclass
+class MigrationPlan:
+    """Plan for migrating a pattern across repositories."""
+
+    target_pattern: str  # the pattern/convention being rolled out
+    repos: List[str] = field(default_factory=list)        # repos to migrate, in order
+    steps: List[Dict[str, str]] = field(default_factory=list)  # one pending step per repo
+    estimated_effort: str = "unknown"  # low | medium | high (by repo count)
+
+    def to_dict(self) -> Dict[str, Any]:
+        # JSON-ready form.
+        return {
+            "target_pattern": self.target_pattern,
+            "repos": self.repos,
+            "steps": self.steps,
+            "estimated_effort": self.estimated_effort,
+        }
+
+
+class CrossRepoAnalyzer:
+ """Analyze patterns and dependencies across multiple repositories.
+
+ Usage::
+
+ analyzer = CrossRepoAnalyzer()
+ graph = analyzer.analyze_dependencies_from_files({
+ "owner/repo-a": {"package.json": '{"dependencies": {"lodash": "^4"}}'},
+ "owner/repo-b": {"requirements.txt": "requests>=2.28\\nflask>=3.0"},
+ })
+ impact = analyzer.impact_analysis(graph, "owner/repo-a", "Breaking change in API v2")
+ """
+
+ def analyze_dependencies_from_files(
+ self,
+ repo_files: Dict[str, Dict[str, str]],
+ ) -> DependencyGraph:
+ """Build a dependency graph from dependency files.
+
+ Args:
+ repo_files: Mapping of repo name → {filename: content}.
+ """
+ graph = DependencyGraph(repos=list(repo_files.keys()))
+ ecosystems: Set[str] = set()
+
+ for repo, files in repo_files.items():
+ for filename, content in files.items():
+ ecosystem = _DEP_FILES.get(filename)
+ if not ecosystem:
+ continue
+ ecosystems.add(ecosystem)
+ deps = self._parse_dependencies(filename, content, ecosystem)
+ for dep in deps:
+ dep.source = repo
+ graph.dependencies.append(dep)
+
+ graph.ecosystems = sorted(ecosystems)
+ return graph
+
+ def impact_analysis(
+ self,
+ graph: DependencyGraph,
+ source_repo: str,
+ change_description: str,
+ ) -> ImpactReport:
+ """Analyze the impact of a change in one repo on others.
+
+ Walks the dependency graph to find repos that depend (directly
+ or transitively) on the source repo.
+ """
+ # Build reverse adjacency: target → [sources]
+ dependents: Dict[str, List[str]] = {}
+ for dep in graph.dependencies:
+ dependents.setdefault(dep.target, []).append(dep.source)
+
+ # BFS from source_repo
+ affected: Set[str] = set()
+ queue = [source_repo]
+ visited: Set[str] = set()
+
+ while queue:
+ current = queue.pop(0)
+ if current in visited:
+ continue
+ visited.add(current)
+ for dependent in dependents.get(current, []):
+ if dependent != source_repo:
+ affected.add(dependent)
+ queue.append(dependent)
+
+ # Risk assessment
+ if len(affected) == 0:
+ risk = "low"
+ elif len(affected) <= 3:
+ risk = "medium"
+ elif len(affected) <= 10:
+ risk = "high"
+ else:
+ risk = "critical"
+
+ details = []
+ for repo in sorted(affected):
+ deps_on_source = [
+ d for d in graph.dependencies
+ if d.source == repo and d.target == source_repo
+ ]
+ for d in deps_on_source:
+ details.append(f"{repo} depends on {source_repo} ({d.dep_type}, {d.version})")
+
+ return ImpactReport(
+ source_repo=source_repo,
+ change_description=change_description,
+ affected_repos=sorted(affected),
+ risk_level=risk,
+ details=details,
+ )
+
+ def detect_shared_conventions(
+ self,
+ repo_files: Dict[str, Dict[str, str]],
+ ) -> Dict[str, List[str]]:
+ """Detect shared conventions across repos.
+
+ Looks for common config files, linters, formatters, CI configs, etc.
+ """
+ conventions: Dict[str, List[str]] = {}
+
+ convention_files = [
+ ".eslintrc", ".eslintrc.json", ".prettierrc",
+ "ruff.toml", "pyproject.toml", ".flake8",
+ ".github/workflows", "Makefile", "Dockerfile",
+ "tsconfig.json", "jest.config",
+ ]
+
+ for repo, files in repo_files.items():
+ for cf in convention_files:
+ for filename in files:
+ if cf in filename:
+ conventions.setdefault(cf, []).append(repo)
+
+ return conventions
+
+ def suggest_migration(
+ self,
+ repos: List[str],
+ target_pattern: str,
+ ) -> MigrationPlan:
+ """Suggest a migration plan for applying a pattern across repos."""
+ steps = []
+ for i, repo in enumerate(repos):
+ steps.append({
+ "order": str(i + 1),
+ "repo": repo,
+ "action": f"Apply {target_pattern} to {repo}",
+ "status": "pending",
+ })
+
+ effort = "low" if len(repos) <= 3 else ("medium" if len(repos) <= 10 else "high")
+
+ return MigrationPlan(
+ target_pattern=target_pattern,
+ repos=repos,
+ steps=steps,
+ estimated_effort=effort,
+ )
+
+ # ------------------------------------------------------------------
+ # Dependency parsers
+ # ------------------------------------------------------------------
+
+ def _parse_dependencies(
+ self, filename: str, content: str, ecosystem: str,
+ ) -> List[Dependency]:
+ if ecosystem == "npm":
+ return self._parse_npm(content)
+ if ecosystem in ("pip", "pipenv"):
+ return self._parse_pip(content)
+ if ecosystem == "pyproject":
+ return self._parse_pyproject(content)
+ if ecosystem == "go":
+ return self._parse_gomod(content)
+ return []
+
+ def _parse_npm(self, content: str) -> List[Dependency]:
+ deps = []
+ try:
+ data = json.loads(content)
+ for section, dep_type in [
+ ("dependencies", "runtime"),
+ ("devDependencies", "dev"),
+ ("peerDependencies", "peer"),
+ ]:
+ for name, version in data.get(section, {}).items():
+ deps.append(Dependency(
+ source="", target=name,
+ dep_type=dep_type, version=version, ecosystem="npm",
+ ))
+ except json.JSONDecodeError:
+ pass
+ return deps
+
+ def _parse_pip(self, content: str) -> List[Dependency]:
+ deps = []
+ for line in content.strip().split("\n"):
+ line = line.strip()
+ if not line or line.startswith("#") or line.startswith("-"):
+ continue
+ m = re.match(r"([a-zA-Z0-9_-]+)\s*([><=!~]+.+)?", line)
+ if m:
+ deps.append(Dependency(
+ source="", target=m.group(1),
+ dep_type="runtime", version=m.group(2) or "", ecosystem="pip",
+ ))
+ return deps
+
+ def _parse_pyproject(self, content: str) -> List[Dependency]:
+ deps = []
+ in_deps = False
+ for line in content.split("\n"):
+ stripped = line.strip()
+ if stripped.startswith("dependencies"):
+ in_deps = True
+ continue
+ if in_deps:
+ if stripped.startswith("]"):
+ in_deps = False
+ continue
+ m = re.match(r'"([a-zA-Z0-9_-]+)', stripped)
+ if m:
+ deps.append(Dependency(
+ source="", target=m.group(1),
+ dep_type="runtime", ecosystem="pyproject",
+ ))
+ return deps
+
+ def _parse_gomod(self, content: str) -> List[Dependency]:
+ deps = []
+ for line in content.split("\n"):
+ m = re.match(r"\s+(\S+)\s+(\S+)", line)
+ if m and not line.strip().startswith("//"):
+ deps.append(Dependency(
+ source="", target=m.group(1),
+ dep_type="runtime", version=m.group(2), ecosystem="go",
+ ))
+ return deps
diff --git a/gitpilot/diagnostics_runner.py b/gitpilot/diagnostics_runner.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e69bf64119969d1002c22fdefcbc414a28282a5
--- /dev/null
+++ b/gitpilot/diagnostics_runner.py
@@ -0,0 +1,129 @@
+# gitpilot/diagnostics_runner.py
+"""
+Run linters and type-checkers server-side.
+
+For web/HF Spaces where VS Code language services aren't available,
+this module detects and runs the appropriate linter, then parses
+the output into structured diagnostic entries.
+"""
+from __future__ import annotations
+
+import logging
+from pathlib import Path
+from typing import List, Optional
+
+from .terminal import TerminalExecutor, TerminalSession, CommandResult
+
+logger = logging.getLogger(__name__)
+
+# (marker file, linter name, command)
+LINTER_MARKERS = [
+ ("tsconfig.json", "tsc", "npx tsc --noEmit 2>&1"),
+ ("eslint.config.js", "eslint", "npx eslint . --format compact 2>&1"),
+ ("eslint.config.mjs", "eslint", "npx eslint . --format compact 2>&1"),
+ (".eslintrc.json", "eslint", "npx eslint . --format compact 2>&1"),
+ (".eslintrc.js", "eslint", "npx eslint . --format compact 2>&1"),
+ (".eslintrc.yml", "eslint", "npx eslint . --format compact 2>&1"),
+ ("biome.json", "biome", "npx biome check . 2>&1"),
+ ("pyproject.toml", "ruff", "ruff check . --output-format text 2>&1"),
+ ("setup.cfg", "flake8", "flake8 . 2>&1"),
+ (".flake8", "flake8", "flake8 . 2>&1"),
+ ("Cargo.toml", "cargo", "cargo check --message-format short 2>&1"),
+ ("go.mod", "go", "go vet ./... 2>&1"),
+ ("clippy.toml", "clippy", "cargo clippy 2>&1"),
+]
+
+
+async def detect_linter(workspace_path: Path) -> Optional[tuple[str, str]]:
+ """Detect linter from project files. Returns (name, command) or None."""
+ for marker, name, command in LINTER_MARKERS:
+ if (workspace_path / marker).exists():
+ logger.info("Detected linter: %s (via %s)", name, marker)
+ return name, command
+ return None
+
+
+async def run_linter(
+    workspace_path: Path,
+    executor: Optional[TerminalExecutor] = None,
+    timeout: int = 60,
+) -> Optional[CommandResult]:
+    """Detect the linter and run it. Returns CommandResult or None.
+
+    Args:
+        workspace_path: Root of the checked-out workspace to lint.
+        executor: Optional executor to reuse; a fresh one is created if None.
+        timeout: Maximum seconds to let the lint command run.
+
+    Returns:
+        The command result (each LINTER_MARKERS command already merges
+        stderr via "2>&1"), or None when no known linter is detected.
+    """
+    detection = await detect_linter(workspace_path)
+    if not detection:
+        return None
+
+    name, command = detection
+    executor = executor or TerminalExecutor()
+    # NOTE(review): a fresh session per run — presumably TerminalSession
+    # runs the command with workspace_path as cwd; confirm in terminal.py.
+    session = TerminalSession(workspace_path=workspace_path)
+
+    logger.info("Running linter %s: %s", name, command)
+    return await executor.execute(session, command, timeout=timeout)
+
+
+def parse_diagnostics(output: str) -> List[dict]:
+ """
+ Parse linter output into structured entries (best-effort).
+
+ Handles common formats:
+ - file:line:col: severity: message (gcc, tsc, eslint compact)
+ - file:line: severity: message
+ - file(line,col): error TS1234: message (tsc)
+ """
+ entries: List[dict] = []
+ for line in output.splitlines()[:200]:
+ line = line.strip()
+ if not line:
+ continue
+
+ # Try tsc format: file(line,col): error TSxxxx: message
+ if "): error " in line or "): warning " in line:
+ try:
+ paren_idx = line.index("(")
+ close_idx = line.index(")")
+ file_path = line[:paren_idx].strip()
+ pos = line[paren_idx + 1 : close_idx]
+ rest = line[close_idx + 2 :].strip()
+ parts = pos.split(",")
+ line_num = int(parts[0]) if parts else 0
+ severity = "error" if rest.startswith("error") else "warning"
+ message = rest.split(":", 1)[-1].strip() if ":" in rest else rest
+ entries.append({
+ "file": file_path,
+ "line": line_num,
+ "severity": severity,
+ "message": message,
+ })
+ continue
+ except (ValueError, IndexError):
+ pass
+
+ # Try standard format: file:line:col: severity: message
+ parts = line.split(":", 4)
+ if len(parts) >= 4:
+ try:
+ file_path = parts[0].strip()
+ line_num = int(parts[1].strip())
+ rest = ":".join(parts[2:]).strip()
+ severity = "error" if "error" in rest.lower() else "warning"
+ message = rest
+ entries.append({
+ "file": file_path,
+ "line": line_num,
+ "severity": severity,
+ "message": message,
+ })
+ continue
+ except (ValueError, IndexError):
+ pass
+
+ # Fallback: treat as info
+ if any(kw in line.lower() for kw in ("error", "warning", "fail")):
+ entries.append({
+ "file": "",
+ "line": 0,
+ "severity": "error" if "error" in line.lower() else "warning",
+ "message": line,
+ })
+
+ return entries
diff --git a/gitpilot/github_api.py b/gitpilot/github_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4111e73852a719107984c408032fb66fcfcda01
--- /dev/null
+++ b/gitpilot/github_api.py
@@ -0,0 +1,582 @@
+# gitpilot/github_api.py
+from __future__ import annotations
+
+import contextvars
+import logging
+import os
+import re
+from base64 import b64decode, b64encode
+from contextlib import contextmanager
+from typing import Any
+
+import httpx
+from fastapi import HTTPException
+
+from gitpilot.models import GithubStatusSummary
+
+GITHUB_API_BASE = "https://api.github.com"
+
+# Context variable to store the GitHub token for the current request/execution scope
+_request_token: contextvars.ContextVar[str | None] = contextvars.ContextVar(
+ "request_token", default=None
+)
+
+# Git SHA (40-hex) validator
+_SHA_RE = re.compile(r"^[0-9a-fA-F]{40}$")
+
+# add near _request_token
+_request_ref: contextvars.ContextVar[str | None] = contextvars.ContextVar(
+ "request_ref", default=None
+)
+
+
@contextmanager
def execution_context(token: str | None, ref: str | None = None):
    """Bind a GitHub token (and optional ref) to the current execution scope.

    Both values are stored in context variables so nested helpers can pick
    them up without explicit plumbing; they are restored on exit even when
    the wrapped code raises.
    """
    token_reset = _request_token.set(token)
    ref_reset = _request_ref.set(ref)
    try:
        yield
    finally:
        _request_ref.reset(ref_reset)
        _request_token.reset(token_reset)
+
+
def _github_ref(provided_ref: str | None = None) -> str | None:
    """Return the explicit ref when given, else the context-scoped ref (may be None)."""
    return provided_ref if provided_ref else _request_ref.get()
+
+
def _github_token(provided_token: str | None = None) -> str:
    """
    Get GitHub token from:
    1. Explicit argument
    2. Request Context (set via execution_context)
    3. Environment variables (Fallback)

    Raises:
        HTTPException(401) when no token can be found anywhere.
    """
    for candidate in (provided_token, _request_token.get()):
        if candidate:
            return candidate

    env_token = os.getenv("GITPILOT_GITHUB_TOKEN") or os.getenv("GITHUB_TOKEN")
    if env_token:
        return env_token

    raise HTTPException(
        status_code=401,
        detail=(
            "GitHub authentication required. "
            "Please log in via the UI or set GITPILOT_GITHUB_TOKEN in your environment."
        ),
    )
+
+
async def github_request(
    path: str,
    *,
    method: str = "GET",
    json: dict[str, Any] | None = None,
    params: dict[str, Any] | None = None,
    token: str | None = None,
) -> Any:
    """
    Core GitHub request helper.
    Raises HTTPException with GitHub's error message on failures.

    Returns the decoded JSON body, or None for 204 / empty responses.
    """
    auth_token = _github_token(token)

    request_headers = {
        "Authorization": f"Bearer {auth_token}",
        "Accept": "application/vnd.github+json",
        "User-Agent": "gitpilot",
    }
    budget = httpx.Timeout(connect=15.0, read=45.0, write=30.0, pool=15.0)

    async with httpx.AsyncClient(
        base_url=GITHUB_API_BASE, headers=request_headers, timeout=budget
    ) as client:
        resp = await client.request(method, path, json=json, params=params)

    if resp.status_code >= 400:
        if resp.status_code == 401:
            detail = "GitHub Token Expired or Invalid. Please refresh your login."
        else:
            # Prefer GitHub's structured "message" field over the raw body.
            try:
                detail = resp.json().get("message") or resp.text
            except Exception:
                detail = resp.text
        raise HTTPException(status_code=resp.status_code, detail=detail)

    # 204 No Content and bodyless 200s both map to None.
    if resp.status_code == 204 or not resp.content:
        return None

    return resp.json()
+
+
+# -----------------------------------------------------------------------------
+# Repos listing (legacy + pagination/search)
+# -----------------------------------------------------------------------------
+
async def list_user_repos(
    query: str | None = None, token: str | None = None
) -> list[dict[str, Any]]:
    """
    Legacy function - fetches first 100 repos.
    (Retro-compatible with older GitPilot versions.)
    """
    data = await github_request(
        "/user/repos",
        params={
            "per_page": 100,
            "affiliation": "owner,collaborator,organization_member",
            "sort": "updated",
            "direction": "desc",
        },
        token=token,
    )

    repos: list[dict[str, Any]] = []
    for r in data:
        repos.append({
            "id": r["id"],
            "name": r["name"],
            "full_name": r["full_name"],
            "private": r["private"],
            "owner": r["owner"]["login"],
            # default_branch is required downstream for ref resolution.
            "default_branch": r.get("default_branch", "main"),
        })

    if not query:
        return repos
    needle = query.lower()
    return [r for r in repos if needle in r["full_name"].lower()]
+
+
async def list_user_repos_paginated(
    page: int = 1,
    per_page: int = 100,
    token: str | None = None,
) -> dict[str, Any]:
    """
    Fetch user repositories with pagination support.

    Returns:
        {
            "repositories": [...],
            "page": int,
            "per_page": int,
            "has_more": bool,
        }

    Raises:
        HTTPException mirroring GitHub's status with its parsed error
        message (consistent with github_request).
    """
    per_page = min(per_page, 100)
    params = {
        "page": page,
        "per_page": per_page,
        "affiliation": "owner,collaborator,organization_member",
        "sort": "updated",
        "direction": "desc",
    }

    github_token = _github_token(token)
    headers = {
        "Authorization": f"Bearer {github_token}",
        "Accept": "application/vnd.github+json",
        "User-Agent": "gitpilot",
    }

    timeout = httpx.Timeout(connect=10.0, read=30.0, write=30.0, pool=10.0)

    async with httpx.AsyncClient(
        base_url=GITHUB_API_BASE, headers=headers, timeout=timeout
    ) as client:
        resp = await client.get("/user/repos", params=params)

    if resp.status_code >= 400:
        # FIXED: surface GitHub's structured error message and the friendly
        # 401 hint (consistent with github_request) instead of the raw body.
        if resp.status_code == 401:
            detail = "GitHub Token Expired or Invalid. Please refresh your login."
        else:
            try:
                detail = resp.json().get("message") or resp.text
            except Exception:
                detail = resp.text
        raise HTTPException(status_code=resp.status_code, detail=detail)

    data = resp.json()

    repos = [
        {
            "id": r["id"],
            "name": r["name"],
            "full_name": r["full_name"],
            "private": r["private"],
            "owner": r["owner"]["login"],
            # default_branch is required downstream for ref resolution.
            "default_branch": r.get("default_branch", "main"),
        }
        for r in data
    ]

    # GitHub signals further pages via the RFC-5988 Link header.
    link_header = resp.headers.get("Link", "") or ""
    has_more = 'rel="next"' in link_header

    return {
        "repositories": repos,
        "page": page,
        "per_page": per_page,
        "has_more": has_more,
    }
+
+
async def search_user_repos(
    query: str,
    page: int = 1,
    per_page: int = 100,
    token: str | None = None,
) -> dict[str, Any]:
    """
    Search the user's repositories by name (client-side substring filter).

    Crawls up to ``max_pages`` pages of the user's repos, filters them by
    ``query`` (case-insensitive match on name / full_name) and paginates
    the filtered result.

    Returns:
        {"repositories", "page", "per_page", "total_count", "has_more"}
    """
    all_repos: list[dict[str, Any]] = []
    fetch_page = 1
    max_pages = 15

    try:
        while fetch_page <= max_pages:
            result = await list_user_repos_paginated(
                page=fetch_page,
                per_page=100,
                token=token,
            )
            all_repos.extend(result["repositories"])

            if not result["has_more"]:
                break

            fetch_page += 1

    except httpx.TimeoutException:
        # FIXED: a timeout mid-crawl used to discard everything already
        # fetched and return an empty result; keep the partial set and
        # filter what we have instead.
        pass

    q = query.lower()
    filtered = [
        r for r in all_repos
        if q in r["name"].lower() or q in r["full_name"].lower()
    ]

    total_count = len(filtered)
    start = (page - 1) * per_page
    end = start + per_page

    return {
        "repositories": filtered[start:end],
        "page": page,
        "per_page": per_page,
        "total_count": total_count,
        "has_more": end < total_count,
    }
+
+# -----------------------------------------------------------------------------
+# Repo + Ref resolution helpers (fixes "No commit found for SHA: main")
+# -----------------------------------------------------------------------------
+
async def get_repo(owner: str, repo: str, token: str | None = None) -> dict[str, Any]:
    """Fetch repository metadata (includes ``default_branch``)."""
    path = f"/repos/{owner}/{repo}"
    return await github_request(path, token=token)
+
+
async def _resolve_head_ref(owner: str, repo: str, token: str | None) -> str:
    """Resolve HEAD to the repository's default branch name ("main" fallback)."""
    info = await get_repo(owner, repo, token=token)
    return info.get("default_branch", "main")
+
+
async def _resolve_ref_to_commit_sha(
    owner: str,
    repo: str,
    ref: str | None,
    token: str | None,
) -> str:
    """
    Resolve a ref (branch/tag/commit SHA/"HEAD"/None) to a commit SHA.

    Resolution order: explicit 40-hex SHA -> branch -> tag (annotated tags
    are dereferenced to their target commit) -> the commits endpoint as a
    final fallback.

    Raises:
        HTTPException(404) when nothing matches; 401/403 auth/permission
        errors are propagated unchanged.
    """
    if not ref or ref == "HEAD":
        ref = await _resolve_head_ref(owner, repo, token)

    if _SHA_RE.match(ref):
        return ref.lower()

    # Branch ref
    try:
        data = await github_request(
            f"/repos/{owner}/{repo}/git/ref/heads/{ref}",
            token=token,
        )
        return data["object"]["sha"]
    except HTTPException as e:
        # FIXED: auth/permission failures used to be swallowed by the
        # fallback chain and re-reported as a misleading 404.
        if e.status_code in (401, 403):
            raise

    # Tag ref (lightweight or annotated)
    try:
        data = await github_request(
            f"/repos/{owner}/{repo}/git/ref/tags/{ref}",
            token=token,
        )
        obj = data.get("object") or {}
        sha = obj.get("sha")
        obj_type = obj.get("type")

        if not sha:
            raise HTTPException(status_code=404, detail=f"Tag ref '{ref}' not found.")

        # Annotated tag -> dereference to commit SHA
        if obj_type == "tag":
            tag_obj = await github_request(
                f"/repos/{owner}/{repo}/git/tags/{sha}",
                token=token,
            )
            target = tag_obj.get("object") or {}
            target_sha = target.get("sha")
            if not target_sha:
                raise HTTPException(
                    status_code=404, detail=f"Annotated tag '{ref}' has no target sha."
                )
            return target_sha

        # Lightweight tag points directly to commit SHA
        return sha
    except HTTPException as e:
        if e.status_code in (401, 403):
            raise

    # Fallback: commits endpoint resolves branch/tag names to a commit
    try:
        commit = await github_request(
            f"/repos/{owner}/{repo}/commits/{ref}",
            token=token,
        )
        sha = commit.get("sha")
        if not sha:
            raise HTTPException(status_code=404, detail=f"Ref not found: {ref}")
        return sha
    except HTTPException as e:
        if e.status_code in (401, 403):
            raise
        raise HTTPException(status_code=404, detail=f"Ref not found: {ref}") from e
+
+
async def _commit_sha_to_tree_sha(
    owner: str,
    repo: str,
    commit_sha: str,
    token: str | None,
) -> str:
    """
    Convert commit SHA -> tree SHA using /git/commits/{sha}.
    """
    commit = await github_request(
        f"/repos/{owner}/{repo}/git/commits/{commit_sha}",
        token=token,
    )
    tree_sha = (commit.get("tree") or {}).get("sha")
    if tree_sha:
        return tree_sha
    raise HTTPException(status_code=500, detail="Failed to resolve tree SHA from commit.")
+
+
+# -----------------------------------------------------------------------------
+# Branch creation
+# -----------------------------------------------------------------------------
+
async def create_branch(
    owner: str,
    repo: str,
    new_branch: str,
    from_ref: str = "HEAD",
    token: str | None = None,
) -> str:
    """
    Create a new branch from a ref (default: HEAD = default branch).

    Returns the fully-qualified ref name (refs/heads/<new_branch>).
    """
    base_sha = await _resolve_ref_to_commit_sha(owner, repo, from_ref, token)
    created = await github_request(
        f"/repos/{owner}/{repo}/git/refs",
        method="POST",
        json={"ref": f"refs/heads/{new_branch}", "sha": base_sha},
        token=token,
    )
    return created["ref"]
+
+
+# -----------------------------------------------------------------------------
+# Tree + File APIs (branch-aware)
+# -----------------------------------------------------------------------------
+
async def get_repo_tree(
    owner: str,
    repo: str,
    token: str | None = None,
    ref: str = "HEAD",
):
    """
    List all blob paths of the repo tree at ``ref`` (recursive).

    The context-scoped ref applies only when the caller did NOT pin a
    specific ref (i.e. ref is empty or "HEAD").
    """
    if not ref or ref == "HEAD":
        ctx_ref = _github_ref(None)
        if ctx_ref:
            ref = ctx_ref

    commit_sha = await _resolve_ref_to_commit_sha(owner, repo, ref, token)
    tree_sha = await _commit_sha_to_tree_sha(owner, repo, commit_sha, token)

    tree_data = await github_request(
        f"/repos/{owner}/{repo}/git/trees/{tree_sha}",
        params={"recursive": 1},
        token=token,
    )

    blobs = []
    for item in tree_data.get("tree", []):
        if item.get("type") == "blob":
            blobs.append({"path": item["path"], "type": item["type"]})
    return blobs
+
+
async def get_file(
    owner: str,
    repo: str,
    path: str,
    token: str | None = None,
    ref: str | None = None,
) -> str:
    """
    Fetch a file's text content at ``ref``.

    The context-scoped ref is used only when the caller did not pin one.
    Undecodable bytes are replaced rather than raising.
    """
    if not ref or ref == "HEAD":
        ctx_ref = _github_ref(None)
        if ctx_ref:
            ref = ctx_ref

    data = await github_request(
        f"/repos/{owner}/{repo}/contents/{path}",
        params={"ref": ref} if ref else None,
        token=token,
    )
    encoded = data.get("content") or ""
    return b64decode(encoded.encode("utf-8")).decode("utf-8", errors="replace")
+
+
async def put_file(
    owner: str,
    repo: str,
    path: str,
    content: str,
    message: str,
    token: str | None = None,
    branch: str | None = None,
) -> dict[str, Any]:
    """
    Create or update a file in the repository on a specific branch.
    (Retro-compatible signature with older GitPilot versions.)

    Returns a dict with ``path``, ``commit_sha`` and ``commit_url``.
    """
    # Look up the current blob SHA: required by the contents API when updating.
    sha: str | None = None
    try:
        params = {"ref": branch} if branch else None
        existing = await github_request(
            f"/repos/{owner}/{repo}/contents/{path}",
            params=params,
            token=token,
        )
        # FIXED: the contents API returns a LIST when `path` is a directory;
        # calling .get on it raised AttributeError.
        sha = existing.get("sha") if isinstance(existing, dict) else None
    except HTTPException as e:
        # FIXED: only a 404 means "file does not exist yet"; auth/permission
        # errors were silently swallowed here and resurfaced later as a
        # confusing PUT failure.
        if e.status_code != 404:
            raise
        sha = None

    body: dict[str, Any] = {
        "message": message,
        "content": b64encode(content.encode("utf-8")).decode("utf-8"),
    }
    if sha:
        body["sha"] = sha
    if branch:
        body["branch"] = branch

    result = await github_request(
        f"/repos/{owner}/{repo}/contents/{path}",
        method="PUT",
        json=body,
        token=token,
    )
    commit = (result or {}).get("commit", {}) if isinstance(result, dict) else {}
    return {
        "path": path,
        "commit_sha": commit.get("sha", ""),
        "commit_url": commit.get("html_url"),
    }
+
+
async def delete_file(
    owner: str,
    repo: str,
    path: str,
    message: str,
    token: str | None = None,
    branch: str | None = None,
) -> dict[str, Any]:
    """
    Delete a file from the repository on a specific branch.
    (Retro-compatible signature with older GitPilot versions.)

    Returns a dict with ``path``, ``commit_sha`` and ``commit_url``.
    """
    # The DELETE endpoint requires the current blob SHA.
    lookup_params = {"ref": branch} if branch else None
    existing = await github_request(
        f"/repos/{owner}/{repo}/contents/{path}",
        params=lookup_params,
        token=token,
    )
    sha = existing.get("sha")
    if not sha:
        raise HTTPException(status_code=404, detail=f"File {path} not found or has no SHA")

    payload: dict[str, Any] = {"message": message, "sha": sha}
    if branch:
        payload["branch"] = branch

    result = await github_request(
        f"/repos/{owner}/{repo}/contents/{path}",
        method="DELETE",
        json=payload,
        token=token,
    )
    commit = (result or {}).get("commit", {}) if isinstance(result, dict) else {}
    return {
        "path": path,
        "commit_sha": commit.get("sha", ""),
        "commit_url": commit.get("html_url"),
    }
+
+
async def get_github_status_summary() -> GithubStatusSummary:
    """Return GitHub connection status for the redesigned UI."""
    env_token = (
        os.environ.get("GITPILOT_GITHUB_TOKEN")
        or os.environ.get("GITHUB_TOKEN")
        or None
    )

    summary = GithubStatusSummary(
        connected=False,
        token_configured=bool(env_token),
    )

    if not env_token:
        return summary

    # Probe the authenticated-user endpoint; any failure leaves connected=False.
    try:
        profile = await github_request("/user", token=env_token)
        if profile and "login" in profile:
            summary.connected = True
            summary.username = profile["login"]
    except Exception:
        logging.debug("GitHub connection check failed", exc_info=True)

    return summary
diff --git a/gitpilot/github_app.py b/gitpilot/github_app.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b134a33785683420ba97ae31d0d1404bc25ee7c
--- /dev/null
+++ b/gitpilot/github_app.py
@@ -0,0 +1,232 @@
+"""
+GitHub App Installation Management - PROPER FIX
+
+This checks which repositories ACTUALLY have the GitHub App installed
+by querying the user's app installations.
+"""
+from __future__ import annotations
+
+import logging
+import os
+from typing import Optional, Dict, Any, Set
+
+import httpx
+
+logger = logging.getLogger("gitpilot.github_app")
+
+# Cache for installed repositories
+_installed_repos_cache: Dict[str, Set[str]] = {}
+_cache_timestamp: Dict[str, float] = {}
+CACHE_TTL_SECONDS = 300 # 5 minutes
+
+
class GitHubAppConfig:
    """Configuration for GitHub App (read once from environment variables)."""

    def __init__(self):
        # Defaults point at the public GitPilot app registration
        # (client IDs / app IDs are not secrets).
        self.app_id = os.getenv("GITHUB_APP_ID", "2313985")
        self.client_id = os.getenv("GITHUB_CLIENT_ID", "Iv23litmRp80Z6wmlyRn")
        self.app_slug = os.getenv("GITHUB_APP_SLUG", "gitpilota")

    @property
    def is_configured(self) -> bool:
        """True when both an app id and a client id are available."""
        return bool(self.app_id) and bool(self.client_id)
+
+
def get_app_config() -> GitHubAppConfig:
    """Build a fresh GitHubAppConfig from the current environment."""
    return GitHubAppConfig()
+
+
async def get_installed_repositories(user_token: str) -> Set[str]:
    """
    Get list of repositories where the GitHub App is installed.

    Uses /user/installations endpoint to get all installations,
    then fetches repositories for each installation.

    Results are cached for CACHE_TTL_SECONDS, keyed per token so that
    different users never see each other's installations.

    Returns:
        Set of repository full names (e.g., "owner/repo")
    """
    import hashlib
    import time

    # FIXED: the cache key used to be the global constant "installed_repos",
    # so every token shared one cache entry and one user could be served
    # another user's installation list. Key the cache per token (hashed so
    # raw tokens are not kept as dict keys).
    cache_key = hashlib.sha256(user_token.encode("utf-8")).hexdigest()

    if cache_key in _installed_repos_cache:
        if time.time() - _cache_timestamp.get(cache_key, 0) < CACHE_TTL_SECONDS:
            logger.debug(
                "Using cached installed repositories (%d repos)",
                len(_installed_repos_cache[cache_key]),
            )
            return _installed_repos_cache[cache_key]

    installed_repos: Set[str] = set()

    try:
        async with httpx.AsyncClient(timeout=10.0) as client:
            # Get user's app installations
            installations_response = await client.get(
                "https://api.github.com/user/installations",
                headers={
                    "Authorization": f"Bearer {user_token}",
                    "Accept": "application/vnd.github+json",
                    "User-Agent": "gitpilot",
                },
            )

            if installations_response.status_code != 200:
                logger.warning(
                    "Failed to get installations: %s",
                    installations_response.status_code,
                )
                return installed_repos

            installations = installations_response.json().get("installations", [])
            logger.info("Found %d app installations", len(installations))

            # For each installation, collect the repositories it covers.
            for installation in installations:
                installation_id = installation.get("id")

                repos_response = await client.get(
                    f"https://api.github.com/user/installations/{installation_id}/repositories",
                    headers={
                        "Authorization": f"Bearer {user_token}",
                        "Accept": "application/vnd.github+json",
                        "User-Agent": "gitpilot",
                    },
                )

                if repos_response.status_code == 200:
                    for repo in repos_response.json().get("repositories", []):
                        full_name = repo.get("full_name")  # e.g., "owner/repo"
                        if full_name:
                            installed_repos.add(full_name)
                            logger.debug("  ✓ App installed on: %s", full_name)

        logger.info("GitHub App is installed on %d repositories", len(installed_repos))

        # Cache the results under this token's key.
        _installed_repos_cache[cache_key] = installed_repos
        _cache_timestamp[cache_key] = time.time()

        return installed_repos

    except Exception as e:
        logger.error("Error getting installed repositories: %s", e)
        return installed_repos
+
+
async def check_repo_write_access(
    owner: str,
    repo: str,
    user_token: Optional[str] = None
) -> Dict[str, Any]:
    """
    Check if user has write access to a repository.

    Verifies BOTH that the user has push permission on the repo AND that
    the GitHub App is actually installed on this specific repository
    (agent operations need the app, not just user push rights).

    Args:
        owner: Repository owner
        repo: Repository name
        user_token: User's OAuth token

    Returns:
        Dict with 'can_write', 'app_installed', 'auth_type', 'reason'
    """
    access: Dict[str, Any] = {
        "can_write": False,
        "app_installed": False,
        "auth_type": "none",
        "reason": "No token provided",
    }

    if not user_token:
        return access

    full_repo_name = f"{owner}/{repo}"

    try:
        # Step 1: the user's own permissions on the repository.
        async with httpx.AsyncClient(timeout=5.0) as client:
            response = await client.get(
                f"https://api.github.com/repos/{owner}/{repo}",
                headers={
                    "Authorization": f"Bearer {user_token}",
                    "Accept": "application/vnd.github+json",
                    "User-Agent": "gitpilot",
                },
            )

        if response.status_code != 200:
            access["reason"] = f"Cannot access repository (status: {response.status_code})"
            logger.warning(f"❌ {full_repo_name}: {access['reason']}")
            return access

        has_push = response.json().get("permissions", {}).get("push", False)

        # Step 2: is the GitHub App installed on this specific repo?
        app_installed = full_repo_name in await get_installed_repositories(user_token)

        # Step 3: combine both signals into the final verdict.
        if app_installed:
            access.update(
                can_write=True,
                app_installed=True,
                auth_type="github_app",
                reason="GitHub App installed with write access",
            )
            logger.info(f"✅ {full_repo_name}: App installed (agent can write)")
        elif has_push:
            access.update(
                can_write=False,
                app_installed=False,
                auth_type="user_only",
                reason="User has push access but GitHub App NOT installed (install app for agent operations)",
            )
            logger.warning(f"⚠️ {full_repo_name}: User can push but app NOT installed - agent will get 403 errors")
        else:
            access.update(
                can_write=False,
                app_installed=False,
                auth_type="read_only",
                reason="No push access and GitHub App not installed",
            )
            logger.info(f"ℹ️ {full_repo_name}: Read-only access")

    except Exception as e:
        access["reason"] = f"Error checking access: {str(e)}"
        logger.error(f"❌ Error checking {full_repo_name}: {e}")

    return access
+
+
def clear_cache():
    """Drop all cached installation data (forces a refetch on next check)."""
    for store in (_installed_repos_cache, _cache_timestamp):
        store.clear()
    logger.info("Cleared installation cache")
+
+
async def check_installation_for_repo(
    owner: str,
    repo: str,
    user_token: str
) -> Optional[Dict[str, Any]]:
    """
    Legacy function - kept for compatibility.

    Returns a minimal installation record when the app is installed on the
    repo, otherwise None.
    """
    access = await check_repo_write_access(owner, repo, user_token)
    if not access["app_installed"]:
        return None
    return {
        "installed": True,
        "owner": owner,
        "repo": repo,
    }
\ No newline at end of file
diff --git a/gitpilot/github_issues.py b/gitpilot/github_issues.py
new file mode 100644
index 0000000000000000000000000000000000000000..c78b758cef584c1b468a31f800ab17ed5417a025
--- /dev/null
+++ b/gitpilot/github_issues.py
@@ -0,0 +1,224 @@
+# gitpilot/github_issues.py
+"""GitHub Issues API wrapper.
+
+Provides async functions for creating, reading, updating, and managing
+GitHub issues including labels, assignees, milestones, and comments.
+"""
+from __future__ import annotations
+
+from typing import Any, Dict, List, Optional
+
+from .github_api import github_request
+
+
+# ---------------------------------------------------------------------------
+# Issue CRUD
+# ---------------------------------------------------------------------------
+
async def list_issues(
    owner: str,
    repo: str,
    *,
    state: str = "open",
    labels: Optional[str] = None,
    assignee: Optional[str] = None,
    milestone: Optional[str] = None,
    sort: str = "created",
    direction: str = "desc",
    per_page: int = 30,
    page: int = 1,
    token: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """List issues for a repository with optional filters."""
    params: Dict[str, Any] = {
        "state": state,
        "sort": sort,
        "direction": direction,
        "per_page": min(per_page, 100),
        "page": page,
    }
    # Only send the filters the caller actually provided.
    for key, value in (("labels", labels), ("assignee", assignee), ("milestone", milestone)):
        if value:
            params[key] = value

    data = await github_request(
        f"/repos/{owner}/{repo}/issues",
        params=params,
        token=token,
    )
    # GitHub's issues endpoint also returns PRs; filter them out
    return [item for item in (data or []) if "pull_request" not in item]
+
+
async def get_issue(
    owner: str,
    repo: str,
    issue_number: int,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Get a single issue by number."""
    path = f"/repos/{owner}/{repo}/issues/{issue_number}"
    return await github_request(path, token=token)
+
+
async def create_issue(
    owner: str,
    repo: str,
    title: str,
    *,
    body: Optional[str] = None,
    labels: Optional[List[str]] = None,
    assignees: Optional[List[str]] = None,
    milestone: Optional[int] = None,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Create a new issue."""
    payload: Dict[str, Any] = {"title": title}
    # Only include fields the caller actually supplied.
    if body is not None:
        payload["body"] = body
    if milestone is not None:
        payload["milestone"] = milestone
    for key, values in (("labels", labels), ("assignees", assignees)):
        if values:
            payload[key] = values

    return await github_request(
        f"/repos/{owner}/{repo}/issues",
        method="POST",
        json=payload,
        token=token,
    )
+
+
async def update_issue(
    owner: str,
    repo: str,
    issue_number: int,
    *,
    title: Optional[str] = None,
    body: Optional[str] = None,
    state: Optional[str] = None,
    labels: Optional[List[str]] = None,
    assignees: Optional[List[str]] = None,
    milestone: Optional[int] = None,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Update an existing issue (title, body, state, labels, assignees, milestone)."""
    candidates = {
        "title": title,
        "body": body,
        "state": state,
        "labels": labels,
        "assignees": assignees,
        "milestone": milestone,
    }
    payload: Dict[str, Any] = {k: v for k, v in candidates.items() if v is not None}

    if not payload:
        # Nothing to change: return the current issue instead of PATCHing.
        return await get_issue(owner, repo, issue_number, token=token)

    return await github_request(
        f"/repos/{owner}/{repo}/issues/{issue_number}",
        method="PATCH",
        json=payload,
        token=token,
    )
+
+
+# ---------------------------------------------------------------------------
+# Comments
+# ---------------------------------------------------------------------------
+
async def list_issue_comments(
    owner: str,
    repo: str,
    issue_number: int,
    *,
    per_page: int = 30,
    page: int = 1,
    token: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """List comments on an issue."""
    data = await github_request(
        f"/repos/{owner}/{repo}/issues/{issue_number}/comments",
        params={"per_page": min(per_page, 100), "page": page},
        token=token,
    )
    return data or []
+
+
async def add_issue_comment(
    owner: str,
    repo: str,
    issue_number: int,
    body: str,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Add a comment to an issue."""
    path = f"/repos/{owner}/{repo}/issues/{issue_number}/comments"
    return await github_request(path, method="POST", json={"body": body}, token=token)
+
+
+# ---------------------------------------------------------------------------
+# Labels
+# ---------------------------------------------------------------------------
+
async def add_labels(
    owner: str,
    repo: str,
    issue_number: int,
    labels: List[str],
    token: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """Add labels to an issue."""
    data = await github_request(
        f"/repos/{owner}/{repo}/issues/{issue_number}/labels",
        method="POST",
        json={"labels": labels},
        token=token,
    )
    return data or []
+
+
async def remove_label(
    owner: str,
    repo: str,
    issue_number: int,
    label: str,
    token: Optional[str] = None,
) -> None:
    """Remove a single label from an issue.

    The label name is percent-encoded: GitHub labels routinely contain
    spaces and other URL-unsafe characters (e.g. "good first issue").
    """
    from urllib.parse import quote

    # FIXED: an unencoded label such as "help wanted" produced an invalid
    # request path and a spurious 404 from GitHub.
    encoded_label = quote(label, safe="")
    await github_request(
        f"/repos/{owner}/{repo}/issues/{issue_number}/labels/{encoded_label}",
        method="DELETE",
        token=token,
    )
+
+
+# ---------------------------------------------------------------------------
+# Assignees
+# ---------------------------------------------------------------------------
+
async def set_assignees(
    owner: str,
    repo: str,
    issue_number: int,
    assignees: List[str],
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Replace assignees on an issue (delegates to update_issue)."""
    return await update_issue(
        owner,
        repo,
        issue_number,
        assignees=assignees,
        token=token,
    )
diff --git a/gitpilot/github_oauth.py b/gitpilot/github_oauth.py
new file mode 100644
index 0000000000000000000000000000000000000000..a87718115fc04704250004ac850f27bf7944d009
--- /dev/null
+++ b/gitpilot/github_oauth.py
@@ -0,0 +1,285 @@
+# gitpilot/github_oauth.py
+
+"""GitHub OAuth 2.0 authentication flow implementation (Web + Device Flow)."""
+from __future__ import annotations
+
+import logging
+import os
+import secrets
+import time
+from typing import Optional, Dict, Any
+from urllib.parse import urlencode
+
+import httpx
+from pydantic import BaseModel
+
+# Configure logging
+logger = logging.getLogger("gitpilot.auth")
+
class OAuthConfig(BaseModel):
    """GitHub OAuth App configuration.

    The secret is optional: the Device Flow works with only a client id,
    while the Web Flow additionally requires the client secret.
    """
    client_id: str
    # Secret is now optional to allow Device Flow
    client_secret: Optional[str] = None
+
class OAuthState(BaseModel):
    """OAuth state management (one record per in-flight Web Flow login)."""
    # Random CSRF token echoed back by GitHub in the callback.
    state: str
    # PKCE-style verifier; generated and stored but not currently sent
    # as a code_challenge in the authorize request — TODO confirm intent.
    code_verifier: str
    # Creation time (epoch seconds); used for the 10-minute expiry cleanup.
    timestamp: float
+
class GitHubUser(BaseModel):
    """GitHub user information (subset of the /user API response).

    Optional fields may be null on the GitHub side and default to None.
    """
    login: str
    id: int
    avatar_url: str
    name: Optional[str] = None
    email: Optional[str] = None
    bio: Optional[str] = None
    html_url: Optional[str] = None
+
class AuthSession(BaseModel):
    """Authenticated user session.

    Bundles the OAuth access token (with its type and granted scope)
    together with the resolved GitHub user profile.
    """
    access_token: str
    token_type: str = "bearer"
    scope: str = ""
    user: GitHubUser
+
+# In-memory OAuth state storage (For Web Flow)
+_oauth_states: dict[str, OAuthState] = {}
+
+
def get_oauth_config() -> OAuthConfig:
    """
    Load OAuth configuration from environment variables.

    NOTE: Ensure "Device Flow" is enabled in your GitHub App settings when
    running without a client secret.
    """
    client_id = os.getenv("GITHUB_CLIENT_ID", "Iv23litmRp80Z6wmlyRn")
    # An unset/empty secret is normalized to None (enables Device Flow).
    secret = os.getenv("GITHUB_CLIENT_SECRET", "") or None

    return OAuthConfig(client_id=client_id, client_secret=secret)
+
+# ============================================================================
+# WEB FLOW (Standard OAuth2 - Requires Client Secret)
+# ============================================================================
+
def generate_authorization_url() -> tuple[str, str]:
    """
    Generate the GitHub OAuth authorization URL (Web Flow).

    A random ``state`` (CSRF protection) and a code verifier are created
    and stored server-side. NOTE: the verifier is not sent as a PKCE
    code_challenge in this request — the flow is plain OAuth2 today.

    Returns: (authorization_url, state)
    """
    config = get_oauth_config()

    # CSRF state + verifier, persisted for later validation.
    csrf_state = secrets.token_urlsafe(32)
    verifier = secrets.token_urlsafe(32)

    _oauth_states[csrf_state] = OAuthState(
        state=csrf_state,
        code_verifier=verifier,
        timestamp=time.time(),
    )
    _cleanup_old_states()

    query = urlencode({
        "client_id": config.client_id,
        "scope": "repo user:email",
        "state": csrf_state,
        "allow_signup": "true",
    })
    return f"https://github.com/login/oauth/authorize?{query}", csrf_state
+
+
async def exchange_code_for_token(code: str, state: str) -> AuthSession:
    """
    Exchange authorization code for access token (Web Flow).
    Requires GITHUB_CLIENT_SECRET to be set.

    Raises:
        ValueError: On invalid/expired state, transport failure, or a
            GitHub-side error response.
    """
    config = get_oauth_config()

    if not config.client_secret:
        raise ValueError("Web Flow requires GITHUB_CLIENT_SECRET. Please use Device Flow or configure the secret.")

    # 1. Validate State (single-use; popped immediately)
    if state not in _oauth_states:
        logger.error(f"State mismatch or expiration. Received: {state}")
        raise ValueError("Invalid OAuth state. The session may have expired. Please try again.")

    oauth_state = _oauth_states.pop(state)
    if time.time() - oauth_state.timestamp > 600:
        raise ValueError("OAuth interaction timed out.")

    # 2. Exchange Code
    async with httpx.AsyncClient(timeout=10.0) as client:
        try:
            token_response = await client.post(
                "https://github.com/login/oauth/access_token",
                data={
                    "client_id": config.client_id,
                    "client_secret": config.client_secret,
                    "code": code,
                },
                headers={"Accept": "application/json"},
            )
            token_response.raise_for_status()
            token_data = token_response.json()
        except httpx.HTTPError as e:
            logger.error(f"HTTP Error contacting GitHub: {e}")
            # FIXED: chain the original exception so the transport failure
            # is preserved in tracebacks (was a bare `raise ValueError`).
            raise ValueError("Failed to contact GitHub authentication server.") from e

        if "error" in token_data:
            raise ValueError(f"GitHub refused the connection: {token_data.get('error_description')}")

        access_token = token_data.get("access_token")
        if not access_token:
            raise ValueError("No access_token returned from GitHub.")

        # 3. Fetch User
        # FIXED: this call was previously placed AFTER the `async with`
        # block, i.e. it reused an already-closed httpx client (httpx
        # raises on requests through a closed client).
        user = await _fetch_user_profile(client, access_token)

    return AuthSession(
        access_token=access_token,
        token_type=token_data.get("token_type", "bearer"),
        scope=token_data.get("scope", ""),
        user=user,
    )
+
+# ============================================================================
+# DEVICE FLOW (No Secret Required)
+# ============================================================================
+
async def initiate_device_flow() -> Dict[str, Any]:
    """
    Step 1: Request a device code from GitHub.

    Returns GitHub's raw response (device_code, user_code,
    verification_uri, interval, expires_in).
    """
    config = get_oauth_config()

    request_body = {
        "client_id": config.client_id,
        "scope": "repo user:email",
    }
    async with httpx.AsyncClient(timeout=10.0) as client:
        response = await client.post(
            "https://github.com/login/device/code",
            data=request_body,
            headers={"Accept": "application/json"},
        )
    response.raise_for_status()
    return response.json()
+
+
async def poll_device_token(device_code: str) -> Optional[AuthSession]:
    """
    Step 2: Exchange device code for token (Polling).

    Returns:
        AuthSession: If authentication is successful.
        None: If status is 'authorization_pending' or 'slow_down'.

    Raises:
        ValueError: If the code expired, access denied, or other errors.
    """
    config = get_oauth_config()

    async with httpx.AsyncClient(timeout=10.0) as client:
        response = await client.post(
            "https://github.com/login/oauth/access_token",
            data={
                "client_id": config.client_id,
                "device_code": device_code,
                "grant_type": "urn:ietf:params:oauth:grant-type:device_code",
            },
            headers={"Accept": "application/json"}
        )
        data = response.json()

        # Handle GitHub Device Flow Errors
        if "error" in data:
            error_code = data["error"]
            # These are expected during polling
            if error_code in ["authorization_pending", "slow_down"]:
                return None

            # These are actual failures
            desc = data.get("error_description", error_code)
            if error_code == "expired_token":
                raise ValueError("The device code has expired. Please try again.")
            if error_code == "access_denied":
                raise ValueError("Access denied by user.")

            raise ValueError(f"GitHub Auth Error: {desc}")

        access_token = data.get("access_token")
        if not access_token:
            return None

        # Success: Fetch User details
        # FIXED: this fetch previously ran AFTER the `async with` block,
        # i.e. on an already-closed httpx client (httpx raises on requests
        # through a closed client); it must happen while `client` is open.
        user = await _fetch_user_profile(client, access_token)

    return AuthSession(
        access_token=access_token,
        token_type=data.get("token_type", "bearer"),
        scope=data.get("scope", ""),
        user=user
    )
+
+# ============================================================================
+# SHARED HELPERS
+# ============================================================================
+
async def _fetch_user_profile(client: httpx.AsyncClient, token: str) -> GitHubUser:
    """Internal helper to fetch user profile with an existing client."""
    response = await client.get(
        "https://api.github.com/user",
        headers={
            "Authorization": f"Bearer {token}",
            "Accept": "application/json",
        },
    )
    response.raise_for_status()
    payload = response.json()

    # Optional profile fields default to None when GitHub omits them.
    optional = {key: payload.get(key) for key in ("name", "email", "bio", "html_url")}
    return GitHubUser(
        login=payload["login"],
        id=payload["id"],
        avatar_url=payload["avatar_url"],
        **optional,
    )
+
+
async def validate_token(access_token: str) -> Optional[GitHubUser]:
    """
    Validate GitHub access token and return user info.
    Useful for checking if a stored session is still valid.

    Returns None for an empty token or any validation failure.
    """
    if not access_token:
        return None

    try:
        async with httpx.AsyncClient(timeout=5.0) as client:
            profile = await _fetch_user_profile(client, access_token)
    except Exception as e:
        logger.debug(f"Token validation failed: {e}")
        return None
    return profile
+
+
def _cleanup_old_states():
    """Remove OAuth states older than 10 minutes to prevent memory leaks."""
    cutoff = time.time() - 600
    stale_keys = [key for key, record in _oauth_states.items() if record.timestamp < cutoff]
    for key in stale_keys:
        _oauth_states.pop(key, None)
\ No newline at end of file
diff --git a/gitpilot/github_pulls.py b/gitpilot/github_pulls.py
new file mode 100644
index 0000000000000000000000000000000000000000..e47aea832776f9f13fb028711af948672db39302
--- /dev/null
+++ b/gitpilot/github_pulls.py
@@ -0,0 +1,230 @@
+# gitpilot/github_pulls.py
+"""GitHub Pull Requests API wrapper.
+
+Provides async functions for creating, listing, reviewing, and merging
+pull requests.
+"""
+from __future__ import annotations
+
+from typing import Any, Dict, List, Optional
+
+from .github_api import github_request
+
+
+# ---------------------------------------------------------------------------
+# PR CRUD
+# ---------------------------------------------------------------------------
+
async def list_pull_requests(
    owner: str,
    repo: str,
    *,
    state: str = "open",
    sort: str = "created",
    direction: str = "desc",
    head: Optional[str] = None,
    base: Optional[str] = None,
    per_page: int = 30,
    page: int = 1,
    token: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """List pull requests with optional filters.

    Returns the raw list of PR dicts from the GitHub API; an empty list
    when the API yields nothing.
    """
    query: Dict[str, Any] = {
        "state": state,
        "sort": sort,
        "direction": direction,
        "per_page": min(per_page, 100),  # GitHub caps page size at 100
        "page": page,
    }
    # Optional filters are only sent when provided.
    for key, value in (("head", head), ("base", base)):
        if value:
            query[key] = value

    response = await github_request(
        f"/repos/{owner}/{repo}/pulls",
        params=query,
        token=token,
    )
    return response or []
+
+
async def get_pull_request(
    owner: str,
    repo: str,
    pull_number: int,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Fetch one pull request by number and return its raw API dict."""
    endpoint = f"/repos/{owner}/{repo}/pulls/{pull_number}"
    return await github_request(endpoint, token=token)
+
+
async def create_pull_request(
    owner: str,
    repo: str,
    *,
    title: str,
    head: str,
    base: str,
    body: Optional[str] = None,
    draft: bool = False,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Create a new pull request.

    `body` is omitted from the payload when None so GitHub applies its
    own default (empty description).
    """
    pr_payload: Dict[str, Any] = {
        "title": title,
        "head": head,
        "base": base,
        "draft": draft,
    }
    if body is not None:
        pr_payload["body"] = body

    endpoint = f"/repos/{owner}/{repo}/pulls"
    return await github_request(endpoint, method="POST", json=pr_payload, token=token)
+
+
async def update_pull_request(
    owner: str,
    repo: str,
    pull_number: int,
    *,
    title: Optional[str] = None,
    body: Optional[str] = None,
    state: Optional[str] = None,
    base: Optional[str] = None,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Update an existing pull request.

    Only fields explicitly given (non-None) are PATCHed. When no field is
    given, the current PR is fetched and returned instead of issuing an
    empty PATCH.
    """
    changes: Dict[str, Any] = {
        key: value
        for key, value in (
            ("title", title),
            ("body", body),
            ("state", state),
            ("base", base),
        )
        if value is not None
    }

    if not changes:
        # Nothing to change — return the PR as-is.
        return await get_pull_request(owner, repo, pull_number, token=token)

    return await github_request(
        f"/repos/{owner}/{repo}/pulls/{pull_number}",
        method="PATCH",
        json=changes,
        token=token,
    )
+
+
+# ---------------------------------------------------------------------------
+# Merge
+# ---------------------------------------------------------------------------
+
async def merge_pull_request(
    owner: str,
    repo: str,
    pull_number: int,
    *,
    commit_title: Optional[str] = None,
    commit_message: Optional[str] = None,
    merge_method: str = "merge",
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Merge a pull request.

    merge_method: one of 'merge', 'squash', 'rebase'.
    Custom commit title/message are only sent when truthy, letting GitHub
    generate its defaults otherwise.
    """
    merge_payload: Dict[str, Any] = {"merge_method": merge_method}
    for key, value in (("commit_title", commit_title), ("commit_message", commit_message)):
        if value:
            merge_payload[key] = value

    return await github_request(
        f"/repos/{owner}/{repo}/pulls/{pull_number}/merge",
        method="PUT",
        json=merge_payload,
        token=token,
    )
+
+
+# ---------------------------------------------------------------------------
+# PR Files & Diff
+# ---------------------------------------------------------------------------
+
async def list_pr_files(
    owner: str,
    repo: str,
    pull_number: int,
    *,
    per_page: int = 100,
    page: int = 1,
    token: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """List files changed in a pull request (empty list when none)."""
    endpoint = f"/repos/{owner}/{repo}/pulls/{pull_number}/files"
    # GitHub caps per_page at 100.
    query = {"per_page": min(per_page, 100), "page": page}
    response = await github_request(endpoint, params=query, token=token)
    return response or []
+
+
+# ---------------------------------------------------------------------------
+# PR Reviews & Comments
+# ---------------------------------------------------------------------------
+
async def list_pr_reviews(
    owner: str,
    repo: str,
    pull_number: int,
    token: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """List reviews on a pull request (empty list when none)."""
    endpoint = f"/repos/{owner}/{repo}/pulls/{pull_number}/reviews"
    response = await github_request(endpoint, token=token)
    return response or []
+
+
async def create_pr_review(
    owner: str,
    repo: str,
    pull_number: int,
    *,
    body: str,
    event: str = "COMMENT",
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Create a review on a pull request.

    event: one of 'APPROVE', 'REQUEST_CHANGES', 'COMMENT'.
    """
    review_payload = {"body": body, "event": event}
    return await github_request(
        f"/repos/{owner}/{repo}/pulls/{pull_number}/reviews",
        method="POST",
        json=review_payload,
        token=token,
    )
+
+
async def add_pr_comment(
    owner: str,
    repo: str,
    pull_number: int,
    body: str,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Add a general comment to a pull request (via issues API).

    GitHub models PR conversation comments as issue comments, hence the
    /issues/ endpoint with the PR number.
    """
    endpoint = f"/repos/{owner}/{repo}/issues/{pull_number}/comments"
    return await github_request(endpoint, method="POST", json={"body": body}, token=token)
diff --git a/gitpilot/github_search.py b/gitpilot/github_search.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e3f9de16320b4127bc9d13f22f7d2987b03bbb4
--- /dev/null
+++ b/gitpilot/github_search.py
@@ -0,0 +1,157 @@
+# gitpilot/github_search.py
+"""GitHub Search API wrapper.
+
+Provides async functions for searching code, repositories, issues, and users
+via GitHub's Search API.
+"""
+from __future__ import annotations
+
+from typing import Any, Dict, List, Optional
+
+from .github_api import github_request
+
+
async def search_code(
    query: str,
    *,
    owner: Optional[str] = None,
    repo: Optional[str] = None,
    language: Optional[str] = None,
    path: Optional[str] = None,
    per_page: int = 30,
    page: int = 1,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Search for code across GitHub repositories.

    Builds a qualified search query string from the parameters.
    Returns: {total_count, incomplete_results, items[...]}.
    """
    qualifiers = [query]
    # repo: qualifier wins when both owner and repo are given; owner alone
    # narrows to that user's repositories.
    if owner and repo:
        qualifiers.append(f"repo:{owner}/{repo}")
    elif owner:
        qualifiers.append(f"user:{owner}")
    for prefix, value in (("language", language), ("path", path)):
        if value:
            qualifiers.append(f"{prefix}:{value}")

    raw = await github_request(
        "/search/code",
        params={"q": " ".join(qualifiers), "per_page": min(per_page, 100), "page": page},
        token=token,
    )
    return _normalise_search_result(raw)
+
+
async def search_issues(
    query: str,
    *,
    owner: Optional[str] = None,
    repo: Optional[str] = None,
    state: Optional[str] = None,
    label: Optional[str] = None,
    is_pr: Optional[bool] = None,
    per_page: int = 30,
    page: int = 1,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Search issues and pull requests.

    is_pr: True restricts to PRs, False to issues, None searches both.
    """
    qualifiers = [query]
    if owner and repo:
        qualifiers.append(f"repo:{owner}/{repo}")
    elif owner:
        qualifiers.append(f"user:{owner}")
    for prefix, value in (("state", state), ("label", label)):
        if value:
            qualifiers.append(f"{prefix}:{value}")
    # Identity checks: only literal True/False add a type qualifier.
    if is_pr is True:
        qualifiers.append("type:pr")
    if is_pr is False:
        qualifiers.append("type:issue")

    raw = await github_request(
        "/search/issues",
        params={"q": " ".join(qualifiers), "per_page": min(per_page, 100), "page": page},
        token=token,
    )
    return _normalise_search_result(raw)
+
+
async def search_repositories(
    query: str,
    *,
    language: Optional[str] = None,
    sort: Optional[str] = None,
    order: str = "desc",
    per_page: int = 30,
    page: int = 1,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Search for repositories.

    `sort` (e.g. stars/forks/updated) is only sent when given; GitHub then
    defaults to best-match ordering.
    """
    qualifiers = [query]
    if language:
        qualifiers.append(f"language:{language}")

    query_params: Dict[str, Any] = {
        "q": " ".join(qualifiers),
        "per_page": min(per_page, 100),
        "page": page,
        "order": order,
    }
    if sort:
        query_params["sort"] = sort

    raw = await github_request("/search/repositories", params=query_params, token=token)
    return _normalise_search_result(raw)
+
+
async def search_users(
    query: str,
    *,
    type_filter: Optional[str] = None,
    location: Optional[str] = None,
    language: Optional[str] = None,
    per_page: int = 30,
    page: int = 1,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Search for users and organizations.

    type_filter: 'user' or 'org' to narrow results.
    """
    qualifiers = [query]
    for prefix, value in (
        ("type", type_filter),
        ("location", location),
        ("language", language),
    ):
        if value:
            qualifiers.append(f"{prefix}:{value}")

    raw = await github_request(
        "/search/users",
        params={"q": " ".join(qualifiers), "per_page": min(per_page, 100), "page": page},
        token=token,
    )
    return _normalise_search_result(raw)
+
+
+def _normalise_search_result(result: Any) -> Dict[str, Any]:
+ """Ensure consistent shape even if GitHub returns None."""
+ if not isinstance(result, dict):
+ return {"total_count": 0, "incomplete_results": False, "items": []}
+ return {
+ "total_count": result.get("total_count", 0),
+ "incomplete_results": result.get("incomplete_results", False),
+ "items": result.get("items", []),
+ }
diff --git a/gitpilot/headless.py b/gitpilot/headless.py
new file mode 100644
index 0000000000000000000000000000000000000000..72f4be6e531c4a8cb6f370f29976725d21588454
--- /dev/null
+++ b/gitpilot/headless.py
@@ -0,0 +1,96 @@
+# gitpilot/headless.py
+"""Headless execution mode for CI/CD pipelines.
+
+Runs GitPilot non-interactively from the command line, GitHub Actions,
+or GitLab CI, returning structured JSON output.
+
+Usage examples::
+
+ gitpilot run --headless -r owner/repo -m "fix the login bug"
+ gitpilot run --headless -r owner/repo --from-pr 42
+ echo "add tests for auth module" | gitpilot run --headless -r owner/repo
+"""
+from __future__ import annotations
+
+import json
+import logging
+from dataclasses import dataclass, field
+from typing import Any, Dict, Optional
+
+from .agent_tools import set_repo_context
+from .agentic import dispatch_request
+
+logger = logging.getLogger(__name__)
+
+
@dataclass
class HeadlessResult:
    """Result of a headless execution.

    Attributes:
        success: True when the request completed without raising.
        output: Text produced by the agent dispatch ("" on failure).
        session_id: Optional session identifier, when one was created.
        pr_url: Optional URL of a PR opened during the run.
        plan: Optional structured plan produced by the agents.
        error: Error message when success is False, else None.
    """

    success: bool
    output: str
    session_id: Optional[str] = None
    pr_url: Optional[str] = None
    plan: Optional[Dict[str, Any]] = None
    error: Optional[str] = None

    def to_json(self) -> str:
        """Serialize to pretty-printed JSON for CI consumers."""
        return json.dumps(
            {
                "success": self.success,
                "output": self.output,
                "session_id": self.session_id,
                "pr_url": self.pr_url,
                # Fix: `plan` was declared above but silently omitted from
                # the serialized payload; include it so pipelines can
                # inspect the structured plan.
                "plan": self.plan,
                "error": self.error,
            },
            indent=2,
        )
+
+
async def run_headless(
    repo_full_name: str,
    message: str,
    token: str,
    branch: Optional[str] = None,
    auto_pr: bool = False,
    from_pr: Optional[int] = None,
) -> HeadlessResult:
    """Execute a request non-interactively.

    Sets the repo context, optionally prepends PR #`from_pr` title/body to
    the request, dispatches to the agent pipeline, and wraps the outcome
    (or failure) in a HeadlessResult.
    """
    owner, repo = repo_full_name.split("/", 1)
    set_repo_context(owner, repo, token=token, branch=branch or "main")

    if from_pr:
        # Best effort: enrich the request with the PR's title and body.
        try:
            from .github_pulls import get_pull_request

            pr_data = await get_pull_request(owner, repo, from_pr, token=token)
            message = (
                f"PR #{from_pr}: {pr_data.get('title', '')}\n"
                f"{pr_data.get('body', '')}\n\n"
                f"User request: {message}"
            )
        except Exception as e:
            logger.warning("Could not fetch PR #%s: %s", from_pr, e)

    try:
        outcome = await dispatch_request(
            user_request=message,
            repo_full_name=repo_full_name,
            token=token,
            branch_name=branch,
        )
        text = outcome.get("result", "") if isinstance(outcome, dict) else str(outcome)
        return HeadlessResult(success=True, output=text)
    except Exception as e:
        logger.exception("Headless execution failed")
        return HeadlessResult(success=False, output="", error=str(e))
diff --git a/gitpilot/hf_space_tools.py b/gitpilot/hf_space_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9d1dc2f39e70d5f5d63a439558a1e4f649cd471
--- /dev/null
+++ b/gitpilot/hf_space_tools.py
@@ -0,0 +1,407 @@
+"""HuggingFace Space management tools for GitPilot.
+
+Provides CrewAI-compatible tools for:
+- Cloning HF Spaces
+- Analyzing Space health (SDK, deps, dead patterns)
+- Generating fixes via OllaBridge LLM
+- Pushing fixes to HF repos
+- Managing ZeroGPU hardware allocation
+
+Designed to work with GitPilot's multi-agent architecture
+and OllaBridge Cloud as the LLM backend.
+"""
+from __future__ import annotations
+
+import json
+import logging
+import os
+import re
+import subprocess
+import tempfile
+from pathlib import Path
+from typing import Any
+
+logger = logging.getLogger(__name__)
+
+# Dead/deprecated patterns to scan for
# Each entry is (regex, human-readable description). analyze_hf_space()
# applies these with re.search against every *.py file in a Space; any
# match flags the Space as needing a rebuild.
DEAD_PATTERNS: list[tuple[str, str]] = [
    (r'st\.secrets\[.*BACKEND_SERVER.*\]', 'Dead backend server dependency'),
    (r'api-inference\.huggingface\.co', 'Deprecated HF Inference API endpoint'),
    (r'from\s+dalle_mini', 'Deprecated dalle-mini imports'),
    (r'from\s+min_dalle', 'Deprecated min-dalle imports'),
    (r'from\s+transformers\.file_utils', 'Removed transformers.file_utils'),
    (r'jax\.experimental\.PartitionSpec', 'Moved JAX PartitionSpec API'),
    (r'gr\.inputs\.', 'Deprecated Gradio inputs API'),
    (r'gr\.outputs\.', 'Deprecated Gradio outputs API'),
]
+
+
def clone_hf_space(space_id: str, token: str | None = None) -> dict[str, Any]:
    """Clone a HuggingFace Space repository to a temp directory.

    Args:
        space_id: Full Space ID (e.g. 'user/space-name').
        token: Optional HF token for private repos.

    Returns:
        Dict with 'path' (str), 'success' (bool), 'error' (str|None).
        The token is never included in returned error strings.
    """

    def _redact(text: str) -> str:
        # git echoes the remote URL — which embeds the token — in its error
        # output; never let the credential leak into the returned error.
        return text.replace(token, "***") if token else text

    tmpdir = tempfile.mkdtemp(prefix="gitpilot_hf_")
    name = space_id.split("/")[-1]
    repo_dir = os.path.join(tmpdir, name)

    clone_url = f"https://huggingface.co/spaces/{space_id}"
    if token:
        clone_url = f"https://user:{token}@huggingface.co/spaces/{space_id}"

    try:
        result = subprocess.run(
            ["git", "clone", "--depth=1", clone_url, repo_dir],
            capture_output=True, text=True, timeout=120,
        )
        if result.returncode != 0:
            return {"path": "", "success": False, "error": _redact(result.stderr.strip())}
        return {"path": repo_dir, "success": True, "error": None}
    except subprocess.TimeoutExpired:
        return {"path": "", "success": False, "error": "Clone timed out (120s)"}
    except Exception as exc:
        return {"path": "", "success": False, "error": _redact(str(exc))}
+
+
def analyze_hf_space(repo_dir: str) -> dict[str, Any]:
    """Analyze a cloned HuggingFace Space for issues.

    Checks README front matter (sdk / app_file), presence of the app file
    and requirements.txt, scans Python sources for deprecated patterns and
    GPU indicators, and derives recommendations.

    Returns a diagnosis dict with:
        sdk, app_file, issues, dead_patterns, needs_gpu,
        needs_rebuild, severity, recommendations, files.
    """
    path = Path(repo_dir)
    diag: dict[str, Any] = {
        "sdk": "unknown",
        "app_file": "app.py",
        "issues": [],
        "dead_patterns": [],
        "needs_gpu": False,
        "needs_rebuild": False,
        "severity": "info",
        "recommendations": [],
        "files": [],
    }

    # Parse README front matter (sdk / app_file keys).
    readme = path / "README.md"
    if readme.exists():
        text = readme.read_text(errors="replace")
        sdk_match = re.search(r'^sdk:\s*(\S+)', text, re.MULTILINE)
        app_match = re.search(r'^app_file:\s*(\S+)', text, re.MULTILINE)
        if sdk_match:
            diag["sdk"] = sdk_match.group(1)
        if app_match:
            diag["app_file"] = app_match.group(1)
    else:
        diag["issues"].append("Missing README.md")

    # The declared entry point must exist.
    app_path = path / diag["app_file"]
    if not app_path.exists():
        diag["issues"].append(f"app_file '{diag['app_file']}' does not exist")
        diag["severity"] = "critical"
        diag["needs_rebuild"] = True

    # requirements.txt must exist and be non-empty.
    req = path / "requirements.txt"
    if not req.exists():
        diag["issues"].append("Missing requirements.txt")
    elif not req.read_text(errors="replace").strip():
        diag["issues"].append("Empty requirements.txt")

    # Substrings whose presence suggests the Space needs GPU hardware.
    gpu_indicators = [
        "torch", "diffusers", "transformers", "accelerate",
        "spaces.GPU", "@spaces.GPU", "cuda", ".to(\"cuda\")",
    ]

    # Single pass over Python sources: each file is read once and checked
    # for both dead patterns and GPU indicators (previously the tree was
    # walked twice and every file read twice).
    for py_file in path.rglob("*.py"):
        try:
            content = py_file.read_text(errors="replace")
        except OSError:
            continue
        for pattern, desc in DEAD_PATTERNS:
            if re.search(pattern, content):
                rel = str(py_file.relative_to(path))
                diag["dead_patterns"].append(f"{rel}: {desc}")
                diag["issues"].append(f"Dead pattern in {rel}: {desc}")
                diag["severity"] = "critical"
                diag["needs_rebuild"] = True
        if not diag["needs_gpu"] and any(ind in content for ind in gpu_indicators):
            diag["needs_gpu"] = True

    # File listing (skip git internals).
    for p in sorted(path.rglob("*")):
        if p.is_file() and ".git" not in p.parts:
            diag["files"].append(str(p.relative_to(path)))

    # Build recommendations from the gathered facts.
    if diag["needs_rebuild"]:
        diag["recommendations"].append("Rebuild app.py with modern dependencies")
    if diag["sdk"] == "streamlit":
        diag["recommendations"].append("Consider migrating to Gradio SDK")
    if diag["dead_patterns"]:
        diag["recommendations"].append("Remove deprecated API calls")
    if diag["needs_gpu"]:
        diag["recommendations"].append("Request ZeroGPU (zero-a10g) hardware")

    return diag
+
+
def generate_space_fix(
    space_id: str,
    diagnosis: dict[str, Any],
    app_content: str = "",
    ollabridge_url: str | None = None,
    ollabridge_model: str = "qwen2.5:1.5b",
    ollabridge_key: str | None = None,
) -> dict[str, Any]:
    """Generate a fix for a broken HF Space.

    If ollabridge_url is provided, uses LLM for intelligent fix.
    Otherwise falls back to template-based fix.

    Returns dict with 'files' (dict of filename->content), 'explanation' (str).
    """
    if ollabridge_url:
        # LLM-powered path; any failure falls through to the template fix.
        try:
            import httpx

            request_headers = {"Content-Type": "application/json"}
            if ollabridge_key:
                request_headers["Authorization"] = f"Bearer {ollabridge_key}"

            request_body = {
                "model": ollabridge_model,
                "messages": [
                    {"role": "system", "content": "You are an expert HuggingFace Spaces developer. Output valid JSON."},
                    {"role": "user", "content": _build_repair_prompt(space_id, diagnosis, app_content)},
                ],
                "temperature": 0.3,
                "max_tokens": 4096,
            }

            resp = httpx.post(
                f"{ollabridge_url.rstrip('/')}/v1/chat/completions",
                json=request_body, headers=request_headers, timeout=120.0,
            )
            if resp.status_code == 200:
                answer = resp.json()["choices"][0]["message"]["content"]
                parsed = _parse_llm_fix(answer)
                if parsed:
                    return parsed
        except Exception as exc:
            logger.warning("OllaBridge fix generation failed: %s", exc)

    # Template fallback (also used when the LLM response is unusable).
    return _generate_template_fix(space_id, diagnosis)
+
+
def push_space_fix(
    repo_dir: str,
    fix: dict[str, Any],
    commit_message: str = "fix: auto-repair by GitPilot + RepoGuardian",
) -> dict[str, Any]:
    """Apply fix files and push to the Space repo.

    Args:
        repo_dir: Path to cloned Space repo.
        fix: Fix dict with 'files' key.
        commit_message: Git commit message.

    Returns:
        Dict with 'success' (bool), 'changed_files' (list), 'error' (str|None).
    """
    root = Path(repo_dir)

    # Materialise every fix file, creating parent directories as needed.
    written = []
    for filename, content in fix.get("files", {}).items():
        target = root / filename
        target.parent.mkdir(parents=True, exist_ok=True)
        target.write_text(content)
        written.append(filename)

    if not written:
        return {"success": False, "changed_files": [], "error": "No files to write"}

    # Stage, commit, and push in sequence; stop on the first failure.
    git_steps = (
        ["git", "add", "-A"],
        ["git", "commit", "-m", commit_message],
        ["git", "push", "origin", "main"],
    )
    for cmd in git_steps:
        proc = subprocess.run(cmd, cwd=repo_dir, capture_output=True, text=True, timeout=60)
        if proc.returncode != 0:
            return {
                "success": False,
                "changed_files": written,
                "error": f"Command '{' '.join(cmd)}' failed: {proc.stderr.strip()}",
            }

    return {"success": True, "changed_files": written, "error": None}
+
+
def manage_space_hardware(
    space_id: str,
    token: str,
    hardware: str = "zero-a10g",
    auto_free: bool = True,
) -> dict[str, Any]:
    """Request hardware for a HuggingFace Space.

    If ZeroGPU slots are full and auto_free is True,
    automatically downgrades a paused Space to free a slot.

    Args:
        space_id: Full Space ID ('namespace/name').
        token: HF token with write access to the namespace.
        hardware: Hardware flavour to request (default ZeroGPU 'zero-a10g').
        auto_free: Whether to downgrade a paused/sleeping ZeroGPU Space in
            the same namespace when the quota is exhausted.

    Returns dict with 'success', 'hardware', 'freed_slot', 'error'.
    Never raises; all failures are reported via the 'error' field.
    """
    try:
        from huggingface_hub import HfApi
        api = HfApi(token=token)

        # Try direct request
        try:
            api.request_space_hardware(space_id, hardware)
            return {"success": True, "hardware": hardware, "freed_slot": None, "error": None}
        except Exception as exc:
            # "limited to" in the message is treated as the quota-exhausted
            # signal; anything else is a hard failure.
            # NOTE(review): this relies on the HF error wording — confirm it
            # still matches current hub responses.
            if "limited to" not in str(exc).lower():
                return {"success": False, "hardware": None, "freed_slot": None, "error": str(exc)}

        if not auto_free:
            return {"success": False, "hardware": None, "freed_slot": None, "error": "Slots full, auto_free disabled"}

        # Find and downgrade a paused Space
        namespace = space_id.split("/")[0]
        spaces = list(api.list_spaces(author=namespace))
        for s in spaces:
            try:
                info = api.space_info(s.id)
                if not info.runtime:
                    continue
                raw_hw = info.runtime.raw.get("hardware", {})
                req_hw = raw_hw.get("requested", "")
                stage = info.runtime.stage
                # Only reclaim ZeroGPU Spaces that are idle, and never the
                # Space we are trying to upgrade.
                if "zero" in str(req_hw).lower() and stage in ("PAUSED", "SLEEPING") and s.id != space_id:
                    api.request_space_hardware(s.id, "cpu-basic")
                    # Retry the original request
                    api.request_space_hardware(space_id, hardware)
                    return {
                        "success": True,
                        "hardware": hardware,
                        "freed_slot": s.id,
                        "error": None,
                    }
            except Exception:
                # Best-effort: a Space we cannot inspect or downgrade is
                # simply skipped.
                continue

        return {"success": False, "hardware": None, "freed_slot": None, "error": "No paused Spaces to free"}

    except ImportError:
        return {"success": False, "hardware": None, "freed_slot": None, "error": "huggingface_hub not installed"}
+
+
def get_space_runtime_info(space_id: str, token: str | None = None) -> dict[str, Any]:
    """Fetch runtime info for a HuggingFace Space.

    Returns dict with sdk, stage, hardware, domain, etc.; on any failure
    (missing huggingface_hub, network error) returns
    {'space_id', 'success': False, 'error'}.
    """
    try:
        from huggingface_hub import HfApi

        info = HfApi(token=token).space_info(space_id)
        payload: dict[str, Any] = {
            "space_id": space_id,
            "sdk": info.sdk,
            "success": True,
        }
        runtime = info.runtime
        if runtime:
            payload["stage"] = runtime.stage
            hardware = runtime.raw.get("hardware", {})
            payload["current_hardware"] = hardware.get("current")
            payload["requested_hardware"] = hardware.get("requested")
            domains = runtime.raw.get("domains", [])
            if domains:
                payload["domain"] = domains[0].get("domain")
        return payload
    except Exception as exc:
        return {"space_id": space_id, "success": False, "error": str(exc)}
+
+
+# ---- Internal helpers ----
+
+def _build_repair_prompt(space_id: str, diagnosis: dict[str, Any], app_content: str) -> str:
+ return f"""A HuggingFace Space is broken and needs repair.
+
+## Space: {space_id}
+- SDK: {diagnosis.get('sdk', 'unknown')}
+- app_file: {diagnosis.get('app_file', 'app.py')}
+
+## Issues
+{chr(10).join('- ' + i for i in diagnosis.get('issues', []))}
+
+## Dead Patterns
+{chr(10).join('- ' + p for p in diagnosis.get('dead_patterns', []))}
+
+## Current app.py (first 150 lines)
+{app_content[:5000]}
+
+Generate a complete fix as JSON:
+{{
+ "files": {{
+ "app.py": "",
+ "requirements.txt": "",
+ "README.md": ""
+ }},
+ "explanation": ""
+}}"""
+
+
+def _parse_llm_fix(response: str) -> dict[str, Any] | None:
+ try:
+ return json.loads(response)
+ except json.JSONDecodeError:
+ pass
+ match = re.search(r'```(?:json)?\s*\n(.+?)\n```', response, re.DOTALL)
+ if match:
+ try:
+ return json.loads(match.group(1))
+ except json.JSONDecodeError:
+ pass
+ return None
+
+
+def _generate_template_fix(space_id: str, diagnosis: dict[str, Any]) -> dict[str, Any]:
+ name = space_id.split("/")[-1]
+ title = name.replace("-", " ").replace("_", " ").title()
+ needs_gpu = diagnosis.get("needs_gpu", False)
+
+ if needs_gpu:
+ app = f'''"""\n{title} - Auto-repaired by GitPilot + RepoGuardian\n"""\nimport gradio as gr\nimport numpy as np\n\ntry:\n import spaces\n GPU = True\nexcept ImportError:\n GPU = False\n\ndef process(prompt: str, progress=gr.Progress(track_tqdm=True)):\n if not prompt.strip():\n raise gr.Error("Please enter a prompt.")\n return f"Output for: {{prompt}}"\n\nif GPU:\n process = spaces.GPU(process)\n\nwith gr.Blocks(theme=gr.themes.Soft(), title="{title}") as demo:\n gr.Markdown("# {title}")\n with gr.Row():\n inp = gr.Textbox(label="Prompt", lines=3)\n out = gr.Textbox(label="Output", lines=5)\n gr.Button("Generate", variant="primary").click(process, [inp], [out])\n\nif __name__ == "__main__":\n demo.launch()\n'''
+ reqs = "gradio>=4.0.0\ntorch>=2.0.0\nnumpy>=1.24.0\n"
+ else:
+ app = f'''"""\n{title} - Auto-repaired by GitPilot + RepoGuardian\n"""\nimport gradio as gr\n\ndef process(text: str):\n if not text.strip():\n raise gr.Error("Please enter text.")\n return f"Processed: {{text}}"\n\nwith gr.Blocks(theme=gr.themes.Soft(), title="{title}") as demo:\n gr.Markdown("# {title}")\n with gr.Row():\n inp = gr.Textbox(label="Input", lines=3)\n out = gr.Textbox(label="Output", lines=3)\n gr.Button("Process", variant="primary").click(process, [inp], [out])\n\nif __name__ == "__main__":\n demo.launch()\n'''
+ reqs = "gradio>=4.0.0\n"
+
+ readme = f"""---\ntitle: {title}\nemoji: \U0001f680\ncolorFrom: blue\ncolorTo: purple\nsdk: gradio\nsdk_version: 5.23.0\napp_file: app.py\npinned: false\nlicense: apache-2.0\n---\n\n# {title}\n\nAuto-repaired by [GitPilot](https://github.com/ruslanmv/gitpilot) + [RepoGuardian](https://github.com/ruslanmv/RepoGuardian).\n"""
+
+ return {
+ "files": {"app.py": app, "requirements.txt": reqs, "README.md": readme},
+ "explanation": "Template fix: replaced broken app with working Gradio placeholder",
+ }
diff --git a/gitpilot/hooks.py b/gitpilot/hooks.py
new file mode 100644
index 0000000000000000000000000000000000000000..18027fbeea9e0dc54a3d1794b6071de4d6f225fc
--- /dev/null
+++ b/gitpilot/hooks.py
@@ -0,0 +1,195 @@
+# gitpilot/hooks.py
+"""Event hook system for workflow automation.
+
+Allows users to register shell commands or Python callables that fire
+on specific lifecycle events. Hooks are defined in .gitpilot/hooks.json
+or programmatically via the API.
+
+Events
+------
+- session_start Session begins
+- session_end Session ends
+- pre_tool_use Before a tool runs (blocking hooks can cancel)
+- post_tool_use After a tool completes
+- pre_edit Before file edit (blocking hooks can cancel)
+- post_edit After file edit
+- pre_commit Before git commit (blocking hooks can cancel)
+- post_commit After git commit
+- pre_push Before git push (blocking hooks can cancel)
+- user_message When the user sends a message
+"""
+from __future__ import annotations
+
+import asyncio
+import json
+import logging
+import os
+from dataclasses import dataclass, field
+from enum import Enum
+from pathlib import Path
+from typing import Any, Callable, Dict, List, Optional
+
+logger = logging.getLogger(__name__)
+
+
class HookEvent(str, Enum):
    """Lifecycle events a hook can subscribe to.

    Subclasses str so event values serialize/compare naturally with the
    plain strings used in .gitpilot/hooks.json configs.
    """

    SESSION_START = "session_start"
    SESSION_END = "session_end"
    PRE_TOOL_USE = "pre_tool_use"
    POST_TOOL_USE = "post_tool_use"
    PRE_EDIT = "pre_edit"
    POST_EDIT = "post_edit"
    PRE_COMMIT = "pre_commit"
    POST_COMMIT = "post_commit"
    PRE_PUSH = "pre_push"
    USER_MESSAGE = "user_message"
+
+
@dataclass
class HookDefinition:
    """A single registered hook.

    Either `command` (a shell command) or `handler` (a Python callable)
    provides the action; when both are set, `command` takes precedence
    (see HookManager._run_hook).
    """

    event: HookEvent                     # lifecycle event this hook fires on
    name: str                            # identifier used for unregistering/reporting
    command: Optional[str] = None        # shell command run via the event loop
    handler: Optional[Callable] = None   # sync callable receiving the context dict
    blocking: bool = False               # failing blocking hooks cancel the event
    timeout: int = 30                    # seconds before a command hook is killed
+
+
@dataclass
class HookResult:
    """Outcome of one hook invocation."""

    hook_name: str         # HookDefinition.name that produced this result
    event: HookEvent       # event the hook fired on
    success: bool          # command exit code 0 / handler did not raise
    output: str = ""       # captured stdout+stderr or handler return value
    blocked: bool = False  # True when a failing blocking hook cancelled the event
+
+
class HookManager:
    """Register and fire lifecycle hooks.

    Hooks are kept per-event, in registration order. `fire` runs every hook
    for an event; a failing *blocking* hook is flagged as `blocked` and
    stops the remaining hooks for that event.
    """

    def __init__(self):
        # One (possibly empty) list per known event.
        self._hooks: Dict[HookEvent, List[HookDefinition]] = {
            e: [] for e in HookEvent
        }

    def register(self, hook: HookDefinition):
        """Append `hook` to its event's list (fired in registration order)."""
        self._hooks[hook.event].append(hook)
        logger.info("Registered hook '%s' for event '%s'", hook.name, hook.event)

    def unregister(self, event: HookEvent, name: str):
        """Remove every hook named `name` for `event`; no-op when absent."""
        self._hooks[event] = [h for h in self._hooks[event] if h.name != name]

    def list_hooks(self) -> List[Dict[str, Any]]:
        """Return a JSON-friendly summary of every registered hook."""
        result = []
        for event, hooks in self._hooks.items():
            for h in hooks:
                result.append({
                    "event": event.value,
                    "name": h.name,
                    "command": h.command,
                    "blocking": h.blocking,
                    "timeout": h.timeout,
                })
        return result

    def load_from_file(self, path: Path):
        """Load hooks from a JSON config file.

        Format::

            [
              {"event": "post_edit", "name": "lint", "command": "ruff check ."},
              {"event": "pre_commit", "name": "test", "command": "pytest", "blocking": true}
            ]

        A missing file is silently ignored; malformed configs are logged
        and skipped so a bad hooks.json never breaks startup.
        """
        if not path.exists():
            return
        try:
            hooks = json.loads(path.read_text())
            for h in hooks:
                self.register(HookDefinition(
                    event=HookEvent(h["event"]),
                    name=h["name"],
                    command=h.get("command"),
                    blocking=h.get("blocking", False),
                    timeout=h.get("timeout", 30),
                ))
        except Exception as e:
            logger.warning("Failed to load hooks from %s: %s", path, e)

    async def fire(
        self,
        event: HookEvent,
        context: Optional[Dict[str, Any]] = None,
        cwd: Optional[Path] = None,
    ) -> List[HookResult]:
        """Run every hook registered for `event`, in order.

        Args:
            event: Which lifecycle event fired.
            context: Key/value pairs exported to command hooks as
                GITPILOT_HOOK_<KEY> environment variables.
            cwd: Working directory for command hooks.

        Returns:
            One HookResult per hook that ran. When a blocking hook fails,
            its result has `blocked=True` and later hooks are skipped.
        """
        results = []
        for hook in self._hooks.get(event, []):
            result = await self._run_hook(hook, context, cwd)
            results.append(result)
            if hook.blocking and not result.success:
                result.blocked = True
                break
        return results

    def is_blocked(self, results: List[HookResult]) -> bool:
        """True when any result in `results` cancelled its event."""
        return any(r.blocked for r in results)

    async def _run_hook(
        self,
        hook: HookDefinition,
        context: Optional[Dict[str, Any]],
        cwd: Optional[Path],
    ) -> HookResult:
        """Dispatch a single hook; never raises (failures become results)."""
        try:
            # Command takes precedence over handler when both are set.
            if hook.command:
                return await self._run_command_hook(hook, context, cwd)
            if hook.handler:
                output = hook.handler(context or {})
                return HookResult(
                    hook_name=hook.name, event=hook.event,
                    success=True, output=str(output),
                )
            return HookResult(
                hook_name=hook.name, event=hook.event,
                success=True, output="No action",
            )
        except Exception as e:
            return HookResult(
                hook_name=hook.name, event=hook.event,
                success=False, output=str(e),
            )

    async def _run_command_hook(
        self,
        hook: HookDefinition,
        context: Optional[Dict[str, Any]],
        cwd: Optional[Path],
    ) -> HookResult:
        """Run a shell-command hook with the context exported via env vars."""
        env = {**os.environ}
        if context:
            for k, v in context.items():
                env[f"GITPILOT_HOOK_{k.upper()}"] = str(v)

        proc = await asyncio.create_subprocess_shell(
            hook.command,
            cwd=str(cwd) if cwd else None,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.STDOUT,  # interleave stderr into stdout
            env=env,
        )
        try:
            stdout, _ = await asyncio.wait_for(
                proc.communicate(), timeout=hook.timeout,
            )
            return HookResult(
                hook_name=hook.name, event=hook.event,
                success=proc.returncode == 0,
                output=stdout.decode("utf-8", errors="replace"),
            )
        except asyncio.TimeoutError:
            proc.kill()
            # Fix: reap the killed child. Without this wait() the process
            # transport is left open and the child lingers as a zombie
            # until interpreter shutdown.
            await proc.wait()
            return HookResult(
                hook_name=hook.name, event=hook.event,
                success=False, output="Hook timed out",
            )
diff --git a/gitpilot/issue_tools.py b/gitpilot/issue_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..38506746709e8d2b787a33c5ddc044dead0f36b7
--- /dev/null
+++ b/gitpilot/issue_tools.py
@@ -0,0 +1,161 @@
+"""CrewAI tools for GitHub Issue management.
+
+These tools allow agents to create, list, update, and comment on GitHub issues.
+They reuse the repo context mechanism from agent_tools.
+"""
+import asyncio
+import json
+from typing import Optional
+
+from crewai.tools import tool
+
+from .agent_tools import get_repo_context
+from . import github_issues as gi
+
+
+def _run_async(coro):
+ """Run an async coroutine from a sync CrewAI tool."""
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+ try:
+ return loop.run_until_complete(coro)
+ finally:
+ loop.close()
+
+
+def _fmt_issue(issue: dict) -> str:
+ labels = ", ".join(l.get("name", "") for l in issue.get("labels", []))
+ assignees = ", ".join(a.get("login", "") for a in issue.get("assignees", []))
+ return (
+ f"#{issue.get('number')} [{issue.get('state', 'open')}] "
+ f"{issue.get('title', '')}\n"
+ f" Labels: {labels or 'none'} | Assignees: {assignees or 'none'}\n"
+ f" URL: {issue.get('html_url', '')}"
+ )
+
+
+@tool("List repository issues")
+def list_issues(
+ state: str = "open",
+ labels: Optional[str] = None,
+ per_page: int = 20,
+) -> str:
+ """Lists issues in the current repository. Optional filters: state (open/closed/all), labels (comma-separated), per_page."""
+ try:
+ owner, repo, token, _branch = get_repo_context()
+ issues = _run_async(
+ gi.list_issues(owner, repo, state=state, labels=labels, per_page=per_page, token=token)
+ )
+ if not issues:
+ return f"No {state} issues found in {owner}/{repo}."
+ header = f"Issues in {owner}/{repo} (state={state}):\n"
+ return header + "\n".join(_fmt_issue(i) for i in issues)
+ except Exception as e:
+ return f"Error listing issues: {e}"
+
+
+@tool("Get issue details")
+def get_issue(issue_number: int) -> str:
+ """Gets full details of a specific issue by number."""
+ try:
+ owner, repo, token, _branch = get_repo_context()
+ issue = _run_async(gi.get_issue(owner, repo, issue_number, token=token))
+ body = (issue.get("body") or "")[:500]
+ return (
+ f"Issue #{issue.get('number')}: {issue.get('title')}\n"
+ f"State: {issue.get('state')} | Created: {issue.get('created_at')}\n"
+ f"Author: {issue.get('user', {}).get('login', 'unknown')}\n"
+ f"Body:\n{body}\n"
+ f"URL: {issue.get('html_url', '')}"
+ )
+ except Exception as e:
+ return f"Error getting issue: {e}"
+
+
+@tool("Create a new issue")
+def create_issue(
+ title: str,
+ body: str = "",
+ labels: str = "",
+ assignees: str = "",
+) -> str:
+ """Creates a new GitHub issue. labels and assignees are comma-separated strings."""
+ try:
+ owner, repo, token, _branch = get_repo_context()
+ label_list = [l.strip() for l in labels.split(",") if l.strip()] if labels else None
+ assignee_list = [a.strip() for a in assignees.split(",") if a.strip()] if assignees else None
+ issue = _run_async(
+ gi.create_issue(owner, repo, title, body=body or None, labels=label_list, assignees=assignee_list, token=token)
+ )
+ return f"Created issue #{issue.get('number')}: {issue.get('title')}\nURL: {issue.get('html_url', '')}"
+ except Exception as e:
+ return f"Error creating issue: {e}"
+
+
+@tool("Update an issue")
+def update_issue(
+ issue_number: int,
+ title: str = "",
+ body: str = "",
+ state: str = "",
+ labels: str = "",
+ assignees: str = "",
+) -> str:
+ """Updates an existing issue. Only non-empty fields are changed. labels/assignees are comma-separated."""
+ try:
+ owner, repo, token, _branch = get_repo_context()
+ kwargs: dict = {}
+ if title:
+ kwargs["title"] = title
+ if body:
+ kwargs["body"] = body
+ if state:
+ kwargs["state"] = state
+ if labels:
+ kwargs["labels"] = [l.strip() for l in labels.split(",") if l.strip()]
+ if assignees:
+ kwargs["assignees"] = [a.strip() for a in assignees.split(",") if a.strip()]
+ issue = _run_async(gi.update_issue(owner, repo, issue_number, token=token, **kwargs))
+ return f"Updated issue #{issue.get('number')}: {issue.get('title')}\nState: {issue.get('state')}"
+ except Exception as e:
+ return f"Error updating issue: {e}"
+
+
+@tool("Add a comment to an issue")
+def add_issue_comment(issue_number: int, body: str) -> str:
+ """Adds a comment to an existing issue."""
+ try:
+ owner, repo, token, _branch = get_repo_context()
+ comment = _run_async(gi.add_issue_comment(owner, repo, issue_number, body, token=token))
+ return f"Comment added to issue #{issue_number}\nURL: {comment.get('html_url', '')}"
+ except Exception as e:
+ return f"Error adding comment: {e}"
+
+
+@tool("List issue comments")
+def list_issue_comments(issue_number: int) -> str:
+ """Lists all comments on an issue."""
+ try:
+ owner, repo, token, _branch = get_repo_context()
+ comments = _run_async(gi.list_issue_comments(owner, repo, issue_number, token=token))
+ if not comments:
+ return f"No comments on issue #{issue_number}."
+ lines = [f"Comments on issue #{issue_number}:"]
+ for c in comments:
+ author = c.get("user", {}).get("login", "unknown")
+ body_preview = (c.get("body") or "")[:200]
+ lines.append(f" [{author}] {body_preview}")
+ return "\n".join(lines)
+ except Exception as e:
+ return f"Error listing comments: {e}"
+
+
+# Export all issue tools
+# Aggregated list consumed when registering this toolset with an agent/crew.
+ISSUE_TOOLS = [
+    list_issues,
+    get_issue,
+    create_issue,
+    update_issue,
+    add_issue_comment,
+    list_issue_comments,
+]
diff --git a/gitpilot/langflow_client.py b/gitpilot/langflow_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..e271a9966dfcb03acdb74272f5489e48718487a5
--- /dev/null
+++ b/gitpilot/langflow_client.py
@@ -0,0 +1,50 @@
+from __future__ import annotations
+
+from typing import Any, Dict, Optional
+
+import httpx
+from fastapi import HTTPException
+
+from .settings import get_settings
+
+
+async def run_langflow_flow(
+ flow_id: str,
+ input_value: str,
+ *,
+ session_id: str = "gitpilot-session",
+ tweaks: Optional[Dict[str, Any]] = None,
+) -> str:
+ """Run a LangFlow flow and return the first chat-like output as text."""
+ settings = get_settings()
+ url = f"{settings.langflow_url.rstrip('/')}/api/v1/run/{flow_id}"
+ headers = {"Content-Type": "application/json"}
+ if settings.langflow_api_key:
+ headers["x-api-key"] = settings.langflow_api_key
+
+ payload: Dict[str, Any] = {
+ "input_value": input_value,
+ "session_id": session_id,
+ "input_type": "chat",
+ "output_type": "chat",
+ "output_component": "",
+ "tweaks": tweaks or {},
+ }
+
+ async with httpx.AsyncClient() as client:
+ resp = await client.post(url, headers=headers, json=payload)
+
+ if resp.status_code >= 400:
+ raise HTTPException(resp.status_code, resp.text)
+
+ data = resp.json()
+ try:
+ outputs = data["outputs"][0]["outputs"][0]["results"]
+ if isinstance(outputs, dict):
+ for key in ("message", "text", "output_text"):
+ if key in outputs:
+ return str(outputs[key])
+ except Exception:
+ pass
+
+ return str(data)
diff --git a/gitpilot/learning.py b/gitpilot/learning.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5fc5a2b8e9e2df2882a1a5801edb3c0f90b1f74
--- /dev/null
+++ b/gitpilot/learning.py
@@ -0,0 +1,251 @@
+# gitpilot/learning.py
+"""Self-improving agent learning engine.
+
+After each task execution, evaluates outcomes, extracts patterns,
+and stores them in the project's auto-memory. Over time, GitPilot
+becomes specialised to each project's patterns and conventions.
+
+Inspired by reinforcement learning from human feedback (RLHF) principles
+and the experience-replay mechanism from DeepMind's DQN (Mnih et al., 2015),
+adapted for a software engineering context.
+
+Learning loop::
+
+ Execute task → Evaluate outcome → Extract patterns → Store in memory
+ ↓
+ Future tasks ← Agent reads patterns from memory ← Memory updated
+"""
+from __future__ import annotations
+
+import json
+import logging
+from dataclasses import dataclass, field
+from datetime import datetime, timezone
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+logger = logging.getLogger(__name__)
+
+# Subdirectory (under the storage dir) where per-repo insight files live.
+LEARNING_DIR = "learning"
+# Hard cap on stored patterns per repository file.
+MAX_INSIGHTS_PER_REPO = 200
+# Known insight categories (informational; not enforced elsewhere in this module).
+INSIGHT_CATEGORIES = [
+    "code_style",
+    "testing",
+    "architecture",
+    "workflow",
+    "error_pattern",
+    "performance",
+    "security",
+]
+
+
+@dataclass
+class Evaluation:
+ """Result of evaluating a task outcome."""
+
+ task_description: str
+ success: bool
+ outcome_type: str = "" # tests_passed, pr_approved, error_fixed, etc.
+ details: str = ""
+ confidence: float = 0.8 # 0.0 - 1.0
+ timestamp: str = field(
+ default_factory=lambda: datetime.now(timezone.utc).isoformat()
+ )
+
+ def to_dict(self) -> Dict[str, Any]:
+ return {
+ "task_description": self.task_description,
+ "success": self.success,
+ "outcome_type": self.outcome_type,
+ "details": self.details,
+ "confidence": self.confidence,
+ "timestamp": self.timestamp,
+ }
+
+
+@dataclass
+class RepoInsights:
+ """Accumulated insights for a repository."""
+
+ repo: str
+ patterns: List[str] = field(default_factory=list)
+ preferred_style: Dict[str, str] = field(default_factory=dict)
+ common_errors: List[str] = field(default_factory=list)
+ success_rate: float = 0.0
+ total_tasks: int = 0
+ successful_tasks: int = 0
+
+ def to_dict(self) -> Dict[str, Any]:
+ return {
+ "repo": self.repo,
+ "patterns": self.patterns,
+ "preferred_style": self.preferred_style,
+ "common_errors": self.common_errors,
+ "success_rate": self.success_rate,
+ "total_tasks": self.total_tasks,
+ "successful_tasks": self.successful_tasks,
+ }
+
+
+class LearningEngine:
+ """Learn from task execution outcomes and improve over time.
+
+ Usage::
+
+ engine = LearningEngine(storage_dir=Path("~/.gitpilot"))
+ evaluation = engine.evaluate_outcome(
+ task="Fix login bug",
+ result={"tests_passed": True, "pr_approved": True},
+ )
+ patterns = engine.extract_patterns(evaluation)
+ engine.update_strategies("owner/repo", patterns)
+ insights = engine.get_repo_insights("owner/repo")
+ """
+
+ def __init__(self, storage_dir: Optional[Path] = None) -> None:
+ self.storage_dir = storage_dir or (Path.home() / ".gitpilot")
+ self._learning_dir = self.storage_dir / LEARNING_DIR
+ self._learning_dir.mkdir(parents=True, exist_ok=True)
+
+ def evaluate_outcome(
+ self,
+ task: str,
+ result: Optional[Dict[str, Any]] = None,
+ ) -> Evaluation:
+ """Evaluate a task outcome based on result signals.
+
+ Checks for success signals like:
+ - tests_passed: True
+ - pr_approved: True
+ - error_fixed: True
+ - build_success: True
+ """
+ result = result or {}
+ success_signals = [
+ result.get("tests_passed", False),
+ result.get("pr_approved", False),
+ result.get("error_fixed", False),
+ result.get("build_success", False),
+ ]
+ explicit_success = result.get("success")
+
+ if explicit_success is not None:
+ success = bool(explicit_success)
+ else:
+ success = any(success_signals)
+
+ # Determine outcome type
+ if result.get("tests_passed"):
+ outcome_type = "tests_passed"
+ elif result.get("pr_approved"):
+ outcome_type = "pr_approved"
+ elif result.get("error_fixed"):
+ outcome_type = "error_fixed"
+ elif result.get("error"):
+ outcome_type = "error"
+ success = False
+ else:
+ outcome_type = "completed" if success else "unknown"
+
+ confidence = 0.9 if success else 0.6
+
+ return Evaluation(
+ task_description=task,
+ success=success,
+ outcome_type=outcome_type,
+ details=result.get("details", ""),
+ confidence=confidence,
+ )
+
+ def extract_patterns(self, evaluation: Evaluation) -> List[str]:
+ """Extract learnable patterns from an evaluation.
+
+ Generates natural-language patterns that can be injected
+ into future agent system prompts.
+ """
+ patterns = []
+
+ if evaluation.success:
+ patterns.append(
+ f"Task '{evaluation.task_description}' succeeded "
+ f"(outcome: {evaluation.outcome_type})"
+ )
+ if evaluation.outcome_type == "tests_passed":
+ patterns.append("Tests are available and should be run after changes")
+ if evaluation.outcome_type == "pr_approved":
+ patterns.append("PR workflow is active; create PRs for review")
+ else:
+ patterns.append(
+ f"Task '{evaluation.task_description}' failed "
+ f"(outcome: {evaluation.outcome_type})"
+ )
+ if evaluation.details:
+ patterns.append(f"Error context: {evaluation.details[:200]}")
+
+ return patterns
+
+ def update_strategies(self, repo: str, patterns: List[str]) -> None:
+ """Store learned patterns for a repository."""
+ repo_file = self._repo_path(repo)
+ data = self._load_repo_data(repo)
+
+ existing = set(data.get("patterns", []))
+ for p in patterns:
+ existing.add(p)
+
+ data["patterns"] = list(existing)[-MAX_INSIGHTS_PER_REPO:]
+ data["updated_at"] = datetime.now(timezone.utc).isoformat()
+ data.setdefault("total_tasks", 0)
+ data["total_tasks"] += 1
+
+ # Update success rate
+ if any("succeeded" in p for p in patterns):
+ data.setdefault("successful_tasks", 0)
+ data["successful_tasks"] += 1
+
+ total = data.get("total_tasks", 1)
+ successful = data.get("successful_tasks", 0)
+ data["success_rate"] = round(successful / total, 3) if total > 0 else 0.0
+
+ repo_file.write_text(json.dumps(data, indent=2))
+
+ def get_repo_insights(self, repo: str) -> RepoInsights:
+ """Get accumulated insights for a repository."""
+ data = self._load_repo_data(repo)
+ return RepoInsights(
+ repo=repo,
+ patterns=data.get("patterns", []),
+ preferred_style=data.get("preferred_style", {}),
+ common_errors=data.get("common_errors", []),
+ success_rate=data.get("success_rate", 0.0),
+ total_tasks=data.get("total_tasks", 0),
+ successful_tasks=data.get("successful_tasks", 0),
+ )
+
+ def record_error(self, repo: str, error: str) -> None:
+ """Record a common error pattern for a repo."""
+ data = self._load_repo_data(repo)
+ errors = data.setdefault("common_errors", [])
+ if error not in errors:
+ errors.append(error)
+ data["common_errors"] = errors[-50:] # Keep last 50
+ self._repo_path(repo).write_text(json.dumps(data, indent=2))
+
+ def set_preferred_style(self, repo: str, key: str, value: str) -> None:
+ """Set a preferred code style for a repo (e.g., indent: 4spaces)."""
+ data = self._load_repo_data(repo)
+ data.setdefault("preferred_style", {})[key] = value
+ self._repo_path(repo).write_text(json.dumps(data, indent=2))
+
+ def _repo_path(self, repo: str) -> Path:
+ safe_name = repo.replace("/", "__")
+ return self._learning_dir / f"{safe_name}.json"
+
+ def _load_repo_data(self, repo: str) -> Dict[str, Any]:
+ path = self._repo_path(repo)
+ if path.exists():
+ try:
+ return json.loads(path.read_text())
+ except Exception:
+ return {}
+ return {}
diff --git a/gitpilot/llm_provider.py b/gitpilot/llm_provider.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb4d2af5a56d0ad29a6586542982a9fe1c712957
--- /dev/null
+++ b/gitpilot/llm_provider.py
@@ -0,0 +1,338 @@
+from __future__ import annotations
+
+import logging
+import os
+from typing import TYPE_CHECKING, Any
+
+import httpx
+
+# LAZY IMPORT: `from crewai import LLM` pulls in litellm, chromadb, lancedb,
+# opentelemetry, onnxruntime, and ~180 other packages. Importing it at module
+# top-level adds 10-60s to every backend startup (especially on WSL).
+# We defer it into build_llm() so it only loads when a chat is actually sent.
+if TYPE_CHECKING:
+ from crewai import LLM # noqa: F401 — type hint only
+
+from gitpilot.models import ProviderHealth, ProviderSummary
+
+from .settings import LLMProvider, get_settings
+from .reasoning_normalizer import wrap_if_reasoning_model
+
+logger = logging.getLogger(__name__)
+
+
+def _wrap_llm(llm: Any, model: str) -> Any:
+    """Auto-wrap the LLM with ReasoningAwareLLM if the model is a reasoning
+    model (deepseek-r1, qwq, marco-o1, r1-distill, etc.).
+
+    This is the single point where reasoning-model normalization is applied.
+    For non-reasoning models this is a no-op — the original LLM is returned
+    unchanged with zero overhead.
+
+    The wrapper strips reasoning blocks (presumably ``<think>...</think>`` —
+    see reasoning_normalizer) from LLM responses before CrewAI's ReAct
+    parser sees them, preventing the common
+    "Invalid response from LLM call - None or empty" error.
+    """
+    return wrap_if_reasoning_model(llm, model)
+
+
+def build_llm() -> Any:
+    """Return an initialized CrewAI LLM using the active provider.
+
+    CrewAI is lazy-imported here to avoid loading ~180 packages (litellm,
+    chromadb, lancedb, opentelemetry, onnxruntime, etc.) at server startup.
+    First call adds 5-15s; subsequent calls are instant.
+
+    If the active model is a reasoning model (deepseek-r1, qwq, etc.),
+    the returned LLM is automatically wrapped with ReasoningAwareLLM
+    for CrewAI compatibility. For non-reasoning models, the original
+    LLM is returned unchanged.
+
+    Raises:
+        ValueError: if required credentials/config for the active provider
+            are missing, or the provider is unknown.
+    """
+    # LAZY IMPORT — see module-level comment for rationale
+    from crewai import LLM
+
+    settings = get_settings()
+    provider = settings.provider
+
+    if provider == LLMProvider.openai:
+        # Use settings config if available, otherwise fall back to env vars
+        api_key = settings.openai.api_key or os.getenv("OPENAI_API_KEY", "")
+        model = settings.openai.model or os.getenv("GITPILOT_OPENAI_MODEL", "gpt-4o-mini")
+        base_url = settings.openai.base_url or os.getenv("OPENAI_BASE_URL", "")
+
+        # Validate required credentials
+        if not api_key:
+            raise ValueError(
+                "OpenAI API key is required. "
+                "Configure it in Admin / LLM Settings or set OPENAI_API_KEY environment variable."
+            )
+
+        # Ensure model has provider prefix for CrewAI
+        if not model.startswith("openai/"):
+            model = f"openai/{model}"
+
+        return _wrap_llm(
+            LLM(
+                model=model,
+                api_key=api_key,
+                base_url=base_url if base_url else None,
+            ),
+            model,
+        )
+
+    if provider == LLMProvider.claude:
+        # Use settings config if available, otherwise fall back to env vars
+        api_key = settings.claude.api_key or os.getenv("ANTHROPIC_API_KEY", "")
+        model = settings.claude.model or os.getenv("GITPILOT_CLAUDE_MODEL", "claude-sonnet-4-5")
+        base_url = settings.claude.base_url or os.getenv("ANTHROPIC_BASE_URL", "")
+
+        # Validate required credentials
+        if not api_key:
+            raise ValueError(
+                "Claude API key is required. "
+                "Configure it in Admin / LLM Settings or set "
+                "ANTHROPIC_API_KEY environment variable."
+            )
+
+        # CRITICAL: Set API key as environment variable
+        # (required by CrewAI's native Anthropic provider)
+        # CrewAI's Anthropic integration checks for this env var internally
+        os.environ["ANTHROPIC_API_KEY"] = api_key
+
+        # Optional: Set base URL as environment variable if provided
+        if base_url:
+            os.environ["ANTHROPIC_BASE_URL"] = base_url
+
+        # Ensure model has provider prefix for CrewAI
+        if not model.startswith("anthropic/"):
+            model = f"anthropic/{model}"
+
+        return _wrap_llm(
+            LLM(
+                model=model,
+                api_key=api_key,
+                base_url=base_url if base_url else None,
+            ),
+            model,
+        )
+
+    if provider == LLMProvider.watsonx:
+        # Use settings config with proper watsonx.ai integration
+        api_key = settings.watsonx.api_key or os.getenv("WATSONX_API_KEY", "")
+        project_id = settings.watsonx.project_id or os.getenv("WATSONX_PROJECT_ID", "")
+        model = settings.watsonx.model_id or os.getenv(
+            "GITPILOT_WATSONX_MODEL",
+            "ibm/granite-3-8b-instruct",  # Default model (without prefix)
+        )
+        base_url = settings.watsonx.base_url or os.getenv(
+            "WATSONX_BASE_URL",
+            "https://us-south.ml.cloud.ibm.com",  # Default to US South
+        )
+
+        # Validate required credentials
+        if not api_key:
+            raise ValueError(
+                "Watsonx API key is required. "
+                "Configure it in Admin / LLM Settings or set WATSONX_API_KEY environment variable."
+            )
+        if not project_id:
+            raise ValueError(
+                "Watsonx project ID is required. "
+                "Configure it in Admin / LLM Settings or set "
+                "WATSONX_PROJECT_ID environment variable."
+            )
+
+        # CRITICAL: Set project ID as environment variable (required by watsonx.ai SDK)
+        os.environ["WATSONX_PROJECT_ID"] = project_id
+
+        # CRITICAL: Also set the base URL as WATSONX_URL (some integrations use this)
+        os.environ["WATSONX_URL"] = base_url
+
+        # Ensure model has provider prefix for CrewAI (watsonx/provider/model)
+        # Format: watsonx/ibm/granite-3-8b-instruct
+        if not model.startswith("watsonx/"):
+            model = f"watsonx/{model}"
+
+        # Create LLM with project_id parameter (required by watsonx.ai)
+        return _wrap_llm(
+            LLM(
+                model=model,
+                api_key=api_key,
+                base_url=base_url,
+                project_id=project_id,  # CRITICAL: required by the watsonx.ai SDK
+                temperature=0.3,  # Default temperature
+                max_tokens=1024,  # Default max tokens
+            ),
+            model,
+        )
+
+    if provider == LLMProvider.ollama:
+        # Use settings config if available, otherwise fall back to env vars
+        model = settings.ollama.model or os.getenv("GITPILOT_OLLAMA_MODEL", "llama3")
+        base_url = settings.ollama.base_url or os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")
+
+        # Validate required configuration
+        if not base_url:
+            raise ValueError(
+                "Ollama base URL is required. "
+                "Configure it in Admin / LLM Settings or set OLLAMA_BASE_URL environment variable."
+            )
+
+        # Ensure model has provider prefix for CrewAI
+        if not model.startswith("ollama/"):
+            model = f"ollama/{model}"
+
+        return _wrap_llm(LLM(model=model, base_url=base_url), model)
+
+    if provider == LLMProvider.ollabridge:
+        # OllaBridge / OllaBridge Cloud - OpenAI-compatible API
+        model = settings.ollabridge.model or os.getenv("GITPILOT_OLLABRIDGE_MODEL", "qwen2.5:1.5b")
+        base_url = settings.ollabridge.base_url or os.getenv("OLLABRIDGE_BASE_URL", "http://localhost:8000")
+        api_key = settings.ollabridge.api_key or os.getenv("OLLABRIDGE_API_KEY", "")
+
+        # Validate required configuration
+        if not base_url:
+            raise ValueError(
+                "OllaBridge base URL is required. "
+                "Configure it in Admin / LLM Settings or set "
+                "OLLABRIDGE_BASE_URL environment variable."
+            )
+
+        # OllaBridge exposes an OpenAI-compatible API at /v1/
+        # Use the openai/ prefix so CrewAI routes through the OpenAI adapter
+        if not model.startswith("openai/"):
+            model = f"openai/{model}"
+
+        ollabridge_api_base = f"{base_url.rstrip('/')}/v1"
+        # A placeholder key keeps the OpenAI client happy when auth is disabled.
+        ollabridge_key = api_key or "ollabridge"
+
+        # CRITICAL: Set environment variables so litellm/OpenAI client uses
+        # the remote OllaBridge URL instead of falling back to localhost.
+        # Without this, the openai/ prefix causes litellm to check OPENAI_API_BASE
+        # and default to localhost when it's not set.
+        # NOTE(review): this clobbers any real OPENAI_API_KEY/BASE in the
+        # process environment for the remainder of the run.
+        os.environ["OPENAI_API_KEY"] = ollabridge_key
+        os.environ["OPENAI_API_BASE"] = ollabridge_api_base
+
+        return _wrap_llm(
+            LLM(
+                model=model,
+                api_key=ollabridge_key,
+                base_url=ollabridge_api_base,
+            ),
+            model,
+        )
+
+    raise ValueError(f"Unsupported provider: {provider}")
+
+
+def validate_provider_config(settings) -> tuple[bool, list[str]]:
+ """Validate provider configuration and return (is_valid, errors)."""
+ errors = []
+ provider = settings.provider
+
+ if provider == LLMProvider.openai:
+ if not settings.openai.api_key:
+ errors.append("OpenAI API key is required")
+ elif provider == LLMProvider.claude:
+ if not settings.claude.api_key:
+ errors.append("Anthropic API key is required")
+ elif provider == LLMProvider.watsonx:
+ if not settings.watsonx.api_key:
+ errors.append("Watsonx API key is required")
+ if not settings.watsonx.project_id:
+ errors.append("Watsonx project ID is required")
+ elif provider == LLMProvider.ollama:
+ pass # Local, always valid
+ elif provider == LLMProvider.ollabridge:
+ pass # Local default, always valid
+
+ return (len(errors) == 0, errors)
+
+
+def get_effective_model(settings) -> str | None:
+ """Get the active model name for the current provider."""
+ provider = settings.provider
+ if provider == LLMProvider.openai:
+ return settings.openai.model
+ if provider == LLMProvider.claude:
+ return settings.claude.model
+ if provider == LLMProvider.watsonx:
+ return settings.watsonx.model_id
+ if provider == LLMProvider.ollama:
+ return settings.ollama.model
+ if provider == LLMProvider.ollabridge:
+ return settings.ollabridge.model
+ return None
+
+
+def _apply_health(summary: ProviderSummary, status_code: int) -> None:
+ """Set health and models_available from HTTP status code."""
+ ok = status_code == 200
+ summary.health = ProviderHealth.ok if ok else ProviderHealth.error
+ summary.models_available = ok
+
+
+async def test_provider_connection(settings) -> ProviderSummary:
+    """Probe the configured provider with a cheap read-only request.
+
+    Returns the settings' ProviderSummary with ``health`` and
+    ``models_available`` filled in. Network failures are folded into
+    ``summary.warning`` instead of raising.
+    """
+    summary = settings.get_provider_summary()
+    provider = settings.provider
+
+    try:
+        async with httpx.AsyncClient(timeout=10.0) as client:
+            if provider == LLMProvider.openai:
+                url = settings.openai.base_url or "https://api.openai.com"
+                resp = await client.get(
+                    f"{url}/v1/models",
+                    headers={"Authorization": f"Bearer {settings.openai.api_key}"},
+                )
+                _apply_health(summary, resp.status_code)
+
+            elif provider == LLMProvider.claude:
+                url = settings.claude.base_url or "https://api.anthropic.com"
+                headers = {
+                    "x-api-key": settings.claude.api_key,
+                    "anthropic-version": "2023-06-01",
+                }
+                resp = await client.get(f"{url}/v1/models", headers=headers)
+                _apply_health(summary, resp.status_code)
+
+            elif provider == LLMProvider.watsonx:
+                base = settings.watsonx.base_url or "https://us-south.ml.cloud.ibm.com"
+                # Cheap spec listing (limit=1) just to confirm auth works.
+                resp = await client.get(
+                    f"{base}/ml/v1/foundation_model_specs",
+                    params={"version": "2024-03-14", "limit": "1"},
+                    headers={"Authorization": f"Bearer {settings.watsonx.api_key}"},
+                )
+                _apply_health(summary, resp.status_code)
+
+            elif provider == LLMProvider.ollama:
+                base = settings.ollama.base_url or "http://127.0.0.1:11434"
+                resp = await client.get(f"{base}/api/tags")
+                _apply_health(summary, resp.status_code)
+
+            elif provider == LLMProvider.ollabridge:
+                base = settings.ollabridge.base_url or "http://127.0.0.1:8000"
+                base = base.rstrip("/")
+                # Users often paste the /v1 suffix; strip it and warn, since
+                # GitPilot appends /v1 itself.
+                if base.endswith("/v1"):
+                    base = base[:-3]
+                    summary.warning = (
+                        "Do not include /v1; GitPilot adds it automatically."
+                    )
+                api_key = settings.ollabridge.api_key or "ollabridge"
+                resp = await client.get(
+                    f"{base}/v1/models",
+                    headers={"Authorization": f"Bearer {api_key}"},
+                )
+                _apply_health(summary, resp.status_code)
+
+    except httpx.ConnectError:
+        summary.health = ProviderHealth.error
+        summary.warning = f"Cannot connect to {provider.value} server"
+    except httpx.TimeoutException:
+        # Timeout is a soft failure — the server may just be slow.
+        summary.health = ProviderHealth.warning
+        summary.warning = f"Connection to {provider.value} timed out"
+    except Exception as e:
+        summary.health = ProviderHealth.error
+        summary.warning = str(e)
+
+    return summary
diff --git a/gitpilot/local_tools.py b/gitpilot/local_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..05b18d216e76141222c79d655699ab0c1a09c3ef
--- /dev/null
+++ b/gitpilot/local_tools.py
@@ -0,0 +1,212 @@
+# gitpilot/local_tools.py
+"""CrewAI tools for local workspace file and shell operations.
+
+These tools give agents the ability to read, write, search, and navigate
+files on the local filesystem (within the sandboxed workspace directory),
+and to run shell commands like test suites, linters, and build scripts.
+"""
+import asyncio
+import concurrent.futures
+import json
+from typing import Optional
+
+from crewai.tools import tool
+
+from .workspace import WorkspaceManager, WorkspaceInfo
+from .terminal import TerminalExecutor, TerminalSession
+
+# Module-level singletons shared by every tool invocation.
+_ws_manager = WorkspaceManager()
+_executor = TerminalExecutor()
+# The workspace the tools currently operate on; set via set_active_workspace().
+_current_workspace: Optional[WorkspaceInfo] = None
+
+
+def set_active_workspace(ws: WorkspaceInfo):
+    """Install *ws* as the workspace every local tool operates on."""
+    global _current_workspace
+    _current_workspace = ws
+
+
+def get_active_workspace() -> Optional[WorkspaceInfo]:
+    """Return the currently active workspace, or None if none is set."""
+    return _current_workspace
+
+
+def _require_workspace() -> WorkspaceInfo:
+    """Return the active workspace or raise RuntimeError if none is set."""
+    if _current_workspace is None:
+        raise RuntimeError("No active workspace. Call set_active_workspace() first.")
+    return _current_workspace
+
+
+def _run_async(coro):
+    """Bridge sync CrewAI tools to async workspace/terminal calls.
+
+    If no event loop is running on this thread, just ``asyncio.run``.
+    If one IS running, ``asyncio.run`` on this thread would fail, so the
+    coroutine is executed on a dedicated helper thread instead.
+    """
+    try:
+        asyncio.get_running_loop()
+    except RuntimeError:
+        return asyncio.run(coro)
+    # If a loop is already running (CrewAI thread), use a thread pool
+    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
+        return pool.submit(asyncio.run, coro).result()
+
+
+# -----------------------------------------------------------------------
+# File operations
+# -----------------------------------------------------------------------
+
+@tool("Read local file")
+def read_local_file(file_path: str) -> str:
+ """Read a file from the local workspace. Returns the file content."""
+ ws = _require_workspace()
+ try:
+ content = _run_async(_ws_manager.read_file(ws, file_path))
+ return f"Content of {file_path}:\n---\n{content}\n---"
+ except Exception as e:
+ return f"Error reading {file_path}: {e}"
+
+
+@tool("Write local file")
+def write_local_file(file_path: str, content: str) -> str:
+ """Write content to a file in the local workspace. Creates parent directories."""
+ ws = _require_workspace()
+ try:
+ result = _run_async(_ws_manager.write_file(ws, file_path, content))
+ return f"Written {result['size']} bytes to {result['path']}"
+ except Exception as e:
+ return f"Error writing {file_path}: {e}"
+
+
+@tool("Delete local file")
+def delete_local_file(file_path: str) -> str:
+ """Delete a file from the local workspace."""
+ ws = _require_workspace()
+ try:
+ deleted = _run_async(_ws_manager.delete_file(ws, file_path))
+ return f"Deleted: {deleted}"
+ except Exception as e:
+ return f"Error deleting {file_path}: {e}"
+
+
+@tool("List local files")
+def list_local_files(directory: str = ".") -> str:
+ """List all tracked and untracked files in a directory."""
+ ws = _require_workspace()
+ try:
+ files = _run_async(_ws_manager.list_files(ws, directory))
+ return "\n".join(files) if files else "No files found."
+ except Exception as e:
+ return f"Error listing files: {e}"
+
+
+@tool("Search in files")
+def search_in_files(pattern: str, path: str = ".") -> str:
+ """Search for a text pattern across all files using git grep.
+ Returns matching lines with file paths and line numbers."""
+ ws = _require_workspace()
+ try:
+ matches = _run_async(_ws_manager.search_files(ws, pattern, path))
+ if not matches:
+ return "No matches found."
+ lines = [f"{m['file']}:{m['line']}: {m['content']}" for m in matches[:50]]
+ return "\n".join(lines)
+ except Exception as e:
+ return f"Error searching: {e}"
+
+
+# -----------------------------------------------------------------------
+# Git operations
+# -----------------------------------------------------------------------
+
+@tool("Git diff")
+def git_diff(staged: str = "false") -> str:
+ """Show the current git diff (unstaged changes by default)."""
+ ws = _require_workspace()
+ try:
+ return _run_async(_ws_manager.diff(ws, staged=staged.lower() == "true")) or "No changes."
+ except Exception as e:
+ return f"Error: {e}"
+
+
+@tool("Git status")
+def git_status() -> str:
+ """Show the current git status."""
+ ws = _require_workspace()
+ try:
+ status = _run_async(_ws_manager.status(ws))
+ return json.dumps(status, indent=2)
+ except Exception as e:
+ return f"Error: {e}"
+
+
+@tool("Git commit")
+def git_commit(message: str, files: str = "") -> str:
+ """Commit changes. Optionally specify files (comma-separated)."""
+ ws = _require_workspace()
+ try:
+ file_list = [f.strip() for f in files.split(",") if f.strip()] or None
+ result = _run_async(_ws_manager.commit(ws, message, file_list))
+ return json.dumps(result)
+ except Exception as e:
+ return f"Error: {e}"
+
+
+@tool("Git log")
+def git_log(count: str = "10") -> str:
+ """Show recent commit history."""
+ ws = _require_workspace()
+ try:
+ commits = _run_async(_ws_manager.log(ws, int(count)))
+ return json.dumps(commits, indent=2)
+ except Exception as e:
+ return f"Error: {e}"
+
+
+# -----------------------------------------------------------------------
+# Shell command execution
+# -----------------------------------------------------------------------
+
+@tool("Run shell command")
+def run_command(command: str, timeout: str = "120") -> str:
+ """Run a shell command in the workspace directory.
+ Returns stdout, stderr, and exit code.
+ Examples: 'npm test', 'python -m pytest', 'make build', 'ls -la'."""
+ ws = _require_workspace()
+ try:
+ session = TerminalSession(workspace_path=ws.path)
+ result = _run_async(_executor.execute(session, command, int(timeout)))
+ output = f"Exit code: {result.exit_code}\n"
+ if result.stdout:
+ output += f"--- stdout ---\n{result.stdout}\n"
+ if result.stderr:
+ output += f"--- stderr ---\n{result.stderr}\n"
+ if result.timed_out:
+ output += "WARNING: Command timed out\n"
+ if result.truncated:
+ output += "WARNING: Output was truncated\n"
+ return output
+ except PermissionError as e:
+ return f"Permission denied: {e}"
+ except Exception as e:
+ return f"Error: {e}"
+
+
+# -----------------------------------------------------------------------
+# Exports
+# -----------------------------------------------------------------------
+
+# File-manipulation tools (read/write/delete/list/search).
+LOCAL_FILE_TOOLS = [
+    read_local_file,
+    write_local_file,
+    delete_local_file,
+    list_local_files,
+    search_in_files,
+]
+
+# Git tools scoped to the active workspace.
+LOCAL_GIT_TOOLS = [
+    git_diff,
+    git_status,
+    git_commit,
+    git_log,
+]
+
+# Shell execution tools.
+LOCAL_SHELL_TOOLS = [
+    run_command,
+]
+
+# Convenience aggregate of every local tool.
+LOCAL_TOOLS = LOCAL_FILE_TOOLS + LOCAL_GIT_TOOLS + LOCAL_SHELL_TOOLS
diff --git a/gitpilot/mcp_client.py b/gitpilot/mcp_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c248d9fb58c0e818e270bc35e30b63fecb6cc06
--- /dev/null
+++ b/gitpilot/mcp_client.py
@@ -0,0 +1,341 @@
+# gitpilot/mcp_client.py
+"""Model Context Protocol (MCP) client for GitPilot.
+
+Connects to MCP servers (databases, Slack, Figma, Sentry, etc.) and
+exposes their tools to GitPilot agents. Supports three transport types:
+
+- **stdio** — launch a local subprocess and communicate via stdin/stdout
+- **http** — send JSON-RPC requests over HTTP
+- **sse** — Server-Sent Events streaming connection
+
+Configuration lives in ``.gitpilot/mcp.json``.
+"""
+from __future__ import annotations
+
+import asyncio
+import json
+import logging
+import os
+from dataclasses import dataclass, field
+from enum import Enum
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+import httpx
+
+logger = logging.getLogger(__name__)
+
# File name (inside .gitpilot/) that holds the MCP server definitions.
MCP_CONFIG_FILE = "mcp.json"
# JSON-RPC protocol version stamped on every outgoing MCP message.
MCP_JSONRPC_VERSION = "2.0"
+
+
class TransportType(str, Enum):
    """How the client communicates with an MCP server."""

    STDIO = "stdio"  # local subprocess over stdin/stdout
    HTTP = "http"    # JSON-RPC over HTTP POST
    SSE = "sse"      # Server-Sent Events (requests here are sent like HTTP)
+
+
@dataclass
class MCPServerConfig:
    """Configuration for a single MCP server."""

    name: str
    transport: TransportType
    # stdio
    command: Optional[str] = None
    args: List[str] = field(default_factory=list)
    env: Dict[str, str] = field(default_factory=dict)
    # http / sse
    url: Optional[str] = None
    headers: Dict[str, str] = field(default_factory=dict)
    # auth
    auth_token: Optional[str] = None

    @classmethod
    def from_dict(cls, data: dict) -> "MCPServerConfig":
        """Build a config from one mcp.json entry.

        ``$VAR`` references in env values, args, and the auth token are
        expanded from the process environment.
        """
        raw_transport = data.get("type", data.get("transport", "stdio"))
        expanded_env = {
            key: os.path.expandvars(value) if isinstance(value, str) else value
            for key, value in data.get("env", {}).items()
        }
        raw_token = data.get("auth_token")
        return cls(
            name=data["name"],
            transport=TransportType(raw_transport),
            command=data.get("command"),
            args=[os.path.expandvars(arg) for arg in data.get("args", [])],
            env=expanded_env,
            url=data.get("url"),
            headers=data.get("headers", {}),
            auth_token=os.path.expandvars(raw_token) if raw_token else None,
        )
+
+
@dataclass
class MCPTool:
    """A tool discovered from an MCP server."""

    name: str  # tool name as reported by tools/list
    description: str  # human-readable description (may be empty)
    input_schema: Dict[str, Any] = field(default_factory=dict)  # JSON Schema for the arguments
    server_name: str = ""  # name of the MCP server this tool came from
+
+
@dataclass
class MCPConnection:
    """An active connection to an MCP server."""

    config: MCPServerConfig
    tools: List[MCPTool] = field(default_factory=list)
    _process: Optional[asyncio.subprocess.Process] = field(default=None, repr=False)
    _request_id: int = field(default=0, repr=False)

    @property
    def is_alive(self) -> bool:
        """Whether the transport can still carry requests."""
        if self.config.transport != TransportType.STDIO:
            # HTTP/SSE open a fresh connection per request, so they are
            # always considered alive.
            return True
        proc = self._process
        return proc is not None and proc.returncode is None

    def next_id(self) -> int:
        """Return the next JSON-RPC request id (monotonically increasing)."""
        self._request_id = self._request_id + 1
        return self._request_id
+
+
class MCPClient:
    """Connect to MCP servers and call their tools.

    Usage::

        client = MCPClient()
        client.load_config(workspace / ".gitpilot")
        conn = await client.connect("postgres")
        tools = await client.list_tools(conn)
        result = await client.call_tool(conn, "query", {"sql": "SELECT 1"})
    """

    def __init__(self) -> None:
        # Parsed server configs, keyed by server name.
        self._configs: Dict[str, MCPServerConfig] = {}
        # Live connections, keyed by server name.
        self._connections: Dict[str, MCPConnection] = {}

    # ------------------------------------------------------------------
    # Configuration
    # ------------------------------------------------------------------

    def load_config(self, gitpilot_dir: Path) -> int:
        """Load MCP server configs from ``.gitpilot/mcp.json``. Returns count.

        Invalid entries are skipped individually so one malformed server
        definition does not discard the rest of the file.
        """
        config_path = gitpilot_dir / MCP_CONFIG_FILE
        if not config_path.exists():
            return 0
        try:
            data = json.loads(config_path.read_text())
        except Exception as e:
            logger.warning("Failed to load MCP config: %s", e)
            return 0
        servers = data if isinstance(data, list) else data.get("servers", [])
        loaded = 0
        for entry in servers:
            try:
                cfg = MCPServerConfig.from_dict(entry)
            except Exception as e:
                logger.warning("Skipping invalid MCP server entry: %s", e)
                continue
            self._configs[cfg.name] = cfg
            loaded += 1
        logger.info("Loaded %d MCP server configs", loaded)
        return loaded

    def add_server(self, config: MCPServerConfig) -> None:
        """Register (or replace) a server config programmatically."""
        self._configs[config.name] = config

    def list_servers(self) -> List[str]:
        """Names of all configured servers, connected or not."""
        return list(self._configs.keys())

    # ------------------------------------------------------------------
    # Connection management
    # ------------------------------------------------------------------

    async def connect(self, server_name: str) -> MCPConnection:
        """Connect to a named MCP server and discover its tools.

        Returns a cached connection when one is still alive. Raises
        ``ValueError`` if ``server_name`` was never configured.
        """
        cached = self._connections.get(server_name)
        if cached is not None and cached.is_alive:
            return cached

        config = self._configs.get(server_name)
        if not config:
            raise ValueError(f"Unknown MCP server: {server_name}")

        conn = MCPConnection(config=config)

        if config.transport == TransportType.STDIO:
            await self._connect_stdio(conn)

        try:
            # Discover tools via initialize + tools/list
            await self._initialize(conn)
            conn.tools = await self.list_tools(conn)
        except Exception:
            # Don't leak a half-initialized stdio subprocess on failure.
            if conn._process is not None and conn._process.returncode is None:
                conn._process.terminate()
                await conn._process.wait()
            raise

        self._connections[server_name] = conn
        logger.info("Connected to MCP server '%s' — %d tools", server_name, len(conn.tools))
        return conn

    async def disconnect(self, server_name: str) -> None:
        """Drop a connection; terminates its stdio subprocess if running."""
        conn = self._connections.pop(server_name, None)
        if conn and conn._process:
            if conn._process.returncode is None:
                try:
                    conn._process.terminate()
                except ProcessLookupError:
                    # Process exited between the returncode check and the signal.
                    pass
            await conn._process.wait()

    async def disconnect_all(self) -> None:
        """Disconnect every active connection."""
        for name in list(self._connections):
            await self.disconnect(name)

    # ------------------------------------------------------------------
    # Tool operations
    # ------------------------------------------------------------------

    async def list_tools(self, conn: MCPConnection) -> List[MCPTool]:
        """List tools available on the connected server."""
        result = await self._send_request(conn, "tools/list", {})
        tools = []
        for t in result.get("tools", []):
            tools.append(MCPTool(
                name=t["name"],
                description=t.get("description", ""),
                input_schema=t.get("inputSchema", {}),
                server_name=conn.config.name,
            ))
        return tools

    async def call_tool(
        self,
        conn: MCPConnection,
        tool_name: str,
        params: Optional[Dict[str, Any]] = None,
    ) -> Any:
        """Call a tool on the connected server.

        Returns the joined text parts when the server responds with text
        content, otherwise the raw result dict.
        """
        result = await self._send_request(conn, "tools/call", {
            "name": tool_name,
            "arguments": params or {},
        })
        # MCP returns content array; flatten text content
        content = result.get("content", [])
        texts = [c.get("text", "") for c in content if c.get("type") == "text"]
        return "\n".join(texts) if texts else result

    def to_crewai_tools(self, conn: MCPConnection) -> list:
        """Wrap MCP tools as CrewAI-compatible tool functions.

        Returns a list of callables decorated with ``@tool``.
        """
        from crewai.tools import tool as crewai_tool

        def _make_wrapper(mcp_tool: MCPTool):
            # A factory function is required here: the previous loop-body
            # closure captured the loop variables by reference (late
            # binding), so every wrapper ended up calling the LAST tool in
            # conn.tools. Binding per-tool state in this scope gives each
            # wrapper its own closure cell.
            name = mcp_tool.name
            desc = mcp_tool.description or f"MCP tool: {name}"

            @crewai_tool(name)
            def _wrapper(params: str = "{}") -> str:
                # Run the async call on a private event loop; this wrapper
                # is invoked synchronously by the agent framework.
                import asyncio as _aio
                loop = _aio.new_event_loop()
                try:
                    parsed = json.loads(params) if isinstance(params, str) else params
                    return str(loop.run_until_complete(
                        self.call_tool(conn, name, parsed)
                    ))
                finally:
                    loop.close()

            _wrapper.__doc__ = desc
            return _wrapper

        return [_make_wrapper(t) for t in conn.tools]

    # ------------------------------------------------------------------
    # Transport internals
    # ------------------------------------------------------------------

    async def _connect_stdio(self, conn: MCPConnection) -> None:
        """Spawn the configured subprocess with a merged environment."""
        config = conn.config
        if not config.command:
            raise ValueError(f"stdio server '{config.name}' requires a command")
        env = {**os.environ, **config.env}
        conn._process = await asyncio.create_subprocess_exec(
            config.command, *config.args,
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            env=env,
        )

    async def _initialize(self, conn: MCPConnection) -> Dict[str, Any]:
        """Perform the MCP ``initialize`` handshake."""
        return await self._send_request(conn, "initialize", {
            "protocolVersion": "2024-11-05",
            "capabilities": {},
            "clientInfo": {"name": "gitpilot", "version": "1.0"},
        })

    async def _send_request(
        self,
        conn: MCPConnection,
        method: str,
        params: Dict[str, Any],
    ) -> Dict[str, Any]:
        """Send a JSON-RPC request via the appropriate transport."""
        msg = {
            "jsonrpc": MCP_JSONRPC_VERSION,
            "id": conn.next_id(),
            "method": method,
            "params": params,
        }

        if conn.config.transport == TransportType.STDIO:
            return await self._send_stdio(conn, msg)
        # HTTP and SSE are both sent as plain HTTP POSTs here.
        return await self._send_http(conn, msg)

    async def _send_stdio(
        self, conn: MCPConnection, msg: dict,
    ) -> Dict[str, Any]:
        """Write one JSON line to the subprocess and read one line back."""
        proc = conn._process
        if not proc or not proc.stdin or not proc.stdout:
            raise RuntimeError(f"stdio process not running for '{conn.config.name}'")
        payload = json.dumps(msg) + "\n"
        proc.stdin.write(payload.encode())
        await proc.stdin.drain()
        line = await proc.stdout.readline()
        if not line:
            raise RuntimeError(f"No response from MCP server '{conn.config.name}'")
        resp = json.loads(line)
        if "error" in resp:
            raise RuntimeError(f"MCP error: {resp['error']}")
        return resp.get("result", {})

    async def _send_http(
        self, conn: MCPConnection, msg: dict,
    ) -> Dict[str, Any]:
        """POST the JSON-RPC message; raises RuntimeError on an MCP error."""
        url = conn.config.url
        if not url:
            raise ValueError(f"HTTP server '{conn.config.name}' requires a url")
        headers = {**conn.config.headers, "Content-Type": "application/json"}
        if conn.config.auth_token:
            headers["Authorization"] = f"Bearer {conn.config.auth_token}"
        async with httpx.AsyncClient(timeout=30) as client:
            resp = await client.post(url, json=msg, headers=headers)
            resp.raise_for_status()
            data = resp.json()
        if "error" in data:
            raise RuntimeError(f"MCP error: {data['error']}")
        return data.get("result", {})

    # ------------------------------------------------------------------
    # Serialisation
    # ------------------------------------------------------------------

    def to_dict(self) -> Dict[str, Any]:
        """JSON-safe summary of configured servers and connection state."""
        return {
            "servers": [
                {
                    "name": c.name,
                    "transport": c.transport.value,
                    "command": c.command,
                    "url": c.url,
                    "tools_count": len(self._connections[c.name].tools)
                    if c.name in self._connections else 0,
                    "connected": c.name in self._connections,
                }
                for c in self._configs.values()
            ]
        }
diff --git a/gitpilot/memory.py b/gitpilot/memory.py
new file mode 100644
index 0000000000000000000000000000000000000000..552e01e9d46096749c4cec4307616ff087f28589
--- /dev/null
+++ b/gitpilot/memory.py
@@ -0,0 +1,137 @@
+# gitpilot/memory.py
+"""Project context memory — the GITPILOT.md system.
+
+Loads project-specific conventions, rules, and context from:
+
+1. ``.gitpilot/GITPILOT.md`` — project root (committed to repo)
+2. ``.gitpilot/rules/*.md`` — modular rule files
+3. ``.gitpilot/memory.json`` — auto-learned patterns (local only)
+
+The combined context is injected into agent system prompts so they
+follow project conventions automatically.
+"""
+from __future__ import annotations
+
+import json
+import logging
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any, Dict, List
+
+logger = logging.getLogger(__name__)
+
# Conventions file (inside .gitpilot/), meant to be committed to the repo.
MEMORY_FILE = "GITPILOT.md"
# Directory of modular rule files: .gitpilot/rules/*.md.
RULES_DIR = "rules"
# Auto-learned patterns; per the module docstring this stays local only.
AUTO_MEMORY_FILE = "memory.json"
# Truncation caps so oversized files cannot blow up the agent prompt.
MAX_CONVENTIONS_CHARS = 10_000
MAX_RULE_CHARS = 5_000
# Upper bound on stored learned patterns (oldest entries are dropped).
MAX_PATTERNS = 100
+
+
@dataclass
class ProjectContext:
    """Combined project context for agent injection."""

    conventions: str = ""
    rules: List[str] = field(default_factory=list)
    auto_memory: Dict[str, Any] = field(default_factory=dict)

    def to_system_prompt(self) -> str:
        """Format as a system-prompt section to prepend to agent backstory."""
        sections: List[str] = []
        if self.conventions:
            sections.append("## Project Conventions\n\n" + self.conventions)
        if self.rules:
            joined_rules = "\n\n---\n\n".join(self.rules)
            sections.append(f"## Project Rules\n\n{joined_rules}")
        learned = self.auto_memory.get("patterns", [])
        if learned:
            bullet_lines = "\n".join(f"- {item}" for item in learned)
            sections.append(f"## Learned Patterns\n\n{bullet_lines}")
        return "\n\n".join(sections)

    @property
    def is_empty(self) -> bool:
        """True when no conventions, rules, or auto-memory were loaded."""
        return not (self.conventions or self.rules or self.auto_memory)
+
+
class MemoryManager:
    """Load and manage project-level context and conventions.

    All files live under ``<workspace>/.gitpilot/`` and are consistently
    read and written as UTF-8 (previously only some reads passed an
    explicit encoding).
    """

    def __init__(self, workspace_path: Path):
        self.workspace_path = workspace_path
        self.gitpilot_dir = workspace_path / ".gitpilot"

    def load_context(self) -> ProjectContext:
        """Assemble a ProjectContext from GITPILOT.md, rules/*.md, and
        memory.json. Missing or unreadable files are skipped so a single
        bad file never prevents the rest of the context from loading."""
        ctx = ProjectContext()

        # 1. GITPILOT.md — capped to protect the prompt budget
        md_path = self.gitpilot_dir / MEMORY_FILE
        if md_path.exists():
            try:
                ctx.conventions = md_path.read_text(encoding="utf-8")[
                    :MAX_CONVENTIONS_CHARS
                ]
            except OSError as e:
                logger.warning("Could not read %s: %s", md_path, e)

        # 2. rules/*.md — one unreadable rule file must not drop the rest
        rules_dir = self.gitpilot_dir / RULES_DIR
        if rules_dir.is_dir():
            for rule_file in sorted(rules_dir.glob("*.md")):
                try:
                    content = rule_file.read_text(encoding="utf-8")[:MAX_RULE_CHARS]
                except OSError as e:
                    logger.warning("Could not read rule file %s: %s", rule_file, e)
                    continue
                ctx.rules.append(f"### {rule_file.stem}\n\n{content}")

        # 3. auto-learned memory (best-effort; corrupt JSON is ignored)
        auto_path = self.gitpilot_dir / AUTO_MEMORY_FILE
        if auto_path.exists():
            try:
                ctx.auto_memory = json.loads(auto_path.read_text(encoding="utf-8"))
            except Exception:
                pass

        return ctx

    def save_auto_memory(self, memory: Dict[str, Any]):
        """Persist the auto-learned memory dict to memory.json."""
        self.gitpilot_dir.mkdir(parents=True, exist_ok=True)
        auto_path = self.gitpilot_dir / AUTO_MEMORY_FILE
        auto_path.write_text(json.dumps(memory, indent=2), encoding="utf-8")

    def add_learned_pattern(self, pattern: str):
        """Append a pattern, deduplicated and capped at MAX_PATTERNS."""
        auto_path = self.gitpilot_dir / AUTO_MEMORY_FILE
        memory: Dict[str, Any] = {}
        if auto_path.exists():
            try:
                memory = json.loads(auto_path.read_text(encoding="utf-8"))
            except Exception:
                # Corrupt memory file: start over rather than crash.
                pass
        patterns = memory.setdefault("patterns", [])
        if pattern not in patterns:
            patterns.append(pattern)
        # Keep only the most recent MAX_PATTERNS entries.
        memory["patterns"] = patterns[-MAX_PATTERNS:]
        self.save_auto_memory(memory)

    def init_project(self) -> Path:
        """Create .gitpilot/ with template GITPILOT.md. Returns path.

        Existing GITPILOT.md content is never overwritten.
        """
        self.gitpilot_dir.mkdir(parents=True, exist_ok=True)
        md_path = self.gitpilot_dir / MEMORY_FILE
        if not md_path.exists():
            md_path.write_text(
                "# GitPilot Project Conventions\n\n"
                "\n"
                "\n\n"
                "## Code Style\n\n\n"
                "## Testing\n\n\n"
                "## Commit Messages\n\n\n",
                encoding="utf-8",
            )
        (self.gitpilot_dir / RULES_DIR).mkdir(exist_ok=True)
        return md_path

    def get_conventions_text(self) -> str:
        """Return the raw GITPILOT.md text, or "" when the file is absent."""
        md_path = self.gitpilot_dir / MEMORY_FILE
        if md_path.exists():
            return md_path.read_text(encoding="utf-8")
        return ""

    def set_conventions_text(self, text: str):
        """Overwrite GITPILOT.md with ``text``, creating .gitpilot/ if needed."""
        self.gitpilot_dir.mkdir(parents=True, exist_ok=True)
        md_path = self.gitpilot_dir / MEMORY_FILE
        md_path.write_text(text, encoding="utf-8")
diff --git a/gitpilot/model_catalog.py b/gitpilot/model_catalog.py
new file mode 100644
index 0000000000000000000000000000000000000000..0656298644852fc50180f366fd0ae4e182b8f9f7
--- /dev/null
+++ b/gitpilot/model_catalog.py
@@ -0,0 +1,197 @@
+# gitpilot/model_catalog.py
+from __future__ import annotations
+
+import os
+from datetime import datetime
+from typing import List, Tuple, Optional, Dict, Any
+
+import requests
+
+from .settings import AppSettings, LLMProvider, get_settings
+
+# --- Watsonx.ai config (public endpoint, no key needed for IBM-managed models) ---
+
# Major public Watsonx regions queried when aggregating model specs.
WATSONX_BASE_URLS = [
    "https://us-south.ml.cloud.ibm.com",
    "https://eu-de.ml.cloud.ibm.com",
    "https://jp-tok.ml.cloud.ibm.com",
    "https://au-syd.ml.cloud.ibm.com",
]

# Public foundation-model specs endpoint (no API key required).
WATSONX_ENDPOINT = "/ml/v1/foundation_model_specs"
WATSONX_PARAMS = {
    "version": "2024-09-16",
    # Server-side filter: exclude embedding models and withdrawn models.
    "filters": "!function_embedding,!lifecycle_withdrawn",
}
# NOTE(review): evaluated once at import time — a long-running process keeps
# comparing lifecycle dates against this stale snapshot.
TODAY = datetime.today().strftime("%Y-%m-%d")
+
+
+def _is_deprecated_or_withdrawn(lifecycle: List[Dict[str, Any]]) -> bool:
+ """Return True if a model lifecycle includes a deprecated/withdrawn item active today."""
+ for entry in lifecycle:
+ if entry.get("id") in {"deprecated", "withdrawn"} and entry.get("start_date", "") <= TODAY:
+ return True
+ return False
+
+
+# --- Provider-specific listing functions --------------------------------------
+
+
def _list_openai_models(settings: AppSettings) -> Tuple[List[str], Optional[str]]:
    """
    Use OpenAI /v1/models endpoint to list models available to the configured key.
    Requires OPENAI_API_KEY or settings.openai.api_key.
    """
    api_key = settings.openai.api_key or os.getenv("OPENAI_API_KEY")
    if not api_key:
        return [], "OpenAI API key not configured"

    base = os.getenv("OPENAI_BASE_URL", "https://api.openai.com").rstrip("/")
    endpoint = f"{base}/v1/models"

    try:
        response = requests.get(
            endpoint,
            headers={"Authorization": f"Bearer {api_key}"},
            timeout=10,
        )
        response.raise_for_status()
        entries = response.json().get("data", [])
        # Deduplicate via a set, drop empty ids, return sorted.
        ids = {entry.get("id", "") for entry in entries if entry.get("id")}
        return sorted(ids), None
    except Exception as e:
        return [], f"Error listing OpenAI models: {e}"
+
+
def _list_claude_models(settings: AppSettings) -> Tuple[List[str], Optional[str]]:
    """
    Use Anthropic /v1/models endpoint to list Claude models available to the key.
    Requires ANTHROPIC_API_KEY or settings.claude.api_key.
    """
    api_key = settings.claude.api_key or os.getenv("ANTHROPIC_API_KEY")
    if not api_key:
        return [], "Claude (Anthropic) API key not configured"

    base = os.getenv("ANTHROPIC_BASE_URL", "https://api.anthropic.com").rstrip("/")
    endpoint = f"{base}/v1/models"
    request_headers = {
        "x-api-key": api_key,
        # Anthropic requires an explicit API version header.
        "anthropic-version": os.getenv("ANTHROPIC_VERSION", "2023-06-01"),
    }

    try:
        response = requests.get(endpoint, headers=request_headers, timeout=10)
        response.raise_for_status()
        entries = response.json().get("data", [])
        ids = {entry.get("id", "") for entry in entries if entry.get("id")}
        return sorted(ids), None
    except Exception as e:
        return [], f"Error listing Claude models: {e}"
+
+
def _list_watsonx_models(settings: AppSettings) -> Tuple[List[str], Optional[str]]:
    """
    List foundation models from Watsonx public specs endpoint.
    No API key required for IBM-managed models.
    Returns a unique sorted list of model_id's across major regions.
    """
    discovered: set = set()

    for region_base in WATSONX_BASE_URLS:
        endpoint = f"{region_base}{WATSONX_ENDPOINT}"
        try:
            response = requests.get(endpoint, params=WATSONX_PARAMS, timeout=10)
            response.raise_for_status()
            for spec in response.json().get("resources", []):
                if _is_deprecated_or_withdrawn(spec.get("lifecycle", [])):
                    continue
                model_id = spec.get("model_id")
                if model_id:
                    discovered.add(model_id)
        except Exception:
            # Just skip this region on error
            continue

    if not discovered:
        return [], "No Watsonx models found (public specs call failed for all regions?)"

    return sorted(discovered), None
+
+
def _list_ollama_models(settings: AppSettings) -> Tuple[List[str], Optional[str]]:
    """
    List models from a local/remote Ollama server via /api/tags.
    """
    configured = getattr(settings.ollama, "base_url", None)
    base_url = configured or os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")
    endpoint = f"{base_url.rstrip('/')}/api/tags"

    try:
        response = requests.get(endpoint, timeout=5)
        response.raise_for_status()
        entries = response.json().get("models", [])
        names = {entry.get("name", "") for entry in entries if entry.get("name")}
        return sorted(names), None
    except Exception as e:
        return [], f"Error listing Ollama models from {endpoint}: {e}"
+
+
def _list_ollabridge_models(settings: AppSettings) -> Tuple[List[str], Optional[str]]:
    """
    List models from an OllaBridge / OllaBridge Cloud instance via /v1/models.
    Uses the OpenAI-compatible endpoint.
    """
    base_url = getattr(settings.ollabridge, "base_url", None) or os.getenv(
        "OLLABRIDGE_BASE_URL", "http://localhost:8000"
    )
    api_key = getattr(settings.ollabridge, "api_key", None) or os.getenv("OLLABRIDGE_API_KEY", "")
    endpoint = f"{base_url.rstrip('/')}/v1/models"

    # Bearer auth is optional; anonymous instances work without a key.
    request_headers: Dict[str, str] = {"Authorization": f"Bearer {api_key}"} if api_key else {}

    try:
        response = requests.get(endpoint, headers=request_headers, timeout=10)
        response.raise_for_status()
        entries = response.json().get("data", [])
        ids = {entry.get("id", "") for entry in entries if entry.get("id")}
        return sorted(ids), None
    except Exception as e:
        return [], f"Error listing OllaBridge models from {endpoint}: {e}"
+
+
+# --- Public helper ------------------------------------------------------------
+
+
def list_models_for_provider(
    provider: LLMProvider,
    settings: Optional[AppSettings] = None,
) -> Tuple[List[str], Optional[str]]:
    """
    Return (models, error) for a given provider.

    models: list of strings (model IDs / names)
    error: human-readable error if something went wrong, otherwise None
    """
    if settings is None:
        settings = get_settings()

    # Dispatch table keyed by provider enum member.
    handlers = {
        LLMProvider.openai: _list_openai_models,
        LLMProvider.claude: _list_claude_models,
        LLMProvider.watsonx: _list_watsonx_models,
        LLMProvider.ollama: _list_ollama_models,
        LLMProvider.ollabridge: _list_ollabridge_models,
    }
    handler = handlers.get(provider)
    if handler is None:
        return [], f"Unsupported provider: {provider}"
    return handler(settings)
diff --git a/gitpilot/models.py b/gitpilot/models.py
new file mode 100644
index 0000000000000000000000000000000000000000..9c86d3db4ff1723b62aa3ad18f695719753c0a80
--- /dev/null
+++ b/gitpilot/models.py
@@ -0,0 +1,279 @@
+"""
+GitPilot Redesign — Shared Models & Schemas
+Centralized Pydantic models for the redesigned API contract.
+"""
+
+from enum import StrEnum
+from typing import Any, Literal
+
+from pydantic import BaseModel, Field
+
+
+# ─── Enums ────────────────────────────────────────────────
+
class WorkspaceMode(StrEnum):
    """How a workspace is attached: plain folder, local git repo, or GitHub."""
    folder = "folder"
    local_git = "local_git"
    github = "github"
+
+
class ProviderName(StrEnum):
    """LLM providers GitPilot can route requests to."""
    openai = "openai"
    claude = "claude"
    watsonx = "watsonx"
    ollama = "ollama"
    ollabridge = "ollabridge"
+
+
class ProviderHealth(StrEnum):
    """Coarse health state of a provider as reported by status checks."""
    ok = "ok"
    warning = "warning"
    error = "error"
    unknown = "unknown"
+
+
class ProviderConnectionType(StrEnum):
    """How a provider endpoint is reached/authenticated."""
    local = "local"
    api_key = "api_key"
    pairing = "pairing"
    cloud = "cloud"
    managed = "managed"
+
+
class SessionMode(StrEnum):
    """Session attachment mode.

    NOTE(review): values duplicate WorkspaceMode exactly — confirm whether
    keeping two identical enums is intentional.
    """
    folder = "folder"
    local_git = "local_git"
    github = "github"
+
+
+# ─── Provider Models ─────────────────────────────────────
+
class ProviderSummary(BaseModel):
    """Summary of the active LLM provider's configuration and health."""
    configured: bool = False
    name: ProviderName = ProviderName.ollama
    source: Literal[".env", "settings", "unknown"] = "unknown"  # where the config came from
    model: str | None = None
    base_url: str | None = None
    connection_type: ProviderConnectionType | None = None
    has_api_key: bool = False
    health: ProviderHealth | None = ProviderHealth.unknown
    models_available: bool | None = None
    warning: str | None = None
+
+
class ProviderStatusResponse(BaseModel):
    """Provider status payload.

    NOTE(review): field-for-field the same as ProviderSummary except that
    ``configured`` and ``name`` are required here — confirm the duplication
    is intentional.
    """
    configured: bool
    name: ProviderName
    source: Literal[".env", "settings", "unknown"] = "unknown"
    model: str | None = None
    base_url: str | None = None
    connection_type: ProviderConnectionType | None = None
    has_api_key: bool = False
    health: ProviderHealth | None = ProviderHealth.unknown
    models_available: bool | None = None
    warning: str | None = None
+
+
+# ─── Workspace Models ────────────────────────────────────
+
class WorkspaceCapabilitySummary(BaseModel):
    """Which workspace modes are currently available to the client."""
    folder_mode_available: bool = False
    local_git_available: bool = False
    github_mode_available: bool = False
+
+
class WorkspaceSummary(BaseModel):
    """Snapshot of the currently open folder and any detected git repo."""
    folder_open: bool = False
    folder_path: str | None = None
    folder_name: str | None = None
    git_detected: bool = False
    repo_root: str | None = None
    repo_name: str | None = None
    branch: str | None = None
    remotes: list[str] = Field(default_factory=list)  # remote names, e.g. "origin"
+
+
+# ─── GitHub Models ────────────────────────────────────────
+
class GithubStatusSummary(BaseModel):
    """GitHub connection and token state."""
    connected: bool = False
    token_configured: bool = False
    username: str | None = None
+
+
+# ─── Status Response ─────────────────────────────────────
+
class StatusResponse(BaseModel):
    """Top-level status response: server, provider, workspace, and GitHub."""
    server_ready: bool = True
    provider: ProviderStatusResponse
    workspace: WorkspaceCapabilitySummary
    github: GithubStatusSummary
+
+
+# ─── Session Models ──────────────────────────────────────
+
class StartSessionRequest(BaseModel):
    """Request to start a session; which optional fields apply depends on ``mode``."""
    mode: WorkspaceMode
    folder_path: str | None = None
    repo_root: str | None = None
    repo_full_name: str | None = None  # e.g. "owner/repo" for GitHub mode
    branch: str | None = None
+
+
class StartSessionResponse(BaseModel):
    """Descriptor of a newly created session returned to the client."""
    session_id: str
    mode: WorkspaceMode
    title: str
    status: Literal["active"] = "active"
    folder_path: str | None = None
    repo_root: str | None = None
    repo_full_name: str | None = None
    branch: str | None = None
+
+
+# ─── Chat / Task Models ──────────────────────────────────
+
class PlanStepSummary(BaseModel):
    """One step of a generated plan."""
    step: int  # 1-based position in the plan
    title: str
    action: str
    file: str | None = None
    description: str
    status: Literal["pending", "ready", "applied", "failed"] | None = "pending"
+
+
class PlanSummary(BaseModel):
    """A generated plan: goal, summary, and ordered steps."""
    goal: str
    summary: str
    steps: list[PlanStepSummary]
+
+
class FileReference(BaseModel):
    """Reference to a file, optionally pinned to a specific line."""
    path: str
    line: int | None = None
+
+
class FileTreeEntry(BaseModel):
    """A single entry in a file-tree listing."""
    path: str
    type: Literal["file", "dir"]
+
+
class FileInScope(BaseModel):
    """A file judged relevant to the current task, with optional rationale."""
    path: str
    reason: str | None = None
    confidence: Literal["low", "medium", "high"] | None = None
+
+
class ProposedEdit(BaseModel):
    """An edit proposed for a file: create, whole-file replace, or patch."""
    file: str
    kind: Literal["create", "replace", "patch"]
    summary: str | None = None
    diff: str | None = None  # unified diff for kind="patch"? — confirm with producer
    content: str | None = None  # full file content for create/replace? — confirm with producer
+
+
class StructuredProjectContext(BaseModel):
    """Project-level context supplied by the client.

    NOTE(review): camelCase field names presumably mirror the client-side
    JSON payload — confirm before renaming to snake_case.
    """
    mode: str | None = None
    workspaceRoot: str | None = None
    repoRoot: str | None = None
    repoName: str | None = None
    branch: str | None = None
    languages: list[str] = Field(default_factory=list)
    manifests: list[str] = Field(default_factory=list)
    keyFiles: list[str] = Field(default_factory=list)
    readmePreview: str | None = None
    treeSummary: list[FileTreeEntry] = Field(default_factory=list)
    indexedAt: str | None = None
+
+
class StructuredWorkingSet(BaseModel):
    """Editor working-set context (current file, tabs, related files)."""
    currentFile: str | None = None
    languageId: str | None = None
    currentSelection: str | None = None
    openTabs: list[str] = Field(default_factory=list)
    recentFiles: list[str] = Field(default_factory=list)
    relatedFiles: list[str] = Field(default_factory=list)
+
+
class StructuredTaskContext(BaseModel):
    """High-level framing of the task, as supplied by the client."""
    intent: str | None = None
    scope: Literal["workspace", "selection", "file"] | None = None
    summary: str | None = None
+
+
class ChatMessageRequest(BaseModel):
    """Chat message sent to the backend with optional structured context."""
    session_id: str
    message: str
    scope: Literal["workspace", "selection", "file"] = "workspace"
    topology_id: str | None = None
    intent: str | None = None
    project_context: StructuredProjectContext | None = None
    working_set: StructuredWorkingSet | None = None
    task_context: StructuredTaskContext | None = None
+
+
class ChatMessageResponse(BaseModel):
    """Backend answer plus any plan, file scope, proposed edits, and references.

    NOTE(review): ``filesInScope`` is camelCase while siblings are
    snake_case — presumably to match the client wire format; confirm.
    """
    session_id: str
    answer: str
    message_id: str | None = None
    plan: PlanSummary | None = None
    filesInScope: list[FileInScope] = Field(default_factory=list)
    edits: list[ProposedEdit] = Field(default_factory=list)
    references: list[FileReference] = Field(default_factory=list)
+
+
+# ─── Provider Test Models ────────────────────────────────
+
class OpenAIProviderInput(BaseModel):
    """Credentials/overrides for testing the OpenAI provider."""
    api_key: str | None = None
    base_url: str | None = None
    model: str | None = None
+
+
class ClaudeProviderInput(BaseModel):
    """Credentials/overrides for testing the Claude (Anthropic) provider."""
    api_key: str | None = None
    base_url: str | None = None
    model: str | None = None
+
+
class WatsonxProviderInput(BaseModel):
    """Credentials/overrides for testing the IBM watsonx.ai provider."""
    api_key: str | None = None
    project_id: str | None = None
    base_url: str | None = None
    model_id: str | None = None
+
+
class OllamaProviderInput(BaseModel):
    """Connection overrides for testing an Ollama server."""
    base_url: str | None = None
    model: str | None = None
+
+
class OllaBridgeProviderInput(BaseModel):
    """Connection overrides for testing an OllaBridge instance."""
    base_url: str | None = None
    model: str | None = None
    api_key: str | None = None
    connection_type: ProviderConnectionType | None = None
+
+
class ProviderTestRequest(BaseModel):
    """Provider connectivity test request.

    ``provider`` names the provider under test; the per-provider input
    blocks are all optional.
    """
    provider: ProviderName
    openai: OpenAIProviderInput | None = None
    claude: ClaudeProviderInput | None = None
    watsonx: WatsonxProviderInput | None = None
    ollama: OllamaProviderInput | None = None
    ollabridge: OllaBridgeProviderInput | None = None
+
+
class ProviderTestResponse(ProviderStatusResponse):
    """Provider test result: all status fields plus free-form details."""
    details: str | None = None
+
+
+# ─── OllaBridge Health ────────────────────────────────────
+
class OllaBridgeHealthResponse(BaseModel):
    """Health probe result for an OllaBridge endpoint."""
    status: Literal["ok", "error"]
    base_url: str
    effective_api_base: str  # resolved API base actually probed
    models_available: bool = False
    auth_mode: str = "unknown"
    warning: str | None = None
\ No newline at end of file
diff --git a/gitpilot/nl_database.py b/gitpilot/nl_database.py
new file mode 100644
index 0000000000000000000000000000000000000000..6684c52b8ae905a0a614a0f64cf043035ea63249
--- /dev/null
+++ b/gitpilot/nl_database.py
@@ -0,0 +1,381 @@
+# gitpilot/nl_database.py
+"""Natural language database queries via MCP.
+
+Translates plain-English questions into SQL (or other query languages),
+executes them through an MCP database server connection, and returns
+human-readable results.
+
+Architecture::
+
+ User question
+ │
+ ▼
+ NLQueryEngine.ask()
+ │
+ ├─► schema_context() — fetch table/collection metadata
+ ├─► translate() — NL → SQL via LLM or rule-based
+ ├─► validate_query() — safety checks (no DROP, DELETE without WHERE, etc.)
+ ├─► execute() — run via MCP call_tool
+ └─► format_response() — tabular or narrative answer
+
+Inspired by:
+- C3 SQL (2023): zero-shot text-to-SQL with calibrated confidence
+- DIN-SQL (2023): decomposed in-context learning for text-to-SQL
+- BIRD benchmark (2023): bridging text-to-SQL with real-world databases
+
+Security: Queries are validated before execution. Destructive statements
+(DROP, TRUNCATE, ALTER, DELETE without WHERE) are blocked by default.
+"""
+from __future__ import annotations
+
+import re
+from dataclasses import dataclass, field
+from enum import Enum
+from typing import Any, Dict, List, Optional
+
+
+# ---------------------------------------------------------------------------
+# Enums & data models
+# ---------------------------------------------------------------------------
+
class QueryDialect(str, Enum):
    """SQL dialect a query targets; values are stable wire-format strings."""

    POSTGRESQL = "postgresql"
    MYSQL = "mysql"
    SQLITE = "sqlite"
    GENERIC_SQL = "sql"  # dialect-agnostic fallback
+
+
class SafetyLevel(str, Enum):
    """How much a generated query may mutate the database.

    Enforced by ``NLQueryEngine.validate_query`` before execution.
    """

    READ_ONLY = "read_only"  # SELECT only
    READ_WRITE = "read_write"  # SELECT, INSERT, UPDATE (with WHERE)
    UNRESTRICTED = "unrestricted"  # All statements (use with caution)
+
+
@dataclass
class TableSchema:
    """Metadata about a database table (name, columns, key, size)."""

    name: str
    columns: List[Dict[str, str]] = field(default_factory=list)  # [{"name": ..., "type": ...}]
    primary_key: Optional[str] = None
    row_count: Optional[int] = None
    description: str = ""

    def to_prompt_text(self) -> str:
        """Render this table as a single schema line suitable for an LLM prompt."""
        # Missing column types default to TEXT.
        rendered_columns = [
            "{} {}".format(col["name"], col.get("type", "TEXT")) for col in self.columns
        ]
        suffix = ""
        if self.primary_key:
            suffix = f" PK={self.primary_key}"
        return f"TABLE {self.name} ({', '.join(rendered_columns)}){suffix}"
+
+
+@dataclass
+class QueryResult:
+ """Result of a natural language database query."""
+
+ original_question: str
+ generated_sql: str
+ dialect: QueryDialect
+ rows: List[Dict[str, Any]] = field(default_factory=list)
+ columns: List[str] = field(default_factory=list)
+ row_count: int = 0
+ explanation: str = ""
+ error: Optional[str] = None
+
+ def to_dict(self) -> Dict[str, Any]:
+ return {
+ "original_question": self.original_question,
+ "generated_sql": self.generated_sql,
+ "dialect": self.dialect.value,
+ "rows": self.rows[:100], # Limit for API responses
+ "columns": self.columns,
+ "row_count": self.row_count,
+ "explanation": self.explanation,
+ "error": self.error,
+ }
+
+ def to_table_string(self, max_rows: int = 20) -> str:
+ """Format as a plain-text table for CLI output."""
+ if self.error:
+ return f"Error: {self.error}"
+ if not self.rows:
+ return "No results."
+
+ headers = self.columns or list(self.rows[0].keys())
+ col_widths = [len(h) for h in headers]
+ display_rows = self.rows[:max_rows]
+
+ for row in display_rows:
+ for i, h in enumerate(headers):
+ val = str(row.get(h, ""))
+ col_widths[i] = max(col_widths[i], min(len(val), 40))
+
+ sep = "+-" + "-+-".join("-" * w for w in col_widths) + "-+"
+ header_line = "| " + " | ".join(
+ h.ljust(col_widths[i]) for i, h in enumerate(headers)
+ ) + " |"
+
+ lines = [sep, header_line, sep]
+ for row in display_rows:
+ vals = []
+ for i, h in enumerate(headers):
+ v = str(row.get(h, ""))[:40]
+ vals.append(v.ljust(col_widths[i]))
+ lines.append("| " + " | ".join(vals) + " |")
+ lines.append(sep)
+
+ if self.row_count > max_rows:
+ lines.append(f"... {self.row_count - max_rows} more rows")
+
+ return "\n".join(lines)
+
+
+# ---------------------------------------------------------------------------
+# SQL translation helpers
+# ---------------------------------------------------------------------------
+
+_BLOCKED_PATTERNS_READ_ONLY = [
+ r"\b(?:INSERT|UPDATE|DELETE|DROP|ALTER|TRUNCATE|CREATE|GRANT|REVOKE)\b",
+]
+
+_BLOCKED_PATTERNS_READ_WRITE = [
+ r"\b(?:DROP|ALTER|TRUNCATE|CREATE|GRANT|REVOKE)\b",
+ r"\bDELETE\b(?!.*\bWHERE\b)", # DELETE without WHERE
+]
+
+_NL_TO_SQL_MAPPINGS = [
+ # Pattern → SQL fragment mappings for simple rule-based fallback
+ (r"(?i)\bhow many\b.*\b(\w+)\b", "SELECT COUNT(*) FROM {table}"),
+ (r"(?i)\blist\s+(?:all\s+)?(\w+)\b", "SELECT * FROM {table} LIMIT 100"),
+ (r"(?i)\bshow\s+(?:all\s+)?(\w+)\b", "SELECT * FROM {table} LIMIT 100"),
+ (r"(?i)\baverage\b.*\b(\w+)\b.*\bof\b\s+(\w+)", "SELECT AVG({column}) FROM {table}"),
+ (r"(?i)\btop\s+(\d+)\b.*\b(\w+)\b", "SELECT * FROM {table} LIMIT {limit}"),
+]
+
+
class NLQueryEngine:
    """Translate natural language to SQL and execute via MCP.

    Usage::

        engine = NLQueryEngine(dialect=QueryDialect.POSTGRESQL)
        engine.set_schema([table1, table2])

        # Translate only
        sql = engine.translate("How many users signed up this month?")

        # Full pipeline
        result = await engine.ask("Show top 10 customers by revenue")
        print(result.to_table_string())
    """

    def __init__(
        self,
        dialect: QueryDialect = QueryDialect.POSTGRESQL,
        safety_level: SafetyLevel = SafetyLevel.READ_ONLY,
        mcp_client: Optional[Any] = None,
        mcp_tool_name: str = "query",
    ) -> None:
        # mcp_client is duck-typed: anything exposing an async
        # ``call_tool(name, arguments)`` method works (see execute()).
        self.dialect = dialect
        self.safety_level = safety_level
        self.mcp_client = mcp_client
        self.mcp_tool_name = mcp_tool_name
        self._schema: List[TableSchema] = []

    # --- Schema management ------------------------------------------------

    def set_schema(self, tables: List[TableSchema]) -> None:
        """Set the database schema for context-aware translation."""
        self._schema = tables

    def add_table(self, table: TableSchema) -> None:
        """Add a single table to the schema."""
        self._schema.append(table)

    def get_schema_context(self) -> str:
        """Build a prompt-friendly schema description."""
        if not self._schema:
            return "No schema information available."
        return "\n".join(t.to_prompt_text() for t in self._schema)

    def get_table_names(self) -> List[str]:
        """Return list of known table names."""
        return [t.name for t in self._schema]

    # --- Translation ------------------------------------------------------

    def translate(self, question: str) -> str:
        """Translate natural language to SQL.

        Uses rule-based matching as a deterministic fallback.
        In production, this would call an LLM with the schema context.
        """
        table_names = self.get_table_names()
        question_lower = question.lower()

        # Try to find a matching table name in the question
        matched_table = ""
        for name in table_names:
            if name.lower() in question_lower:
                matched_table = name
                break

        # If no exact match, try a singular heuristic.  Strip exactly ONE
        # trailing 's' (the previous rstrip("s") mangled names such as
        # "address" -> "addre" and could yield "", which matches anything).
        if not matched_table:
            for name in table_names:
                lowered = name.lower()
                singular = lowered[:-1] if lowered.endswith("s") else lowered
                if singular and singular in question_lower:
                    matched_table = name
                    break

        if not matched_table and table_names:
            matched_table = table_names[0]

        # Rule-based translation.  Fill {limit}/{column} from the captures
        # first and substitute {table} last: previously {table} was replaced
        # up front, which made the "second capture names the table" branch
        # (for "average X of Y") unreachable dead code.
        for pattern, sql_template in _NL_TO_SQL_MAPPINGS:
            match = re.search(pattern, question)
            if not match:
                continue
            groups = match.groups()
            sql = sql_template
            table = matched_table
            if "{limit}" in sql and groups:
                sql = sql.replace("{limit}", groups[0])
            if "{column}" in sql and len(groups) > 1:
                sql = sql.replace("{column}", groups[0])
                # "average X of Y": the second capture is the table name.
                table = groups[1] or matched_table
            return sql.replace("{table}", table)

        # Default: SELECT * with LIMIT
        return f"SELECT * FROM {matched_table} LIMIT 100"

    # --- Validation -------------------------------------------------------

    def validate_query(self, sql: str) -> Optional[str]:
        """Validate a SQL query against safety rules.

        Returns None if valid, or an error message string if blocked.
        """
        sql_upper = sql.upper().strip()

        if self.safety_level == SafetyLevel.READ_ONLY:
            patterns = _BLOCKED_PATTERNS_READ_ONLY
        elif self.safety_level == SafetyLevel.READ_WRITE:
            patterns = _BLOCKED_PATTERNS_READ_WRITE
        else:
            return None  # Unrestricted

        for pattern in patterns:
            if re.search(pattern, sql_upper):
                return f"Query blocked by safety policy ({self.safety_level.value}): matches '{pattern}'"

        # Check for multiple statements (SQL injection prevention).
        # NOTE(review): a literal ';' inside a string constant would trip
        # this check — acceptable for the rule-based fallback queries.
        statements = [s.strip() for s in sql.split(";") if s.strip()]
        if len(statements) > 1:
            return "Multiple SQL statements are not allowed."

        return None

    # --- Execution --------------------------------------------------------

    async def execute(self, sql: str) -> QueryResult:
        """Execute a SQL query via MCP and return structured results.

        ``original_question`` is left empty here; ``ask()`` fills it in.
        Never raises: transport errors are folded into ``QueryResult.error``.
        """
        if self.mcp_client is None:
            return QueryResult(
                original_question="",
                generated_sql=sql,
                dialect=self.dialect,
                error="No MCP client configured. Cannot execute queries.",
            )

        try:
            raw = await self.mcp_client.call_tool(
                self.mcp_tool_name,
                {"query": sql},
            )
        except Exception as exc:
            return QueryResult(
                original_question="",
                generated_sql=sql,
                dialect=self.dialect,
                error=f"Execution error: {exc}",
            )

        # Accept either a bare list of rows or a {"rows": [...]} envelope.
        rows = raw if isinstance(raw, list) else raw.get("rows", []) if isinstance(raw, dict) else []
        columns = list(rows[0].keys()) if rows else []

        return QueryResult(
            original_question="",
            generated_sql=sql,
            dialect=self.dialect,
            rows=rows,
            columns=columns,
            row_count=len(rows),
        )

    # --- Full pipeline ----------------------------------------------------

    async def ask(self, question: str) -> QueryResult:
        """Full NL-to-SQL pipeline: translate → validate → execute.

        This is the main entry point for natural language queries.
        """
        sql = self.translate(question)

        # Validate before execution
        error = self.validate_query(sql)
        if error:
            return QueryResult(
                original_question=question,
                generated_sql=sql,
                dialect=self.dialect,
                error=error,
            )

        result = await self.execute(sql)
        result.original_question = question
        return result

    def explain(self, sql: str) -> str:
        """Return a human-readable explanation of what a SQL query does."""
        upper = sql.upper().strip()
        parts = []

        if upper.startswith("SELECT"):
            # Extract main components
            table_match = re.search(r"\bFROM\s+(\w+)", sql, re.IGNORECASE)
            where_match = re.search(r"\bWHERE\s+(.+?)(?:\bORDER|\bLIMIT|\bGROUP|$)", sql, re.IGNORECASE)
            limit_match = re.search(r"\bLIMIT\s+(\d+)", sql, re.IGNORECASE)

            table = table_match.group(1) if table_match else "unknown"

            if "COUNT(*)" in upper:
                parts.append(f"Count all rows in '{table}'")
            elif "AVG(" in upper:
                parts.append(f"Calculate averages from '{table}'")
            elif "SUM(" in upper:
                parts.append(f"Sum values from '{table}'")
            else:
                parts.append(f"Retrieve data from '{table}'")

            if where_match:
                parts.append(f"filtered by: {where_match.group(1).strip()}")
            if limit_match:
                parts.append(f"limited to {limit_match.group(1)} rows")

        elif upper.startswith("INSERT"):
            table_match = re.search(r"\bINTO\s+(\w+)", sql, re.IGNORECASE)
            table = table_match.group(1) if table_match else "unknown"
            parts.append(f"Insert new data into '{table}'")

        elif upper.startswith("UPDATE"):
            table_match = re.search(r"\bUPDATE\s+(\w+)", sql, re.IGNORECASE)
            table = table_match.group(1) if table_match else "unknown"
            parts.append(f"Update records in '{table}'")

        elif upper.startswith("DELETE"):
            table_match = re.search(r"\bFROM\s+(\w+)", sql, re.IGNORECASE)
            table = table_match.group(1) if table_match else "unknown"
            parts.append(f"Delete records from '{table}'")

        return ". ".join(parts) if parts else "Unknown query type."
diff --git a/gitpilot/ollabridge_proxy.py b/gitpilot/ollabridge_proxy.py
new file mode 100644
index 0000000000000000000000000000000000000000..bcdc33b85288d792a5d52c6e44220dda6c5fedae
--- /dev/null
+++ b/gitpilot/ollabridge_proxy.py
@@ -0,0 +1,193 @@
+# gitpilot/ollabridge_proxy.py
+"""OllaBridge Cloud proxy endpoints for GitPilot.
+
+Provides server-side proxy for OllaBridge Cloud device pairing
+and model discovery, avoiding CORS issues when the frontend
+calls remote OllaBridge instances.
+"""
+from __future__ import annotations
+
+import logging
+
+import httpx
+from fastapi import APIRouter, Query
+from pydantic import BaseModel
+
logger = logging.getLogger(__name__)

# All endpoints in this module are mounted under /api/ollabridge.
router = APIRouter(prefix="/api/ollabridge", tags=["ollabridge"])
+
+
class PairRequest(BaseModel):
    """Body for /pair: which OllaBridge to contact and the pairing code to send."""

    base_url: str  # OllaBridge instance URL (trailing slash tolerated)
    code: str  # short-lived pairing code shown on the device
+
+
class PairResponse(BaseModel):
    """Uniform /pair result: exactly one of token (success) or error is meaningful."""

    success: bool
    token: str | None = None  # device token returned by OllaBridge on success
    error: str | None = None  # human-readable failure description
+
+
+@router.post("/pair", response_model=PairResponse)
+async def proxy_pair(req: PairRequest):
+ """Proxy device pairing request to OllaBridge Cloud.
+
+ Forwards the pairing code to the OllaBridge /device/pair-simple endpoint
+ and returns the device token on success.
+ """
+ base = req.base_url.rstrip("/")
+ try:
+ async with httpx.AsyncClient(timeout=15.0) as client:
+ resp = await client.post(
+ f"{base}/device/pair-simple",
+ json={"code": req.code},
+ headers={"Content-Type": "application/json"},
+ )
+ if resp.status_code == 200:
+ data = resp.json()
+ if data.get("status") == "ok":
+ return PairResponse(
+ success=True,
+ token=data.get("device_token"),
+ )
+ # Endpoint returned an error in the response body
+ return PairResponse(
+ success=False,
+ error=data.get("error") or "Pairing failed",
+ )
+ # Try to extract error message from non-200 responses
+ try:
+ err_data = resp.json()
+ detail = err_data.get("detail") or err_data.get("error")
+ if isinstance(detail, list):
+ err_msg = "; ".join(
+ e.get("msg", str(e)) if isinstance(e, dict) else str(e)
+ for e in detail
+ )
+ elif detail:
+ err_msg = str(detail)
+ else:
+ err_msg = f"OllaBridge returned HTTP {resp.status_code}"
+ except Exception:
+ # Response is not JSON (e.g. HTML error page from HF Spaces)
+ body_preview = resp.text[:200] if resp.text else ""
+ logger.warning(
+ "OllaBridge pair: HTTP %d, non-JSON body: %s",
+ resp.status_code,
+ body_preview,
+ )
+ if resp.status_code == 503:
+ err_msg = "OllaBridge is starting up. Please try again in a moment."
+ elif resp.status_code >= 500:
+ err_msg = (
+ f"OllaBridge server error (HTTP {resp.status_code}). "
+ "The service may be restarting — try again shortly."
+ )
+ elif resp.status_code == 422:
+ err_msg = "Invalid pairing request format."
+ else:
+ err_msg = f"OllaBridge returned HTTP {resp.status_code}"
+ return PairResponse(success=False, error=err_msg)
+ except httpx.ConnectError:
+ return PairResponse(success=False, error=f"Cannot reach {base}")
+ except httpx.TimeoutException:
+ return PairResponse(success=False, error="Connection timed out")
+ except Exception as exc:
+ logger.warning("OllaBridge pair proxy error: %s", exc)
+ return PairResponse(success=False, error=str(exc))
+
+
class ModelsResponse(BaseModel):
    """Result of /models: model ids, plus an error string when the list is empty due to failure."""

    models: list[str]
    error: str | None = None
+
+
class OllaBridgeNormalizedHealth(BaseModel):
    """Machine-friendly health payload returned by /normalized-health."""

    status: str  # "ok" or "error"
    base_url: str  # URL exactly as supplied by the caller
    effective_api_base: str  # normalized base with /v1 appended
    models_available: bool = False  # whether {base}/v1/models answered 200
    auth_mode: str = "unknown"  # "api_key" when a key was supplied, else "local"
    warning: str | None = None  # configuration hint (e.g. stray /v1 suffix)
+
+
+@router.get("/models", response_model=ModelsResponse)
+async def proxy_models(base_url: str = "https://ruslanmv-ollabridge.hf.space", api_key: str = ""):
+ """Proxy model listing request to an OllaBridge instance."""
+ base = base_url.rstrip("/")
+ try:
+ headers: dict[str, str] = {"Accept": "application/json"}
+ if api_key:
+ headers["Authorization"] = f"Bearer {api_key}"
+ async with httpx.AsyncClient(timeout=10.0) as client:
+ resp = await client.get(f"{base}/v1/models", headers=headers)
+ if resp.status_code == 200:
+ data = resp.json()
+ if isinstance(data, dict) and "data" in data:
+ models = sorted({m.get("id", "") for m in data["data"] if m.get("id")})
+ return ModelsResponse(models=models)
+ if isinstance(data, dict) and "models" in data:
+ models = sorted({
+ m.get("name", m.get("model", ""))
+ for m in data["models"]
+ if m.get("name") or m.get("model")
+ })
+ return ModelsResponse(models=models)
+ return ModelsResponse(models=[], error=f"HTTP {resp.status_code}")
+ except Exception as exc:
+ return ModelsResponse(models=[], error=str(exc))
+
+
+@router.get("/health")
+async def proxy_health(base_url: str = "https://ruslanmv-ollabridge.hf.space"):
+ """Check OllaBridge instance health."""
+ base = base_url.rstrip("/")
+ try:
+ async with httpx.AsyncClient(timeout=5.0) as client:
+ resp = await client.get(f"{base}/health")
+ if resp.status_code == 200:
+ return {"status": "ok", "url": base, "data": resp.json()}
+ return {"status": "error", "url": base, "http_status": resp.status_code}
+ except Exception as exc:
+ return {"status": "error", "url": base, "error": str(exc)}
+
+
+@router.get("/normalized-health")
+async def ollabridge_normalized_health(
+ base_url: str = Query(default="http://127.0.0.1:8000"),
+ api_key: str | None = Query(default=None),
+):
+ """Normalized health check with machine-friendly fields for the redesigned UI."""
+ effective_base = base_url.rstrip("/")
+ warning = None
+ if effective_base.endswith("/v1"):
+ effective_base = effective_base[:-3]
+ warning = "Do not include /v1; GitPilot adds it automatically."
+
+ effective_api_base = f"{effective_base}/v1"
+
+ auth_mode = "local"
+ if api_key:
+ auth_mode = "api_key"
+
+ try:
+ headers = {}
+ if api_key:
+ headers["Authorization"] = f"Bearer {api_key}"
+ async with httpx.AsyncClient(timeout=5.0) as client:
+ resp = await client.get(f"{effective_api_base}/models", headers=headers)
+ models_available = resp.status_code == 200
+ status = "ok" if resp.status_code == 200 else "error"
+ except Exception:
+ status = "error"
+ models_available = False
+
+ return OllaBridgeNormalizedHealth(
+ status=status,
+ base_url=base_url,
+ effective_api_base=effective_api_base,
+ models_available=models_available,
+ auth_mode=auth_mode,
+ warning=warning,
+ )
diff --git a/gitpilot/permissions.py b/gitpilot/permissions.py
new file mode 100644
index 0000000000000000000000000000000000000000..62c07e07d7d1d0b45bf621a7eac8fda30f8abc20
--- /dev/null
+++ b/gitpilot/permissions.py
@@ -0,0 +1,131 @@
+# gitpilot/permissions.py
+"""Fine-grained permission system for tool execution.
+
+Controls what agents can do based on configurable policies.
+Three modes:
+
+- **NORMAL** – ask before risky operations (default)
+- **PLAN** – read-only; all writes and shell commands blocked
+- **AUTO** – allow everything without confirmation
+
+Permissions live in ``.gitpilot/permissions.json`` or are set via API.
+"""
+from __future__ import annotations
+
+import fnmatch
+import json
+import logging
+from dataclasses import dataclass, field
+from enum import Enum
+from pathlib import Path
+from typing import Any, Dict, List, Optional, Set
+
+logger = logging.getLogger(__name__)
+
+
class PermissionMode(str, Enum):
    """Global execution mode (semantics described in the module docstring)."""

    NORMAL = "normal"  # ask before risky operations (default)
    PLAN = "plan"  # read-only; writes and shell commands blocked
    AUTO = "auto"  # allow everything without confirmation
+
+
class Action(str, Enum):
    """Kinds of operations an agent may request permission for."""

    READ_FILE = "read_file"
    WRITE_FILE = "write_file"
    DELETE_FILE = "delete_file"
    RUN_COMMAND = "run_command"
    GIT_COMMIT = "git_commit"
    GIT_PUSH = "git_push"
    CREATE_ISSUE = "create_issue"
    CREATE_PR = "create_pr"
    MERGE_PR = "merge_pr"
+
+
# Actions that prompt for confirmation in NORMAL mode (these seed the
# default PermissionPolicy.require_confirmation set).
RISKY_ACTIONS: frozenset[Action] = frozenset([
    Action.DELETE_FILE,
    Action.GIT_PUSH,
    Action.MERGE_PR,
    Action.RUN_COMMAND,
])

# The only actions permitted while in PLAN (read-only) mode.
READ_ONLY_ACTIONS: frozenset[Action] = frozenset([
    Action.READ_FILE,
])
+
+
@dataclass
class PermissionPolicy:
    """Configuration consumed by PermissionManager."""

    # Global mode; PLAN restricts to READ_ONLY_ACTIONS.
    mode: PermissionMode = PermissionMode.NORMAL
    # Optional allow-list: when set, only these actions pass (None = no list).
    allowed_actions: Optional[Set[Action]] = None
    # fnmatch patterns for files agents must never touch (secret material).
    blocked_paths: List[str] = field(default_factory=lambda: [
        ".env", ".env.*", "*.pem", "*.key", "credentials*", "secrets*",
    ])
    # NOTE(review): stored and serialized but not enforced anywhere in this
    # module — presumably checked by the command runner; confirm.
    allowed_commands: Optional[List[str]] = None
    # Actions requiring user confirmation in NORMAL mode.
    require_confirmation: Set[Action] = field(
        default_factory=lambda: set(RISKY_ACTIONS),
    )
+
+
class PermissionManager:
    """Check and enforce permissions for agent actions.

    ``check`` raises ``PermissionError`` when the active
    :class:`PermissionPolicy` disallows an action and returns ``True``
    otherwise.
    """

    def __init__(self, policy: Optional[PermissionPolicy] = None):
        self.policy = policy or PermissionPolicy()

    def check(
        self, action: Action, context: Optional[Dict[str, Any]] = None,
    ) -> bool:
        """Return ``True`` if allowed, raise ``PermissionError`` if blocked.

        ``context`` may carry a ``"path"`` key for file actions.
        """
        # Blocked-path patterns apply in EVERY mode.  Fix: previously PLAN
        # mode returned before this check, so read-only mode could read
        # secret files (e.g. .env) that NORMAL mode would refuse.
        if context and "path" in context:
            self._check_path(context["path"])

        if self.policy.mode == PermissionMode.PLAN:
            if action not in READ_ONLY_ACTIONS:
                raise PermissionError(
                    f"Action '{action.value}' blocked in plan mode (read-only)"
                )
            return True

        if self.policy.allowed_actions is not None:
            if action not in self.policy.allowed_actions:
                raise PermissionError(
                    f"Action '{action.value}' not in allowed actions"
                )

        return True

    def needs_confirmation(self, action: Action) -> bool:
        """Whether *action* should prompt the user before running."""
        if self.policy.mode == PermissionMode.AUTO:
            return False  # auto mode never prompts
        if self.policy.mode == PermissionMode.PLAN:
            return False  # plan mode blocks instead of prompting
        return action in self.policy.require_confirmation

    def _check_path(self, path: str):
        """Raise ``PermissionError`` if *path* matches a blocked pattern.

        Patterns are tested against both the full path and its final
        component, so "*.pem" blocks "certs/server.pem" too.
        """
        # Normalize Windows separators so "certs\\server.pem" yields the
        # same basename as "certs/server.pem".
        basename = path.replace("\\", "/").rsplit("/", 1)[-1]
        for pattern in self.policy.blocked_paths:
            if fnmatch.fnmatch(path, pattern) or fnmatch.fnmatch(basename, pattern):
                raise PermissionError(
                    f"Access to '{path}' blocked by policy (pattern '{pattern}')"
                )

    def to_dict(self) -> Dict[str, Any]:
        """Serializable summary of the active policy (subset of fields)."""
        return {
            "mode": self.policy.mode.value,
            "blocked_paths": self.policy.blocked_paths,
            "allowed_commands": self.policy.allowed_commands,
        }

    def load_from_file(self, path: Path):
        """Merge settings from a permissions JSON file; missing file is a no-op.

        Malformed content (bad JSON, unknown mode value) is logged and
        ignored so a broken config cannot take the manager down.
        """
        if not path.exists():
            return
        try:
            data = json.loads(path.read_text())
            if "mode" in data:
                self.policy.mode = PermissionMode(data["mode"])
            if "blocked_paths" in data:
                self.policy.blocked_paths = data["blocked_paths"]
            if "allowed_commands" in data:
                self.policy.allowed_commands = data["allowed_commands"]
        except Exception as e:
            logger.warning("Failed to load permissions: %s", e)
diff --git a/gitpilot/plugins.py b/gitpilot/plugins.py
new file mode 100644
index 0000000000000000000000000000000000000000..f97d100f21b90de0c598498f98ab8c5c465691ef
--- /dev/null
+++ b/gitpilot/plugins.py
@@ -0,0 +1,253 @@
+# gitpilot/plugins.py
+"""Plugin system for GitPilot.
+
+Plugins extend GitPilot with additional skills, hooks, MCP configs,
+and custom agent types. Plugins are installed from git URLs or local
+directories into ``~/.gitpilot/plugins/``.
+
+A plugin is a directory containing a ``plugin.json`` manifest:
+
+.. code-block:: json
+
+ {
+ "name": "my-plugin",
+ "version": "1.0.0",
+ "description": "Does amazing things",
+ "skills": ["skills/*.md"],
+ "hooks": ["hooks.json"],
+ "mcp": ["mcp.json"]
+ }
+"""
+from __future__ import annotations
+
+import json
+import logging
+import shutil
+import subprocess
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+logger = logging.getLogger(__name__)
+
# Default install root for plugins; overridable via PluginManager(plugins_dir=...).
PLUGINS_DIR = Path.home() / ".gitpilot" / "plugins"
# Manifest filename every plugin directory must contain.
MANIFEST_FILE = "plugin.json"
+
+
@dataclass
class PluginInfo:
    """Metadata about an installed plugin, mirroring its plugin.json manifest."""

    name: str
    version: str = "0.0.0"
    description: str = ""
    author: str = ""
    source: str = ""  # git URL or local path
    skills: List[str] = field(default_factory=list)
    hooks: List[str] = field(default_factory=list)
    mcp_configs: List[str] = field(default_factory=list)
    path: Optional[Path] = None

    @classmethod
    def from_manifest(cls, manifest_path: Path) -> "PluginInfo":
        """Parse a plugin.json manifest; the plugin dir is the manifest's parent."""
        manifest = json.loads(manifest_path.read_text())
        # "name" is the only mandatory manifest key; everything else defaults.
        optional = {
            "version": manifest.get("version", "0.0.0"),
            "description": manifest.get("description", ""),
            "author": manifest.get("author", ""),
            "source": manifest.get("source", ""),
            "skills": manifest.get("skills", []),
            "hooks": manifest.get("hooks", []),
            "mcp_configs": manifest.get("mcp", []),
        }
        return cls(name=manifest["name"], path=manifest_path.parent, **optional)

    def to_dict(self) -> Dict[str, Any]:
        """JSON-safe representation (Path rendered as a string)."""
        result: Dict[str, Any] = {
            "name": self.name,
            "version": self.version,
            "description": self.description,
            "author": self.author,
            "source": self.source,
            "skills": self.skills,
            "hooks": self.hooks,
            "mcp_configs": self.mcp_configs,
        }
        result["path"] = str(self.path) if self.path else None
        return result
+
+
class PluginManager:
    """Discover, install, and manage GitPilot plugins.

    Plugins are directories under ``plugins_dir`` (default
    ``~/.gitpilot/plugins``), each holding a ``plugin.json`` manifest.
    Parsed manifests are memoized in ``self._cache`` keyed by plugin name.
    """

    def __init__(self, plugins_dir: Optional[Path] = None) -> None:
        self.plugins_dir = plugins_dir or PLUGINS_DIR
        # Create the root eagerly so install/list never see a missing dir.
        self.plugins_dir.mkdir(parents=True, exist_ok=True)
        self._cache: Dict[str, PluginInfo] = {}

    # ------------------------------------------------------------------
    # Install / Uninstall
    # ------------------------------------------------------------------

    def install(self, source: str) -> PluginInfo:
        """Install a plugin from a git URL or local directory path.

        Returns the installed PluginInfo.

        Raises ValueError when *source* is neither an existing local
        directory nor a recognizable git URL; git/filesystem errors
        propagate to the caller.
        """
        source_path = Path(source)

        if source_path.is_dir():
            return self._install_from_local(source_path)

        if source.startswith(("http://", "https://", "git@")):
            return self._install_from_git(source)

        raise ValueError(f"Invalid plugin source: {source}. Provide a git URL or local path.")

    def _install_from_git(self, url: str) -> PluginInfo:
        """Clone (or fast-forward) *url* into plugins_dir and load its manifest."""
        # Derive plugin name from URL
        name = url.rstrip("/").split("/")[-1]
        if name.endswith(".git"):
            name = name[:-4]
        dest = self.plugins_dir / name

        if dest.exists():
            # Update existing
            # NOTE(review): assumes an existing directory of this name is a
            # git clone; if it was installed from a local path, `git pull`
            # fails and check=True raises CalledProcessError.
            subprocess.run(
                ["git", "pull", "--ff-only"],
                cwd=str(dest),
                capture_output=True,
                check=True,
            )
        else:
            subprocess.run(
                ["git", "clone", "--depth=1", url, str(dest)],
                capture_output=True,
                check=True,
            )

        return self._load_plugin(dest)

    def _install_from_local(self, source: Path) -> PluginInfo:
        """Copy a local plugin directory into plugins_dir, replacing any
        previous install of the same plugin name."""
        manifest = source / MANIFEST_FILE
        if not manifest.exists():
            raise FileNotFoundError(f"No {MANIFEST_FILE} found in {source}")
        info = PluginInfo.from_manifest(manifest)
        dest = self.plugins_dir / info.name
        if dest.exists():
            shutil.rmtree(dest)
        shutil.copytree(source, dest)
        return self._load_plugin(dest)

    def uninstall(self, plugin_name: str) -> bool:
        """Uninstall a plugin by name. Returns True if removed."""
        dest = self.plugins_dir / plugin_name
        if not dest.exists():
            return False
        shutil.rmtree(dest)
        self._cache.pop(plugin_name, None)
        logger.info("Uninstalled plugin: %s", plugin_name)
        return True

    # ------------------------------------------------------------------
    # Discovery
    # ------------------------------------------------------------------

    def list_installed(self) -> List[PluginInfo]:
        """List all installed plugins.

        Only directories containing a manifest are considered; plugins with
        unparsable manifests are logged and skipped.
        """
        plugins = []
        if not self.plugins_dir.exists():
            return plugins
        for child in sorted(self.plugins_dir.iterdir()):
            if child.is_dir() and (child / MANIFEST_FILE).exists():
                try:
                    plugins.append(self._load_plugin(child))
                except Exception as e:
                    logger.warning("Bad plugin at %s: %s", child, e)
        return plugins

    def get_plugin(self, name: str) -> Optional[PluginInfo]:
        """Get a specific plugin by name (cache first, then disk)."""
        if name in self._cache:
            return self._cache[name]
        path = self.plugins_dir / name
        if path.exists() and (path / MANIFEST_FILE).exists():
            return self._load_plugin(path)
        return None

    def _load_plugin(self, path: Path) -> PluginInfo:
        """Parse the manifest at *path*, pin its location, and memoize it."""
        info = PluginInfo.from_manifest(path / MANIFEST_FILE)
        info.path = path
        self._cache[info.name] = info
        return info

    # ------------------------------------------------------------------
    # Skill loading
    # ------------------------------------------------------------------

    def load_all_skills(self) -> List[Dict[str, Any]]:
        """Load skill definitions from all installed plugins.

        Returns a list of ``{"skill": Skill, "plugin": <name>}`` dicts.
        Files that fail to parse are logged and skipped.
        """
        from .skills import Skill

        skills: List[Dict[str, Any]] = []
        for plugin in self.list_installed():
            if not plugin.path:
                continue
            for pattern in plugin.skills:
                # Manifest patterns are globs relative to the plugin dir.
                for skill_file in plugin.path.glob(pattern):
                    try:
                        skill = Skill.from_file(skill_file)
                        skills.append({
                            "skill": skill,
                            "plugin": plugin.name,
                        })
                    except Exception as e:
                        logger.warning("Failed to load skill %s: %s", skill_file, e)
        return skills

    # ------------------------------------------------------------------
    # Hook loading
    # ------------------------------------------------------------------

    def load_all_hooks(self) -> List[Dict[str, Any]]:
        """Load hook definitions from all installed plugins.

        Each hooks file is expected to contain a JSON *list* of hook
        objects; each is tagged with its owning plugin's name.
        NOTE(review): unlike load_all_mcp_configs, a dict payload is not
        unwrapped here — it would fail and be logged; confirm intended.
        """
        all_hooks: List[Dict[str, Any]] = []
        for plugin in self.list_installed():
            if not plugin.path:
                continue
            for hook_file_pattern in plugin.hooks:
                for hook_file in plugin.path.glob(hook_file_pattern):
                    try:
                        hooks = json.loads(hook_file.read_text())
                        for h in hooks:
                            h["plugin"] = plugin.name
                            all_hooks.append(h)
                    except Exception as e:
                        logger.warning("Failed to load hooks from %s: %s", hook_file, e)
        return all_hooks

    # ------------------------------------------------------------------
    # MCP config loading
    # ------------------------------------------------------------------

    def load_all_mcp_configs(self) -> List[Dict[str, Any]]:
        """Load MCP server configs from all installed plugins.

        Accepts either a bare JSON list of server entries or an object with
        a "servers" list; each entry is tagged with its plugin's name.
        """
        configs: List[Dict[str, Any]] = []
        for plugin in self.list_installed():
            if not plugin.path:
                continue
            for mcp_pattern in plugin.mcp_configs:
                for mcp_file in plugin.path.glob(mcp_pattern):
                    try:
                        data = json.loads(mcp_file.read_text())
                        servers = data if isinstance(data, list) else data.get("servers", [])
                        for s in servers:
                            s["plugin"] = plugin.name
                            configs.append(s)
                    except Exception as e:
                        logger.warning("Failed to load MCP config from %s: %s", mcp_file, e)
        return configs
diff --git a/gitpilot/pr_tools.py b/gitpilot/pr_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..c64407024949ebaa2028e38f6521e5383a791e5a
--- /dev/null
+++ b/gitpilot/pr_tools.py
@@ -0,0 +1,174 @@
+"""CrewAI tools for GitHub Pull Request management.
+
+These tools allow agents to create, list, review, and merge pull requests.
+"""
+import asyncio
+from typing import Optional
+
+from crewai.tools import tool
+
+from .agent_tools import get_repo_context
+from . import github_pulls as gp
+
+
+def _run_async(coro):
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+ try:
+ return loop.run_until_complete(coro)
+ finally:
+ loop.close()
+
+
+def _fmt_pr(pr: dict) -> str:
+ return (
+ f"#{pr.get('number')} [{pr.get('state')}] {pr.get('title')}\n"
+ f" {pr.get('head', {}).get('ref', '?')} -> {pr.get('base', {}).get('ref', '?')}\n"
+ f" Author: {pr.get('user', {}).get('login', 'unknown')} | "
+ f"Draft: {pr.get('draft', False)}\n"
+ f" URL: {pr.get('html_url', '')}"
+ )
+
+
+@tool("List pull requests")
+def list_pull_requests(state: str = "open", per_page: int = 20) -> str:
+ """Lists pull requests in the current repository. state: open/closed/all."""
+ try:
+ owner, repo, token, _branch = get_repo_context()
+ prs = _run_async(
+ gp.list_pull_requests(owner, repo, state=state, per_page=per_page, token=token)
+ )
+ if not prs:
+ return f"No {state} pull requests in {owner}/{repo}."
+ header = f"Pull requests in {owner}/{repo} (state={state}):\n"
+ return header + "\n".join(_fmt_pr(p) for p in prs)
+ except Exception as e:
+ return f"Error listing PRs: {e}"
+
+
+@tool("Get pull request details")
+def get_pull_request(pull_number: int) -> str:
+ """Gets full details of a pull request by number."""
+ try:
+ owner, repo, token, _branch = get_repo_context()
+ pr = _run_async(gp.get_pull_request(owner, repo, pull_number, token=token))
+ body = (pr.get("body") or "")[:500]
+ return (
+ f"PR #{pr.get('number')}: {pr.get('title')}\n"
+ f"State: {pr.get('state')} | Mergeable: {pr.get('mergeable')}\n"
+ f"Head: {pr.get('head', {}).get('ref')} -> Base: {pr.get('base', {}).get('ref')}\n"
+ f"Author: {pr.get('user', {}).get('login', 'unknown')}\n"
+ f"Additions: {pr.get('additions', 0)} | Deletions: {pr.get('deletions', 0)} | "
+ f"Changed files: {pr.get('changed_files', 0)}\n"
+ f"Body:\n{body}\n"
+ f"URL: {pr.get('html_url', '')}"
+ )
+ except Exception as e:
+ return f"Error getting PR: {e}"
+
+
+@tool("Create a pull request")
+def create_pull_request(
+ title: str,
+ head: str,
+ base: str,
+ body: str = "",
+ draft: bool = False,
+) -> str:
+ """Creates a new pull request. head=source branch, base=target branch."""
+ try:
+ owner, repo, token, _branch = get_repo_context()
+ pr = _run_async(
+ gp.create_pull_request(
+ owner, repo, title=title, head=head, base=base,
+ body=body or None, draft=draft, token=token,
+ )
+ )
+ return (
+ f"Created PR #{pr.get('number')}: {pr.get('title')}\n"
+ f"URL: {pr.get('html_url', '')}"
+ )
+ except Exception as e:
+ return f"Error creating PR: {e}"
+
+
+@tool("Merge a pull request")
+def merge_pull_request(
+ pull_number: int,
+ merge_method: str = "merge",
+ commit_title: str = "",
+) -> str:
+ """Merges a pull request. merge_method: merge, squash, or rebase."""
+ try:
+ owner, repo, token, _branch = get_repo_context()
+ result = _run_async(
+ gp.merge_pull_request(
+ owner, repo, pull_number,
+ merge_method=merge_method,
+ commit_title=commit_title or None,
+ token=token,
+ )
+ )
+ sha = result.get("sha", "unknown") if isinstance(result, dict) else "unknown"
+ return f"PR #{pull_number} merged successfully. Merge commit: {sha}"
+ except Exception as e:
+ return f"Error merging PR: {e}"
+
+
+@tool("List files changed in a pull request")
+def list_pr_files(pull_number: int) -> str:
+ """Lists all files changed in a pull request with status and patch info."""
+ try:
+ owner, repo, token, _branch = get_repo_context()
+ files = _run_async(gp.list_pr_files(owner, repo, pull_number, token=token))
+ if not files:
+ return f"No files changed in PR #{pull_number}."
+ lines = [f"Files changed in PR #{pull_number}:"]
+ for f in files:
+ lines.append(
+ f" [{f.get('status', '?')}] {f.get('filename', '?')} "
+ f"(+{f.get('additions', 0)} -{f.get('deletions', 0)})"
+ )
+ return "\n".join(lines)
+ except Exception as e:
+ return f"Error listing PR files: {e}"
+
+
+@tool("Add a review to a pull request")
+def create_pr_review(
+ pull_number: int,
+ body: str,
+ event: str = "COMMENT",
+) -> str:
+ """Adds a review to a PR. event: APPROVE, REQUEST_CHANGES, or COMMENT."""
+ try:
+ owner, repo, token, _branch = get_repo_context()
+ review = _run_async(
+ gp.create_pr_review(owner, repo, pull_number, body=body, event=event, token=token)
+ )
+ return f"Review submitted on PR #{pull_number} (event={event})\nURL: {review.get('html_url', '')}"
+ except Exception as e:
+ return f"Error creating review: {e}"
+
+
+@tool("Comment on a pull request")
+def add_pr_comment(pull_number: int, body: str) -> str:
+ """Adds a general comment to a pull request."""
+ try:
+ owner, repo, token, _branch = get_repo_context()
+ comment = _run_async(gp.add_pr_comment(owner, repo, pull_number, body, token=token))
+ return f"Comment added to PR #{pull_number}\nURL: {comment.get('html_url', '')}"
+ except Exception as e:
+ return f"Error commenting on PR: {e}"
+
+
# Export all PR tools
# Each entry is a @tool-decorated callable registered with CrewAI agents;
# list order has no runtime significance.
PR_TOOLS = [
    list_pull_requests,
    get_pull_request,
    create_pull_request,
    merge_pull_request,
    list_pr_files,
    create_pr_review,
    add_pr_comment,
]
diff --git a/gitpilot/predictions.py b/gitpilot/predictions.py
new file mode 100644
index 0000000000000000000000000000000000000000..b4e4c788535d44ceade26026b0fe46c45e4d23a1
--- /dev/null
+++ b/gitpilot/predictions.py
@@ -0,0 +1,235 @@
+# gitpilot/predictions.py
+"""Predictive workflow engine — suggest next actions proactively.
+
+Analyses the current session state and recent actions to predict what
+the user likely needs next. Suggestions are scored by relevance and
+presented as actionable prompts.
+
+Based on the concept of *proactive assistance* from HCI research
+(Horvitz, 1999) and GitHub's own next-action prediction patterns.
+
+Trigger rules::
+
+ After merging a PR → suggest updating changelog
+ After creating issue → suggest assigning and labeling
+ After test failure → suggest debugging approach
+ After dep update → suggest full test suite
+ Before release → suggest version bump
+"""
+from __future__ import annotations
+
+import re
+from dataclasses import dataclass, field
+from enum import Enum
+from typing import Any, Dict, List, Optional
+
+
class ActionCategory(str, Enum):
    """Broad buckets used to group suggested next actions.

    Inherits ``str`` so members serialize directly in JSON payloads.
    """

    TESTING = "testing"
    DOCUMENTATION = "documentation"
    RELEASE = "release"
    REVIEW = "review"
    ISSUE_MGMT = "issue_management"
    CLEANUP = "cleanup"
    SECURITY = "security"
    DEPLOYMENT = "deployment"
+
+
@dataclass
class SuggestedAction:
    """A suggested next action for the user.

    Produced by prediction rules; ``prompt`` is ready to be sent to an
    agent verbatim if the user accepts the suggestion.
    """

    title: str  # Short human-readable headline
    description: str  # One-sentence rationale shown to the user
    category: ActionCategory
    prompt: str  # Ready-to-use prompt if the user accepts
    relevance_score: float = 0.5  # 0.0 - 1.0
    auto_executable: bool = False  # Can be run without confirmation

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-friendly dict (enum flattened to its string value)."""
        return {
            "title": self.title,
            "description": self.description,
            "category": self.category.value,
            "prompt": self.prompt,
            "relevance_score": self.relevance_score,
            "auto_executable": self.auto_executable,
        }
+
+
+# ---------------------------------------------------------------------------
+# Prediction rules
+# ---------------------------------------------------------------------------
+
@dataclass
class PredictionRule:
    """A rule that fires when certain conditions are met."""

    name: str  # Unique identifier; also used for cooldown tracking
    trigger_patterns: List[str]  # Regex patterns to match against context
    action: SuggestedAction  # Suggestion emitted when a pattern matches
    cooldown_actions: int = 3  # Don't re-suggest within N actions
+
+
# Built-in prediction rules. Patterns are searched against the *lowercased*
# context (see PredictiveEngine.predict), so they are written in lowercase.
_DEFAULT_RULES: List[PredictionRule] = [
    PredictionRule(
        name="post_merge_changelog",
        # \bmerg\w+ also matches "merged"/"merging", not only "merge".
        trigger_patterns=[r"\bmerg\w+\b.*\bpr\b", r"\bmerged\b", r"\bpull request.*merged\b"],
        action=SuggestedAction(
            title="Update Changelog",
            description="A PR was just merged. Consider updating the changelog.",
            category=ActionCategory.DOCUMENTATION,
            prompt="Update the CHANGELOG.md with the latest merged PR changes.",
            relevance_score=0.85,
        ),
    ),
    PredictionRule(
        name="post_issue_create_label",
        trigger_patterns=[r"\bcreate.*issue\b", r"\bnew issue\b", r"\bissue.*created\b"],
        action=SuggestedAction(
            title="Label and Assign Issue",
            description="A new issue was created. Consider adding labels and assignees.",
            category=ActionCategory.ISSUE_MGMT,
            prompt="Add appropriate labels and assign the newly created issue.",
            relevance_score=0.75,
        ),
    ),
    PredictionRule(
        name="post_test_failure_debug",
        # No trailing \b after "fail": must also match "failed"/"failing"
        # (e.g. "Tests failed in auth module", the PredictiveEngine docstring
        # example, which the old r"\btest.*fail\b" pattern never matched).
        trigger_patterns=[r"\btest.*\bfail", r"\bfailed\b.*\btest\b", r"\berror.*pytest\b"],
        action=SuggestedAction(
            title="Debug Test Failures",
            description="Tests failed. Let me help identify the root cause.",
            category=ActionCategory.TESTING,
            prompt="Analyze the test failures and suggest fixes for each failing test.",
            relevance_score=0.95,
        ),
    ),
    PredictionRule(
        name="post_dep_update_test",
        # "updat\w*" covers update/updated/updating; "packages?" covers the
        # plural. The old "\bupdate\b" missed "dependencies were updated".
        trigger_patterns=[r"\bdependenc\w+.*\bupdat\w*", r"\bupgrad\w*\b.*\bpackages?\b", r"\bnpm update\b"],
        action=SuggestedAction(
            title="Run Full Test Suite",
            description="Dependencies were updated. Run the full test suite to verify compatibility.",
            category=ActionCategory.TESTING,
            prompt="Run the complete test suite to verify no regressions from the dependency update.",
            relevance_score=0.90,
        ),
    ),
    PredictionRule(
        name="pre_release_version",
        trigger_patterns=[r"\brelease\b", r"\bversion bump\b", r"\btag\b.*\brelease\b"],
        action=SuggestedAction(
            title="Version Bump & Tag",
            description="Preparing a release. Consider bumping the version number.",
            category=ActionCategory.RELEASE,
            prompt="Bump the version number, update the changelog, and create a release tag.",
            relevance_score=0.80,
        ),
    ),
    PredictionRule(
        name="post_edit_lint",
        trigger_patterns=[r"\bedit\b.*\bfile\b", r"\bmodif\w+\b.*\bcode\b", r"\bwrote\b.*\bfile\b"],
        action=SuggestedAction(
            title="Run Linter",
            description="Files were modified. Run the linter to check for style issues.",
            category=ActionCategory.CLEANUP,
            prompt="Run the project linter on the modified files and fix any issues.",
            relevance_score=0.65,
            auto_executable=True,
        ),
    ),
    PredictionRule(
        name="security_scan_suggestion",
        # "handl\w*" matches handle/handler/handling; the old r"\bhandl\b"
        # only matched the literal word "handl", which never occurs.
        trigger_patterns=[r"\bauth\w*\b", r"\bpassword\b", r"\bsecret\b", r"\btoken\b.*\bhandl\w*"],
        action=SuggestedAction(
            title="Security Review",
            description="Security-sensitive code was touched. Consider a security review.",
            category=ActionCategory.SECURITY,
            prompt="Review the security-sensitive changes for potential vulnerabilities.",
            relevance_score=0.88,
        ),
    ),
    PredictionRule(
        name="post_commit_pr",
        trigger_patterns=[r"\bcommit\w*\b.*\bbranch\b", r"\bpush\w*\b.*\bfeature\b"],
        action=SuggestedAction(
            title="Create Pull Request",
            description="Changes were committed to a feature branch. Create a PR for review.",
            category=ActionCategory.REVIEW,
            prompt="Create a pull request for the current feature branch with a description of changes.",
            relevance_score=0.70,
        ),
    ),
]
+
+
class PredictiveEngine:
    """Predict what the user needs next based on context.

    Usage::

        engine = PredictiveEngine()
        suggestions = engine.predict("Tests failed in auth module")
        for s in suggestions:
            print(f"[{s.relevance_score}] {s.title}: {s.prompt}")
    """

    def __init__(
        self,
        custom_rules: Optional[List[PredictionRule]] = None,
        min_score: float = 0.5,
    ) -> None:
        """Create an engine.

        Args:
            custom_rules: Extra rules evaluated before the built-in set.
            min_score: Suggestions scoring below this are dropped.
        """
        self.rules = (custom_rules or []) + _DEFAULT_RULES
        self.min_score = min_score
        # Names of recently fired rules, oldest first; used for cooldowns.
        self._recent_suggestions: List[str] = []

    def predict(self, context: str) -> List[SuggestedAction]:
        """Predict next actions based on the given context string.

        The context can be:
        - The last user message
        - A summary of recent session activity
        - An agent's output/result

        Returns suggestions filtered by ``min_score`` and sorted by
        descending relevance.
        """
        matches: List[SuggestedAction] = []
        context_lower = context.lower()

        for rule in self.rules:
            # Honor each rule's own cooldown window. (A previous version
            # hardcoded a window of 3, silently ignoring
            # PredictionRule.cooldown_actions.)
            if rule.cooldown_actions > 0 and rule.name in self._recent_suggestions[-rule.cooldown_actions:]:
                continue
            for pattern in rule.trigger_patterns:
                if re.search(pattern, context_lower):
                    matches.append(rule.action)
                    self._recent_suggestions.append(rule.name)
                    break  # one suggestion per rule per prediction

        # Keep the cooldown history bounded so long-lived sessions don't
        # grow it without limit; 100 entries far exceeds any cooldown window.
        if len(self._recent_suggestions) > 100:
            del self._recent_suggestions[:-100]

        return self.score_and_sort(matches)

    def score_and_sort(
        self, actions: List[SuggestedAction],
    ) -> List[SuggestedAction]:
        """Filter by minimum score and sort by relevance (descending)."""
        filtered = [a for a in actions if a.relevance_score >= self.min_score]
        return sorted(filtered, key=lambda a: a.relevance_score, reverse=True)

    def add_rule(self, rule: PredictionRule) -> None:
        """Add a custom prediction rule."""
        self.rules.insert(0, rule)  # Custom rules take priority

    def clear_history(self) -> None:
        """Clear the recent suggestion history."""
        self._recent_suggestions.clear()

    def list_rules(self) -> List[Dict[str, Any]]:
        """List all prediction rules."""
        return [
            {
                "name": r.name,
                "trigger_patterns": r.trigger_patterns,
                "action_title": r.action.title,
                "category": r.action.category.value,
            }
            for r in self.rules
        ]
diff --git a/gitpilot/py.typed b/gitpilot/py.typed
new file mode 100644
index 0000000000000000000000000000000000000000..cb5707d7ddc4ba1712aadae9fc34250aa1795291
--- /dev/null
+++ b/gitpilot/py.typed
@@ -0,0 +1,2 @@
+# Marker file for PEP 561
+# This package supports type checking
diff --git a/gitpilot/reasoning_normalizer.py b/gitpilot/reasoning_normalizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..439f29edacbacc8b1c92bfcdedee265333fffdf0
--- /dev/null
+++ b/gitpilot/reasoning_normalizer.py
@@ -0,0 +1,410 @@
+# gitpilot/reasoning_normalizer.py
+"""
+Reasoning Model Normalizer — makes CrewAI compatible with DeepSeek-R1,
+QwQ, Marco-O1, and other reasoning models.
+
+Why this module exists:
+─────────────────────────────────────────────────────────────────
Reasoning models (deepseek-r1, qwq, marco-o1) produce output like:

    <think>
    Let me think about this step by step...
    I need to call Get repository summary first.
    </think>
    Thought: I need to call Get repository summary
    Action: Get repository summary
    Action Input: {}

CrewAI's ReAct parser expects output starting with `Thought:` and
cannot handle the `<think>...</think>` wrapper. When a reasoning model
+returns such output, CrewAI's parser fails silently and returns None,
+causing:
+
+ ValueError: Invalid response from LLM call - None or empty.
+
+This affects ALL deepseek-r1 sizes (1.5b → 70b). It's not a model
+quality issue — it's a format incompatibility.
+
+Industry-standard solution:
+─────────────────────────────────────────────────────────────────
Strip the reasoning content (between <think> and </think> tags)
+BEFORE passing the response to CrewAI's ReAct parser. This is what
+vLLM's DeepSeekR1ReasoningParser and Ollama's `think` parameter do
+internally.
+
+References:
+- vLLM reasoning parser: https://docs.vllm.ai/en/v0.9.0/api/vllm/reasoning/deepseek_r1_reasoning_parser.html
+- DeepSeek official docs: https://api-docs.deepseek.com/guides/reasoning_model
+- Ollama thinking capability: https://docs.ollama.com/capabilities/thinking
+- Aider's workaround: https://github.com/Aider-AI/aider/issues/3008
+
+Our approach:
+─────────────────────────────────────────────────────────────────
+1. `strip_reasoning_content(text)` — pure function that handles all
+ edge cases: nested tags, orphaned tags, partial tags, no tags.
+
+2. `ReasoningAwareLLM` — transparent wrapper around crewai.LLM that
+ intercepts responses and strips reasoning before CrewAI parses.
+
+3. `wrap_if_reasoning_model(llm, settings)` — factory that returns
+ either the wrapped or the original LLM depending on the model
+ name. Zero overhead for non-reasoning models.
+"""
+from __future__ import annotations
+
+import logging
+import re
+from typing import Any, List, Optional
+
+logger = logging.getLogger(__name__)
+
+
+# ─────────────────────────────────────────────────────────────────
# Pattern matching — reasoning models that emit <think> blocks
+# ─────────────────────────────────────────────────────────────────
+
+# Model name patterns that indicate a reasoning model.
+# Uses substring matching so "deepseek-r1" catches all size variants
+# (deepseek-r1:1.5b, deepseek-r1:14b, deepseek-r1:70b, deepseek-r1:latest).
# Model name patterns that indicate a reasoning model.
# Substring matching means "deepseek-r1" catches every size variant
# (deepseek-r1:1.5b, deepseek-r1:14b, deepseek-r1:70b, deepseek-r1:latest).
REASONING_MODEL_PATTERNS = (
    "deepseek-r1",
    "deepseek-reasoner",
    "qwq",  # Qwen reasoning models (qwq-32b, qwq-preview)
    "marco-o1",  # Alibaba reasoning model
    "o1-",  # OpenAI o1-series via Ollama proxies
    "r1-distill",  # DeepSeek R1 distilled variants
    "openthinker",  # Open-source reasoning model
)


def is_reasoning_model(model_name: str) -> bool:
    """Return True if the given model name matches a known reasoning model."""
    if not model_name:
        return False
    normalized = str(model_name).lower().strip()
    # Drop a provider prefix: "ollama/deepseek-r1:14b" -> "deepseek-r1:14b".
    _, _, tail = normalized.partition("/")
    if tail:
        normalized = tail
    return any(marker in normalized for marker in REASONING_MODEL_PATTERNS)
+
+
+# ─────────────────────────────────────────────────────────────────
+# Core: strip reasoning content from model output
+# ─────────────────────────────────────────────────────────────────
+
# Matches well-formed <think ...>...</think> blocks, including multiline content
_THINK_BLOCK_RE = re.compile(
    r"<think[^>]*>.*?</think>",
    re.DOTALL | re.IGNORECASE,
)

# Matches an orphaned <think> at the start (no closing tag — truncated output)
_ORPHAN_THINK_OPEN_RE = re.compile(
    r"^\s*<think[^>]*>.*?(?=Thought:|Action:|Final Answer:|\Z)",
    re.DOTALL | re.IGNORECASE,
)

# Matches an orphaned closing </think> (appears without an opening tag)
_ORPHAN_THINK_CLOSE_RE = re.compile(
    r"</think>",
    re.IGNORECASE,
)

# Alternative tag styles some models use.
# NOTE(review): the original tag names were lost in transit; <thinking> and
# <reasoning> are the common variants — confirm against upstream history.
_ALT_THINK_PATTERNS = [
    # <thinking>...</thinking>
    (re.compile(r"<thinking[^>]*>.*?</thinking>", re.DOTALL | re.IGNORECASE), ""),
    # <reasoning>...</reasoning>
    (re.compile(r"<reasoning[^>]*>.*?</reasoning>", re.DOTALL | re.IGNORECASE), ""),
]


def strip_reasoning_content(text: str) -> tuple[str, str]:
    """Strip reasoning content from model output.

    Handles all edge cases:
    - Standard: ``<think>...</think>`` followed by the answer
    - Nested: ``<think>`` blocks with inner tags (iterative removal)
    - Orphan open: ``<think>...`` with no closing tag (truncated output)
    - Orphan close: ``...</think>`` appearing without an opening tag
    - Alternative tags: ``<thinking>``, ``<reasoning>``
    - No tags: pass through unchanged

    Args:
        text: Raw LLM output that may contain reasoning blocks

    Returns:
        (cleaned_text, reasoning_content) — cleaned_text is safe for
        CrewAI's ReAct parser, reasoning_content contains what was
        stripped (for logging/debugging).
    """
    if not text or not isinstance(text, str):
        return text or "", ""

    original = text
    reasoning_parts: List[str] = []

    # 1. Extract and remove all well-formed <think>...</think> blocks
    def _capture(match: re.Match) -> str:
        reasoning_parts.append(match.group(0))
        return ""

    # Iteratively remove until no more matches (handles nested tags)
    prev = None
    while prev != text:
        prev = text
        text = _THINK_BLOCK_RE.sub(_capture, text)

    # 2. Handle orphan opening <think> (no closing tag — common with streaming cutoffs)
    orphan_match = _ORPHAN_THINK_OPEN_RE.match(text)
    if orphan_match:
        reasoning_parts.append(orphan_match.group(0))
        text = text[orphan_match.end():]

    # 3. Remove any remaining orphan closing tags
    text = _ORPHAN_THINK_CLOSE_RE.sub("", text)

    # 4. Handle alternative tag styles
    for pattern, replacement in _ALT_THINK_PATTERNS:
        def _capture_alt(match: re.Match) -> str:
            reasoning_parts.append(match.group(0))
            return replacement
        text = pattern.sub(_capture_alt, text)

    # 5. Clean up: strip leading/trailing whitespace, collapse triple+ newlines
    text = text.strip()
    text = re.sub(r"\n{3,}", "\n\n", text)

    reasoning = "\n".join(reasoning_parts).strip()

    if reasoning:
        # getLogger(__name__) returns the same module-level logger; fetched
        # here so this function stands alone.
        _log = logging.getLogger(__name__)
        if _log.isEnabledFor(logging.DEBUG):
            _log.debug(
                "[ReasoningNormalizer] Stripped %d chars of reasoning from %d-char response",
                len(reasoning), len(original),
            )

    return text, reasoning
+
+
+# ─────────────────────────────────────────────────────────────────
+# ReasoningAwareLLM — transparent wrapper around crewai.LLM
+# ─────────────────────────────────────────────────────────────────
+
+
class ReasoningAwareLLM:
    """Drop-in wrapper around crewai.LLM that normalizes reasoning model output.

    Intercepts every response from the wrapped LLM and strips
    ``<think>...</think>`` blocks before returning to CrewAI's ReAct parser.
    The wrapper is fully transparent — all other attributes and methods
    delegate to the inner LLM.

    Usage:
        from crewai import LLM
        from gitpilot.reasoning_normalizer import ReasoningAwareLLM

        base_llm = LLM(model="ollama/deepseek-r1:14b", ...)
        wrapped = ReasoningAwareLLM(base_llm)
        # Pass `wrapped` to CrewAI Agents — they won't see <think> tags

    Why a wrapper instead of subclassing:
    - CrewAI's LLM class changes between versions; composition is safer
    - Works with any LLM-like object that has a `.call()` or `.__call__()` method
    - Zero risk of breaking CrewAI internals
    """

    def __init__(self, inner_llm: Any) -> None:
        """Wrap an existing CrewAI LLM instance.

        Args:
            inner_llm: Any object with a .call() method (typically crewai.LLM)
        """
        # object.__setattr__ bypasses our own __setattr__, which would
        # otherwise forward these to the (not-yet-set) inner LLM.
        object.__setattr__(self, "_inner", inner_llm)
        object.__setattr__(self, "_last_reasoning", "")
        object.__setattr__(self, "_strip_count", 0)

    # ── Core method: intercept .call() ────────────────────────────

    def call(
        self,
        messages: Any,
        tools: Optional[List[Any]] = None,
        callbacks: Optional[List[Any]] = None,
        available_functions: Optional[dict] = None,
        from_task: Optional[Any] = None,
        from_agent: Optional[Any] = None,
        **kwargs: Any,
    ) -> Any:
        """Call the wrapped LLM and normalize any reasoning content.

        Forwards all arguments to the inner LLM, then strips ``<think>``
        blocks from string responses before returning. Non-string responses
        (dicts, tool calls) are passed through unchanged.
        """
        # Also clean up historical messages — DeepSeek's own docs recommend
        # removing <think>{cot}</think> from prior messages to avoid context
        # length explosion and performance degradation.
        cleaned_messages = self._clean_messages(messages)

        try:
            result = self._inner.call(
                cleaned_messages,
                tools=tools,
                callbacks=callbacks,
                available_functions=available_functions,
                from_task=from_task,
                from_agent=from_agent,
                **kwargs,
            )
        except TypeError:
            # Older CrewAI versions don't accept from_task/from_agent
            try:
                result = self._inner.call(
                    cleaned_messages,
                    tools=tools,
                    callbacks=callbacks,
                    available_functions=available_functions,
                )
            except TypeError:
                # Minimal signature fallback
                result = self._inner.call(cleaned_messages)

        return self._normalize_response(result)

    def __call__(self, *args: Any, **kwargs: Any) -> Any:
        """Support callable-style invocation (some CrewAI code paths use this)."""
        return self.call(*args, **kwargs)

    # ── Response normalization ────────────────────────────────────

    def _normalize_response(self, response: Any) -> Any:
        """Strip reasoning content from the LLM response.

        Handles multiple response shapes that CrewAI might return:
        - Plain string (most common)
        - Dict with 'content' key
        - Dict with 'choices' → OpenAI-compatible shape
        - Objects with .content attribute
        """
        # Plain string
        if isinstance(response, str):
            cleaned, reasoning = strip_reasoning_content(response)
            if reasoning:
                object.__setattr__(self, "_last_reasoning", reasoning)
                object.__setattr__(self, "_strip_count", self._strip_count + 1)
                logger.info(
                    "[ReasoningNormalizer] Stripped reasoning block from LLM response "
                    "(%d chars removed, %d chars remaining)",
                    len(reasoning), len(cleaned),
                )
            # If the cleaned result is empty, return a safe fallback so CrewAI
            # doesn't crash with "Invalid response from LLM call - None or empty"
            if not cleaned.strip():
                logger.warning(
                    "[ReasoningNormalizer] Response was empty after stripping reasoning. "
                    "This usually means the model only produced <think> content without "
                    "a real answer. Returning fallback text."
                )
                return (
                    "Thought: I need to gather more information before I can answer.\n"
                    "Final Answer: I apologize, but I was unable to produce a clear "
                    "response. Please try again or switch to a non-reasoning model."
                )
            return cleaned

        # Dict response (openai-compatible shape)
        if isinstance(response, dict):
            # OpenAI format: {"choices": [{"message": {"content": "..."}}]}
            choices = response.get("choices")
            if isinstance(choices, list) and choices:
                for choice in choices:
                    msg = choice.get("message") if isinstance(choice, dict) else None
                    if isinstance(msg, dict) and isinstance(msg.get("content"), str):
                        cleaned, reasoning = strip_reasoning_content(msg["content"])
                        msg["content"] = cleaned
                        if reasoning:
                            # Store in reasoning_content (vLLM/DeepSeek convention)
                            msg.setdefault("reasoning_content", reasoning)
                return response
            # Simple dict with content field
            if isinstance(response.get("content"), str):
                cleaned, reasoning = strip_reasoning_content(response["content"])
                response["content"] = cleaned
                if reasoning:
                    response.setdefault("reasoning_content", reasoning)
            return response

        # Object with .content attribute
        if hasattr(response, "content") and isinstance(response.content, str):
            cleaned, reasoning = strip_reasoning_content(response.content)
            try:
                response.content = cleaned
            except AttributeError:
                pass  # read-only attribute
            return response

        # Unknown shape — pass through unchanged
        return response

    def _clean_messages(self, messages: Any) -> Any:
        """Strip ``<think>`` blocks from historical messages.

        DeepSeek's official docs state that leaving <think>{cot}</think> in
        prior messages degrades performance and explodes context length.
        """
        if not isinstance(messages, list):
            return messages

        cleaned = []
        for msg in messages:
            if isinstance(msg, dict):
                content = msg.get("content")
                # Restored tag literals: the previous check tested
                # '"" in content.lower()', which is always True and forced a
                # strip pass over every message.
                if isinstance(content, str) and (
                    "<think>" in content.lower() or "</think>" in content.lower()
                ):
                    new_content, _ = strip_reasoning_content(content)
                    cleaned.append({**msg, "content": new_content})
                    continue
            cleaned.append(msg)
        return cleaned

    # ── Delegate all other attribute access to the inner LLM ──────

    def __getattr__(self, name: str) -> Any:
        """Forward any attribute not defined on the wrapper to the inner LLM."""
        return getattr(self._inner, name)

    def __setattr__(self, name: str, value: Any) -> None:
        """Forward attribute sets to the inner LLM (except our private state)."""
        if name in ("_inner", "_last_reasoning", "_strip_count"):
            object.__setattr__(self, name, value)
        else:
            setattr(self._inner, name, value)

    def __repr__(self) -> str:
        return f"ReasoningAwareLLM(inner={self._inner!r}, strip_count={self._strip_count})"
+
+
+# ─────────────────────────────────────────────────────────────────
+# Factory: wrap only if needed
+# ─────────────────────────────────────────────────────────────────
+
+
def wrap_if_reasoning_model(llm: Any, model_name: str) -> Any:
    """Return a ReasoningAwareLLM wrapper if model_name is a reasoning model,
    otherwise return the original llm unchanged.

    This is the safe entry point — zero overhead for non-reasoning models.

    Args:
        llm: The underlying LLM instance (e.g., crewai.LLM)
        model_name: The model identifier (e.g., "ollama/deepseek-r1:14b")

    Returns:
        Either the wrapped LLM or the original.
    """
    if not is_reasoning_model(model_name):
        return llm
    logger.info(
        "[ReasoningNormalizer] Wrapping LLM with ReasoningAwareLLM "
        "for reasoning model: %s",
        model_name,
    )
    return ReasoningAwareLLM(llm)
diff --git a/gitpilot/resilience.py b/gitpilot/resilience.py
new file mode 100644
index 0000000000000000000000000000000000000000..d02c223c1532fadb28e6f0bccd46afc752deae79
--- /dev/null
+++ b/gitpilot/resilience.py
@@ -0,0 +1,206 @@
+# gitpilot/resilience.py
+"""Production resilience primitives: circuit breaker, timeouts, health probes.
+
+Keeps the app responsive when upstream LLM providers are slow or down.
+"""
+from __future__ import annotations
+
+import asyncio
+import logging
+import os
+import time
+from dataclasses import dataclass, field
+from enum import Enum
+from typing import Any, Callable, TypeVar
+
+import httpx
+
+logger = logging.getLogger(__name__)
+
+# ---------------------------------------------------------------------------
+# Configuration (overridable via environment variables)
+# ---------------------------------------------------------------------------
+AGENT_TIMEOUT_SECONDS = int(os.getenv("GITPILOT_AGENT_TIMEOUT", "300")) # 5 min
+LLM_CONNECT_TIMEOUT = float(os.getenv("GITPILOT_LLM_CONNECT_TIMEOUT", "15"))
+LLM_READ_TIMEOUT = float(os.getenv("GITPILOT_LLM_READ_TIMEOUT", "120"))
+
+
+# ---------------------------------------------------------------------------
+# Circuit Breaker
+# ---------------------------------------------------------------------------
class CircuitState(Enum):
    """Lifecycle states of a circuit breaker."""

    CLOSED = "closed"  # normal operation
    OPEN = "open"  # failing — reject immediately
    HALF_OPEN = "half_open"  # testing recovery


@dataclass
class CircuitBreaker:
    """Simple circuit breaker for upstream LLM / OllaBridge calls.

    - After ``failure_threshold`` consecutive failures → OPEN (reject fast).
    - After ``recovery_timeout`` seconds → HALF_OPEN (allow one probe).
    - If the probe succeeds → CLOSED; if it fails → OPEN again.

    Not thread-safe by itself; callers sharing one instance across threads
    should serialize access externally.
    """

    name: str = "llm"
    failure_threshold: int = int(os.getenv("GITPILOT_CB_FAILURES", "3"))
    recovery_timeout: float = float(os.getenv("GITPILOT_CB_RECOVERY", "30"))

    state: CircuitState = field(default=CircuitState.CLOSED, init=False)
    _failure_count: int = field(default=0, init=False)
    _last_failure_time: float = field(default=0.0, init=False)
    # True while the single HALF_OPEN probe is outstanding; prevents a
    # stampede of probes hitting a still-recovering provider.
    _probe_in_flight: bool = field(default=False, init=False)

    def record_success(self) -> None:
        """Reset the breaker after a successful upstream call."""
        self._failure_count = 0
        self._probe_in_flight = False
        if self.state != CircuitState.CLOSED:
            logging.getLogger(__name__).info(
                "[CircuitBreaker:%s] recovered → CLOSED", self.name
            )
        self.state = CircuitState.CLOSED

    def record_failure(self) -> None:
        """Count a failed upstream call; trip to OPEN at the threshold."""
        self._probe_in_flight = False
        self._failure_count += 1
        self._last_failure_time = time.monotonic()
        if self._failure_count >= self.failure_threshold:
            if self.state != CircuitState.OPEN:
                logging.getLogger(__name__).warning(
                    "[CircuitBreaker:%s] %d failures → OPEN (reject for %ds)",
                    self.name,
                    self._failure_count,
                    self.recovery_timeout,
                )
            self.state = CircuitState.OPEN

    def allow_request(self) -> bool:
        """Return True if the request should proceed."""
        if self.state == CircuitState.CLOSED:
            return True
        if self.state == CircuitState.OPEN:
            elapsed = time.monotonic() - self._last_failure_time
            if elapsed >= self.recovery_timeout:
                logging.getLogger(__name__).info(
                    "[CircuitBreaker:%s] recovery window → HALF_OPEN", self.name
                )
                self.state = CircuitState.HALF_OPEN
                self._probe_in_flight = True
                return True
            return False
        # HALF_OPEN — allow exactly one outstanding probe. (Previously every
        # caller was let through in HALF_OPEN, contradicting the documented
        # "allow one probe" behavior and hammering a recovering provider.)
        if self._probe_in_flight:
            return False
        self._probe_in_flight = True
        return True

    def status_dict(self) -> dict:
        """Snapshot of breaker state for health endpoints."""
        return {
            "name": self.name,
            "state": self.state.value,
            "failure_count": self._failure_count,
        }
+
+
# Global circuit breaker instance for LLM calls
# Shared process-wide; guards every outbound LLM/OllaBridge request.
llm_circuit = CircuitBreaker(name="llm_provider")
+
+
+# ---------------------------------------------------------------------------
+# Timeout wrapper for CrewAI kickoff calls
+# ---------------------------------------------------------------------------
+T = TypeVar("T")
+
+
async def run_with_timeout(
    coro,
    timeout: float = AGENT_TIMEOUT_SECONDS,
    label: str = "agent",
) -> Any:
    """Run an async operation with a hard timeout.

    Wraps ``asyncio.wait_for`` with a descriptive error message.
    Use this around ``asyncio.to_thread(ctx.run, _run)`` calls.

    Args:
        coro: The awaitable to run.
        timeout: Hard limit in seconds (defaults to GITPILOT_AGENT_TIMEOUT).
        label: Human-readable operation name used in logs and the error.

    Raises:
        TimeoutError: If the awaitable does not finish within ``timeout``.
    """
    try:
        return await asyncio.wait_for(coro, timeout=timeout)
    except asyncio.TimeoutError:
        logger.error("[Timeout] %s exceeded %ds limit", label, timeout)
        # `from None` suppresses the bare asyncio.TimeoutError chain; the
        # message below carries all the context callers need.
        raise TimeoutError(
            f"Agent operation '{label}' timed out after {timeout}s. "
            "The LLM provider may be overloaded or unreachable."
        ) from None
+
+
+# ---------------------------------------------------------------------------
+# Deep health probe
+# ---------------------------------------------------------------------------
async def deep_health_check() -> dict:
    """Check LLM provider connectivity and return detailed status.

    Returns a dict with an overall ``status`` ("healthy" or "degraded"),
    the active provider, circuit-breaker state, whether CrewAI has been
    imported, provider reachability, and peak memory use where available.
    Never raises: every probe failure downgrades ``status`` instead.
    """
    from .settings import get_settings, LLMProvider

    settings = get_settings()
    provider = settings.provider
    result: dict[str, Any] = {
        "status": "healthy",
        "service": "gitpilot-backend",
        "provider": provider.value,
        "circuit_breaker": llm_circuit.status_dict(),
        "crewai_loaded": False,
    }

    # Check if CrewAI is loaded (best-effort: module may not exist yet)
    try:
        from .agentic import _crewai_cache
        result["crewai_loaded"] = bool(_crewai_cache)
    except Exception:
        pass

    # Check LLM provider connectivity with short, bounded timeouts so the
    # health endpoint itself stays fast.
    try:
        base_url = _get_provider_base_url(settings, provider)
        if base_url:
            async with httpx.AsyncClient(
                timeout=httpx.Timeout(connect=5.0, read=5.0, write=5.0, pool=5.0)
            ) as client:
                # OllaBridge exposes /health; OpenAI-compatible providers are
                # probed via the cheap /v1/models listing endpoint.
                if provider == LLMProvider.ollabridge:
                    resp = await client.get(f"{base_url.rstrip('/')}/health")
                else:
                    resp = await client.get(f"{base_url.rstrip('/')}/v1/models")
                # 5xx counts as unreachable; 4xx (e.g. auth) still proves the
                # endpoint is up.
                result["provider_reachable"] = resp.status_code < 500
                result["provider_status_code"] = resp.status_code
        else:
            result["provider_reachable"] = None  # unknown provider → can't probe
    except httpx.ConnectError:
        result["status"] = "degraded"
        result["provider_reachable"] = False
        result["provider_error"] = "connection_refused"
    except httpx.TimeoutException:
        result["status"] = "degraded"
        result["provider_reachable"] = False
        result["provider_error"] = "timeout"
    except Exception as exc:
        result["status"] = "degraded"
        result["provider_reachable"] = False
        result["provider_error"] = str(exc)

    # Memory info (resource is POSIX-only; silently skipped elsewhere)
    try:
        import resource
        usage = resource.getrusage(resource.RUSAGE_SELF)
        # ru_maxrss is KB on Linux (bytes on macOS) — reported here as MB
        # assuming the Linux convention used in the deployment container.
        result["memory_mb"] = round(usage.ru_maxrss / 1024, 1)  # Linux: KB → MB
    except Exception:
        pass

    return result
+
+
def _get_provider_base_url(settings, provider) -> str | None:
    """Return the base URL for the active provider, or None if unknown.

    Explicitly configured URLs win; otherwise each provider falls back to
    its conventional default endpoint.
    """
    from .settings import LLMProvider

    if provider == LLMProvider.ollabridge:
        fallback = os.getenv("OLLABRIDGE_BASE_URL", "http://localhost:8000")
        return settings.ollabridge.base_url or fallback
    if provider == LLMProvider.ollama:
        fallback = os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")
        return settings.ollama.base_url or fallback
    if provider == LLMProvider.openai:
        return settings.openai.base_url or "https://api.openai.com"
    if provider == LLMProvider.claude:
        return settings.claude.base_url or "https://api.anthropic.com"
    if provider == LLMProvider.watsonx:
        return settings.watsonx.base_url or "https://us-south.ml.cloud.ibm.com"
    return None
diff --git a/gitpilot/search_tools.py b/gitpilot/search_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..8cb5837f893ed475a90d3939cd75bcfe77198933
--- /dev/null
+++ b/gitpilot/search_tools.py
@@ -0,0 +1,212 @@
+"""CrewAI tools for GitHub Search operations.
+
+Provides tools for searching code, issues, repositories, and users
+across GitHub.
+"""
+import asyncio
+from typing import Optional
+
+from crewai.tools import tool
+
+from .agent_tools import get_repo_context
+from . import github_search as gs
+
+
+def _run_async(coro):
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+ try:
+ return loop.run_until_complete(coro)
+ finally:
+ loop.close()
+
+
@tool("Search code in repository")
def search_code(
    query: str,
    language: str = "",
    path: str = "",
    per_page: int = 20,
) -> str:
    """Searches for code by keywords, symbols, or patterns. Scoped to the current repository by default. Optional: language filter, path filter."""
    try:
        owner, repo, token, _branch = get_repo_context()
        response = _run_async(
            gs.search_code(
                query,
                owner=owner,
                repo=repo,
                language=language or None,
                path=path or None,
                per_page=per_page,
                token=token,
            )
        )
        hits = response.get("items", [])
        if not hits:
            return f"No code matches found for '{query}' in {owner}/{repo}."
        # Header first, then one indented entry per hit.
        total = response.get("total_count", 0)
        output = [f"Code search results for '{query}' in {owner}/{repo} ({total} total):"]
        output.extend(
            f"  {hit.get('path', '?')} "
            f"(score: {hit.get('score', '?')})\n"
            f"    URL: {hit.get('html_url', '')}"
            for hit in hits
        )
        return "\n".join(output)
    except Exception as e:
        return f"Error searching code: {e}"
+
+
@tool("Search code globally")
def search_code_global(
    query: str,
    language: str = "",
    per_page: int = 10,
) -> str:
    """Searches for code across ALL of GitHub (not scoped to a repo). Use for finding examples or patterns globally."""
    try:
        # Repo context is only needed for the token here.
        _owner, _repo, token, _branch = get_repo_context()
        response = _run_async(
            gs.search_code(query, language=language or None, per_page=per_page, token=token)
        )
        hits = response.get("items", [])
        if not hits:
            return f"No global code matches found for '{query}'."
        total = response.get("total_count", 0)
        output = [f"Global code search for '{query}' ({total} total):"]
        for hit in hits:
            source_repo = hit.get("repository", {}).get("full_name", "?")
            output.append(
                f"  [{source_repo}] {hit.get('path', '?')}\n"
                f"    URL: {hit.get('html_url', '')}"
            )
        return "\n".join(output)
    except Exception as e:
        return f"Error searching code globally: {e}"
+
+
@tool("Search issues and pull requests")
def search_issues(
    query: str,
    state: str = "",
    label: str = "",
    is_pr: str = "",
    per_page: int = 20,
) -> str:
    """Searches issues/PRs by keywords. Scoped to current repo. is_pr: 'true' for PRs only, 'false' for issues only, empty for both."""
    try:
        owner, repo, token, _branch = get_repo_context()
        # Map the string flag to an optional bool; anything else means "both".
        pr_flag = {"true": True, "false": False}.get(is_pr.lower())
        response = _run_async(
            gs.search_issues(
                query,
                owner=owner,
                repo=repo,
                state=state or None,
                label=label or None,
                is_pr=pr_flag,
                per_page=per_page,
                token=token,
            )
        )
        hits = response.get("items", [])
        if not hits:
            return f"No matching issues/PRs for '{query}' in {owner}/{repo}."
        total = response.get("total_count", 0)
        output = [f"Issue/PR search for '{query}' ({total} total):"]
        for hit in hits:
            # The search API marks pull requests with a "pull_request" key.
            kind = "PR" if "pull_request" in hit else "Issue"
            output.append(
                f"  [{kind}] #{hit.get('number')} [{hit.get('state')}] {hit.get('title')}\n"
                f"    URL: {hit.get('html_url', '')}"
            )
        return "\n".join(output)
    except Exception as e:
        return f"Error searching issues: {e}"
+
+
@tool("Search GitHub users and organizations")
def search_users(
    query: str,
    type_filter: str = "",
    location: str = "",
    language: str = "",
    per_page: int = 10,
) -> str:
    """Searches for GitHub users or organizations. type_filter: 'user' or 'org'. Optional: location, language."""
    try:
        # Only the token is needed from the repo context.
        _owner, _repo, token, _branch = get_repo_context()
        response = _run_async(
            gs.search_users(
                query,
                type_filter=type_filter or None,
                location=location or None,
                language=language or None,
                per_page=per_page,
                token=token,
            )
        )
        hits = response.get("items", [])
        if not hits:
            return f"No users/orgs found for '{query}'."
        total = response.get("total_count", 0)
        entries = [
            f"  @{profile.get('login', '?')} ({profile.get('type', '?')})\n"
            f"    URL: {profile.get('html_url', '')}"
            for profile in hits
        ]
        return "\n".join([f"User search for '{query}' ({total} total):", *entries])
    except Exception as e:
        return f"Error searching users: {e}"
+
+
@tool("Search repositories")
def search_repositories(
    query: str,
    language: str = "",
    sort: str = "",
    per_page: int = 10,
) -> str:
    """Searches for repositories across GitHub. Optional: language filter, sort (stars/forks/updated)."""
    try:
        _owner, _repo, token, _branch = get_repo_context()
        result = _run_async(
            gs.search_repositories(
                query,
                language=language or None,
                sort=sort or None,
                per_page=per_page,
                token=token,
            )
        )
        items = result.get("items", [])
        total = result.get("total_count", 0)
        if not items:
            return f"No repositories found for '{query}'."
        lines = [f"Repository search for '{query}' ({total} total):"]
        for item in items:
            # Bug fix: GitHub returns an explicit null description for repos
            # without one.  dict.get("description", default) returns that None
            # (the key exists), and None[:100] raised TypeError.  Use `or` so
            # both a missing key and an explicit null fall back cleanly.
            description = item.get("description") or "No description"
            lines.append(
                f"  {item.get('full_name', '?')} "
                f"({item.get('stargazers_count', 0)} stars)\n"
                f"    {description[:100]}\n"
                f"    URL: {item.get('html_url', '')}"
            )
        return "\n".join(lines)
    except Exception as e:
        return f"Error searching repositories: {e}"
+
+
# Export all search tools
# Registry consumed by the agent wiring (presumably passed to the CrewAI
# agent's tool list — confirm at the call site).  Keep in sync with the
# @tool-decorated functions defined above.
SEARCH_TOOLS = [
    search_code,
    search_code_global,
    search_issues,
    search_users,
    search_repositories,
]
diff --git a/gitpilot/security.py b/gitpilot/security.py
new file mode 100644
index 0000000000000000000000000000000000000000..8509b139908c9c44b37f2c965ef70f5d7798e24b
--- /dev/null
+++ b/gitpilot/security.py
@@ -0,0 +1,513 @@
+# gitpilot/security.py
+"""AI-powered security scanner — beyond traditional SAST.
+
+Combines pattern-based detection with semantic analysis to find
+vulnerabilities that static analysis tools typically miss:
+
+• **Secret detection** — API keys, tokens, passwords in code and config
+• **Dependency audit** — known CVEs in transitive dependency trees
+• **Code-flow analysis** — injection, XSS, SSRF via taint-style tracking
+• **Configuration review** — insecure defaults, overly permissive CORS, etc.
+• **AI reasoning** — uses LLM to evaluate context-dependent risks
+
+Inspired by:
+- OWASP Top 10 (2021) categorisation
+- Semgrep's rule-based approach with semantic matching
+- GitHub Advanced Security's CodeQL data-flow analysis
+- Google's *"Fixing a Trillion Bugs"* (2024) on AI-assisted vulnerability detection
+"""
+from __future__ import annotations
+
+import os
+import re
+from dataclasses import dataclass, field
+from enum import Enum
+from pathlib import Path
+from typing import Any
+
+# ---------------------------------------------------------------------------
+# Enums & data models
+# ---------------------------------------------------------------------------
+
class Severity(str, Enum):
    """Finding severity, from most to least urgent.

    str-valued so members serialise directly in JSON payloads.
    """

    CRITICAL = "critical"
    HIGH = "high"
    MEDIUM = "medium"
    LOW = "low"
    INFO = "info"
+
+
class VulnerabilityCategory(str, Enum):
    """Buckets used to classify findings (loosely OWASP-Top-10 aligned)."""

    SECRET_LEAK = "secret_leak"          # hardcoded credentials / tokens
    INJECTION = "injection"              # SQL / command injection
    XSS = "xss"                          # cross-site scripting
    SSRF = "ssrf"                        # server-side request forgery
    AUTH_ISSUE = "auth_issue"
    CRYPTO_WEAKNESS = "crypto_weakness"  # weak hashes, insecure randomness
    INSECURE_CONFIG = "insecure_config"  # permissive CORS, disabled TLS checks
    DEPENDENCY_CVE = "dependency_cve"
    PATH_TRAVERSAL = "path_traversal"
    SENSITIVE_DATA = "sensitive_data"
+
+
@dataclass
class Finding:
    """A single security finding produced by SecurityScanner."""

    rule_id: str               # id of the matching rule, e.g. "SEC001"
    title: str                 # short human-readable issue name
    description: str           # one-line explanation of the detection
    severity: Severity
    category: VulnerabilityCategory
    file_path: str = ""        # path of the offending file ("" for diff-only scans without a header)
    line_number: int = 0       # 1-based line of the match; 0 = unknown
    snippet: str = ""          # matched line, truncated (and redacted for secrets) by the scanner
    recommendation: str = ""   # suggested remediation text
    cwe_id: str | None = None  # e.g. "CWE-798"; None when no CWE mapping
    confidence: float = 0.8  # 0.0 - 1.0

    def to_dict(self) -> dict[str, Any]:
        """Return a JSON-safe dict; enum members are flattened to their string values."""
        return {
            "rule_id": self.rule_id,
            "title": self.title,
            "description": self.description,
            "severity": self.severity.value,
            "category": self.category.value,
            "file_path": self.file_path,
            "line_number": self.line_number,
            "snippet": self.snippet,
            "recommendation": self.recommendation,
            "cwe_id": self.cwe_id,
            "confidence": self.confidence,
        }
+
+
@dataclass
class ScanResult:
    """Aggregate outcome of a security scan run."""

    findings: list[Finding] = field(default_factory=list)
    files_scanned: int = 0
    scan_duration_ms: float = 0.0
    summary: dict[str, int] = field(default_factory=dict)

    def to_dict(self) -> dict[str, Any]:
        """Serialise for API responses, adding a derived total_findings count."""
        payload: dict[str, Any] = {
            "findings": [finding.to_dict() for finding in self.findings],
            "files_scanned": self.files_scanned,
            "scan_duration_ms": self.scan_duration_ms,
            "summary": self.summary,
        }
        payload["total_findings"] = len(self.findings)
        return payload

    def by_severity(self, severity: Severity) -> list[Finding]:
        """Return only the findings whose severity equals *severity*."""
        matches: list[Finding] = []
        for finding in self.findings:
            if finding.severity == severity:
                matches.append(finding)
        return matches
+
+
+# ---------------------------------------------------------------------------
+# Secret detection patterns
+# ---------------------------------------------------------------------------
+
# Each rule: rule_id, human-readable title, regex, severity, CWE id.
# Matched spans are partially masked by SecurityScanner._redact before being
# stored in Finding.snippet, so raw secrets never appear in scan output.
_SECRET_PATTERNS: list[dict[str, Any]] = [
    {
        "rule_id": "SEC001",
        "title": "AWS Access Key",
        # Standard AWS key-id prefixes followed by 16 uppercase alphanumerics.
        "pattern": r"(?:AKIA|ABIA|ACCA|ASIA)[0-9A-Z]{16}",
        "severity": Severity.CRITICAL,
        "cwe": "CWE-798",
    },
    {
        "rule_id": "SEC002",
        "title": "GitHub Token",
        # ghp_/gho_/ghu_/ghs_/ghr_ token formats.
        "pattern": r"gh[pousr]_[A-Za-z0-9_]{36,255}",
        "severity": Severity.CRITICAL,
        "cwe": "CWE-798",
    },
    {
        "rule_id": "SEC003",
        "title": "Generic API Key",
        "pattern": r"(?i)(?:api[_-]?key|apikey)\s*[:=]\s*['\"]([A-Za-z0-9_\-]{20,})['\"]",
        "severity": Severity.HIGH,
        "cwe": "CWE-798",
    },
    {
        "rule_id": "SEC004",
        "title": "Password in Code",
        # NOTE(review): heuristic — any quoted string of 8+ chars assigned to a
        # password-ish name matches; expect false positives on placeholders.
        "pattern": r"(?i)(?:password|passwd|pwd)\s*[:=]\s*['\"]([^'\"]{8,})['\"]",
        "severity": Severity.HIGH,
        "cwe": "CWE-798",
    },
    {
        "rule_id": "SEC005",
        "title": "Private Key",
        "pattern": r"-----BEGIN (?:RSA |EC |DSA )?PRIVATE KEY-----",
        "severity": Severity.CRITICAL,
        "cwe": "CWE-321",
    },
    {
        "rule_id": "SEC006",
        "title": "JWT Secret",
        "pattern": r"(?i)(?:jwt[_-]?secret|token[_-]?secret)\s*[:=]\s*['\"]([^'\"]{8,})['\"]",
        "severity": Severity.HIGH,
        "cwe": "CWE-798",
    },
    {
        "rule_id": "SEC007",
        "title": "Slack Token",
        "pattern": r"xox[bporas]-[0-9]{10,13}-[0-9]{10,13}[a-zA-Z0-9-]*",
        "severity": Severity.HIGH,
        "cwe": "CWE-798",
    },
]
+
+# ---------------------------------------------------------------------------
+# Code vulnerability patterns
+# ---------------------------------------------------------------------------
+
# Each rule adds: category, remediation text, and the set of file extensions
# ("file_types") the rule applies to; an empty/missing set means all files.
_CODE_PATTERNS: list[dict[str, Any]] = [
    {
        "rule_id": "SEC100",
        "title": "SQL Injection Risk",
        # NOTE(review): heuristic — flags f-string/interpolated arguments to
        # execute()/query(); concatenation-built queries are not caught.
        "pattern": r"(?i)(?:execute|cursor\.execute|\.query)\s*\(\s*[f'\"].*\{.*\}",
        "severity": Severity.HIGH,
        "category": VulnerabilityCategory.INJECTION,
        "cwe": "CWE-89",
        "recommendation": "Use parameterised queries instead of string interpolation.",
        "file_types": {".py", ".rb", ".js", ".ts"},
    },
    {
        "rule_id": "SEC101",
        "title": "Command Injection Risk",
        # Shell/exec calls whose argument contains concatenation or an f-string.
        "pattern": r"(?i)(?:os\.system|subprocess\.call|subprocess\.Popen|exec|eval)\s*\(.*[\+f\{]",
        "severity": Severity.CRITICAL,
        "category": VulnerabilityCategory.INJECTION,
        "cwe": "CWE-78",
        "recommendation": "Use subprocess with a list of arguments instead of shell=True.",
        "file_types": {".py"},
    },
    {
        "rule_id": "SEC102",
        "title": "Cross-Site Scripting (XSS)",
        "pattern": r"(?i)(?:innerHTML|outerHTML|document\.write|\.html\()\s*[=\(]\s*[^'\"]*[\+`\$]",
        "severity": Severity.HIGH,
        "category": VulnerabilityCategory.XSS,
        "cwe": "CWE-79",
        "recommendation": "Sanitise user input before inserting into the DOM.",
        "file_types": {".js", ".ts", ".jsx", ".tsx", ".html"},
    },
    {
        "rule_id": "SEC103",
        "title": "Path Traversal",
        # File-open style calls fed directly from request/params/input.
        "pattern": r"(?i)(?:open|read_file|send_file|join)\s*\(.*(?:request\.|params\[|input\()",
        "severity": Severity.HIGH,
        "category": VulnerabilityCategory.PATH_TRAVERSAL,
        "cwe": "CWE-22",
        "recommendation": "Validate and canonicalise file paths. Reject path traversal sequences.",
        "file_types": {".py", ".js", ".ts", ".rb", ".go"},
    },
    {
        "rule_id": "SEC104",
        "title": "Insecure Random",
        # Matches stdlib `random` module calls, which are not crypto-safe.
        "pattern": r"(?i)\brandom\b\.(?:random|randint|choice|seed)\b",
        "severity": Severity.MEDIUM,
        "category": VulnerabilityCategory.CRYPTO_WEAKNESS,
        "cwe": "CWE-330",
        "recommendation": "Use secrets module or os.urandom() for security-sensitive randomness.",
        "file_types": {".py"},
    },
    {
        "rule_id": "SEC105",
        "title": "SSRF Risk",
        "pattern": r"(?i)(?:requests\.get|httpx\.get|fetch|urllib\.request)\s*\(.*(?:request\.|params|input)",
        "severity": Severity.HIGH,
        "category": VulnerabilityCategory.SSRF,
        "cwe": "CWE-918",
        "recommendation": "Validate and allowlist URLs before making HTTP requests.",
        "file_types": {".py", ".js", ".ts"},
    },
    {
        "rule_id": "SEC106",
        "title": "Weak Cryptographic Algorithm",
        # NOTE(review): no leading word boundary — also matches names that
        # merely end in "md5"/"sha1" (e.g. "get_sha1("); confirm tolerance.
        "pattern": r"(?i)(?:md5|sha1)\s*\(",
        "severity": Severity.MEDIUM,
        "category": VulnerabilityCategory.CRYPTO_WEAKNESS,
        "cwe": "CWE-327",
        "recommendation": "Use SHA-256 or stronger for cryptographic purposes.",
        "file_types": {".py", ".js", ".ts", ".go", ".java"},
    },
    {
        "rule_id": "SEC107",
        "title": "Insecure CORS Configuration",
        "pattern": r"""(?i)(?:access-control-allow-origin|cors_allow_origins?)\s*[:=]\s*['"]\*['"]""",
        "severity": Severity.MEDIUM,
        "category": VulnerabilityCategory.INSECURE_CONFIG,
        "cwe": "CWE-942",
        "recommendation": "Restrict CORS to specific trusted origins instead of '*'.",
        "file_types": {".py", ".js", ".ts", ".json", ".yaml", ".yml"},
    },
    {
        "rule_id": "SEC108",
        "title": "Disabled SSL Verification",
        # Covers requests/httpx (Python), Node TLS, and Go crypto/tls spellings.
        "pattern": r"(?i)(?:verify\s*=\s*False|rejectUnauthorized\s*[:=]\s*false|InsecureSkipVerify\s*[:=]\s*true)",
        "severity": Severity.HIGH,
        "category": VulnerabilityCategory.INSECURE_CONFIG,
        "cwe": "CWE-295",
        "recommendation": "Never disable SSL certificate verification in production.",
        "file_types": {".py", ".js", ".ts", ".go"},
    },
]
+
# Directories to always skip
# (VCS metadata, vendored deps, caches, build output — never user code)
_SKIP_DIRS = {
    ".git", "node_modules", "__pycache__", ".tox", ".venv",
    "venv", "dist", "build", ".eggs", ".mypy_cache", ".ruff_cache",
}

# File extensions to scan
# NOTE(review): pathlib gives bare dotfiles an empty suffix —
# Path(".env").suffix == "" — so the ".env" entry only matches names like
# "prod.env" when consumers filter on entry.suffix alone; confirm whether
# plain ".env" files should also be scanned.
_SCANNABLE_EXTENSIONS = {
    ".py", ".js", ".ts", ".jsx", ".tsx", ".rb", ".go", ".java",
    ".rs", ".c", ".cpp", ".h", ".cs", ".php", ".sh", ".bash",
    ".yaml", ".yml", ".json", ".toml", ".ini", ".cfg", ".env",
    ".html", ".xml", ".sql",
}
+
+
+# ---------------------------------------------------------------------------
+# Security scanner
+# ---------------------------------------------------------------------------
+
class SecurityScanner:
    """Pattern-based security scanner over files, directories, and diffs.

    Combines secret-detection regexes (applied to every file type) with
    per-language code-vulnerability regexes (filtered by file extension).
    Findings below ``min_confidence`` are dropped by :meth:`scan_file`.

    Usage::

        scanner = SecurityScanner()
        result = scanner.scan_directory("/path/to/repo")
        for finding in result.findings:
            print(f"[{finding.severity.value}] {finding.title} in {finding.file_path}:{finding.line_number}")
    """

    def __init__(
        self,
        extra_secret_patterns: list[dict[str, Any]] | None = None,
        extra_code_patterns: list[dict[str, Any]] | None = None,
        min_confidence: float = 0.5,
    ) -> None:
        # Custom rules are appended after the built-ins so defaults are kept.
        self.secret_patterns = _SECRET_PATTERNS + (extra_secret_patterns or [])
        self.code_patterns = _CODE_PATTERNS + (extra_code_patterns or [])
        self.min_confidence = min_confidence

    # --- Public API -------------------------------------------------------

    def scan_directory(self, directory: str) -> ScanResult:
        """Recursively scan a directory for security issues.

        Returns an empty ScanResult when *directory* does not exist or is
        not a directory.  Duration and a per-severity summary are recorded
        on the result.
        """
        import time

        start = time.monotonic()
        result = ScanResult()
        root = Path(directory)

        if not root.is_dir():
            return result

        for path in self._walk(root):
            findings = self.scan_file(str(path))
            result.findings.extend(findings)
            result.files_scanned += 1

        result.scan_duration_ms = (time.monotonic() - start) * 1000
        result.summary = self._build_summary(result.findings)
        return result

    def scan_file(self, file_path: str) -> list[Finding]:
        """Scan a single file; return findings at or above min_confidence."""
        findings: list[Finding] = []
        path = Path(file_path)
        suffix = path.suffix.lower()

        try:
            # errors="replace" keeps binary-ish content from raising; files we
            # cannot read at all are silently skipped.
            content = path.read_text(errors="replace")
        except (OSError, UnicodeDecodeError):
            return findings

        lines = content.splitlines()

        # Secret detection (all file types)
        findings.extend(self._check_secrets(lines, file_path))

        # Code pattern detection (filtered by file type)
        findings.extend(self._check_code_patterns(lines, file_path, suffix))

        # Filter by confidence threshold
        return [f for f in findings if f.confidence >= self.min_confidence]

    def scan_diff(self, diff_text: str) -> list[Finding]:
        """Scan a unified git diff for security issues in added lines only.

        This is useful for CI/CD pipelines to check only new changes.
        New-file line numbers are derived from the ``@@`` hunk headers and
        advanced for added and context lines; removed lines do not exist
        in the new file and therefore do not advance the counter.
        """
        findings: list[Finding] = []
        current_file = ""
        current_line = 0

        for line in diff_text.splitlines():
            # Track file name from the "+++ b/<path>" header
            if line.startswith("+++ b/"):
                current_file = line[6:]
                continue
            # Track line numbers from hunk headers ("@@ -a,b +c,d @@")
            if line.startswith("@@"):
                match = re.search(r"\+(\d+)", line)
                if match:
                    current_line = int(match.group(1)) - 1
                continue
            # Only scan added lines
            if line.startswith("+") and not line.startswith("+++"):
                current_line += 1
                added_text = line[1:]
                suffix = Path(current_file).suffix.lower() if current_file else ""

                for sp in self.secret_patterns:
                    if re.search(sp["pattern"], added_text):
                        findings.append(Finding(
                            rule_id=sp["rule_id"],
                            title=sp["title"],
                            description=f"Potential {sp['title'].lower()} found in diff.",
                            severity=sp["severity"],
                            category=VulnerabilityCategory.SECRET_LEAK,
                            file_path=current_file,
                            line_number=current_line,
                            snippet=added_text.strip()[:200],
                            recommendation="Remove the secret and rotate it immediately.",
                            cwe_id=sp.get("cwe"),
                        ))

                for cp in self.code_patterns:
                    file_types = cp.get("file_types", set())
                    if file_types and suffix not in file_types:
                        continue
                    if re.search(cp["pattern"], added_text):
                        findings.append(Finding(
                            rule_id=cp["rule_id"],
                            title=cp["title"],
                            description=f"Potential {cp['title'].lower()} in new code.",
                            severity=cp["severity"],
                            category=cp["category"],
                            file_path=current_file,
                            line_number=current_line,
                            snippet=added_text.strip()[:200],
                            recommendation=cp.get("recommendation", ""),
                            cwe_id=cp.get("cwe"),
                        ))
            elif not line.startswith("-"):
                # Context / metadata lines advance the new-file counter.
                current_line += 1

        return findings

    # --- Internal helpers -------------------------------------------------

    def _walk(self, root: Path):
        """Yield scannable files under *root*, skipping vendored/VCS dirs.

        Fixes versus the original implementation:

        * an unreadable directory (PermissionError/OSError from iterdir)
          is skipped instead of aborting the entire scan with an exception;
        * bare dotfile names such as ".env" are matched by *name* as well,
          because ``Path(".env").suffix`` is "" and the ".env" entry in
          _SCANNABLE_EXTENSIONS could never match via suffix alone;
        * the redundant hidden-name pre-check was dropped — files named
          like skip dirs have no scannable suffix and were excluded anyway.
        """
        try:
            entries = sorted(root.iterdir())
        except OSError:
            return
        for entry in entries:
            if entry.is_dir():
                if entry.name in _SKIP_DIRS:
                    continue
                yield from self._walk(entry)
            elif entry.is_file():
                if (entry.suffix.lower() in _SCANNABLE_EXTENSIONS
                        or entry.name.lower() in _SCANNABLE_EXTENSIONS):
                    yield entry

    def _check_secrets(self, lines: list[str], file_path: str) -> list[Finding]:
        """Check all lines for secret patterns (every file type)."""
        findings: list[Finding] = []
        # Likely test/fixture files get lower confidence: still above the
        # default min_confidence of 0.5, but easy for callers to filter out.
        is_test = "test" in file_path.lower() or "fixture" in file_path.lower()

        for i, line in enumerate(lines, start=1):
            for sp in self.secret_patterns:
                if re.search(sp["pattern"], line):
                    findings.append(Finding(
                        rule_id=sp["rule_id"],
                        title=sp["title"],
                        description=f"Potential {sp['title'].lower()} detected.",
                        severity=sp["severity"],
                        category=VulnerabilityCategory.SECRET_LEAK,
                        file_path=file_path,
                        line_number=i,
                        snippet=self._redact(line.strip(), sp["pattern"]),
                        recommendation="Remove the secret from source code and rotate it.",
                        cwe_id=sp.get("cwe"),
                        confidence=0.6 if is_test else 0.9,
                    ))
        return findings

    def _check_code_patterns(
        self, lines: list[str], file_path: str, suffix: str,
    ) -> list[Finding]:
        """Check lines against code vulnerability patterns for this file type."""
        findings: list[Finding] = []
        for i, line in enumerate(lines, start=1):
            for cp in self.code_patterns:
                file_types = cp.get("file_types", set())
                if file_types and suffix not in file_types:
                    continue
                if re.search(cp["pattern"], line):
                    findings.append(Finding(
                        rule_id=cp["rule_id"],
                        title=cp["title"],
                        description=f"Potential {cp['title'].lower()} vulnerability.",
                        severity=cp["severity"],
                        category=cp["category"],
                        file_path=file_path,
                        line_number=i,
                        snippet=line.strip()[:200],
                        recommendation=cp.get("recommendation", ""),
                        cwe_id=cp.get("cwe"),
                    ))
        return findings

    @staticmethod
    def _redact(text: str, pattern: str) -> str:
        """Partially mask matched secrets so snippets are safe to display."""
        def _mask(m: re.Match) -> str:
            val = m.group(0)
            if len(val) <= 8:
                return val[:2] + "***"
            return val[:4] + "***" + val[-4:]

        return re.sub(pattern, _mask, text)[:200]

    @staticmethod
    def _build_summary(findings: list[Finding]) -> dict[str, int]:
        """Count findings per severity value, e.g. {"critical": 2, "high": 5}."""
        summary: dict[str, int] = {}
        for f in findings:
            key = f.severity.value
            summary[key] = summary.get(key, 0) + 1
        return summary
+
+
def scan_current_workspace(path: str) -> dict:
    """Lightweight API-friendly entry point for quick action security scan.

    Returns normalized diagnostics suitable for the extension quick action.
    """
    if not os.path.isdir(path):
        # Fail soft with an empty, well-shaped payload rather than raising.
        return {
            "success": False,
            "error": f"Path is not a directory: {path}",
            "findings": [],
            "summary": {},
        }

    scan = SecurityScanner().scan_directory(path)
    return {
        "success": True,
        "files_scanned": scan.files_scanned,
        "scan_duration_ms": scan.scan_duration_ms,
        "findings": [finding.to_dict() for finding in scan.findings],
        "summary": scan.summary,
    }
diff --git a/gitpilot/session.py b/gitpilot/session.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b5f90740d9c0589fd9bcc947c6f69ba6f6bbfdb
--- /dev/null
+++ b/gitpilot/session.py
@@ -0,0 +1,327 @@
+# gitpilot/session.py
+"""Session persistence, resumption, and checkpoint management.
+
+Sessions track the full conversation and workspace state. Checkpoints
+snapshot the workspace at key moments so users can rewind.
+"""
+from __future__ import annotations
+
+import json
+import logging
+import os
+import shutil
+import uuid
+from dataclasses import asdict, dataclass, field
+from datetime import UTC, datetime
+from pathlib import Path
+from typing import Any
+
logger = logging.getLogger(__name__)

# Default on-disk location for persisted sessions (~/.gitpilot/sessions);
# SessionManager accepts an alternative root for tests/custom setups.
SESSION_ROOT = Path.home() / ".gitpilot" / "sessions"
+
+
@dataclass
class Message:
    """One chat turn stored inside a Session."""

    role: str  # user | assistant | system
    content: str  # raw message text
    timestamp: str = field(
        default_factory=lambda: datetime.now(UTC).isoformat(),
    )  # ISO-8601 UTC creation time
    metadata: dict[str, Any] = field(default_factory=dict)  # free-form extras (populated from Session.add_message **meta)
+
+
@dataclass
class Checkpoint:
    """A rewindable snapshot marker within a Session."""

    id: str = field(default_factory=lambda: uuid.uuid4().hex[:12])
    message_index: int = 0  # message count at the moment the checkpoint was taken
    description: str = ""
    timestamp: str = field(
        default_factory=lambda: datetime.now(UTC).isoformat(),
    )  # ISO-8601 UTC creation time
    snapshot_path: str | None = None  # path of the .tar.gz workspace archive, when one was captured
+
+
+@dataclass
+class Session:
+ id: str = field(default_factory=lambda: uuid.uuid4().hex[:16])
+ name: str | None = None
+ repo_full_name: str | None = None
+ branch: str | None = None
+ messages: list[Message] = field(default_factory=list)
+ checkpoints: list[Checkpoint] = field(default_factory=list)
+ created_at: str = field(
+ default_factory=lambda: datetime.now(UTC).isoformat(),
+ )
+ updated_at: str = field(
+ default_factory=lambda: datetime.now(UTC).isoformat(),
+ )
+ pr_number: int | None = None
+ status: str = "active" # active | paused | completed
+ metadata: dict[str, Any] = field(default_factory=dict)
+
+ # Session mode fields
+ mode: str | None = None # "folder" | "local_git" | "github"
+ folder_path: str | None = None
+ repo_root: str | None = None
+
+ # Multi-repo context support
+ # Each entry: {"full_name": "owner/repo", "branch": "main", "mode": "read"|"write"}
+ repos: list[dict[str, Any]] = field(default_factory=list)
+ active_repo: str | None = None # full_name of the write-target repo
+
+ def add_message(self, role: str, content: str, **meta):
+ self.messages.append(Message(role=role, content=content, metadata=meta))
+ self.updated_at = datetime.now(UTC).isoformat()
+
+ def to_dict(self) -> dict[str, Any]:
+ return asdict(self)
+
+ @classmethod
+ def from_dict(cls, data: dict[str, Any]) -> Session:
+ data = dict(data) # shallow copy
+ data["messages"] = [Message(**m) for m in data.get("messages", [])]
+ data["checkpoints"] = [Checkpoint(**c) for c in data.get("checkpoints", [])]
+
+ # Backwards-compatible migration: populate repos from legacy single-repo
+ if not data.get("repos") and data.get("repo_full_name"):
+ data["repos"] = [{
+ "full_name": data["repo_full_name"],
+ "branch": data.get("branch", "main"),
+ "mode": "write",
+ }]
+ data.setdefault("active_repo", data["repo_full_name"])
+ data.setdefault("repos", [])
+ data.setdefault("active_repo", None)
+
+ # Session mode fields (backwards-compatible)
+ data.setdefault("mode", None)
+ data.setdefault("folder_path", None)
+ data.setdefault("repo_root", None)
+
+ return cls(**data)
+
+
class SessionManager:
    """Manage session lifecycle: create, save, load, list, fork, rewind."""

    def __init__(self, root: Path | None = None):
        """Store sessions as one JSON file each under *root* (default: SESSION_ROOT)."""
        self.root = root or SESSION_ROOT
        self.root.mkdir(parents=True, exist_ok=True)
        # Per-instance cache for list_sessions() (see list_sessions() docstring)
        self._list_cache: dict[str, Any] = {}

    def _session_path(self, session_id: str) -> Path:
        """Path of the JSON document backing *session_id*."""
        return self.root / f"{session_id}.json"

    def create(
        self,
        repo_full_name: str | None = None,
        branch: str | None = None,
        name: str | None = None,
    ) -> Session:
        """Create, persist, and return a fresh session."""
        session = Session(
            repo_full_name=repo_full_name, branch=branch, name=name,
        )
        self.save(session)
        return session

    def create_folder_session(
        self, folder_path: str, name: str | None = None,
    ) -> Session:
        """Create a session for folder-only mode (no git required)."""
        folder_name = os.path.basename(os.path.normpath(folder_path))
        session_name = name or f"Folder: {folder_name}"
        session = self.create(name=session_name)
        session.mode = "folder"
        session.folder_path = folder_path
        self.save(session)
        return session

    def create_local_git_session(
        self, repo_root: str, branch: str | None = None, name: str | None = None,
    ) -> Session:
        """Create a session for local git mode."""
        repo_name = os.path.basename(os.path.normpath(repo_root))
        session_name = name or f"Local Git: {repo_name}"
        if branch:
            session_name += f" ({branch})"
        session = self.create(name=session_name)
        session.mode = "local_git"
        session.repo_root = repo_root
        # folder_path mirrors repo_root so folder-based tooling works too.
        session.folder_path = repo_root
        session.branch = branch
        self.save(session)
        return session

    def create_github_session(
        self, repo_full_name: str, branch: str | None = None, name: str | None = None,
    ) -> Session:
        """Create a session for GitHub mode."""
        session_name = name or f"GitHub: {repo_full_name}"
        if branch:
            session_name += f" ({branch})"
        session = self.create(
            name=session_name,
            repo_full_name=repo_full_name
        )
        session.mode = "github"
        session.branch = branch
        self.save(session)
        return session

    def save(self, session: Session):
        """Persist *session* atomically.

        Fix: write to a sibling temp file and os.replace() it into place so
        a crash mid-write can never leave a truncated/corrupt session JSON
        (os.replace is atomic for same-directory moves on POSIX and
        Windows).  The ".tmp" suffix keeps the temp file invisible to the
        "*.json" glob used by list_sessions() and the dir fingerprint.
        """
        path = self._session_path(session.id)
        tmp = path.with_name(path.name + ".tmp")
        tmp.write_text(json.dumps(session.to_dict(), indent=2))
        os.replace(tmp, path)
        self.invalidate_list_cache()

    def load(self, session_id: str) -> Session:
        """Load a session by id; raises FileNotFoundError if absent."""
        path = self._session_path(session_id)
        if not path.exists():
            raise FileNotFoundError(f"Session not found: {session_id}")
        return Session.from_dict(json.loads(path.read_text()))

    def _list_sessions_dir_fingerprint(self) -> tuple[float, int]:
        """Cheap fingerprint of the sessions directory — (mtime, file_count).
        If either changes, the cache is stale.
        """
        try:
            stat = self.root.stat()
            files = list(self.root.glob("*.json"))
            # Also check any file mtime that's newer than dir mtime
            # (WSL sometimes doesn't update dir mtime on file edits)
            max_file_mtime = max(
                (f.stat().st_mtime for f in files),
                default=stat.st_mtime,
            )
            return (max(stat.st_mtime, max_file_mtime), len(files))
        except Exception:
            # Unreadable dir: return a constant so callers just recompute.
            return (0.0, 0)

    def list_sessions(
        self,
        repo_full_name: str | None = None,
        limit: int = 50,
    ) -> list[dict[str, Any]]:
        """List session summaries with an mtime-based in-memory cache.

        Cache key includes the filter args so different queries don't collide.
        Cache is invalidated when the sessions directory mtime or file count
        changes (i.e., any create/update/delete triggers a refresh).
        Unreadable/corrupt session files are skipped (logged at DEBUG).
        """
        fingerprint = self._list_sessions_dir_fingerprint()
        cache_key = (fingerprint, repo_full_name, limit)

        cached = self._list_cache.get("entry")
        if cached is not None and cached[0] == cache_key:
            return cached[1]

        sessions = []
        # Filenames are hex session ids; reverse-sorted for a stable order.
        for path in sorted(self.root.glob("*.json"), reverse=True):
            try:
                data = json.loads(path.read_text())
                if repo_full_name and data.get("repo_full_name") != repo_full_name:
                    continue
                sessions.append({
                    "id": data["id"],
                    "name": data.get("name"),
                    "repo": data.get("repo_full_name"),
                    "branch": data.get("branch"),
                    "message_count": len(data.get("messages", [])),
                    "status": data.get("status", "active"),
                    "updated_at": data.get("updated_at"),
                    "pr_number": data.get("pr_number"),
                    "repos": data.get("repos", []),
                    "active_repo": data.get("active_repo"),
                })
                if len(sessions) >= limit:
                    break
            except Exception:
                logger.debug("Failed to read session file %s", path, exc_info=True)
                continue

        # Store in cache
        self._list_cache["entry"] = (cache_key, sessions)
        return sessions

    def invalidate_list_cache(self) -> None:
        """Explicitly invalidate the list_sessions cache.

        Called after save/delete to ensure the next list returns fresh data
        even if the filesystem mtime hasn't updated yet (WSL edge case).
        """
        self._list_cache.pop("entry", None)

    def delete(self, session_id: str) -> bool:
        """Delete a session file; returns True if it existed."""
        path = self._session_path(session_id)
        if path.exists():
            path.unlink()
            self.invalidate_list_cache()
            return True
        return False

    def fork(self, session_id: str, at_message: int | None = None) -> Session:
        """Create a new session copying messages (optionally truncated) from another.

        NOTE(review): the copy is shallow — the forked session shares Message
        objects with the original; in-place mutation of a Message would affect
        both. Confirm whether a deep copy is wanted.
        """
        original = self.load(session_id)
        messages = original.messages
        if at_message is not None:
            messages = messages[: at_message + 1]

        forked = Session(
            repo_full_name=original.repo_full_name,
            branch=original.branch,
            name=f"Fork of {original.name or original.id}",
            messages=list(messages),
            metadata={"forked_from": original.id},
        )
        self.save(forked)
        return forked

    def create_checkpoint(
        self,
        session: Session,
        workspace_path: Path | None = None,
        description: str = "",
    ) -> Checkpoint:
        """Record a checkpoint; optionally archive *workspace_path* as .tar.gz."""
        checkpoint = Checkpoint(
            message_index=len(session.messages),
            description=description or f"Checkpoint at message {len(session.messages)}",
        )
        if workspace_path and workspace_path.exists():
            snap_dir = self.root / "snapshots" / session.id
            snap_dir.mkdir(parents=True, exist_ok=True)
            archive_base = str(snap_dir / checkpoint.id)
            shutil.make_archive(archive_base, "gztar", root_dir=str(workspace_path))
            checkpoint.snapshot_path = archive_base + ".tar.gz"

        session.checkpoints.append(checkpoint)
        self.save(session)
        return checkpoint

    def rewind_to_checkpoint(
        self,
        session: Session,
        checkpoint_id: str,
        workspace_path: Path | None = None,
    ) -> Session:
        """Rewind messages (and optionally the workspace) to a checkpoint.

        Messages after the checkpoint and later checkpoints are discarded;
        if a snapshot archive exists and *workspace_path* is given, the
        workspace directory is wiped and restored from the archive.
        Raises ValueError when the checkpoint id is unknown.
        """
        checkpoint = None
        for cp in session.checkpoints:
            if cp.id == checkpoint_id:
                checkpoint = cp
                break
        if checkpoint is None:
            raise ValueError(f"Checkpoint not found: {checkpoint_id}")

        session.messages = session.messages[: checkpoint.message_index]

        if checkpoint.snapshot_path and workspace_path:
            snap = Path(checkpoint.snapshot_path)
            if snap.exists():
                if workspace_path.exists():
                    shutil.rmtree(workspace_path)
                workspace_path.mkdir(parents=True, exist_ok=True)
                shutil.unpack_archive(str(snap), str(workspace_path))

        idx = session.checkpoints.index(checkpoint)
        session.checkpoints = session.checkpoints[: idx + 1]
        self.save(session)
        return session
diff --git a/gitpilot/settings.py b/gitpilot/settings.py
new file mode 100644
index 0000000000000000000000000000000000000000..66cccc5ffa7da46fd87ba0d7198ff2844b51ea34
--- /dev/null
+++ b/gitpilot/settings.py
@@ -0,0 +1,479 @@
+from __future__ import annotations
+
+import contextlib
+import enum
+import json
+import logging
+import os
+from pathlib import Path
+from typing import Any
+
+from dotenv import load_dotenv
+from pydantic import BaseModel, Field
+
+from gitpilot.models import (
+ ProviderConnectionType,
+ ProviderName,
+ ProviderSummary,
+)
+
+# Load .env file if it exists (from project root or current directory)
+load_dotenv()
+
+CONFIG_DIR = Path(os.getenv("GITPILOT_CONFIG_DIR", str(Path.home() / ".gitpilot")))
+CONFIG_FILE = CONFIG_DIR / "settings.json"
+
+logger = logging.getLogger(__name__)
+
+
class LLMProvider(enum.StrEnum):
    """Supported LLM backends; values are what gets persisted in settings.json."""

    openai = "openai"
    claude = "claude"
    watsonx = "watsonx"
    ollama = "ollama"          # local Ollama daemon
    ollabridge = "ollabridge"  # OllaBridge endpoint (local or hosted)
+
+
class OpenAIConfig(BaseModel):
    """Connection settings for the OpenAI provider."""

    api_key: str = Field(default="")  # merged from OPENAI_API_KEY at load time
    model: str = Field(default="gpt-4o-mini")
    base_url: str = Field(default="")  # Optional: for Azure OpenAI or proxies
+
+
class ClaudeConfig(BaseModel):
    """Connection settings for the Anthropic Claude provider."""

    api_key: str = Field(default="")  # merged from ANTHROPIC_API_KEY at load time
    model: str = Field(default="claude-sonnet-4-5")
    base_url: str = Field(default="")  # Optional: for proxies
+
+
class WatsonxConfig(BaseModel):
    """Connection settings for the IBM watsonx.ai provider."""

    api_key: str = Field(default="")      # merged from WATSONX_API_KEY at load time
    project_id: str = Field(default="")   # merged from WATSONX_PROJECT_ID / PROJECT_ID
    model_id: str = Field(default="meta-llama/llama-3-3-70b-instruct")
    base_url: str = Field(default="https://api.watsonx.ai/v1")
+
+
class OllamaConfig(BaseModel):
    """Connection settings for a local Ollama daemon (no API key needed)."""

    base_url: str = Field(default="http://localhost:11434")
    model: str = Field(default="llama3")
+
+
class OllaBridgeConfig(BaseModel):
    """Connection settings for an OllaBridge endpoint."""

    base_url: str = Field(default="http://localhost:8000")
    model: str = Field(default="qwen2.5:1.5b")
    api_key: str = Field(default="")  # Optional: for authenticated endpoints
+
+
class AppSettings(BaseModel):
    """
    Top-level application settings for GitPilot.

    Persisted as JSON at CONFIG_FILE. Secrets are layered in from the
    environment on every load, so credentials never need to be written to
    disk from the UI.
    """

    # Active LLM backend; user-editable and persisted across restarts.
    provider: LLMProvider = Field(default=LLMProvider.ollabridge)

    # Per-provider connection settings.
    openai: OpenAIConfig = Field(default_factory=OpenAIConfig)
    claude: ClaudeConfig = Field(default_factory=ClaudeConfig)
    watsonx: WatsonxConfig = Field(default_factory=WatsonxConfig)
    ollama: OllamaConfig = Field(default_factory=OllamaConfig)
    ollabridge: OllaBridgeConfig = Field(default_factory=OllaBridgeConfig)

    # Lite Mode: optimized for small LLMs (< 7B parameters).
    # Uses simplified prompts, single-agent execution, and pre-fetched context
    # instead of multi-agent pipelines with tool-calling.
    # Default is False — user must explicitly opt-in via settings or env var.
    lite_mode: bool = Field(default=False)

    # Optional LangFlow integration.
    langflow_url: str = Field(default="http://localhost:7860")
    langflow_api_key: str | None = None
    langflow_plan_flow_id: str | None = None

    @classmethod
    def from_disk(cls) -> AppSettings:
        """
        Load settings from disk and merge with environment variables.

        Production behavior:
        - Persisted settings are the primary source of truth for user-editable choices.
        - `.env` is used to bootstrap defaults on first run when no config file exists.
        - Secrets from environment variables are always merged in, so they never need
          to be written to disk from the UI.
        - A corrupted or unreadable config file falls back to defaults with a warning
          instead of crashing at import time.
        - Test runs can isolate config by setting GITPILOT_CONFIG_DIR.
        """
        config_exists = CONFIG_FILE.exists()

        if config_exists:
            # Robustness fix: `from_disk` runs at module import (see the
            # module-level `_settings` singleton), so a corrupt settings.json
            # (invalid JSON, schema drift, permission error) must not prevent
            # the application from starting.
            try:
                data = json.loads(CONFIG_FILE.read_text("utf-8"))
                settings = cls.model_validate(data)
            except Exception as exc:
                logger.warning(
                    "Could not load settings from %s (%s); falling back to defaults.",
                    CONFIG_FILE,
                    exc,
                )
                settings = cls()
        else:
            settings = cls()

        # ---------------------------------------------------------------------
        # Secrets and connection settings always merge from environment.
        # This lets operators provide credentials via .env / deployment config
        # without overwriting the user's provider/model choices every restart.
        # ---------------------------------------------------------------------

        # OpenAI
        if os.getenv("OPENAI_API_KEY"):
            settings.openai.api_key = os.getenv("OPENAI_API_KEY", "")
        if os.getenv("OPENAI_BASE_URL"):
            settings.openai.base_url = os.getenv("OPENAI_BASE_URL", "")

        # Claude
        if os.getenv("ANTHROPIC_API_KEY"):
            settings.claude.api_key = os.getenv("ANTHROPIC_API_KEY", "")
        if os.getenv("ANTHROPIC_BASE_URL"):
            settings.claude.base_url = os.getenv("ANTHROPIC_BASE_URL", "")

        # Watsonx — WATSONX_PROJECT_ID wins over the legacy PROJECT_ID name.
        if os.getenv("WATSONX_API_KEY"):
            settings.watsonx.api_key = os.getenv("WATSONX_API_KEY", "")
        if os.getenv("WATSONX_PROJECT_ID") or os.getenv("PROJECT_ID"):
            settings.watsonx.project_id = os.getenv(
                "WATSONX_PROJECT_ID", os.getenv("PROJECT_ID", "")
            )
        if os.getenv("WATSONX_BASE_URL"):
            settings.watsonx.base_url = os.getenv("WATSONX_BASE_URL", "")

        # Ollama
        if os.getenv("OLLAMA_BASE_URL"):
            settings.ollama.base_url = os.getenv("OLLAMA_BASE_URL", "")

        # OllaBridge
        if os.getenv("OLLABRIDGE_BASE_URL"):
            settings.ollabridge.base_url = os.getenv("OLLABRIDGE_BASE_URL", "")
        if os.getenv("OLLABRIDGE_API_KEY"):
            settings.ollabridge.api_key = os.getenv("OLLABRIDGE_API_KEY", "")

        # LangFlow (optional)
        if os.getenv("GITPILOT_LANGFLOW_URL"):
            settings.langflow_url = os.getenv("GITPILOT_LANGFLOW_URL", "")
        if os.getenv("GITPILOT_LANGFLOW_API_KEY"):
            settings.langflow_api_key = os.getenv("GITPILOT_LANGFLOW_API_KEY")
        if os.getenv("GITPILOT_LANGFLOW_PLAN_FLOW_ID"):
            settings.langflow_plan_flow_id = os.getenv("GITPILOT_LANGFLOW_PLAN_FLOW_ID")

        # Lite mode may be intentionally controlled by env in CI or deployments.
        # Unrecognized values leave the persisted choice untouched.
        env_lite = os.getenv("GITPILOT_LITE_MODE", "").strip().lower()
        if env_lite in ("1", "true", "yes", "on"):
            settings.lite_mode = True
        elif env_lite in ("0", "false", "no", "off"):
            settings.lite_mode = False

        # ---------------------------------------------------------------------
        # First-run bootstrap defaults from environment.
        # These only apply when no persisted config exists yet.
        # This allows `.env` to help initial setup, while later VS Code changes
        # remain persistent across restarts.
        # ---------------------------------------------------------------------
        if not config_exists:
            env_provider = os.getenv("GITPILOT_PROVIDER")
            if env_provider:
                # An unknown provider name is ignored rather than fatal.
                with contextlib.suppress(ValueError):
                    settings.provider = LLMProvider(env_provider.lower())

            if os.getenv("GITPILOT_OPENAI_MODEL"):
                settings.openai.model = os.getenv("GITPILOT_OPENAI_MODEL", settings.openai.model)

            if os.getenv("GITPILOT_CLAUDE_MODEL"):
                settings.claude.model = os.getenv("GITPILOT_CLAUDE_MODEL", settings.claude.model)

            if os.getenv("GITPILOT_WATSONX_MODEL"):
                settings.watsonx.model_id = os.getenv(
                    "GITPILOT_WATSONX_MODEL", settings.watsonx.model_id
                )

            if os.getenv("GITPILOT_OLLAMA_MODEL"):
                settings.ollama.model = os.getenv("GITPILOT_OLLAMA_MODEL", settings.ollama.model)

            if os.getenv("GITPILOT_OLLABRIDGE_MODEL"):
                settings.ollabridge.model = os.getenv(
                    "GITPILOT_OLLABRIDGE_MODEL", settings.ollabridge.model
                )

        return settings

    def save(self) -> None:
        """
        Save settings to disk.

        Skipped on Vercel/serverless deployments where the filesystem is ephemeral.
        """
        if os.getenv("GITPILOT_VERCEL_DEPLOYMENT") or os.getenv("VERCEL"):
            logger.warning(
                "Settings persistence disabled on Vercel. "
                "Use environment variables for configuration."
            )
            return

        CONFIG_DIR.mkdir(parents=True, exist_ok=True)
        CONFIG_FILE.write_text(self.model_dump_json(indent=2), "utf-8")

    # ── Provider introspection helpers ─────────────────────────────────────

    def is_provider_configured(self) -> bool:
        """Return True if the active provider has the required configuration.

        Local providers (ollama, ollabridge) need no credentials and always
        count as configured.
        """
        p = self.provider
        if p == LLMProvider.openai:
            return bool(self.openai.api_key)
        if p == LLMProvider.claude:
            return bool(self.claude.api_key)
        if p == LLMProvider.watsonx:
            return bool(self.watsonx.api_key and self.watsonx.project_id)
        if p == LLMProvider.ollama:
            return True
        if p == LLMProvider.ollabridge:
            return True
        return False

    def get_effective_model(self) -> str | None:
        """Return the model string for the active provider, or None if unset."""
        p = self.provider
        if p == LLMProvider.openai:
            return self.openai.model or None
        if p == LLMProvider.claude:
            return self.claude.model or None
        if p == LLMProvider.watsonx:
            return self.watsonx.model_id or None
        if p == LLMProvider.ollama:
            return self.ollama.model or None
        if p == LLMProvider.ollabridge:
            return self.ollabridge.model or None
        return None

    def get_provider_summary(self) -> ProviderSummary:
        """Build a ProviderSummary for the active provider."""
        p = self.provider

        # Source detection:
        # - If there is no config file yet, and matching env vars exist, this is a bootstrap/env source.
        # - Otherwise, treat user-facing settings as persistent settings, even if secrets still come from env.
        config_exists = CONFIG_FILE.exists()

        # Env var whose presence marks the provider as environment-driven.
        env_key_map = {
            LLMProvider.openai: "OPENAI_API_KEY",
            LLMProvider.claude: "ANTHROPIC_API_KEY",
            LLMProvider.watsonx: "WATSONX_API_KEY",
            LLMProvider.ollama: "OLLAMA_BASE_URL",
            LLMProvider.ollabridge: "OLLABRIDGE_BASE_URL",
        }

        source: str = (
            ".env"
            if (not config_exists and os.getenv(env_key_map.get(p, "")))
            else "settings"
        )

        if p == LLMProvider.openai:
            model = self.openai.model
            base_url = self.openai.base_url or None
            conn = ProviderConnectionType.api_key
            has_key = bool(self.openai.api_key)
        elif p == LLMProvider.claude:
            model = self.claude.model
            base_url = self.claude.base_url or None
            conn = ProviderConnectionType.api_key
            has_key = bool(self.claude.api_key)
        elif p == LLMProvider.watsonx:
            model = self.watsonx.model_id
            base_url = self.watsonx.base_url or None
            conn = ProviderConnectionType.api_key
            has_key = bool(self.watsonx.api_key)
        elif p == LLMProvider.ollama:
            model = self.ollama.model
            base_url = self.ollama.base_url or None
            conn = ProviderConnectionType.local
            has_key = False
        elif p == LLMProvider.ollabridge:
            model = self.ollabridge.model
            base_url = self.ollabridge.base_url or None
            conn = ProviderConnectionType.local
            has_key = bool(self.ollabridge.api_key)
        else:
            model = None
            base_url = None
            conn = None
            has_key = False

        return ProviderSummary(
            configured=self.is_provider_configured(),
            name=ProviderName(p.value),
            source=source,
            model=model,
            base_url=base_url,
            connection_type=conn,
            has_api_key=has_key,
        )
+
+
+_settings = AppSettings.from_disk()
+
+
def get_settings() -> AppSettings:
    """Return the process-wide AppSettings singleton (loaded at import time)."""
    return _settings
+
+
def reload_settings() -> AppSettings:
    """
    Reload settings from disk/environment.

    Useful in tests and in controlled runtime refresh paths.

    Returns:
        The freshly loaded AppSettings singleton.
    """
    global _settings  # noqa: PLW0603 — the module singleton is intentionally rebound here
    _settings = AppSettings.from_disk()
    return _settings
+
+
+_AUTOCONFIG_LAST_RUN_TS: float = 0.0
+_AUTOCONFIG_TTL_SECONDS: float = 20.0
+
+
def autoconfigure_local_provider(force: bool = False) -> AppSettings:
    """
    Prefer a zero-config local provider when it is available.

    Performance improvements:
    - Uses a TTL so repeated calls do not re-probe local providers constantly.
    - Supports force=True for explicit bootstrap actions.
    - Keeps cloud-provider user choices intact.
    - Only switches among local providers when appropriate.

    Rules:
    - Never override an explicitly configured cloud provider.
    - If the active provider is ollama or ollabridge, pick a valid default model.
    - If the app is still on the local default provider but has no model, prefer Ollama first.
    """
    # NOTE(review): `_settings` is only mutated (never rebound) below, so this
    # `global` is redundant; kept as-is to leave behavior byte-identical.
    global _settings  # noqa: PLW0602
    global _AUTOCONFIG_LAST_RUN_TS  # noqa: PLW0603

    # Local import keeps module import cheap; time is only needed here.
    import time

    # TTL guard: skip the (potentially slow) provider probe if we ran recently.
    now = time.time()
    if not force and (now - _AUTOCONFIG_LAST_RUN_TS) < _AUTOCONFIG_TTL_SECONDS:
        return _settings

    try:
        # Imported lazily to avoid a circular import at module load time —
        # TODO confirm against gitpilot.model_catalog's own imports.
        from gitpilot.model_catalog import list_models_for_provider

        # Probe both local providers; keep only those that reported models.
        available: dict[LLMProvider, list[str]] = {}
        for provider in (LLMProvider.ollama, LLMProvider.ollabridge):
            models, _error = list_models_for_provider(provider, _settings)
            if models:
                available[provider] = models

        # Record the probe time only after a successful probe, so a failed
        # probe will be retried on the next call.
        _AUTOCONFIG_LAST_RUN_TS = now

        if not available:
            return _settings

        current = _settings.provider
        current_model = _settings.get_effective_model()
        changed = False

        # Only auto-switch among local providers.
        auto_switch_allowed = current in (
            LLMProvider.ollama,
            LLMProvider.ollabridge,
        )

        preferred = current
        if auto_switch_allowed:
            # Prefer a locally running Ollama over the ollabridge default.
            if current == LLMProvider.ollabridge and LLMProvider.ollama in available:
                preferred = LLMProvider.ollama
            elif current not in available:
                preferred = (
                    LLMProvider.ollama
                    if LLMProvider.ollama in available
                    else next(iter(available))
                )

        # Apply the switch (no-op when auto-switching was not allowed,
        # since `preferred` still equals `current` in that case).
        if preferred in available and preferred != current:
            _settings.provider = preferred
            current = preferred
            changed = True

        # Ensure the selected provider has a model that actually exists.
        selected_provider = _settings.provider
        if selected_provider in available:
            models = available[selected_provider]
            if not current_model or current_model not in models:
                default_model = models[0]
                if selected_provider == LLMProvider.ollama:
                    _settings.ollama.model = default_model
                elif selected_provider == LLMProvider.ollabridge:
                    _settings.ollabridge.model = default_model
                changed = True

        # Persist only when something actually changed.
        if changed:
            _settings.save()

    except Exception as exc:
        # Autoconfiguration is best-effort; never let a probe failure
        # break the caller.
        logger.debug("Local provider autoconfiguration skipped: %s", exc)
        return _settings

    return _settings
+
def set_provider(provider: LLMProvider) -> AppSettings:
    """Switch the active LLM provider and persist the change to disk."""
    # NOTE(review): `_settings` is only mutated here, so `global` is redundant.
    global _settings  # noqa: PLW0602
    _settings.provider = provider
    _settings.save()
    return _settings
+
+
+def _merge_model_config(
+ existing: BaseModel,
+ incoming: dict[str, Any],
+) -> dict[str, Any]:
+ """
+ Merge partial provider updates into an existing config model.
+
+ This prevents API updates like {"model": "..."} from wiping out api_key/base_url.
+ """
+ current = existing.model_dump()
+ current.update(incoming)
+ return current
+
+
def update_settings(updates: dict[str, Any]) -> AppSettings:
    """
    Update settings with partial or full configuration.

    Important behavior:
    - Partial provider updates are merged into existing configs.
    - User changes made from VS Code persist to disk.
    - Environment secrets are still layered back in on next reload.

    Raises:
        ValueError: if updates["provider"] is not a known LLMProvider value.
    """
    # NOTE(review): `_settings` is only mutated here, so `global` is redundant.
    global _settings  # noqa: PLW0602

    if "provider" in updates:
        _settings.provider = LLMProvider(updates["provider"])

    # Each provider section is merged, not replaced, so partial payloads
    # (e.g. just {"model": ...}) keep existing keys like api_key/base_url.
    if "openai" in updates:
        merged = _merge_model_config(_settings.openai, updates["openai"])
        _settings.openai = OpenAIConfig(**merged)

    if "claude" in updates:
        merged = _merge_model_config(_settings.claude, updates["claude"])
        _settings.claude = ClaudeConfig(**merged)

    if "watsonx" in updates:
        merged = _merge_model_config(_settings.watsonx, updates["watsonx"])
        _settings.watsonx = WatsonxConfig(**merged)

    if "ollama" in updates:
        merged = _merge_model_config(_settings.ollama, updates["ollama"])
        _settings.ollama = OllamaConfig(**merged)

    if "ollabridge" in updates:
        merged = _merge_model_config(_settings.ollabridge, updates["ollabridge"])
        _settings.ollabridge = OllaBridgeConfig(**merged)

    if "lite_mode" in updates:
        _settings.lite_mode = bool(updates["lite_mode"])

    if "langflow_url" in updates:
        # NOTE(review): falsy values (e.g. "") keep the previous URL, so this
        # field cannot be cleared through this path — confirm intentional.
        _settings.langflow_url = updates["langflow_url"] or _settings.langflow_url

    if "langflow_api_key" in updates:
        _settings.langflow_api_key = updates["langflow_api_key"]

    if "langflow_plan_flow_id" in updates:
        _settings.langflow_plan_flow_id = updates["langflow_plan_flow_id"]

    _settings.save()
    return _settings
\ No newline at end of file
diff --git a/gitpilot/skills.py b/gitpilot/skills.py
new file mode 100644
index 0000000000000000000000000000000000000000..dbd22046ce06194211c5391b455f0c002710dc90
--- /dev/null
+++ b/gitpilot/skills.py
@@ -0,0 +1,237 @@
+# gitpilot/skills.py
+"""Skill system for GitPilot — reusable, invocable workflows.
+
+Skills are markdown files that define prompt templates. Users invoke
+them via ``/skill-name`` in chat. They live in:
+
+- ``.gitpilot/skills/*.md`` — project-level skills
+- ``~/.gitpilot/skills/*.md`` — global user skills
+- Plugin skills (discovered via PluginManager)
+
+Each markdown file has YAML front-matter followed by a prompt template::
+
+ ---
+ name: review
+ description: Review code quality for the current branch
+ auto_trigger: false
+ required_tools:
+ - git_diff
+ - read_local_file
+ ---
+
+ Review the code changes on the current branch.
+ Focus on: security issues, performance, and maintainability.
+ Use git_diff to see what changed, then read relevant files.
+ Provide a structured review with severity ratings.
+"""
+from __future__ import annotations
+
+import logging
+import re
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+logger = logging.getLogger(__name__)
+
+SKILLS_DIR_NAME = "skills"
+_FRONT_MATTER_RE = re.compile(r"^---\s*\n(.*?)\n---\s*\n", re.DOTALL)
+
+
@dataclass
class Skill:
    """A reusable, invocable workflow template loaded from markdown."""

    name: str
    description: str = ""
    prompt_template: str = ""
    auto_trigger: bool = False
    required_tools: List[str] = field(default_factory=list)
    source_file: Optional[Path] = None

    @classmethod
    def from_file(cls, path: Path) -> "Skill":
        """Parse a skill from a markdown file with optional YAML front-matter.

        Files without front-matter become a skill named after the file stem
        whose whole content is the prompt template.
        """
        raw = path.read_text(encoding="utf-8")
        match = _FRONT_MATTER_RE.match(raw)
        if match:
            metadata: Dict[str, Any] = _parse_yaml_simple(match.group(1))
            body = raw[match.end():]
        else:
            metadata = {}
            body = raw

        return cls(
            name=metadata.get("name", path.stem),
            description=metadata.get("description", ""),
            prompt_template=body.strip(),
            auto_trigger=metadata.get("auto_trigger", False),
            required_tools=metadata.get("required_tools", []),
            source_file=path,
        )

    def render(self, context: Optional[Dict[str, str]] = None) -> str:
        """Render the prompt template, substituting ``{{var_name}}``
        placeholders with values from ``context``."""
        rendered = self.prompt_template
        for key, value in (context or {}).items():
            rendered = rendered.replace("{{" + key + "}}", str(value))
        return rendered

    def to_dict(self) -> Dict[str, Any]:
        """Serializable summary of this skill (prompt body omitted)."""
        return {
            "name": self.name,
            "description": self.description,
            "auto_trigger": self.auto_trigger,
            "required_tools": self.required_tools,
            "source": str(self.source_file) if self.source_file else None,
        }
+
+
class SkillManager:
    """Discover and manage skills from project, user, and plugin sources."""

    def __init__(
        self,
        workspace_path: Optional[Path] = None,
        user_dir: Optional[Path] = None,
    ) -> None:
        self.workspace_path = workspace_path
        self.user_dir = user_dir or (Path.home() / ".gitpilot")
        # name -> Skill; later sources overwrite earlier ones on name clash.
        self._skills: Dict[str, Skill] = {}

    def load_all(self) -> int:
        """Load skills from all sources. Returns count of skills loaded."""
        total = 0

        # 1. Project-level skills
        if self.workspace_path:
            total += self._load_from_dir(
                self.workspace_path / ".gitpilot" / SKILLS_DIR_NAME,
                prefix="project",
            )

        # 2. Global user skills
        total += self._load_from_dir(self.user_dir / SKILLS_DIR_NAME, prefix="user")

        logger.info("Loaded %d skills total", total)
        return total

    def register(self, skill: Skill) -> None:
        """Register a skill (e.g. from a plugin)."""
        self._skills[skill.name] = skill

    def get(self, name: str) -> Optional[Skill]:
        """Get a skill by name (used for /command invocation)."""
        return self._skills.get(name)

    def list_skills(self) -> List[Dict[str, Any]]:
        """List all loaded skills as serializable dicts."""
        return [skill.to_dict() for skill in self._skills.values()]

    def find_auto_triggers(self, context: str) -> List[Skill]:
        """Find skills that should auto-trigger based on context.

        Auto-trigger skills are checked against the context string
        (e.g. user message or session state) and returned if their
        name or description is relevant.
        """
        ctx_lower = context.lower()
        hits: List[Skill] = []
        for skill in self._skills.values():
            if not skill.auto_trigger:
                continue
            # Simple keyword match for now: the skill name, or any word of
            # its description, appearing in the context.
            name_hit = skill.name.lower() in ctx_lower
            word_hit = any(
                word in ctx_lower for word in skill.description.lower().split()
            )
            if name_hit or word_hit:
                hits.append(skill)
        return hits

    def invoke(
        self,
        name: str,
        context: Optional[Dict[str, str]] = None,
    ) -> Optional[str]:
        """Invoke a skill by name and return the rendered prompt.

        Returns None if the skill is not found.
        """
        skill = self.get(name)
        return skill.render(context) if skill else None

    def _load_from_dir(self, skills_dir: Path, prefix: str = "") -> int:
        """Load every ``*.md`` skill in a directory; returns how many loaded."""
        if not skills_dir.is_dir():
            return 0
        loaded = 0
        for md_file in sorted(skills_dir.glob("*.md")):
            try:
                parsed = Skill.from_file(md_file)
            except Exception as e:
                # A single malformed file must not abort discovery.
                logger.warning("Failed to load skill %s: %s", md_file, e)
                continue
            self._skills[parsed.name] = parsed
            loaded += 1
        return loaded
+
+
+def _parse_yaml_simple(text: str) -> Dict[str, Any]:
+ """Minimal YAML front-matter parser (no external dependency).
+
+ Handles:
+ key: value
+ key: true/false
+ key:
+ - item1
+ - item2
+ """
+ result: Dict[str, Any] = {}
+ lines = text.strip().split("\n")
+ current_key: Optional[str] = None
+ current_list: Optional[List[str]] = None
+
+ for line in lines:
+ stripped = line.strip()
+ if not stripped or stripped.startswith("#"):
+ continue
+
+ # List item
+ if stripped.startswith("- ") and current_key:
+ if current_list is None:
+ current_list = []
+ current_list.append(stripped[2:].strip())
+ result[current_key] = current_list
+ continue
+
+ # Key-value
+ if ":" in stripped:
+ if current_list is not None:
+ current_list = None
+
+ key, _, value = stripped.partition(":")
+ key = key.strip()
+ value = value.strip()
+ current_key = key
+
+ if not value:
+ # Next lines might be a list
+ current_list = []
+ result[key] = current_list
+ continue
+
+ # Parse value types
+ if value.lower() == "true":
+ result[key] = True
+ elif value.lower() == "false":
+ result[key] = False
+ elif value.isdigit():
+ result[key] = int(value)
+ else:
+ result[key] = value
+
+ current_list = None
+
+ return result
diff --git a/gitpilot/skills/fix-hf-space.md b/gitpilot/skills/fix-hf-space.md
new file mode 100644
index 0000000000000000000000000000000000000000..6d22492471c2f770326ae578e504bd624a9e78df
--- /dev/null
+++ b/gitpilot/skills/fix-hf-space.md
@@ -0,0 +1,49 @@
+# /fix-hf-space
+
+Analyze and repair a broken HuggingFace Space
+
+## Description
+
+This skill diagnoses and fixes broken HuggingFace Spaces by:
+1. Cloning the Space repository
+2. Analyzing for dead dependencies, deprecated APIs, and SDK issues
+3. Generating a complete fix using OllaBridge LLM (or template fallback)
+4. Pushing the fix and managing ZeroGPU hardware if needed
+
+Works with RepoGuardian's Space analyzer for structured diagnosis.
+
+## Arguments
+
+- `space_id` (required): HuggingFace Space ID, e.g. `ruslanmv/Logo-Creator`
+- `--push`: Push fixes to the Space repo (default: dry run)
+- `--hardware`: Also manage ZeroGPU hardware allocation
+
+## Prompt
+
+Fix the broken HuggingFace Space `{space_id}`.
+
+Steps:
+1. Clone the Space: `clone_hf_space("{space_id}")`
+2. Get runtime info: `get_space_runtime_info("{space_id}")`
+3. Analyze for issues: `analyze_hf_space(repo_dir)`
+4. Generate fix: `generate_space_fix("{space_id}", diagnosis, app_content)`
+5. Push fix: `push_space_fix(repo_dir, fix)`
+6. Manage hardware: `manage_space_hardware("{space_id}", token, "zero-a10g")`
+
+Use OllaBridge Cloud ({ollabridge_url}) for intelligent analysis.
+Report all issues found and actions taken.
+
+## Example
+
+```bash
+gitpilot skill fix-hf-space ruslanmv/Logo-Creator --push --hardware
+```
+
+## Required Tools
+
+- `clone_hf_space`
+- `analyze_hf_space`
+- `generate_space_fix`
+- `push_space_fix`
+- `manage_space_hardware`
+- `get_space_runtime_info`
diff --git a/gitpilot/smart_model_router.py b/gitpilot/smart_model_router.py
new file mode 100644
index 0000000000000000000000000000000000000000..9776a2c061265112eb1b590b6c98128a5d16c298
--- /dev/null
+++ b/gitpilot/smart_model_router.py
@@ -0,0 +1,268 @@
+# gitpilot/smart_model_router.py
+"""Smart multi-model routing for GitPilot.
+
+Routes different tasks to different LLM models based on complexity,
+task type, and cost constraints. This allows GitPilot to use cheap
+models for simple tasks and powerful models for complex reasoning.
+
+Complexity is estimated from the request text using heuristics:
+
+- **low** — simple queries, listings, status checks → fast/cheap model
+- **medium** — code generation, edits, reviews → balanced model
+- **high** — complex reasoning, architecture, security analysis → strongest model
+
+The router respects a configurable cost budget and tracks usage.
+"""
+from __future__ import annotations
+
+import logging
+import re
+from dataclasses import dataclass, field
+from enum import Enum
+from typing import Any, Dict, List, Optional
+
+logger = logging.getLogger(__name__)
+
+
class Complexity(str, Enum):
    """Estimated difficulty of a request; drives tier selection."""

    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"
+
+
class TaskTier(str, Enum):
    """Maps complexity to model tier."""

    FAST = "fast"            # cheap, low-latency
    BALANCED = "balanced"    # good quality/cost ratio
    POWERFUL = "powerful"    # strongest available
+
+
# Default model mapping per provider + tier. User-configured maps in
# ModelRouterConfig.model_map take precedence (see ModelRouter._resolve_model).
DEFAULT_MODEL_MAP: Dict[str, Dict[str, str]] = {
    "openai": {
        "fast": "gpt-4o-mini",
        "balanced": "gpt-4o",
        "powerful": "o1",
    },
    "claude": {
        "fast": "claude-haiku-4-5-20251001",
        "balanced": "claude-sonnet-4-5-20250929",
        "powerful": "claude-opus-4-6",
    },
    "ollama": {
        "fast": "llama3",
        "balanced": "llama3",
        "powerful": "llama3:70b",
    },
    "watsonx": {
        "fast": "meta-llama/llama-3-1-8b-instruct",
        "balanced": "meta-llama/llama-3-3-70b-instruct",
        "powerful": "meta-llama/llama-3-3-70b-instruct",
    },
}

# Patterns for complexity estimation; matched case-insensitively against the
# raw request text. High-complexity patterns are checked first (see
# ModelRouter.estimate_complexity), so they win on overlap.
_HIGH_COMPLEXITY_PATTERNS = [
    re.compile(r"\b(architect|design|refactor|migrate|security audit|threat model)\b", re.I),
    re.compile(r"\b(analyze .*across|cross-repo|monorepo|dependency graph)\b", re.I),
    re.compile(r"\b(implement .*system|build .*from scratch|rewrite)\b", re.I),
    re.compile(r"\b(debug.*complex|race condition|memory leak|deadlock)\b", re.I),
]

_LOW_COMPLEXITY_PATTERNS = [
    re.compile(r"\b(list|show|status|version|help|what is|how do)\b", re.I),
    re.compile(r"\b(rename|typo|comment|format|lint)\b", re.I),
    re.compile(r"\b(git status|git log|git diff)\b", re.I),
]

# Category -> default tier (category string comes from the request classifier).
_CATEGORY_TIER_MAP: Dict[str, TaskTier] = {
    "plan_execute": TaskTier.POWERFUL,
    "issue_management": TaskTier.FAST,
    "pr_management": TaskTier.BALANCED,
    "code_search": TaskTier.FAST,
    "code_review": TaskTier.POWERFUL,
    "learning": TaskTier.BALANCED,
    "conversational": TaskTier.FAST,
    "local_edit": TaskTier.BALANCED,
    "terminal": TaskTier.FAST,
}
+
+
@dataclass
class ModelSelection:
    """Result of model selection."""

    model: str              # concrete model identifier to call
    tier: TaskTier          # tier the model was picked from
    complexity: Complexity  # estimated request complexity
    provider: str           # provider key (e.g. "openai")
    reason: str             # human-readable explanation of the choice
+
+
@dataclass
class UsageRecord:
    """Track model usage for budgeting."""

    model: str
    tokens_in: int = 0               # prompt tokens consumed
    tokens_out: int = 0              # completion tokens produced
    estimated_cost_usd: float = 0.0  # caller-estimated cost of this call
+
+
@dataclass
class ModelRouterConfig:
    """Configuration for the model router."""

    provider: str = "openai"
    # Per-provider tier->model overrides; falls back to DEFAULT_MODEL_MAP.
    model_map: Dict[str, Dict[str, str]] = field(default_factory=dict)
    # Override: force a specific model for all tasks
    force_model: Optional[str] = None
    # Budget: max estimated cost per session (USD, 0 = unlimited)
    budget_usd: float = 0.0
    # Override tier for specific categories (category -> TaskTier value string)
    category_overrides: Dict[str, str] = field(default_factory=dict)
+
+
class ModelRouter:
    """Route tasks to optimal models based on complexity and cost.

    Usage::

        router = ModelRouter(config=ModelRouterConfig(provider="openai"))
        selection = router.select("Implement authentication system")
        # → ModelSelection(model="o1", tier=POWERFUL, complexity=HIGH)

        selection = router.select("list open issues")
        # → ModelSelection(model="gpt-4o-mini", tier=FAST, complexity=LOW)
    """

    def __init__(self, config: Optional[ModelRouterConfig] = None) -> None:
        self.config = config or ModelRouterConfig()
        # Per-instance usage log consumed by the budget helpers below.
        self._usage: List[UsageRecord] = []

    def select(
        self,
        request: str,
        category: Optional[str] = None,
    ) -> ModelSelection:
        """Select the best model for a given request.

        Args:
            request: The user's request text.
            category: Optional RequestCategory value (e.g. "code_review").
        """
        # Force model override short-circuits all heuristics; tier/complexity
        # are reported as BALANCED/MEDIUM placeholders in that case.
        if self.config.force_model:
            return ModelSelection(
                model=self.config.force_model,
                tier=TaskTier.BALANCED,
                complexity=Complexity.MEDIUM,
                provider=self.config.provider,
                reason="Force model override",
            )

        complexity = self.estimate_complexity(request)
        tier = self._select_tier(complexity, category)
        model = self._resolve_model(tier)

        return ModelSelection(
            model=model,
            tier=tier,
            complexity=complexity,
            provider=self.config.provider,
            reason=self._explain(complexity, tier, category),
        )

    def estimate_complexity(self, request: str) -> Complexity:
        """Estimate the complexity of a request using heuristics.

        High-complexity patterns are checked before low-complexity ones, so
        they win when both match; length is only a fallback.
        """
        text = request.strip()

        # Check high complexity patterns
        for pattern in _HIGH_COMPLEXITY_PATTERNS:
            if pattern.search(text):
                return Complexity.HIGH

        # Check low complexity patterns
        for pattern in _LOW_COMPLEXITY_PATTERNS:
            if pattern.search(text):
                return Complexity.LOW

        # Length-based heuristic: very long requests are treated as complex,
        # very short ones as simple.
        word_count = len(text.split())
        if word_count > 100:
            return Complexity.HIGH
        if word_count < 15:
            return Complexity.LOW

        return Complexity.MEDIUM

    def record_usage(self, record: UsageRecord) -> None:
        """Record a model usage for budget tracking."""
        self._usage.append(record)

    def get_total_cost(self) -> float:
        """Get total estimated cost across all recorded usage."""
        return sum(r.estimated_cost_usd for r in self._usage)

    def is_budget_exceeded(self) -> bool:
        """Check if the session budget has been exceeded (0 = unlimited)."""
        if self.config.budget_usd <= 0:
            return False
        return self.get_total_cost() >= self.config.budget_usd

    def get_usage_summary(self) -> Dict[str, Any]:
        """Get a summary of model usage, aggregated per model."""
        by_model: Dict[str, Dict[str, Any]] = {}
        for r in self._usage:
            if r.model not in by_model:
                by_model[r.model] = {"calls": 0, "tokens_in": 0, "tokens_out": 0, "cost": 0.0}
            by_model[r.model]["calls"] += 1
            by_model[r.model]["tokens_in"] += r.tokens_in
            by_model[r.model]["tokens_out"] += r.tokens_out
            by_model[r.model]["cost"] += r.estimated_cost_usd
        return {
            "total_cost_usd": self.get_total_cost(),
            "budget_usd": self.config.budget_usd,
            "budget_exceeded": self.is_budget_exceeded(),
            "models": by_model,
        }

    def _select_tier(self, complexity: Complexity, category: Optional[str]) -> TaskTier:
        """Pick a tier: explicit override > category default > pure complexity."""
        # Check category overrides first
        if category and category in self.config.category_overrides:
            return TaskTier(self.config.category_overrides[category])

        # Check category defaults
        if category and category in _CATEGORY_TIER_MAP:
            category_tier = _CATEGORY_TIER_MAP[category]
            # Upgrade tier based on complexity
            # NOTE(review): only FAST is upgraded (to BALANCED) on HIGH
            # complexity; BALANCED is never upgraded to POWERFUL — confirm
            # this asymmetry is intentional.
            if complexity == Complexity.HIGH and category_tier == TaskTier.FAST:
                return TaskTier.BALANCED
            return category_tier

        # Pure complexity-based
        return {
            Complexity.LOW: TaskTier.FAST,
            Complexity.MEDIUM: TaskTier.BALANCED,
            Complexity.HIGH: TaskTier.POWERFUL,
        }[complexity]

    def _resolve_model(self, tier: TaskTier) -> str:
        """Map a tier to a model id; user map wins, then built-in defaults."""
        # Check user-configured model map first
        provider = self.config.provider
        if self.config.model_map.get(provider, {}).get(tier.value):
            return self.config.model_map[provider][tier.value]
        # Fall back to defaults (unknown providers borrow the openai table).
        provider_models = DEFAULT_MODEL_MAP.get(provider, DEFAULT_MODEL_MAP["openai"])
        return provider_models.get(tier.value, provider_models["balanced"])

    def _explain(
        self, complexity: Complexity, tier: TaskTier, category: Optional[str],
    ) -> str:
        """Build the human-readable reason string for a ModelSelection."""
        parts = [f"Complexity={complexity.value}"]
        if category:
            parts.append(f"category={category}")
        parts.append(f"→ tier={tier.value}")
        return ", ".join(parts)
diff --git a/gitpilot/terminal.py b/gitpilot/terminal.py
new file mode 100644
index 0000000000000000000000000000000000000000..bcfe9e04d7ae2fc1fd7997b6bda13df90334d2e1
--- /dev/null
+++ b/gitpilot/terminal.py
@@ -0,0 +1,200 @@
+# gitpilot/terminal.py
+"""Sandboxed terminal command executor.
+
+Runs shell commands within the workspace directory with configurable
+timeout, size limits, and directory restrictions.
+"""
+from __future__ import annotations
+
+import asyncio
+import logging
+import os
+import signal
+import time
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any, AsyncIterator, Dict, List, Optional
+
+logger = logging.getLogger(__name__)
+
# Hard limits applied to every command execution.
DEFAULT_TIMEOUT_SEC = 120      # default wall-clock timeout per command (seconds)
MAX_OUTPUT_BYTES = 512_000     # stdout and stderr are each capped at this size

# Case-insensitive substrings that cause a command to be rejected outright.
BLOCKED_PATTERNS = [
    "rm -rf /",
    "mkfs",
    "dd if=/dev/zero",
    ":(){ :|:& };:",  # classic shell fork bomb
]
+
+
@dataclass
class CommandResult:
    """Result of a terminal command execution."""

    command: str               # the exact shell command that was run
    exit_code: int             # process exit status; -1 on timeout or launch failure
    stdout: str                # captured stdout (UTF-8, errors replaced, may be capped)
    stderr: str                # captured stderr (UTF-8, errors replaced, may be capped)
    duration_ms: int           # wall-clock runtime in milliseconds
    truncated: bool = False    # True if either stream exceeded MAX_OUTPUT_BYTES
    timed_out: bool = False    # True if the command exceeded its timeout
+
+
@dataclass
class TerminalSession:
    """An active terminal session bound to a workspace."""

    workspace_path: Path                 # sandbox root all commands run under
    env: Dict[str, str] = field(default_factory=dict)           # extra env vars merged over os.environ
    history: List[CommandResult] = field(default_factory=list)  # results of previously run commands
    cwd: Optional[Path] = None           # current working directory; defaults to workspace_path

    def __post_init__(self):
        # A fresh session starts at the workspace root.
        if self.cwd is None:
            self.cwd = self.workspace_path
+
+
+class TerminalExecutor:
+ """Execute shell commands safely within a workspace directory.
+
+ Security:
+ - Commands run via subprocess (never os.system)
+ - Working directory locked to workspace
+ - Configurable timeout with process-group kill
+ - Output size capping
+ - Blocked command patterns
+ """
+
+ def __init__(
+ self,
+ allowed_commands: Optional[List[str]] = None,
+ blocked_patterns: Optional[List[str]] = None,
+ ):
+ self.allowed_commands = allowed_commands
+ self.blocked_patterns = blocked_patterns or list(BLOCKED_PATTERNS)
+
+ def _validate_command(self, command: str):
+ cmd_lower = command.lower().strip()
+ for blocked in self.blocked_patterns:
+ if blocked in cmd_lower:
+ raise PermissionError(f"Command blocked: {command}")
+ if self.allowed_commands is not None:
+ base_cmd = cmd_lower.split()[0] if cmd_lower else ""
+ if base_cmd not in self.allowed_commands:
+ raise PermissionError(f"Command not in allowlist: {base_cmd}")
+
+ async def execute(
+ self,
+ session: TerminalSession,
+ command: str,
+ timeout: int = DEFAULT_TIMEOUT_SEC,
+ env: Optional[Dict[str, str]] = None,
+ ) -> CommandResult:
+ """Execute a command and return captured output."""
+ self._validate_command(command)
+
+ resolved_cwd = session.cwd.resolve()
+ ws_resolved = session.workspace_path.resolve()
+ if not str(resolved_cwd).startswith(str(ws_resolved)):
+ session.cwd = session.workspace_path
+
+ full_env = {**os.environ, **session.env, **(env or {})}
+ start = time.monotonic()
+
+ try:
+ proc = await asyncio.create_subprocess_shell(
+ command,
+ cwd=str(session.cwd),
+ stdout=asyncio.subprocess.PIPE,
+ stderr=asyncio.subprocess.PIPE,
+ env=full_env,
+ )
+
+ try:
+ stdout_bytes, stderr_bytes = await asyncio.wait_for(
+ proc.communicate(), timeout=timeout,
+ )
+ timed_out = False
+ except asyncio.TimeoutError:
+ try:
+ proc.kill()
+ except ProcessLookupError:
+ pass
+ stdout_bytes, stderr_bytes = b"", b""
+ timed_out = True
+
+ duration_ms = int((time.monotonic() - start) * 1000)
+
+ truncated = False
+ if len(stdout_bytes) > MAX_OUTPUT_BYTES:
+ stdout_bytes = stdout_bytes[:MAX_OUTPUT_BYTES]
+ truncated = True
+ if len(stderr_bytes) > MAX_OUTPUT_BYTES:
+ stderr_bytes = stderr_bytes[:MAX_OUTPUT_BYTES]
+ truncated = True
+
+ result = CommandResult(
+ command=command,
+ exit_code=proc.returncode if not timed_out else -1,
+ stdout=stdout_bytes.decode("utf-8", errors="replace"),
+ stderr=stderr_bytes.decode("utf-8", errors="replace"),
+ duration_ms=duration_ms,
+ truncated=truncated,
+ timed_out=timed_out,
+ )
+ except Exception as e:
+ duration_ms = int((time.monotonic() - start) * 1000)
+ result = CommandResult(
+ command=command, exit_code=-1,
+ stdout="", stderr=str(e),
+ duration_ms=duration_ms,
+ )
+
+ session.history.append(result)
+ return result
+
+ async def execute_streaming(
+ self,
+ session: TerminalSession,
+ command: str,
+ timeout: int = DEFAULT_TIMEOUT_SEC,
+ ) -> AsyncIterator[Dict[str, Any]]:
+ """Execute command and yield output lines as they arrive."""
+ self._validate_command(command)
+
+ full_env = {**os.environ, **session.env}
+ proc = await asyncio.create_subprocess_shell(
+ command,
+ cwd=str(session.cwd),
+ stdout=asyncio.subprocess.PIPE,
+ stderr=asyncio.subprocess.STDOUT,
+ env=full_env,
+ )
+
+ start = time.monotonic()
+ try:
+ while True:
+ if time.monotonic() - start > timeout:
+ proc.kill()
+ yield {"type": "error", "data": "Command timed out"}
+ break
+ try:
+ line = await asyncio.wait_for(
+ proc.stdout.readline(), timeout=5.0,
+ )
+ except asyncio.TimeoutError:
+ continue
+ if not line:
+ break
+ yield {
+ "type": "stdout",
+ "data": line.decode("utf-8", errors="replace"),
+ }
+ finally:
+ await proc.wait()
+ yield {
+ "type": "exit",
+ "exit_code": proc.returncode,
+ "duration_ms": int((time.monotonic() - start) * 1000),
+ }
diff --git a/gitpilot/test_detection.py b/gitpilot/test_detection.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d0de6a6949c1c989cdb1f248f8867699347c668
--- /dev/null
+++ b/gitpilot/test_detection.py
@@ -0,0 +1,92 @@
+# gitpilot/test_detection.py
+"""
+Auto-detect test framework and build the test command.
+
+Works for all platforms (server-side detection for web/HF Spaces,
+VS Code can call this via the API or detect locally).
+"""
+from __future__ import annotations
+
+import json
+import logging
+from pathlib import Path
+from typing import Optional
+
logger = logging.getLogger(__name__)

# (marker file/dir, framework name, command)
FRAMEWORK_MARKERS = [
    ("jest.config.js", "jest", "npx jest --ci --no-coverage 2>&1"),
    ("jest.config.ts", "jest", "npx jest --ci --no-coverage 2>&1"),
    ("jest.config.mjs", "jest", "npx jest --ci --no-coverage 2>&1"),
    ("vitest.config.ts", "vitest", "npx vitest run 2>&1"),
    ("vitest.config.js", "vitest", "npx vitest run 2>&1"),
    ("vitest.config.mjs", "vitest", "npx vitest run 2>&1"),
    (".mocharc.yml", "mocha", "npx mocha 2>&1"),
    (".mocharc.json", "mocha", "npx mocha 2>&1"),
    ("pytest.ini", "pytest", "python -m pytest --tb=short -q 2>&1"),
    ("setup.cfg", "pytest", "python -m pytest --tb=short -q 2>&1"),
    ("conftest.py", "pytest", "python -m pytest --tb=short -q 2>&1"),
    ("Cargo.toml", "cargo", "cargo test 2>&1"),
    ("go.mod", "go", "go test ./... 2>&1"),
    ("mix.exs", "elixir", "mix test 2>&1"),
    ("Gemfile", "ruby", "bundle exec rake test 2>&1"),
]


async def detect_test_command(workspace_path: Path) -> Optional[str]:
    """Detect the test runner from project manifest files.

    Args:
        workspace_path: root directory of the project to inspect.

    Returns:
        A shell command string (stderr folded into stdout), or ``None``
        when no known test framework can be detected.
    """
    for marker, name, command in FRAMEWORK_MARKERS:
        marker_path = workspace_path / marker
        if not marker_path.exists():
            continue
        # BUGFIX: setup.cfg exists in many non-pytest setuptools projects;
        # only treat it as a pytest marker when it actually carries a
        # [tool:pytest] configuration section.
        if marker == "setup.cfg":
            try:
                if "[tool:pytest]" not in marker_path.read_text(errors="replace"):
                    continue
            except OSError:
                continue
        logger.info("Detected test framework: %s (via %s)", name, marker)
        return command

    # pyproject.toml with a [tool.pytest] / [tool.pytest.ini_options] table
    pyproject = workspace_path / "pyproject.toml"
    if pyproject.exists():
        try:
            if "[tool.pytest" in pyproject.read_text(errors="replace"):
                return "python -m pytest --tb=short -q 2>&1"
        except OSError:
            pass

    # package.json with a real scripts.test entry (npm's default placeholder
    # script contains "no test specified")
    pkg = workspace_path / "package.json"
    if pkg.exists():
        try:
            data = json.loads(pkg.read_text())
            test_script = data.get("scripts", {}).get("test", "")
            if test_script and "no test specified" not in test_script:
                return "npm test -- --ci 2>&1"
        except (json.JSONDecodeError, OSError):
            pass

    return None
+
+
async def detect_framework_name(workspace_path: Path) -> Optional[str]:
    """Return just the framework name (for UI display).

    Mirrors the detection order of ``detect_test_command`` but returns the
    framework identifier (e.g. "pytest", "jest", "npm") instead of a
    runnable command. Returns ``None`` when nothing is detected.
    """
    for marker, name, _ in FRAMEWORK_MARKERS:
        marker_path = workspace_path / marker
        if not marker_path.exists():
            continue
        # BUGFIX: setup.cfg is common in non-pytest projects; only report
        # pytest when the file actually has a [tool:pytest] section.
        if marker == "setup.cfg":
            try:
                if "[tool:pytest]" not in marker_path.read_text(errors="replace"):
                    continue
            except OSError:
                continue
        return name

    pyproject = workspace_path / "pyproject.toml"
    if pyproject.exists():
        try:
            if "[tool.pytest" in pyproject.read_text(errors="replace"):
                return "pytest"
        except OSError:
            pass

    pkg = workspace_path / "package.json"
    if pkg.exists():
        try:
            data = json.loads(pkg.read_text())
            if data.get("scripts", {}).get("test"):
                return "npm"
        except (json.JSONDecodeError, OSError):
            pass

    return None
diff --git a/gitpilot/topology_registry.py b/gitpilot/topology_registry.py
new file mode 100644
index 0000000000000000000000000000000000000000..921273cf9b534effd9e2abd6a56f1c475764dcb7
--- /dev/null
+++ b/gitpilot/topology_registry.py
@@ -0,0 +1,1024 @@
+# gitpilot/topology_registry.py
+"""Topology Registry — switchable agent workflow presets for GitPilot.
+
+A topology controls three things simultaneously:
+ 1. **Visualization** — the ReactFlow node/edge graph shown in Agent Workflow view.
+ 2. **Routing** — which agent(s) are selected for a given user request.
+ 3. **Execution** — the runtime pattern (single_task, crew_pipeline, or react_loop).
+
+There are 7 built-in topologies:
+ - T1 ``default`` — Fan-out CrewAI routing (the original architecture)
+ - T2 ``gitpilot_code`` — Hub-and-spoke ReAct loop with on-demand subagents
+ - T3 ``feature_builder`` — 5-agent pipeline: explore > plan > implement > review > PR
+ - T4 ``bug_hunter`` — 4-agent pipeline: explore > fix > verify > PR
+ - T5 ``code_inspector`` — 2-agent read-only: explore > review
+ - T6 ``architect_mode`` — 2-agent read-only: explore > plan
+ - T7 ``quick_fix`` — 2-agent fast path: implement > git
+
+This module is **purely additive** — it does not modify any existing routing,
+agent, or execution logic. The existing ``get_flow_definition()`` and
+``dispatch_request()`` continue to work unchanged. New code paths can opt-in
+to topology-aware behaviour by importing from this module.
+"""
+from __future__ import annotations
+
+import logging
+import re
+from dataclasses import dataclass, field
+from enum import Enum
+from typing import Any, Dict, List, Optional, Tuple
+
+logger = logging.getLogger(__name__)
+
+
+# ---------------------------------------------------------------------------
+# Enums
+# ---------------------------------------------------------------------------
+
class TopologyCategory(str, Enum):
    """Coarse classification of a topology.

    ``system`` marks a whole-system agent architecture; ``pipeline`` marks
    a fixed, task-specific agent chain.
    """

    system = "system"
    pipeline = "pipeline"
+
+
class ExecutionStyle(str, Enum):
    """Runtime orchestration pattern for a topology's agents."""

    single_task = "single_task"      # one agent handles one task (T1 default)
    react_loop = "react_loop"        # main agent loops while it emits tool calls (T2)
    crew_pipeline = "crew_pipeline"  # sequential multi-agent CrewAI crew (T3-T7)
+
+
class RoutingStrategy(str, Enum):
    """How a topology maps an incoming request onto its agents."""

    classify_and_dispatch = "classify_and_dispatch"  # classifier picks a single agent
    always_main_agent = "always_main_agent"          # every request goes to one agent
    fixed_sequence = "fixed_sequence"                # ordered chain of agents
+
+
+# ---------------------------------------------------------------------------
+# Data classes
+# ---------------------------------------------------------------------------
+
@dataclass
class RoutingPolicy:
    """Defines how a topology selects agents for a request."""

    strategy: RoutingStrategy              # dispatch algorithm (see RoutingStrategy)
    primary_agent: Optional[str] = None    # sole target when strategy is always_main_agent
    sequence: Optional[List[str]] = None   # ordered agent ids when strategy is fixed_sequence
    # Keywords associated with this topology; presumably used by a classifier
    # to bias topology/agent selection — confirm against the consumer.
    classifier_hints: List[str] = field(default_factory=list)
+
+
@dataclass
class TopologyMeta:
    """Lightweight summary of a topology (no graph data)."""

    id: str                          # stable topology identifier (e.g. "default")
    name: str                        # human-readable display name
    description: str                 # one-line summary for UI lists
    category: TopologyCategory       # system architecture vs task pipeline
    icon: str                        # emoji/icon shown in the UI
    agents_used: List[str]           # ids of agents this topology involves
    execution_style: ExecutionStyle  # runtime orchestration pattern
+
+
@dataclass
class Topology:
    """Complete topology definition including the flow graph."""

    id: str                          # stable topology identifier
    name: str                        # human-readable display name
    description: str                 # one-line summary
    category: TopologyCategory       # system vs pipeline
    icon: str                        # emoji/icon for the UI
    agents_used: List[str]           # agent ids involved
    execution_style: ExecutionStyle  # runtime pattern
    routing_policy: RoutingPolicy    # agent-selection rules
    flow_graph: Dict[str, Any]  # {"nodes": [...], "edges": [...]}

    def to_meta(self) -> TopologyMeta:
        """Summarize this topology without the (large) flow graph."""
        return TopologyMeta(
            self.id,
            self.name,
            self.description,
            self.category,
            self.icon,
            self.agents_used,
            self.execution_style,
        )

    def to_dict(self) -> Dict[str, Any]:
        """JSON-serializable representation (enums collapsed to their values)."""
        policy = self.routing_policy
        return {
            "id": self.id,
            "name": self.name,
            "description": self.description,
            "category": self.category.value,
            "icon": self.icon,
            "agents_used": self.agents_used,
            "execution_style": self.execution_style.value,
            "routing_policy": {
                "strategy": policy.strategy.value,
                "primary_agent": policy.primary_agent,
                "sequence": policy.sequence,
                "classifier_hints": policy.classifier_hints,
            },
            "flow_graph": self.flow_graph,
        }
+
+
+# ---------------------------------------------------------------------------
+# T1 — Default CrewAI Routing
+# ---------------------------------------------------------------------------
+
+_T1_FLOW_GRAPH: Dict[str, Any] = {
+ "nodes": [
+ {
+ "id": "user_request",
+ "type": "user",
+ "data": {
+ "label": "User Request",
+ "description": "Incoming task from user",
+ },
+ "position": {"x": 400, "y": 0},
+ },
+ {
+ "id": "router",
+ "type": "router",
+ "data": {
+ "label": "Task Router",
+ "description": "Classifies request type and dispatches to the best agent",
+ "model": "regex + heuristics",
+ },
+ "position": {"x": 400, "y": 100},
+ },
+ {
+ "id": "repo_explorer",
+ "type": "agent",
+ "data": {
+ "label": "Repo Explorer",
+ "model": "Haiku 4.5",
+ "mode": "read-only",
+ "tools": ["Glob", "Grep", "Read", "LS", "Bash(ro)"],
+ "description": "Searches and maps codebase structure",
+ },
+ "position": {"x": 0, "y": 250},
+ },
+ {
+ "id": "planner",
+ "type": "agent",
+ "data": {
+ "label": "Planner",
+ "model": "Sonnet 4.5",
+ "mode": "read-only",
+ "tools": ["Glob", "Grep", "Read", "Bash(ro)"],
+ "description": "Designs implementation plans and strategies",
+ },
+ "position": {"x": 160, "y": 250},
+ },
+ {
+ "id": "code_writer",
+ "type": "agent",
+ "data": {
+ "label": "Code Writer",
+ "model": "Sonnet 4.5",
+ "mode": "read-write",
+ "tools": ["Read", "Write", "Edit", "MultiEdit", "Bash", "Glob", "Grep"],
+ "description": "Implements code changes, creates files, runs tests",
+ },
+ "position": {"x": 320, "y": 250},
+ },
+ {
+ "id": "reviewer",
+ "type": "agent",
+ "data": {
+ "label": "Reviewer",
+ "model": "Sonnet 4.5",
+ "mode": "read-only",
+ "tools": ["Read", "Grep", "Glob", "Bash(git diff)"],
+ "description": "Reviews code for quality, security, and best practices",
+ },
+ "position": {"x": 480, "y": 250},
+ },
+ {
+ "id": "issue_manager",
+ "type": "agent",
+ "data": {
+ "label": "Issue Manager",
+ "model": "Sonnet 4.5",
+ "mode": "read-write",
+ "tools": ["GitHub API", "Read"],
+ "description": "Creates, updates, and manages GitHub issues",
+ },
+ "position": {"x": 640, "y": 250},
+ },
+ {
+ "id": "pr_manager",
+ "type": "agent",
+ "data": {
+ "label": "PR Manager",
+ "model": "Sonnet 4.5",
+ "mode": "git-ops",
+ "tools": ["Bash(git)", "Bash(gh)", "Read"],
+ "description": "Creates branches, commits, pushes, opens PRs",
+ },
+ "position": {"x": 800, "y": 250},
+ },
+ {
+ "id": "search_agent",
+ "type": "agent",
+ "data": {
+ "label": "Search Agent",
+ "model": "Sonnet 4.5",
+ "mode": "read-only",
+ "tools": ["WebSearch", "WebFetch", "Read"],
+ "description": "Researches external documentation and APIs",
+ },
+ "position": {"x": 160, "y": 400},
+ },
+ {
+ "id": "learning_agent",
+ "type": "agent",
+ "data": {
+ "label": "Learning Agent",
+ "model": "Sonnet 4.5",
+ "mode": "read-only",
+ "tools": ["WebSearch", "WebFetch", "Read"],
+ "description": "Explains concepts, generates tutorials",
+ },
+ "position": {"x": 320, "y": 400},
+ },
+ {
+ "id": "local_editor",
+ "type": "agent",
+ "data": {
+ "label": "Local Editor",
+ "model": "Sonnet 4.5",
+ "mode": "read-write",
+ "tools": ["Read", "Write", "Edit", "Glob"],
+ "description": "Edits local files without git operations",
+ },
+ "position": {"x": 480, "y": 400},
+ },
+ {
+ "id": "terminal_agent",
+ "type": "agent",
+ "data": {
+ "label": "Terminal Agent",
+ "model": "Sonnet 4.5",
+ "mode": "read-write",
+ "tools": ["Bash"],
+ "description": "Runs shell commands, manages environment",
+ },
+ "position": {"x": 640, "y": 400},
+ },
+ {
+ "id": "github_tools",
+ "type": "tool_group",
+ "data": {
+ "label": "GitHub Tools",
+ "tools": ["GitHub API", "Bash(gh)"],
+ "description": "GitHub REST/GraphQL API and CLI",
+ },
+ "position": {"x": 0, "y": 400},
+ },
+ {
+ "id": "local_tools",
+ "type": "tool_group",
+ "data": {
+ "label": "Local Tools",
+ "tools": ["Read", "Write", "Edit", "Bash", "Glob", "Grep"],
+ "description": "Filesystem and shell tools",
+ },
+ "position": {"x": 800, "y": 400},
+ },
+ {
+ "id": "output",
+ "type": "output",
+ "data": {
+ "label": "Result",
+ "description": "Response returned to user",
+ },
+ "position": {"x": 400, "y": 550},
+ },
+ ],
+ "edges": [
+ {"id": "e-user-router", "source": "user_request", "target": "router", "animated": True},
+ {"id": "e-router-explorer", "source": "router", "target": "repo_explorer", "label": "explore"},
+ {"id": "e-router-planner", "source": "router", "target": "planner", "label": "plan"},
+ {"id": "e-router-codewriter", "source": "router", "target": "code_writer", "label": "implement"},
+ {"id": "e-router-reviewer", "source": "router", "target": "reviewer", "label": "review"},
+ {"id": "e-router-issue", "source": "router", "target": "issue_manager", "label": "issue"},
+ {"id": "e-router-pr", "source": "router", "target": "pr_manager", "label": "pr"},
+ {"id": "e-router-search", "source": "router", "target": "search_agent", "label": "search"},
+ {"id": "e-router-learning", "source": "router", "target": "learning_agent", "label": "learn"},
+ {"id": "e-router-editor", "source": "router", "target": "local_editor", "label": "edit"},
+ {"id": "e-router-terminal", "source": "router", "target": "terminal_agent", "label": "terminal"},
+ {"id": "e-explorer-output", "source": "repo_explorer", "target": "output"},
+ {"id": "e-planner-output", "source": "planner", "target": "output"},
+ {"id": "e-codewriter-output", "source": "code_writer", "target": "output"},
+ {"id": "e-reviewer-output", "source": "reviewer", "target": "output"},
+ {"id": "e-issue-output", "source": "issue_manager", "target": "output"},
+ {"id": "e-pr-output", "source": "pr_manager", "target": "output"},
+ {"id": "e-search-output", "source": "search_agent", "target": "output"},
+ {"id": "e-learning-output", "source": "learning_agent", "target": "output"},
+ {"id": "e-editor-output", "source": "local_editor", "target": "output"},
+ {"id": "e-terminal-output", "source": "terminal_agent", "target": "output"},
+ {"id": "e-explorer-github", "source": "repo_explorer", "target": "github_tools", "type": "bidirectional", "animated": False},
+ {"id": "e-pr-github", "source": "pr_manager", "target": "github_tools", "type": "bidirectional", "animated": False},
+ {"id": "e-codewriter-local", "source": "code_writer", "target": "local_tools", "type": "bidirectional", "animated": False},
+ {"id": "e-terminal-local", "source": "terminal_agent", "target": "local_tools", "type": "bidirectional", "animated": False},
+ ],
+}
+
+T1_DEFAULT = Topology(
+ id="default",
+ name="Default (CrewAI Routing)",
+ description="Router dispatches to specialized agents based on task type",
+ category=TopologyCategory.system,
+ icon="\U0001f500", # shuffle arrows
+ agents_used=[
+ "repo_explorer", "planner", "code_writer", "reviewer",
+ "issue_manager", "pr_manager", "search_agent",
+ "learning_agent", "local_editor", "terminal_agent",
+ ],
+ execution_style=ExecutionStyle.single_task,
+ routing_policy=RoutingPolicy(
+ strategy=RoutingStrategy.classify_and_dispatch,
+ classifier_hints=[],
+ ),
+ flow_graph=_T1_FLOW_GRAPH,
+)
+
+# ---------------------------------------------------------------------------
+# T2 — GitPilot Code (ReAct Loop + Subagents)
+# ---------------------------------------------------------------------------
+
+_T2_FLOW_GRAPH: Dict[str, Any] = {
+ "nodes": [
+ {
+ "id": "user_request",
+ "type": "user",
+ "data": {"label": "User Request", "description": "Incoming task or feedback"},
+ "position": {"x": 400, "y": 0},
+ },
+ {
+ "id": "main_react_agent",
+ "type": "agent",
+ "data": {
+ "label": "Main ReAct Agent",
+ "model": "Opus 4.6",
+ "mode": "read-write",
+ "tools": ["ALL"],
+ "description": "Central agent running in a while(tool_use) loop. Reasons, acts, observes, repeats. Delegates complex subtasks to subagents.",
+ },
+ "position": {"x": 400, "y": 150},
+ },
+ {
+ "id": "todo_write",
+ "type": "tool",
+ "data": {
+ "label": "TodoWrite",
+ "tools": ["TodoWrite"],
+ "description": "Creates and tracks step-by-step TODO lists for complex tasks",
+ },
+ "position": {"x": 150, "y": 150},
+ },
+ {
+ "id": "fs_tools",
+ "type": "tool_group",
+ "data": {
+ "label": "File Tools",
+ "tools": ["Read", "Write", "Edit", "MultiEdit"],
+ "description": "Read, create, and edit files in the repository",
+ },
+ "position": {"x": 650, "y": 80},
+ },
+ {
+ "id": "search_tools",
+ "type": "tool_group",
+ "data": {
+ "label": "Search Tools",
+ "tools": ["Glob", "Grep", "LS"],
+ "description": "Find files by pattern, search contents, list directories",
+ },
+ "position": {"x": 650, "y": 160},
+ },
+ {
+ "id": "bash_tool",
+ "type": "tool",
+ "data": {
+ "label": "Bash",
+ "tools": ["Bash"],
+ "description": "Execute shell commands (git, npm, pytest, etc.)",
+ },
+ "position": {"x": 650, "y": 240},
+ },
+ {
+ "id": "web_tools",
+ "type": "tool_group",
+ "data": {
+ "label": "Web Tools",
+ "tools": ["WebSearch", "WebFetch"],
+ "description": "Search the web and fetch page contents",
+ },
+ "position": {"x": 150, "y": 240},
+ },
+ {
+ "id": "subagent_explore",
+ "type": "agent",
+ "data": {
+ "label": "Explore Subagent",
+ "model": "Haiku 4.5",
+ "mode": "read-only",
+ "tools": ["Glob", "Grep", "Read", "LS", "Bash(ro)"],
+ "description": "Fast, cheap codebase exploration. Returns concise summary without polluting main context.",
+ },
+ "position": {"x": 100, "y": 400},
+ },
+ {
+ "id": "subagent_plan",
+ "type": "agent",
+ "data": {
+ "label": "Plan Subagent",
+ "model": "Sonnet 4.5",
+ "mode": "read-only",
+ "tools": ["Glob", "Grep", "Read", "Bash(ro)"],
+ "description": "Researches codebase and designs implementation plans before execution.",
+ },
+ "position": {"x": 270, "y": 400},
+ },
+ {
+ "id": "subagent_review",
+ "type": "agent",
+ "data": {
+ "label": "Review Subagent",
+ "model": "Sonnet 4.5",
+ "mode": "read-only",
+ "tools": ["Read", "Grep", "Glob", "Bash(git diff)"],
+ "description": "Reviews code changes for security, quality, and best practices.",
+ },
+ "position": {"x": 440, "y": 400},
+ },
+ {
+ "id": "subagent_research",
+ "type": "agent",
+ "data": {
+ "label": "Research Subagent",
+ "model": "Sonnet 4.5",
+ "mode": "read-only",
+ "tools": ["WebSearch", "WebFetch", "Read"],
+ "description": "Gathers external knowledge from documentation, APIs, and examples.",
+ },
+ "position": {"x": 610, "y": 400},
+ },
+ {
+ "id": "subagent_gitops",
+ "type": "agent",
+ "data": {
+ "label": "GitOps Subagent",
+ "model": "Sonnet 4.5",
+ "mode": "git-ops",
+ "tools": ["Bash(git)", "Bash(gh)", "Read"],
+ "description": "Handles git operations: commit, push, create PR.",
+ },
+ "position": {"x": 780, "y": 400},
+ },
+ {
+ "id": "output",
+ "type": "output",
+ "data": {"label": "Result", "description": "Response returned to user (when loop ends)"},
+ "position": {"x": 400, "y": 550},
+ },
+ ],
+ "edges": [
+ {"id": "e-user-main", "source": "user_request", "target": "main_react_agent", "animated": True},
+ {"id": "e-main-todo", "source": "main_react_agent", "target": "todo_write", "type": "bidirectional"},
+ {"id": "e-main-fs", "source": "main_react_agent", "target": "fs_tools", "type": "bidirectional"},
+ {"id": "e-main-search", "source": "main_react_agent", "target": "search_tools", "type": "bidirectional"},
+ {"id": "e-main-bash", "source": "main_react_agent", "target": "bash_tool", "type": "bidirectional"},
+ {"id": "e-main-web", "source": "main_react_agent", "target": "web_tools", "type": "bidirectional"},
+ {"id": "e-main-explore", "source": "main_react_agent", "target": "subagent_explore", "label": "Task(explore)"},
+ {"id": "e-explore-main", "source": "subagent_explore", "target": "main_react_agent", "label": "summary", "animated": True},
+ {"id": "e-main-plan", "source": "main_react_agent", "target": "subagent_plan", "label": "Task(plan)"},
+ {"id": "e-plan-main", "source": "subagent_plan", "target": "main_react_agent", "label": "plan", "animated": True},
+ {"id": "e-main-review", "source": "main_react_agent", "target": "subagent_review", "label": "Task(review)"},
+ {"id": "e-review-main", "source": "subagent_review", "target": "main_react_agent", "label": "findings", "animated": True},
+ {"id": "e-main-research", "source": "main_react_agent", "target": "subagent_research","label": "Task(research)"},
+ {"id": "e-research-main", "source": "subagent_research", "target": "main_react_agent", "label": "info", "animated": True},
+ {"id": "e-main-gitops", "source": "main_react_agent", "target": "subagent_gitops", "label": "Task(gitops)"},
+ {"id": "e-gitops-main", "source": "subagent_gitops", "target": "main_react_agent", "label": "PR URL", "animated": True},
+ {"id": "e-main-output", "source": "main_react_agent", "target": "output", "label": "no tool calls = done"},
+ ],
+}
+
+T2_CLAUDE_CODE = Topology(
+ id="gitpilot_code",
+ name="GitPilot Code (ReAct + Subagents)",
+ description="Single main agent in a ReAct loop with on-demand subagents",
+ category=TopologyCategory.system,
+ icon="\U0001f9e0", # brain
+ agents_used=[
+ "main_react_agent", "subagent_explore", "subagent_plan",
+ "subagent_review", "subagent_research", "subagent_gitops",
+ ],
+ execution_style=ExecutionStyle.react_loop,
+ routing_policy=RoutingPolicy(
+ strategy=RoutingStrategy.always_main_agent,
+ primary_agent="main_react_agent",
+ classifier_hints=[],
+ ),
+ flow_graph=_T2_FLOW_GRAPH,
+)
+
+# ---------------------------------------------------------------------------
+# T3 — Feature Builder (5-agent pipeline)
+# ---------------------------------------------------------------------------
+
+_T3_FLOW_GRAPH: Dict[str, Any] = {
+ "nodes": [
+ {"id": "user_request", "type": "user", "data": {"label": "User Request", "description": "New feature or enhancement request"}, "position": {"x": 400, "y": 0}},
+ {"id": "explorer", "type": "agent", "data": {"label": "Explorer", "model": "Haiku 4.5", "mode": "read-only", "tools": ["Glob","Grep","Read","LS","Bash(ro)"], "description": "Maps codebase structure and discovers relevant files"}, "position": {"x": 100, "y": 150}},
+ {"id": "planner", "type": "agent", "data": {"label": "Planner", "model": "Sonnet 4.5", "mode": "read-only", "tools": ["Glob","Grep","Read","Bash(ro)"], "description": "Designs step-by-step implementation plan"}, "position": {"x": 250, "y": 150}},
+ {"id": "developer", "type": "agent", "data": {"label": "Developer", "model": "Sonnet 4.5", "mode": "read-write", "tools": ["Read","Write","Edit","MultiEdit","Bash","Glob","Grep"], "description": "Implements code changes and runs tests"}, "position": {"x": 400, "y": 150}},
+ {"id": "reviewer", "type": "agent", "data": {"label": "Reviewer", "model": "Sonnet 4.5", "mode": "read-only", "tools": ["Read","Grep","Glob","Bash(git diff)"], "description": "Reviews code for quality, security, and best practices"}, "position": {"x": 550, "y": 150}},
+ {"id": "git_agent", "type": "agent", "data": {"label": "Git Agent", "model": "Sonnet 4.5", "mode": "git-ops", "tools": ["Bash(git)","Bash(gh)","Read"], "description": "Creates branch, commits, pushes, opens PR"}, "position": {"x": 700, "y": 150}},
+ {"id": "output", "type": "output", "data": {"label": "PR Created", "description": "Feature implemented and PR opened"}, "position": {"x": 700, "y": 300}},
+ ],
+ "edges": [
+ {"id": "e-user-exp", "source": "user_request", "target": "explorer", "animated": True},
+ {"id": "e-exp-plan", "source": "explorer", "target": "planner", "label": "analysis", "animated": True},
+ {"id": "e-plan-dev", "source": "planner", "target": "developer", "label": "plan", "animated": True},
+ {"id": "e-dev-rev", "source": "developer", "target": "reviewer", "label": "changes", "animated": True},
+ {"id": "e-rev-git", "source": "reviewer", "target": "git_agent", "label": "approved", "animated": True},
+ {"id": "e-git-output", "source": "git_agent", "target": "output", "label": "PR URL", "animated": True},
+ ],
+}
+
+T3_FEATURE_BUILDER = Topology(
+ id="feature_builder",
+ name="Feature Builder",
+ description="Full pipeline: explore > plan > implement > review > PR",
+ category=TopologyCategory.pipeline,
+ icon="\U0001f680", # rocket
+ agents_used=["explorer", "planner", "developer", "reviewer", "git_agent"],
+ execution_style=ExecutionStyle.crew_pipeline,
+ routing_policy=RoutingPolicy(
+ strategy=RoutingStrategy.fixed_sequence,
+ sequence=["explorer", "planner", "developer", "reviewer", "git_agent"],
+ classifier_hints=[
+ "add", "create", "implement", "build", "new feature",
+ "endpoint", "component", "module", "integrate", "migration",
+ "refactor", "rewrite", "enhance", "upgrade",
+ ],
+ ),
+ flow_graph=_T3_FLOW_GRAPH,
+)
+
+# ---------------------------------------------------------------------------
+# T4 — Bug Hunter (4-agent pipeline)
+# ---------------------------------------------------------------------------
+
+_T4_FLOW_GRAPH: Dict[str, Any] = {
+ "nodes": [
+ {"id": "user_request", "type": "user", "data": {"label": "Bug Report", "description": "Bug description or error report"}, "position": {"x": 400, "y": 0}},
+ {"id": "explorer", "type": "agent", "data": {"label": "Explorer", "model": "Haiku 4.5", "mode": "read-only", "tools": ["Glob","Grep","Read","LS","Bash(ro)"], "description": "Traces error patterns and locates root cause"}, "position": {"x": 175, "y": 150}},
+ {"id": "developer", "type": "agent", "data": {"label": "Developer", "model": "Sonnet 4.5", "mode": "read-write", "tools": ["Read","Write","Edit","MultiEdit","Bash","Glob","Grep"], "description": "Applies targeted fix and runs tests"}, "position": {"x": 350, "y": 150}},
+ {"id": "reviewer", "type": "agent", "data": {"label": "Reviewer", "model": "Sonnet 4.5", "mode": "read-only", "tools": ["Read","Grep","Glob","Bash(git diff)"], "description": "Verifies fix and checks for regressions"}, "position": {"x": 525, "y": 150}},
+ {"id": "git_agent", "type": "agent", "data": {"label": "Git Agent", "model": "Sonnet 4.5", "mode": "git-ops", "tools": ["Bash(git)","Bash(gh)","Read"], "description": "Commits fix, pushes, opens hotfix PR"}, "position": {"x": 700, "y": 150}},
+ {"id": "output", "type": "output", "data": {"label": "Hotfix PR", "description": "Bug fixed and hotfix PR opened"}, "position": {"x": 700, "y": 300}},
+ ],
+ "edges": [
+ {"id": "e-user-exp", "source": "user_request", "target": "explorer", "animated": True},
+ {"id": "e-exp-dev", "source": "explorer", "target": "developer", "label": "root cause", "animated": True},
+ {"id": "e-dev-rev", "source": "developer", "target": "reviewer", "label": "fix applied","animated": True},
+ {"id": "e-rev-git", "source": "reviewer", "target": "git_agent", "label": "verified", "animated": True},
+ {"id": "e-git-output", "source": "git_agent", "target": "output", "label": "PR URL", "animated": True},
+ ],
+}
+
+# Pipeline topology: explorer -> developer -> reviewer -> git_agent, in a
+# fixed order mirrored by both `sequence` and the edges of _T4_FLOW_GRAPH.
+T4_BUG_HUNTER = Topology(
+    id="bug_hunter",
+    name="Bug Hunter",
+    description="Diagnose > fix > verify > ship hotfix",
+    category=TopologyCategory.pipeline,
+    icon="\U0001f41b",  # bug
+    agents_used=["explorer", "developer", "reviewer", "git_agent"],
+    execution_style=ExecutionStyle.crew_pipeline,
+    routing_policy=RoutingPolicy(
+        strategy=RoutingStrategy.fixed_sequence,
+        sequence=["explorer", "developer", "reviewer", "git_agent"],
+        # Keywords scored (case-insensitively) by classify_message() to
+        # auto-recommend this topology for bug-fix style requests.
+        classifier_hints=[
+            "fix", "bug", "error", "broken", "failing", "crash", "exception",
+            "debug", "traceback", "500", "403", "404", "timeout", "leak",
+            "regression", "hotfix", "patch", "not working", "tests failing",
+        ],
+    ),
+    flow_graph=_T4_FLOW_GRAPH,
+)
+
+# ---------------------------------------------------------------------------
+# T5 — Code Inspector (2-agent read-only)
+# ---------------------------------------------------------------------------
+
+# React-Flow style graph consumed by the frontend FlowViewer:
+# review request -> explorer (context) -> reviewer (analysis) -> report.
+_T5_FLOW_GRAPH: Dict[str, Any] = {
+    "nodes": [
+        {"id": "user_request", "type": "user", "data": {"label": "Review Request", "description": "Code review or audit request"}, "position": {"x": 300, "y": 0}},
+        {"id": "explorer", "type": "agent", "data": {"label": "Explorer", "model": "Haiku 4.5", "mode": "read-only", "tools": ["Glob","Grep","Read","LS","Bash(ro)"], "description": "Discovers modified files and gathers context"}, "position": {"x": 200, "y": 150}},
+        {"id": "reviewer", "type": "agent", "data": {"label": "Reviewer", "model": "Sonnet 4.5", "mode": "read-only", "tools": ["Read","Grep","Glob","Bash(git diff)"], "description": "Deep analysis: security, quality, performance"}, "position": {"x": 400, "y": 150}},
+        {"id": "output", "type": "output", "data": {"label": "Review Report", "description": "Structured review with severity levels"}, "position": {"x": 400, "y": 300}},
+    ],
+    "edges": [
+        {"id": "e-user-exp", "source": "user_request", "target": "explorer", "animated": True},
+        {"id": "e-exp-rev", "source": "explorer", "target": "reviewer", "label": "scope + context", "animated": True},
+        {"id": "e-rev-output", "source": "reviewer", "target": "output", "label": "report", "animated": True},
+    ],
+}
+
+# Read-only review topology: no agent in the pipeline has write tools.
+T5_CODE_INSPECTOR = Topology(
+    id="code_inspector",
+    name="Code Inspector",
+    description="Read-only analysis: explore changes > review for issues",
+    category=TopologyCategory.pipeline,
+    icon="\U0001f50d",  # magnifying glass
+    agents_used=["explorer", "reviewer"],
+    execution_style=ExecutionStyle.crew_pipeline,
+    routing_policy=RoutingPolicy(
+        strategy=RoutingStrategy.fixed_sequence,
+        sequence=["explorer", "reviewer"],
+        # Keywords matched by classify_message() for review/audit requests.
+        classifier_hints=[
+            "review", "audit", "security", "inspect", "analyze code",
+            "vulnerabilities", "quality", "what changed", "diff",
+            "pre-merge", "check quality", "code smell", "coverage",
+        ],
+    ),
+    flow_graph=_T5_FLOW_GRAPH,
+)
+
+# ---------------------------------------------------------------------------
+# T6 — Architect Mode (2-agent read-only)
+# ---------------------------------------------------------------------------
+
+# Flow graph for Architect Mode: question -> explorer (research) ->
+# planner (synthesis) -> plan awaiting user approval. No write-capable nodes.
+_T6_FLOW_GRAPH: Dict[str, Any] = {
+    "nodes": [
+        {"id": "user_request", "type": "user", "data": {"label": "Architecture Question", "description": "Design or strategy question"}, "position": {"x": 300, "y": 0}},
+        {"id": "explorer", "type": "agent", "data": {"label": "Explorer", "model": "Haiku 4.5", "mode": "read-only", "tools": ["Glob","Grep","Read","LS","Bash(ro)"], "description": "Deep codebase research: structure, deps, patterns"}, "position": {"x": 200, "y": 150}},
+        {"id": "planner", "type": "agent", "data": {"label": "Planner", "model": "Sonnet 4.5", "mode": "read-only", "tools": ["Glob","Grep","Read","Bash(ro)"], "description": "Synthesizes findings into actionable plan"}, "position": {"x": 400, "y": 150}},
+        {"id": "output", "type": "output", "data": {"label": "Implementation Plan", "description": "Plan awaiting user approval before execution"}, "position": {"x": 400, "y": 300}},
+    ],
+    "edges": [
+        {"id": "e-user-exp", "source": "user_request", "target": "explorer", "animated": True},
+        {"id": "e-exp-plan", "source": "explorer", "target": "planner", "label": "deep analysis", "animated": True},
+        {"id": "e-plan-output", "source": "planner", "target": "output", "label": "plan + approval", "animated": True},
+    ],
+}
+
+# Planning-only topology: research then design, never edits the codebase.
+T6_ARCHITECT_MODE = Topology(
+    id="architect_mode",
+    name="Architect Mode",
+    description="Research codebase > design plan (no code changes)",
+    category=TopologyCategory.pipeline,
+    icon="\U0001f4d0",  # triangular ruler
+    agents_used=["explorer", "planner"],
+    execution_style=ExecutionStyle.crew_pipeline,
+    routing_policy=RoutingPolicy(
+        strategy=RoutingStrategy.fixed_sequence,
+        sequence=["explorer", "planner"],
+        # Keywords matched by classify_message() for design/strategy asks.
+        classifier_hints=[
+            "plan", "design", "architect", "strategy", "how should",
+            "approach", "migration", "refactor plan", "proposal",
+            "trade-offs", "options", "recommend", "evaluate",
+        ],
+    ),
+    flow_graph=_T6_FLOW_GRAPH,
+)
+
+# ---------------------------------------------------------------------------
+# T7 — Quick Fix (2-agent fast path)
+# ---------------------------------------------------------------------------
+
+# Minimal two-agent flow graph: developer edits, git_agent commits/pushes.
+_T7_FLOW_GRAPH: Dict[str, Any] = {
+    "nodes": [
+        {"id": "user_request", "type": "user", "data": {"label": "Quick Edit", "description": "Trivial change request"}, "position": {"x": 300, "y": 0}},
+        {"id": "developer", "type": "agent", "data": {"label": "Developer", "model": "Sonnet 4.5", "mode": "read-write", "tools": ["Read","Write","Edit","MultiEdit","Bash","Glob","Grep"], "description": "Makes targeted change, verifies with quick test"}, "position": {"x": 200, "y": 150}},
+        {"id": "git_agent", "type": "agent", "data": {"label": "Git Agent", "model": "Sonnet 4.5", "mode": "git-ops", "tools": ["Bash(git)","Bash(gh)","Read"], "description": "Commits and pushes the change"}, "position": {"x": 400, "y": 150}},
+        {"id": "output", "type": "output", "data": {"label": "Committed & Pushed", "description": "Change committed and pushed"}, "position": {"x": 400, "y": 300}},
+    ],
+    "edges": [
+        {"id": "e-user-dev", "source": "user_request", "target": "developer", "animated": True},
+        {"id": "e-dev-git", "source": "developer", "target": "git_agent", "label": "changes ready", "animated": True},
+        {"id": "e-git-output", "source": "git_agent", "target": "output", "label": "pushed", "animated": True},
+    ],
+}
+
+# Fast-path topology for trivial edits: skips explorer/reviewer entirely.
+T7_QUICK_FIX = Topology(
+    id="quick_fix",
+    name="Quick Fix",
+    description="Minimal pipeline: edit > commit > done",
+    category=TopologyCategory.pipeline,
+    icon="\u26a1",  # lightning bolt
+    agents_used=["developer", "git_agent"],
+    execution_style=ExecutionStyle.crew_pipeline,
+    routing_policy=RoutingPolicy(
+        strategy=RoutingStrategy.fixed_sequence,
+        sequence=["developer", "git_agent"],
+        # Keywords matched by classify_message() for small, low-risk edits.
+        classifier_hints=[
+            "typo", "rename", "update readme", "config", "small change",
+            "one-liner", "documentation", "comment", "formatting",
+            "version bump", "update dependency", "quick",
+        ],
+    ),
+    flow_graph=_T7_FLOW_GRAPH,
+)
+
+# ---------------------------------------------------------------------------
+# T8 — Lite Mode (single-agent, optimized for small LLMs < 7B)
+# ---------------------------------------------------------------------------
+
+# Flow graph for Lite Mode. Unlike the pipeline graphs above, this one
+# includes non-agent node types ("router", "tool_group", "tool") because the
+# classification and validation steps are deterministic (regex / API calls),
+# not LLM calls — only `lite_agent` invokes a model.
+_T8_FLOW_GRAPH: Dict[str, Any] = {
+    "nodes": [
+        {
+            "id": "user_request",
+            "type": "user",
+            "data": {
+                "label": "User Request",
+                "description": "Incoming task or question",
+            },
+            "position": {"x": 300, "y": 0},
+        },
+        {
+            "id": "intent_classifier",
+            "type": "router",
+            "data": {
+                "label": "Intent Classifier",
+                "description": "Regex-based instant classification: QUESTION vs ACTION (no LLM call)",
+                "model": "regex",
+            },
+            "position": {"x": 300, "y": 100},
+        },
+        {
+            "id": "pre_fetch",
+            "type": "tool_group",
+            "data": {
+                "label": "Pre-Fetch Context",
+                "tools": ["GitHub API"],
+                "description": "Fetches file list, README content, and directory structure via API",
+            },
+            "position": {"x": 100, "y": 200},
+        },
+        {
+            "id": "lite_agent",
+            "type": "agent",
+            "data": {
+                "label": "GitPilot Lite",
+                "model": "Any (1.5B+)",
+                "mode": "read-write",
+                "tools": [],
+                "description": "Single LLM call with pre-injected context. Prompt adapts to intent type.",
+            },
+            "position": {"x": 300, "y": 200},
+        },
+        {
+            "id": "validator",
+            "type": "tool",
+            "data": {
+                "label": "File Validator",
+                "tools": ["regex"],
+                "description": "Validates MODIFY/DELETE targets exist in repo, strips hallucinated paths",
+            },
+            "position": {"x": 500, "y": 200},
+        },
+        {
+            "id": "output",
+            "type": "output",
+            "data": {
+                "label": "Result",
+                "description": "Answer (question) or validated plan (action)",
+            },
+            "position": {"x": 300, "y": 320},
+        },
+    ],
+    "edges": [
+        {"id": "e-user-classify", "source": "user_request", "target": "intent_classifier", "animated": True},
+        {"id": "e-classify-prefetch", "source": "intent_classifier", "target": "pre_fetch", "label": "always", "animated": True},
+        {"id": "e-prefetch-lite", "source": "pre_fetch", "target": "lite_agent", "label": "context", "animated": True},
+        {"id": "e-lite-validator", "source": "lite_agent", "target": "validator", "label": "action only", "animated": True},
+        # Question path is intentionally not animated: it bypasses validation.
+        {"id": "e-lite-output-q", "source": "lite_agent", "target": "output", "label": "question → answer"},
+        {"id": "e-validator-output", "source": "validator", "target": "output", "label": "validated plan", "animated": True},
+    ],
+}
+
+# System topology: always routes to the single lite_agent. Empty
+# classifier_hints means classify_message() never auto-recommends it —
+# it must be selected explicitly.
+T8_LITE_MODE = Topology(
+    id="lite_mode",
+    name="Lite Mode (Small LLMs)",
+    description="Smart intent detection + single agent + file validation — optimized for models under 7B",
+    category=TopologyCategory.system,
+    icon="\U0001f4a1",  # light bulb
+    agents_used=["lite_agent"],
+    execution_style=ExecutionStyle.single_task,
+    routing_policy=RoutingPolicy(
+        strategy=RoutingStrategy.always_main_agent,
+        primary_agent="lite_agent",
+        classifier_hints=[],
+    ),
+    flow_graph=_T8_FLOW_GRAPH,
+)
+
+
+# ===========================================================================
+# Registry singleton
+# ===========================================================================
+
+# Module-level registry of every built-in topology, keyed by Topology.id.
+# Built once at import time; lookup helpers below treat it as read-only.
+TOPOLOGY_REGISTRY: Dict[str, Topology] = {
+    t.id: t
+    for t in [
+        T1_DEFAULT,
+        T2_CLAUDE_CODE,
+        T3_FEATURE_BUILDER,
+        T4_BUG_HUNTER,
+        T5_CODE_INSPECTOR,
+        T6_ARCHITECT_MODE,
+        T7_QUICK_FIX,
+        T8_LITE_MODE,
+    ]
+}
+
+# ID used when no topology is specified or an unknown ID is requested.
+DEFAULT_TOPOLOGY_ID = "default"
+
+
+def list_topologies() -> List[Dict[str, Any]]:
+ """Return lightweight summaries of all registered topologies."""
+ result = []
+ for t in TOPOLOGY_REGISTRY.values():
+ meta = t.to_meta()
+ result.append({
+ "id": meta.id,
+ "name": meta.name,
+ "description": meta.description,
+ "category": meta.category.value,
+ "icon": meta.icon,
+ "agents_used": meta.agents_used,
+ "execution_style": meta.execution_style.value,
+ })
+ return result
+
+
+def get_topology(topology_id: str) -> Optional[Topology]:
+ """Look up a topology by ID. Returns None if not found."""
+ return TOPOLOGY_REGISTRY.get(topology_id)
+
+
+def get_topology_graph(topology_id: Optional[str] = None) -> Dict[str, Any]:
+    """Return the flow graph for a given topology.
+
+    If *topology_id* is ``None`` or unrecognised, falls back to ``"default"``.
+    The returned dict is the same shape as the legacy ``get_flow_definition()``
+    output so the frontend ``FlowViewer`` can consume it without changes.
+    """
+    tid = topology_id or DEFAULT_TOPOLOGY_ID
+    topo = TOPOLOGY_REGISTRY.get(tid)
+    if topo is None:
+        # Unknown ID — assumes "default" is always registered.
+        topo = TOPOLOGY_REGISTRY[DEFAULT_TOPOLOGY_ID]
+
+    # Build the response — include topology metadata alongside the graph
+    # so the frontend can display the name/description even without a
+    # separate metadata call.
+    # NOTE: .copy() is shallow — nested node/edge dicts are shared with the
+    # registry, so they must not be mutated by callers.
+    graph = topo.flow_graph.copy()
+
+    # For backward compat with the legacy FlowViewer which expects flat
+    # ``nodes`` with ``label``/``type``/``description`` keys, we normalise
+    # the new richer node format. New FlowViewer uses the ``data`` sub-key
+    # directly but old code reads top-level keys.
+    legacy_nodes = []
+    for n in graph.get("nodes", []):
+        ln = dict(n)  # shallow copy
+        # Hoist data.label / data.description to top level for legacy compat
+        d = n.get("data", {})
+        # setdefault: existing top-level keys (if any) win over hoisted ones.
+        ln.setdefault("label", d.get("label", n["id"]))
+        ln.setdefault("description", d.get("description", ""))
+        legacy_nodes.append(ln)
+
+    return {
+        "topology_id": topo.id,
+        "topology_name": topo.name,
+        "topology_icon": topo.icon,
+        "topology_description": topo.description,
+        "execution_style": topo.execution_style.value,
+        "nodes": legacy_nodes,
+        "edges": graph.get("edges", []),
+    }
+
+
+# ===========================================================================
+# Topology classifier — keyword-based auto-detection
+# ===========================================================================
+
+@dataclass
+class ClassificationResult:
+    """Result of classifying a user message against topology hints."""
+
+    # Topology ID judged the best match (or "default" when nothing matched).
+    recommended: str
+    # Match strength in [0, 1]; 0.5 is the fixed fallback confidence.
+    confidence: float
+    # Runner-up topologies as {"id": ..., "confidence": ...} dicts.
+    alternatives: List[Dict[str, Any]]
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Serialise to the JSON shape returned by the classification API."""
+        return {
+            "recommended_topology": self.recommended,
+            "confidence": round(self.confidence, 3),
+            "alternatives": self.alternatives,
+        }
+
+
+def classify_message(message: str) -> ClassificationResult:
+    """Classify a user message and recommend the best topology.
+
+    Uses a two-pass approach:
+    1. Keyword hit scoring against each topology's ``classifier_hints``.
+    2. Tie-breaking heuristics (message length, question marks, etc.).
+
+    System topologies (T1, T2) act as fallbacks — they are only recommended
+    when no pipeline topology scores above the confidence threshold.
+    """
+    msg_lower = message.lower().strip()
+    scores: List[Tuple[str, float]] = []
+
+    for tid, topo in TOPOLOGY_REGISTRY.items():
+        hints = topo.routing_policy.classifier_hints
+        if not hints:
+            continue  # system topologies have no hints
+
+        hit_count = 0
+        for hint in hints:
+            # Match whole-word (ish) — allow the hint to appear as a substring
+            # but prefer word boundaries. \W (or string start/end) on either
+            # side stops "fix" from matching inside "prefix".
+            pattern = r"(?:^|\W)" + re.escape(hint) + r"(?:\W|$)"
+            if re.search(pattern, msg_lower):
+                hit_count += 1
+
+        if hit_count > 0:
+            # Normalise: score = hits / total_hints, capped at 1.0
+            raw_score = min(hit_count / max(len(hints), 1), 1.0)
+            # Boost by number of distinct hits so more-specific matches win;
+            # boost factor tops out at 1.5x after five hits.
+            boosted = raw_score * (1 + 0.1 * min(hit_count, 5))
+            scores.append((tid, min(boosted, 1.0)))
+
+    # Sort descending by score
+    scores.sort(key=lambda x: x[1], reverse=True)
+
+    if not scores or scores[0][1] < 0.05:
+        # Nothing matched — fall back to default with fixed confidences.
+        return ClassificationResult(
+            recommended="default",
+            confidence=0.5,
+            alternatives=[
+                {"id": "gitpilot_code", "confidence": 0.45},
+            ],
+        )
+
+    best_id, best_score = scores[0]
+    # Keep at most three runner-up pipeline matches.
+    alternatives = [
+        {"id": tid, "confidence": round(sc, 3)}
+        for tid, sc in scores[1:4]
+    ]
+
+    # Always include the two system topologies as alternatives if not already present
+    present_ids = {best_id} | {a["id"] for a in alternatives}
+    for sys_id, sys_conf in [("default", 0.3), ("gitpilot_code", 0.35)]:
+        if sys_id not in present_ids:
+            alternatives.append({"id": sys_id, "confidence": sys_conf})
+
+    return ClassificationResult(
+        recommended=best_id,
+        confidence=round(best_score, 3),
+        alternatives=alternatives,
+    )
+
+
+# ===========================================================================
+# User preference persistence
+# ===========================================================================
+
+# JSON key under which the active-topology choice is stored on disk.
+_TOPOLOGY_PREF_KEY = "active_topology"
+
+
+def get_saved_topology_preference() -> Optional[str]:
+    """Read the user's saved topology preference from settings file.
+
+    Returns the stored topology ID only when it names a currently registered
+    topology; otherwise (missing file, corrupt JSON, stale ID) returns None.
+    """
+    import json
+    from .settings import CONFIG_DIR
+
+    pref_file = CONFIG_DIR / "topology_pref.json"
+    if pref_file.exists():
+        try:
+            data = json.loads(pref_file.read_text("utf-8"))
+            value = data.get(_TOPOLOGY_PREF_KEY)
+            if value in TOPOLOGY_REGISTRY:
+                return value
+        except Exception:
+            # Best-effort read: a corrupt preference file is treated as unset.
+            pass
+    return None
+
+def save_topology_preference(topology_id: str) -> None:
+ """Persist the user's selected topology preference."""
+ import json
+ from .settings import CONFIG_DIR
+ CONFIG_DIR.mkdir(parents=True, exist_ok=True)
+ pref_file = CONFIG_DIR / "topology_pref.json"
+ pref_file.write_text(
+ json.dumps({_TOPOLOGY_PREF_KEY: topology_id}, indent=2),
+ "utf-8",
+ )
diff --git a/gitpilot/use_case.py b/gitpilot/use_case.py
new file mode 100644
index 0000000000000000000000000000000000000000..036e9136bff476a56f0e67edfdf04d48403839ed
--- /dev/null
+++ b/gitpilot/use_case.py
@@ -0,0 +1,407 @@
+# gitpilot/use_case.py
+"""Use Case manager — guided requirement clarification and spec generation.
+
+Non-destructive, additive feature. Stores use cases under:
+ ~/.gitpilot/workspaces/{owner}/{repo}/.gitpilot/context/use_cases/
+
+Each use case is a JSON file with structured spec + message history.
+"""
+from __future__ import annotations
+
+import json
+import logging
+import time
+import uuid
+from dataclasses import asdict, dataclass, field
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+logger = logging.getLogger(__name__)
+
+
+# ---------------------------------------------------------------------------
+# Data classes
+# ---------------------------------------------------------------------------
+@dataclass
+class UseCaseSpec:
+    """Structured spec extracted from use-case conversation."""
+    title: str = ""      # spec title (mirrors the use case title on creation)
+    summary: str = ""    # one-line description of what is being built
+    problem: str = ""    # problem the use case solves
+    users: str = ""      # target users / personas
+    requirements: List[str] = field(default_factory=list)          # functional requirements
+    acceptance_criteria: List[str] = field(default_factory=list)   # verification bullets
+    constraints: List[str] = field(default_factory=list)           # technical/business limits
+    open_questions: List[str] = field(default_factory=list)        # unresolved items
+    notes: str = ""      # free-form notes (e.g. pasted meeting minutes)
+
+
+@dataclass
+class UseCaseMessage:
+    """Single chat turn stored in a use case's message history."""
+    role: str  # "user" or "assistant"
+    content: str  # message body (markdown allowed)
+    ts: str = ""  # UTC timestamp "%Y-%m-%dT%H:%M:%SZ"; empty when unknown
+
+
+@dataclass
+class UseCase:
+    """A use case: metadata, structured spec, and chat history."""
+    use_case_id: str          # 12-hex-char ID (uuid4 prefix)
+    title: str
+    created_at: str           # UTC timestamp set on creation
+    updated_at: str           # UTC timestamp of last mutation
+    is_active: bool = False   # at most one use case is active per workspace
+    is_finalized: bool = False
+    spec: UseCaseSpec = field(default_factory=UseCaseSpec)
+    messages: List[UseCaseMessage] = field(default_factory=list)
+
+    def to_dict(self) -> dict:
+        """Recursively serialise (including spec and messages) for JSON storage."""
+        d = asdict(self)
+        return d
+
+    def to_summary(self) -> dict:
+        """Metadata-only view for list endpoints (omits spec and messages)."""
+        return {
+            "use_case_id": self.use_case_id,
+            "title": self.title,
+            "created_at": self.created_at,
+            "updated_at": self.updated_at,
+            "is_active": self.is_active,
+            "is_finalized": self.is_finalized,
+        }
+
+
+# ---------------------------------------------------------------------------
+# Guided assistant prompts
+# ---------------------------------------------------------------------------
+# System prompt for the guided requirements conversation.
+# NOTE(review): not referenced by UseCaseManager in this file — presumably
+# consumed by an LLM-backed caller elsewhere; confirm before removing.
+GUIDED_SYSTEM_PROMPT = """\
+You are a requirements analyst helping clarify a software use case.
+Your job is to ask structured questions and extract a clear spec.
+
+After each user message, do TWO things:
+1. Respond conversationally (acknowledge, ask follow-up questions)
+2. Update the structured spec with any new information
+
+Focus on extracting:
+- Summary: what is being built
+- Problem: what problem it solves
+- Users/Personas: who will use it
+- Requirements: functional requirements (bullet list)
+- Acceptance Criteria: how to verify it works (bullet list)
+- Constraints: technical or business constraints
+- Open Questions: anything still unclear
+
+Be concise but thorough. Ask one or two questions at a time.
+"""
+
+# First assistant message seeded into every newly created use case.
+INITIAL_ASSISTANT_MESSAGE = (
+    "Welcome! Let's define this use case together.\n\n"
+    "To get started, could you describe:\n"
+    "1. **What** you want to build (high-level summary)\n"
+    "2. **Who** will use it (target users)\n"
+    "3. **Why** it's needed (the problem it solves)\n\n"
+    "You can paste meeting notes, transcripts, or just describe it in your own words."
+)
+
+
+# ---------------------------------------------------------------------------
+# Use Case Manager
+# ---------------------------------------------------------------------------
+class UseCaseManager:
+ """Manages use cases stored as JSON files."""
+
+ def __init__(self, workspace_path: Path):
+ self.workspace_path = workspace_path
+ self.use_cases_dir = workspace_path / ".gitpilot" / "context" / "use_cases"
+
+ def _ensure_dir(self):
+ self.use_cases_dir.mkdir(parents=True, exist_ok=True)
+
+ def _uc_path(self, use_case_id: str) -> Path:
+ return self.use_cases_dir / f"{use_case_id}.json"
+
+ def _now(self) -> str:
+ return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
+
+ # ------------------------------------------------------------------
+ # CRUD
+ # ------------------------------------------------------------------
+ def list_use_cases(self) -> List[dict]:
+ """Return summary list of all use cases."""
+ self._ensure_dir()
+ results = []
+ for f in sorted(self.use_cases_dir.glob("*.json")):
+ try:
+ data = json.loads(f.read_text(encoding="utf-8"))
+ results.append({
+ "use_case_id": data.get("use_case_id", f.stem),
+ "title": data.get("title", "(untitled)"),
+ "created_at": data.get("created_at", ""),
+ "updated_at": data.get("updated_at", ""),
+ "is_active": data.get("is_active", False),
+ "is_finalized": data.get("is_finalized", False),
+ })
+ except Exception:
+ logger.warning("Skipping corrupt use case: %s", f)
+ return results
+
+ def create_use_case(self, title: str = "New Use Case", initial_notes: str = "") -> UseCase:
+ """Create a new use case with initial assistant message."""
+ self._ensure_dir()
+ uc_id = uuid.uuid4().hex[:12]
+ now = self._now()
+
+ uc = UseCase(
+ use_case_id=uc_id,
+ title=title,
+ created_at=now,
+ updated_at=now,
+ spec=UseCaseSpec(title=title, notes=initial_notes),
+ messages=[
+ UseCaseMessage(role="assistant", content=INITIAL_ASSISTANT_MESSAGE, ts=now),
+ ],
+ )
+ self._save(uc)
+ return uc
+
+ def get_use_case(self, use_case_id: str) -> Optional[UseCase]:
+ """Load a use case by ID."""
+ path = self._uc_path(use_case_id)
+ if not path.exists():
+ return None
+ try:
+ data = json.loads(path.read_text(encoding="utf-8"))
+ return self._from_dict(data)
+ except Exception as e:
+ logger.warning("Failed to load use case %s: %s", use_case_id, e)
+ return None
+
+ def chat(self, use_case_id: str, user_message: str) -> Optional[UseCase]:
+ """Process a user message and return updated use case with assistant response.
+
+ This is the guided chat: we parse the user message, update the spec,
+ and generate an assistant response with follow-up questions.
+ """
+ uc = self.get_use_case(use_case_id)
+ if not uc:
+ return None
+
+ now = self._now()
+
+ # Add user message
+ uc.messages.append(UseCaseMessage(role="user", content=user_message, ts=now))
+
+ # Parse and update spec from user message
+ self._update_spec_from_message(uc.spec, user_message)
+
+ # Generate assistant response
+ response = self._generate_response(uc)
+ uc.messages.append(UseCaseMessage(role="assistant", content=response, ts=now))
+
+ uc.updated_at = now
+ self._save(uc)
+ return uc
+
+ def finalize(self, use_case_id: str) -> Optional[UseCase]:
+ """Mark a use case as finalized and active, export markdown."""
+ uc = self.get_use_case(use_case_id)
+ if not uc:
+ return None
+
+ now = self._now()
+
+ # Deactivate all others
+ for f in self.use_cases_dir.glob("*.json"):
+ try:
+ data = json.loads(f.read_text(encoding="utf-8"))
+ if data.get("is_active"):
+ data["is_active"] = False
+ f.write_text(json.dumps(data, indent=2), encoding="utf-8")
+ except Exception:
+ pass
+
+ # Mark this one as active + finalized
+ uc.is_active = True
+ uc.is_finalized = True
+ uc.updated_at = now
+ self._save(uc)
+
+ # Export markdown
+ self._export_markdown(uc)
+
+ return uc
+
+ def get_active_use_case(self) -> Optional[UseCase]:
+ """Return the currently active use case, if any."""
+ self._ensure_dir()
+ for f in self.use_cases_dir.glob("*.json"):
+ try:
+ data = json.loads(f.read_text(encoding="utf-8"))
+ if data.get("is_active"):
+ return self._from_dict(data)
+ except Exception:
+ continue
+ return None
+
+ # ------------------------------------------------------------------
+ # Internal helpers
+ # ------------------------------------------------------------------
+ def _save(self, uc: UseCase):
+ path = self._uc_path(uc.use_case_id)
+ path.write_text(json.dumps(uc.to_dict(), indent=2), encoding="utf-8")
+
+ def _from_dict(self, data: dict) -> UseCase:
+ spec_data = data.get("spec", {})
+ spec = UseCaseSpec(
+ title=spec_data.get("title", ""),
+ summary=spec_data.get("summary", ""),
+ problem=spec_data.get("problem", ""),
+ users=spec_data.get("users", ""),
+ requirements=spec_data.get("requirements", []),
+ acceptance_criteria=spec_data.get("acceptance_criteria", []),
+ constraints=spec_data.get("constraints", []),
+ open_questions=spec_data.get("open_questions", []),
+ notes=spec_data.get("notes", ""),
+ )
+ messages = [
+ UseCaseMessage(
+ role=m.get("role", "user"),
+ content=m.get("content", ""),
+ ts=m.get("ts", ""),
+ )
+ for m in data.get("messages", [])
+ ]
+ return UseCase(
+ use_case_id=data.get("use_case_id", ""),
+ title=data.get("title", ""),
+ created_at=data.get("created_at", ""),
+ updated_at=data.get("updated_at", ""),
+ is_active=data.get("is_active", False),
+ is_finalized=data.get("is_finalized", False),
+ spec=spec,
+ messages=messages,
+ )
+
+ def _update_spec_from_message(self, spec: UseCaseSpec, message: str):
+ """Parse user message and update spec fields heuristically."""
+ msg_lower = message.lower()
+ lines = [l.strip() for l in message.split("\n") if l.strip()]
+
+ for line in lines:
+ ll = line.lower()
+
+ # Detect labeled sections
+ if ll.startswith("summary:"):
+ spec.summary = line.split(":", 1)[1].strip()
+ elif ll.startswith("problem:"):
+ spec.problem = line.split(":", 1)[1].strip()
+ elif ll.startswith("users:") or ll.startswith("personas:"):
+ spec.users = line.split(":", 1)[1].strip()
+ elif ll.startswith("notes:"):
+ spec.notes = line.split(":", 1)[1].strip()
+ elif ll.startswith("constraint:") or ll.startswith("constraints:"):
+ val = line.split(":", 1)[1].strip()
+ if val and val not in spec.constraints:
+ spec.constraints.append(val)
+ elif ll.startswith("- ") or ll.startswith("* "):
+ # Bullet items — classify by context
+ item = line[2:].strip()
+ if not item:
+ continue
+ # If it looks like acceptance criteria
+ if any(kw in item.lower() for kw in ["should", "must", "verify", "test", "given", "when", "then"]):
+ if item not in spec.acceptance_criteria:
+ spec.acceptance_criteria.append(item)
+ else:
+ if item not in spec.requirements:
+ spec.requirements.append(item)
+
+ # If no summary yet and message is substantial, use first sentence
+ if not spec.summary and len(message) > 20:
+ first_sentence = message.split(".")[0].strip()
+ if len(first_sentence) > 10:
+ spec.summary = first_sentence[:200]
+
+ def _generate_response(self, uc: UseCase) -> str:
+ """Generate a guided assistant response based on current spec state."""
+ spec = uc.spec
+ missing = []
+
+ if not spec.summary:
+ missing.append("a **summary** of what you're building")
+ if not spec.problem:
+ missing.append("the **problem** this solves")
+ if not spec.users:
+ missing.append("the **target users/personas**")
+ if not spec.requirements:
+ missing.append("**functional requirements** (as bullet points)")
+ if not spec.acceptance_criteria:
+ missing.append("**acceptance criteria** (how to verify it works)")
+
+ if missing:
+ items = "\n".join(f"- {m}" for m in missing[:3])
+ return (
+ f"Thanks for the details! I've updated the spec preview.\n\n"
+ f"To make the spec more complete, could you provide:\n{items}\n\n"
+ f"You can paste structured info or just describe it naturally."
+ )
+
+ # Spec is reasonably complete
+ if spec.open_questions:
+ q_list = "\n".join(f"- {q}" for q in spec.open_questions[:3])
+ return (
+ f"The spec is taking shape nicely. There are some open questions:\n{q_list}\n\n"
+ f"Would you like to address these, or shall we **Finalize** the use case?"
+ )
+
+ return (
+ "The spec looks fairly complete! Here's what we have:\n\n"
+ f"**Summary:** {spec.summary}\n"
+ f"**Requirements:** {len(spec.requirements)} items\n"
+ f"**Acceptance Criteria:** {len(spec.acceptance_criteria)} items\n\n"
+ "You can add more details or click **Finalize** to save this as the active use case."
+ )
+
+ def _export_markdown(self, uc: UseCase):
+ """Export use case as a markdown file."""
+ spec = uc.spec
+ lines = [
+ f"# Use Case: {spec.title or uc.title}",
+ "",
+ f"**ID:** {uc.use_case_id}",
+ f"**Created:** {uc.created_at}",
+ f"**Finalized:** {uc.updated_at}",
+ f"**Status:** {'Active' if uc.is_active else 'Inactive'}",
+ "",
+ ]
+
+ if spec.summary:
+ lines.extend(["## Summary", "", spec.summary, ""])
+ if spec.problem:
+ lines.extend(["## Problem", "", spec.problem, ""])
+ if spec.users:
+ lines.extend(["## Users / Personas", "", spec.users, ""])
+ if spec.requirements:
+ lines.extend(["## Requirements", ""])
+ for r in spec.requirements:
+ lines.append(f"- {r}")
+ lines.append("")
+ if spec.acceptance_criteria:
+ lines.extend(["## Acceptance Criteria", ""])
+ for ac in spec.acceptance_criteria:
+ lines.append(f"- {ac}")
+ lines.append("")
+ if spec.constraints:
+ lines.extend(["## Constraints", ""])
+ for c in spec.constraints:
+ lines.append(f"- {c}")
+ lines.append("")
+ if spec.open_questions:
+ lines.extend(["## Open Questions", ""])
+ for q in spec.open_questions:
+ lines.append(f"- {q}")
+ lines.append("")
+ if spec.notes:
+ lines.extend(["## Notes", "", spec.notes, ""])
+
+ md_path = self.use_cases_dir / f"{uc.use_case_id}.md"
+ md_path.write_text("\n".join(lines), encoding="utf-8")
+ logger.info("Exported use case markdown: %s", md_path)
diff --git a/gitpilot/version.py b/gitpilot/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..438d5af086c6deefccfe7f02dba9369de426166a
--- /dev/null
+++ b/gitpilot/version.py
@@ -0,0 +1,8 @@
+from importlib.metadata import PackageNotFoundError, version
+
+# Distribution name used for the installed-package version lookup.
+# NOTE(review): this is "gitcopilot" while the import package is "gitpilot" —
+# confirm the project really is published under this distribution name.
+PACKAGE_NAME = "gitcopilot"
+
+try:
+    # Resolve the installed distribution's version at import time.
+    __version__ = version(PACKAGE_NAME)
+except PackageNotFoundError:
+    # Editable/source checkout without installed metadata.
+    __version__ = "0.0.0+local"
diff --git a/gitpilot/vision.py b/gitpilot/vision.py
new file mode 100644
index 0000000000000000000000000000000000000000..e1ca2d1d66519e509566c7104469a9d04c0b3a4b
--- /dev/null
+++ b/gitpilot/vision.py
@@ -0,0 +1,298 @@
+# gitpilot/vision.py
+"""Vision & image analysis for GitPilot.
+
+Uses multimodal LLM capabilities to analyse screenshots, architecture
+diagrams, error images, and design mockups. Supports multiple providers:
+
+- **OpenAI** (GPT-4o, GPT-4o-mini) — via base64 image in messages
+- **Anthropic** (Claude) — via base64 image in messages
+- **Ollama** (LLaVA, etc.) — local multimodal models
+
+The module reads images, encodes them, and sends them alongside a
+text prompt to the configured LLM provider.
+"""
from __future__ import annotations

import base64
import logging
import mimetypes
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, List, Optional

import httpx
+
+logger = logging.getLogger(__name__)
+
# Hard cap on raw file size, checked by _encode_image() before encoding.
MAX_IMAGE_SIZE_BYTES = 20 * 1024 * 1024  # 20 MB
# Extensions accepted by _encode_image().  NOTE(review): .svg is listed, but
# mimetypes will report image/svg+xml — confirm the target vision APIs accept it.
SUPPORTED_FORMATS = {".png", ".jpg", ".jpeg", ".gif", ".webp", ".bmp", ".svg"}
+
+
@dataclass
class ImageAnalysisResult:
    """Result of an image analysis.

    Attributes:
        description: Free-text analysis produced by the model.
        confidence: Coarse self-reported confidence: "high" | "medium" | "low".
        metadata: Provider details (e.g. model name, source image path).
    """

    description: str
    confidence: str = "high"  # high | medium | low
    # default_factory gives each instance its own dict.  The previous
    # `Dict[str, Any] = None` default contradicted the annotation and leaned
    # on __post_init__ to repair it.
    metadata: Dict[str, Any] = field(default_factory=dict)

    def __post_init__(self):
        # Backward compatibility: callers may still pass metadata=None explicitly.
        if self.metadata is None:
            self.metadata = {}

    def to_dict(self) -> Dict[str, Any]:
        """Serialise to a plain dict (e.g. for JSON responses)."""
        return {
            "description": self.description,
            "confidence": self.confidence,
            "metadata": self.metadata,
        }
+
+
def _encode_image(image_path: Path) -> tuple:
    """Validate *image_path*, then return ``(base64_payload, mime_type)``.

    Raises FileNotFoundError for a missing file and ValueError for an
    unsupported extension or an oversized file.
    """
    if not image_path.exists():
        raise FileNotFoundError(f"Image not found: {image_path}")

    extension = image_path.suffix.lower()
    if extension not in SUPPORTED_FORMATS:
        raise ValueError(f"Unsupported image format: {extension}")

    byte_count = image_path.stat().st_size
    if byte_count > MAX_IMAGE_SIZE_BYTES:
        raise ValueError(f"Image too large: {byte_count} bytes (max {MAX_IMAGE_SIZE_BYTES})")

    # guess_type keys off the extension alone; default to PNG when unknown.
    guessed_mime = mimetypes.guess_type(str(image_path))[0] or "image/png"
    encoded = base64.b64encode(image_path.read_bytes()).decode("utf-8")
    return encoded, guessed_mime
+
+
class VisionAnalyzer:
    """Analyze images using multimodal LLM capabilities.

    Usage::

        analyzer = VisionAnalyzer(provider="openai", api_key="sk-...")
        result = await analyzer.analyze_image(
            Path("screenshot.png"),
            prompt="Describe this UI and identify any bugs",
        )
    """

    def __init__(
        self,
        provider: str = "openai",
        api_key: Optional[str] = None,
        model: Optional[str] = None,
        base_url: Optional[str] = None,
    ) -> None:
        """Configure the analyzer.

        Args:
            provider: "openai", "claude"/"anthropic", or "ollama" (case-insensitive).
            api_key: Provider API key (unused by local Ollama).
            model: Model name; falls back to a per-provider default.
            base_url: Optional override for the provider's API base URL.
        """
        self.provider = provider.lower()
        self.api_key = api_key
        self.model = model or self._default_model()
        self.base_url = base_url

    def _default_model(self) -> str:
        """Return the default multimodal model for the configured provider."""
        # BUGFIX: analyze_image() treats "anthropic" as an alias of "claude",
        # but this map previously lacked the "anthropic" key, so
        # provider="anthropic" silently defaulted to "gpt-4o" — a model the
        # Anthropic API rejects.
        defaults = {
            "openai": "gpt-4o",
            "claude": "claude-sonnet-4-5-20250929",
            "anthropic": "claude-sonnet-4-5-20250929",
            "ollama": "llava",
        }
        return defaults.get(self.provider, "gpt-4o")

    async def analyze_image(
        self,
        image_path: Path,
        prompt: str = "Describe this image in detail.",
    ) -> ImageAnalysisResult:
        """Analyze a single image with a text prompt.

        Raises:
            FileNotFoundError: If *image_path* does not exist.
            ValueError: For an unsupported image format or provider.
        """
        b64, mime = _encode_image(image_path)

        if self.provider == "openai":
            text = await self._call_openai(b64, mime, prompt)
        elif self.provider in ("claude", "anthropic"):
            text = await self._call_anthropic(b64, mime, prompt)
        elif self.provider == "ollama":
            text = await self._call_ollama(b64, prompt)
        else:
            raise ValueError(f"Unsupported vision provider: {self.provider}")

        return ImageAnalysisResult(
            description=text,
            metadata={"model": self.model, "image": str(image_path)},
        )

    async def compare_screenshots(
        self,
        before: Path,
        after: Path,
        prompt: str = "Compare these two screenshots and describe the differences.",
    ) -> ImageAnalysisResult:
        """Compare two screenshots and describe differences.

        OpenAI and Anthropic receive both images in one request; any other
        provider falls back to two single-image analyses whose descriptions
        are concatenated.
        """
        b64_before, mime_before = _encode_image(before)
        b64_after, mime_after = _encode_image(after)

        combined_prompt = (
            f"{prompt}\n\n"
            "The first image is the 'before' state and the second is the 'after' state."
        )

        if self.provider == "openai":
            text = await self._call_openai_multi(
                [(b64_before, mime_before), (b64_after, mime_after)],
                combined_prompt,
            )
        elif self.provider in ("claude", "anthropic"):
            text = await self._call_anthropic_multi(
                [(b64_before, mime_before), (b64_after, mime_after)],
                combined_prompt,
            )
        else:
            # Fallback: analyze each separately
            r1 = await self.analyze_image(before, "Describe this screenshot.")
            r2 = await self.analyze_image(after, "Describe this screenshot.")
            text = f"Before: {r1.description}\n\nAfter: {r2.description}"

        return ImageAnalysisResult(
            description=text,
            metadata={
                "model": self.model,
                "before": str(before),
                "after": str(after),
            },
        )

    async def extract_text(self, image_path: Path) -> str:
        """Extract text from an image (OCR via multimodal LLM)."""
        result = await self.analyze_image(
            image_path,
            prompt=(
                "Extract ALL text visible in this image. "
                "Return only the extracted text, preserving layout where possible. "
                "If there are code snippets, preserve formatting and indentation."
            ),
        )
        return result.description

    # ------------------------------------------------------------------
    # Provider implementations
    #
    # The single-image entry points delegate to the multi-image builders so
    # each provider's request payload is assembled in exactly one place.
    # ------------------------------------------------------------------

    async def _call_openai(self, b64: str, mime: str, prompt: str) -> str:
        """Send one image + prompt to the OpenAI chat-completions API."""
        return await self._call_openai_multi([(b64, mime)], prompt)

    async def _call_openai_multi(
        self, images: List[tuple], prompt: str,
    ) -> str:
        """Send *prompt* plus any number of base64 data-URL images to OpenAI."""
        url = self.base_url or "https://api.openai.com/v1"
        content: List[Dict[str, Any]] = [{"type": "text", "text": prompt}]
        for b64, mime in images:
            content.append({
                "type": "image_url",
                "image_url": {"url": f"data:{mime};base64,{b64}"},
            })
        async with httpx.AsyncClient(timeout=60) as client:
            resp = await client.post(
                f"{url}/chat/completions",
                headers={"Authorization": f"Bearer {self.api_key}"},
                json={
                    "model": self.model,
                    "messages": [{"role": "user", "content": content}],
                    "max_tokens": 4096,
                },
            )
            resp.raise_for_status()
            return resp.json()["choices"][0]["message"]["content"]

    async def _call_anthropic(self, b64: str, mime: str, prompt: str) -> str:
        """Send one image + prompt to the Anthropic Messages API."""
        return await self._call_anthropic_multi([(b64, mime)], prompt)

    async def _call_anthropic_multi(
        self, images: List[tuple], prompt: str,
    ) -> str:
        """Send *prompt* plus any number of base64 images to Anthropic."""
        url = self.base_url or "https://api.anthropic.com/v1"
        # Anthropic expects image blocks before the text block.
        content: List[Dict[str, Any]] = []
        for b64, mime in images:
            content.append({
                "type": "image",
                "source": {"type": "base64", "media_type": mime or "image/png", "data": b64},
            })
        content.append({"type": "text", "text": prompt})
        async with httpx.AsyncClient(timeout=60) as client:
            resp = await client.post(
                f"{url}/messages",
                headers={
                    "x-api-key": self.api_key,
                    "anthropic-version": "2023-06-01",
                    "content-type": "application/json",
                },
                json={
                    "model": self.model,
                    "max_tokens": 4096,
                    "messages": [{"role": "user", "content": content}],
                },
            )
            resp.raise_for_status()
            data = resp.json()
            return data["content"][0]["text"]

    async def _call_ollama(self, b64: str, prompt: str) -> str:
        """Send one image + prompt to a local Ollama server.

        Ollama's /api/generate takes raw base64 in "images" (no MIME type).
        Longer timeout: local models may be slow on first load.
        """
        url = self.base_url or "http://localhost:11434"
        async with httpx.AsyncClient(timeout=120) as client:
            resp = await client.post(
                f"{url}/api/generate",
                json={
                    "model": self.model,
                    "prompt": prompt,
                    "images": [b64],
                    "stream": False,
                },
            )
            resp.raise_for_status()
            return resp.json().get("response", "")
diff --git a/gitpilot/web/index.html b/gitpilot/web/index.html
new file mode 100644
index 0000000000000000000000000000000000000000..3de2667ee02871f5dcc75133d2bc6cc192ad7028
--- /dev/null
+++ b/gitpilot/web/index.html
@@ -0,0 +1,13 @@
+
+
+
+
+ GitPilot
+
+
+
+
+
+
+
+
diff --git a/gitpilot/workspace.py b/gitpilot/workspace.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc49a2779b93aeeced224991e787699b7ad2e25b
--- /dev/null
+++ b/gitpilot/workspace.py
@@ -0,0 +1,387 @@
+# gitpilot/workspace.py
+"""Local workspace manager — clone, sync, and operate on repositories locally.
+
+Manages a workspace directory (~/.gitpilot/workspaces/{owner}/{repo}) where
+repositories are cloned and kept in sync. All local file operations go through
+this module to ensure path-traversal safety and consistency.
+"""
+from __future__ import annotations
+
+import asyncio
+import logging
+import os
+import shutil
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Any
+
+from gitpilot.models import WorkspaceSummary
+
+logger = logging.getLogger(__name__)
+
+WORKSPACE_ROOT = Path.home() / ".gitpilot" / "workspaces"
+
+
@dataclass
class WorkspaceInfo:
    """Metadata about an active workspace."""

    # GitHub owner / repository this clone mirrors.
    owner: str
    repo: str
    # Absolute path of the local clone under the workspace root.
    path: Path
    # Currently checked-out branch (mutated by WorkspaceManager.create_branch()).
    branch: str
    # NOTE(review): ensure_workspace() embeds the access token in this URL —
    # avoid logging or serialising it.
    remote_url: str
    # True when the working tree has uncommitted changes — TODO confirm: no
    # code in this module ever sets it.
    is_dirty: bool = False
    # Timestamp of the last successful sync, when tracked; None otherwise.
    last_sync: str | None = None
+
+
class WorkspaceManager:
    """Manages local git clones for repository operations.

    Responsibilities:
    - Clone repositories on first access (shallow for speed)
    - Checkout and track branches
    - Provide safe file read / write / delete / search
    - Sync with remote (pull / push)
    - Create feature branches, commit, and push
    """

    def __init__(self, root: Path | None = None):
        """Create the manager; *root* defaults to ``~/.gitpilot/workspaces``."""
        self.root = root or WORKSPACE_ROOT
        self.root.mkdir(parents=True, exist_ok=True)
        # Active workspaces keyed by "owner/repo".
        self._active: dict[str, WorkspaceInfo] = {}

    def workspace_path(self, owner: str, repo: str) -> Path:
        """Return the local clone directory for *owner*/*repo*."""
        return self.root / owner / repo

    # ------------------------------------------------------------------
    # Workspace lifecycle
    # ------------------------------------------------------------------

    async def ensure_workspace(
        self,
        owner: str,
        repo: str,
        token: str,
        branch: str | None = None,
    ) -> WorkspaceInfo:
        """Clone if absent, fetch if present, checkout *branch*.

        Args:
            owner: Repository owner.
            repo: Repository name.
            token: GitHub access token (embedded in the remote URL — keep the
                returned WorkspaceInfo out of logs).
            branch: Branch to checkout; defaults to the remote's HEAD branch.
        """
        ws_path = self.workspace_path(owner, repo)
        remote_url = f"https://x-access-token:{token}@github.com/{owner}/{repo}.git"

        if not (ws_path / ".git").exists():
            # First access: shallow clone for speed.
            ws_path.mkdir(parents=True, exist_ok=True)
            await self._run_git(
                ["git", "clone", "--depth=1", remote_url, str(ws_path)],
                cwd=ws_path.parent,
            )
        else:
            # Existing clone: refresh remote refs without touching the tree.
            await self._run_git(
                ["git", "fetch", "origin", "--prune"],
                cwd=ws_path,
                env={"GIT_TERMINAL_PROMPT": "0"},
            )

        target_branch = branch or await self._default_branch(ws_path)
        await self._checkout(ws_path, target_branch)

        info = WorkspaceInfo(
            owner=owner,
            repo=repo,
            path=ws_path,
            branch=target_branch,
            remote_url=remote_url,
        )
        self._active[f"{owner}/{repo}"] = info
        return info

    async def cleanup(self, owner: str, repo: str) -> bool:
        """Delete the local clone; return True if anything was removed."""
        ws_path = self.workspace_path(owner, repo)
        if ws_path.exists():
            shutil.rmtree(ws_path)
            self._active.pop(f"{owner}/{repo}", None)
            return True
        return False

    # ------------------------------------------------------------------
    # File operations
    # ------------------------------------------------------------------

    def _safe_resolve(self, ws: WorkspaceInfo, file_path: str) -> Path:
        """Resolve *file_path* inside the workspace, blocking path traversal.

        Uses Path.is_relative_to() rather than a string-prefix check: the
        previous ``str(full).startswith(str(root))`` test wrongly accepted
        sibling directories such as ``.../repo2`` for a workspace at
        ``.../repo``.
        """
        root = ws.path.resolve()
        full = (root / file_path).resolve()
        if not full.is_relative_to(root):
            raise PermissionError(f"Path traversal blocked: {file_path}")
        return full

    async def read_file(self, ws: WorkspaceInfo, file_path: str) -> str:
        """Read a workspace file as UTF-8 (undecodable bytes replaced)."""
        full = self._safe_resolve(ws, file_path)
        return full.read_text(encoding="utf-8", errors="replace")

    async def write_file(
        self, ws: WorkspaceInfo, file_path: str, content: str
    ) -> dict[str, Any]:
        """Write *content* (creating parent directories); return path + size."""
        full = self._safe_resolve(ws, file_path)
        full.parent.mkdir(parents=True, exist_ok=True)
        full.write_text(content, encoding="utf-8")
        return {"path": file_path, "size": len(content)}

    async def delete_file(self, ws: WorkspaceInfo, file_path: str) -> bool:
        """Delete a workspace file; return True if it existed."""
        full = self._safe_resolve(ws, file_path)
        if full.exists():
            full.unlink()
            return True
        return False

    async def list_files(
        self, ws: WorkspaceInfo, directory: str = "."
    ) -> list[str]:
        """List tracked + untracked (non-ignored) files under *directory*."""
        result = await self._run_git(
            ["git", "ls-files", "--cached", "--others",
             "--exclude-standard", directory],
            cwd=ws.path,
        )
        return [f for f in result.stdout.strip().split("\n") if f]

    async def search_files(
        self, ws: WorkspaceInfo, pattern: str, path: str = "."
    ) -> list[dict[str, Any]]:
        """Search tracked text files with ``git grep``.

        Best-effort: returns [] on any failure (including "no matches",
        where git grep exits non-zero).
        """
        try:
            result = await self._run_git(
                ["git", "grep", "-n", "--no-color", "-I", pattern, "--", path],
                cwd=ws.path, check=False,
            )
            matches = []
            # git grep -n emits "file:line:content".
            for line in result.stdout.strip().split("\n"):
                if ":" in line and line:
                    parts = line.split(":", 2)
                    if len(parts) >= 3:
                        matches.append({
                            "file": parts[0],
                            "line": int(parts[1]) if parts[1].isdigit() else 0,
                            "content": parts[2],
                        })
            return matches
        except Exception:
            return []

    # ------------------------------------------------------------------
    # Git operations
    # ------------------------------------------------------------------

    async def create_branch(
        self, ws: WorkspaceInfo, branch_name: str
    ) -> str:
        """Create and checkout *branch_name*; updates ``ws.branch``."""
        await self._run_git(
            ["git", "checkout", "-b", branch_name], cwd=ws.path,
        )
        ws.branch = branch_name
        return branch_name

    async def commit(
        self, ws: WorkspaceInfo, message: str, files: list[str] | None = None,
    ) -> dict[str, str]:
        """Stage *files* (or everything) and commit; return the new SHA.

        Raises RuntimeError if the commit fails (e.g. nothing to commit).
        """
        if files:
            await self._run_git(["git", "add", "--"] + files, cwd=ws.path)
        else:
            await self._run_git(["git", "add", "-A"], cwd=ws.path)

        await self._run_git(["git", "commit", "-m", message], cwd=ws.path)
        sha_result = await self._run_git(
            ["git", "rev-parse", "HEAD"], cwd=ws.path,
        )
        return {"sha": sha_result.stdout.strip(), "message": message}

    async def push(
        self, ws: WorkspaceInfo, force: bool = False,
    ) -> dict[str, str]:
        """Push the current branch; *force* uses --force-with-lease (safer
        than --force: refuses to clobber unseen remote commits)."""
        cmd = ["git", "push", "-u", "origin", ws.branch]
        if force:
            cmd.insert(2, "--force-with-lease")
        await self._run_git(cmd, cwd=ws.path)
        return {"branch": ws.branch, "status": "pushed"}

    async def diff(self, ws: WorkspaceInfo, staged: bool = False) -> str:
        """Return the working-tree diff (or the staged diff)."""
        cmd = ["git", "diff"]
        if staged:
            cmd.append("--staged")
        result = await self._run_git(cmd, cwd=ws.path)
        return result.stdout

    async def status(self, ws: WorkspaceInfo) -> dict[str, Any]:
        """Return a parsed ``git status --porcelain=v2`` summary."""
        result = await self._run_git(
            ["git", "status", "--porcelain=v2", "--branch"], cwd=ws.path,
        )
        return self._parse_status(result.stdout)

    async def log(
        self, ws: WorkspaceInfo, count: int = 10,
    ) -> list[dict[str, str]]:
        """Return the last *count* commits as dicts.

        NOTE(review): a "|" inside a commit subject would shift the date
        field, since the subject precedes the date in the format string.
        """
        result = await self._run_git(
            ["git", "log", f"-{count}", "--format=%H|%an|%ae|%s|%aI"],
            cwd=ws.path,
        )
        commits: list[dict[str, str]] = []
        for line in result.stdout.strip().split("\n"):
            if "|" in line:
                parts = line.split("|", 4)
                commits.append({
                    "sha": parts[0],
                    "author": parts[1],
                    "email": parts[2],
                    "message": parts[3],
                    "date": parts[4] if len(parts) > 4 else "",
                })
        return commits

    async def stash(self, ws: WorkspaceInfo, pop: bool = False) -> str:
        """Stash the working tree (``push``) or restore it (``pop``)."""
        cmd = ["git", "stash", "pop" if pop else "push"]
        result = await self._run_git(cmd, cwd=ws.path)
        return result.stdout.strip()

    async def merge(
        self, ws: WorkspaceInfo, branch: str,
    ) -> dict[str, Any]:
        """Merge *branch* into the current branch; never raises on conflict."""
        result = await self._run_git(
            ["git", "merge", branch], cwd=ws.path, check=False,
        )
        return {
            "success": result.returncode == 0,
            "output": result.stdout,
            # Non-zero exit is treated as a conflict indicator.
            "conflicts": result.returncode != 0,
        }

    # ------------------------------------------------------------------
    # Internal helpers
    # ------------------------------------------------------------------

    async def _run_git(self, cmd, cwd=None, env=None, check=True):
        """Run a git command asynchronously.

        Returns an object with ``stdout`` / ``stderr`` (str) and
        ``returncode``.  When *check* is true, raises RuntimeError on a
        non-zero exit, including stderr in the message.
        """
        # Inherit the caller's environment so git finds config/credentials.
        full_env = {**os.environ, **(env or {})}
        proc = await asyncio.create_subprocess_exec(
            *cmd,
            cwd=cwd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            env=full_env,
        )
        stdout, stderr = await proc.communicate()

        class _Result:
            pass

        r = _Result()
        r.stdout = stdout.decode("utf-8", errors="replace")
        r.stderr = stderr.decode("utf-8", errors="replace")
        r.returncode = proc.returncode
        if check and proc.returncode != 0:
            raise RuntimeError(
                f"Git command failed ({proc.returncode}): {' '.join(cmd)}\n{r.stderr}"
            )
        return r

    async def _default_branch(self, ws_path: Path) -> str:
        """Return the remote HEAD's branch name, falling back to 'main'."""
        result = await self._run_git(
            ["git", "symbolic-ref", "refs/remotes/origin/HEAD"],
            cwd=ws_path, check=False,
        )
        if result.returncode == 0:
            # e.g. "refs/remotes/origin/main" -> "main"
            return result.stdout.strip().split("/")[-1]
        return "main"

    async def _checkout(self, ws_path: Path, branch: str):
        """Checkout *branch*, creating a tracking branch from origin if needed."""
        result = await self._run_git(
            ["git", "checkout", branch], cwd=ws_path, check=False,
        )
        if result.returncode != 0:
            await self._run_git(
                ["git", "checkout", "-b", branch, f"origin/{branch}"],
                cwd=ws_path, check=False,
            )

    @staticmethod
    def _parse_status(raw: str) -> dict[str, Any]:
        """Parse ``git status --porcelain=v2 --branch`` output.

        Only "changed" entries (lines starting with "1 ") and untracked
        entries ("? ") are classified; renames ("2 ") are ignored.
        """
        modified, added, deleted, untracked = [], [], [], []
        branch_name = "unknown"
        for line in raw.split("\n"):
            if line.startswith("# branch.head"):
                branch_name = line.split()[-1]
            elif line.startswith("1 "):
                parts = line.split()
                xy = parts[1] if len(parts) > 1 else ""
                path = parts[-1] if parts else ""
                if "M" in xy:
                    modified.append(path)
                elif "A" in xy:
                    added.append(path)
                elif "D" in xy:
                    deleted.append(path)
            elif line.startswith("? "):
                untracked.append(line[2:])
        return {
            "branch": branch_name,
            "modified": modified,
            "added": added,
            "deleted": deleted,
            "untracked": untracked,
            "clean": not any([modified, added, deleted, untracked]),
        }
+
+
async def summarize_workspace(folder_path: str) -> WorkspaceSummary:
    """Summarize workspace state for the redesigned UI status endpoint.

    Returns a WorkspaceSummary describing whether *folder_path* is an open
    directory, whether it contains a git repository, and — best-effort —
    its current branch and remote names.  Git probe failures degrade
    gracefully, leaving the corresponding fields unset.
    """
    # Guard BEFORE abspath(): os.path.abspath("") expands to the current
    # working directory, which previously made an empty input report the
    # CWD as an open folder.
    if not folder_path:
        return WorkspaceSummary(folder_open=False, folder_path=folder_path, folder_name=None)

    folder_path = os.path.abspath(folder_path)
    folder_name = os.path.basename(folder_path)
    folder_open = os.path.isdir(folder_path)

    summary = WorkspaceSummary(
        folder_open=folder_open,
        folder_path=folder_path,
        folder_name=folder_name,
    )

    if not folder_open:
        return summary

    # A .git entry marks the folder as a repository root.
    if not os.path.exists(os.path.join(folder_path, ".git")):
        return summary

    summary.git_detected = True
    summary.repo_root = folder_path
    summary.repo_name = folder_name

    async def _git(*args: str) -> str | None:
        # Run `git <args>` in the folder; return stdout, or None on any failure.
        try:
            proc = await asyncio.create_subprocess_exec(
                "git", *args,
                cwd=folder_path,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
            )
            stdout, _ = await proc.communicate()
            if proc.returncode == 0:
                return stdout.decode()
        except Exception:
            logger.debug("git %s failed", " ".join(args), exc_info=True)
        return None

    out = await _git("rev-parse", "--abbrev-ref", "HEAD")
    if out is not None:
        summary.branch = out.strip()

    out = await _git("remote", "-v")
    if out is not None:
        # `git remote -v` lists "name url (fetch|push)"; keep unique names.
        remotes = {parts[0] for line in out.strip().splitlines() if (parts := line.split())}
        summary.remotes = sorted(remotes)

    return summary
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000000000000000000000000000000000000..a647ce194cf4ec2c70c71b90c08b8f3d897ed8b5
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,105 @@
+[build-system]
+requires = ["setuptools>=64", "wheel"]
+build-backend = "setuptools.build_meta"
+
+[project]
+# PyPI project name – MUST match what you configured in PyPI Trusted Publishers
+name = "gitcopilot"
+version = "0.2.6"
+description = "Multi-agent AI coding assistant — four specialized agents (Explorer, Planner, Coder, Reviewer) that read your repo, plan safe changes, write code, run tests, and wait for your approval. Any LLM."
+requires-python = ">=3.11,<3.13"
+readme = "README.md"
+
+# Use a simple SPDX license string, not a table
+license = "Apache-2.0"
+
+authors = [{ name = "Ruslan Magana Vsevolodovna" }]
+keywords = [
+ "ai",
+ "github",
+ "copilot",
+ "agentic",
+ "crewai",
+ "llm",
+ "openai",
+ "claude",
+ "watsonx",
+ "ollama",
+ "fastapi",
+ "react",
+]
+classifiers = [
+ "Development Status :: 4 - Beta",
+ "Intended Audience :: Developers",
+ # License classifier removed to avoid deprecation warning
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.11",
+ "Programming Language :: Python :: 3.12",
+ "Programming Language :: JavaScript",
+ "Framework :: FastAPI",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ "Topic :: Software Development :: Version Control :: Git",
+ "Topic :: Scientific/Engineering :: Artificial Intelligence",
+ "Operating System :: OS Independent",
+]
+dependencies = [
+ "fastapi>=0.111.0",
+ "uvicorn[standard]>=0.30.0",
+ "httpx>=0.27.0",
+ "python-dotenv>=1.1.0",
+ "typer>=0.12.0",
+ "pydantic>=2.7.0",
+ "crewai[anthropic]>=0.76.9",
+ "anthropic>=0.39.0",
+ "crewai-tools>=0.13.4",
+ "ibm-watsonx-ai>=1.1.0",
+ "langchain-ibm>=0.3.0",
+ "rich>=13.0.0",
+ "pyjwt[crypto]>=2.8.0",
+ "litellm>=1.80.5",
+]
+
+[project.urls]
+Homepage = "https://github.com/ruslanmv/gitpilot"
+Documentation = "https://github.com/ruslanmv/gitpilot#readme"
+Repository = "https://github.com/ruslanmv/gitpilot"
+Issues = "https://github.com/ruslanmv/gitpilot/issues"
+
+[project.optional-dependencies]
+dev = [
+ "ruff>=0.6",
+ "pytest>=8.2",
+ "pytest-asyncio>=0.23",
+ "build>=1.2.1",
+ "twine>=5.0.0",
+]
+
+[project.scripts]
+# CLI entry points: these remain based on the python package `gitpilot`
+gitpilot = "gitpilot.cli:main"
+gitpilot-api = "gitpilot.cli:serve_only"
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["gitpilot*"] # package directory is gitpilot/
+
+[tool.setuptools.package-data]
+gitpilot = ["web/*", "web/**/*", "py.typed"]
+
+[tool.setuptools]
+include-package-data = true
+
+[tool.ruff]
+line-length = 100
+target-version = "py312"
+
+[tool.ruff.lint]
+select = ["E", "F", "W", "I", "N", "UP", "S", "B", "A", "C4", "DTZ", "ICN", "PIE", "PT", "RET", "SIM", "ARG", "PL"]
+ignore = ["S101", "PLR0913", "PLR2004"]
+
+[tool.pytest.ini_options]
+testpaths = ["tests"]
+python_files = ["test_*.py"]
+python_classes = ["Test*"]
+python_functions = ["test_*"]
+asyncio_mode = "auto"