+
+**⭐ Don't forget to star GitPilot if you find it useful! ⭐**
+
+[⭐ Star on GitHub](https://github.com/ruslanmv/gitpilot) • [📚 Documentation](https://github.com/ruslanmv/gitpilot#readme) • [🐛 Report Bug](https://github.com/ruslanmv/gitpilot/issues) • [💡 Request Feature](https://github.com/ruslanmv/gitpilot/issues)
+
+**GitPilot** — Your AI Coding Companion for GitHub 🚀
+
+Made with ❤️ by [Ruslan Magana Vsevolodovna](https://github.com/ruslanmv)
+
+
\ No newline at end of file
diff --git a/deploy/huggingface/start.sh b/deploy/huggingface/start.sh
new file mode 100644
index 0000000000000000000000000000000000000000..9db846f327fe4b65a8683d69339315a154e1d19f
--- /dev/null
+++ b/deploy/huggingface/start.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+# =============================================================================
+# GitPilot — HF Spaces Startup Script
+# =============================================================================
+# Starts GitPilot FastAPI server with React frontend on HuggingFace Spaces.
+# Pre-configured to use OllaBridge Cloud as the default LLM provider.
+# =============================================================================
+
+set -e
+
+echo "=============================================="
+echo " GitPilot β Hugging Face Spaces"
+echo "=============================================="
+echo ""
+
+# -- Ensure writable directories exist ----------------------------------------
+mkdir -p /tmp/gitpilot /tmp/gitpilot/workspaces /tmp/gitpilot/sessions
+export HOME=/tmp
+export GITPILOT_CONFIG_DIR=/tmp/gitpilot
+
+# -- Display configuration ---------------------------------------------------
+echo "[1/2] Configuration:"
+echo " Provider: ${GITPILOT_PROVIDER:-ollabridge}"
+echo " OllaBridge URL: ${OLLABRIDGE_BASE_URL:-https://ruslanmv-ollabridge.hf.space}"
+echo " Model: ${GITPILOT_OLLABRIDGE_MODEL:-qwen2.5:1.5b}"
+echo ""
+
+# -- Check OllaBridge Cloud connectivity (non-blocking) ----------------------
+echo "[2/2] Checking LLM provider..."
+if curl -sf "${OLLABRIDGE_BASE_URL:-https://ruslanmv-ollabridge.hf.space}/health" > /dev/null 2>&1; then
+ echo " OllaBridge Cloud is reachable"
+else
+ echo " OllaBridge Cloud not reachable (will retry on first request)"
+ echo " You can configure a different provider in Admin / LLM Settings"
+fi
+echo ""
+
+echo "=============================================="
+echo " Ready! Endpoints:"
+echo " - UI: / (React frontend)"
+echo " - API: /api/health"
+echo " - API Docs: /docs"
+echo " - Chat: /api/chat/message"
+echo " - Settings: /api/settings"
+echo "=============================================="
+echo ""
+
+# -- Start GitPilot (foreground) ----------------------------------------------
+exec python -m uvicorn gitpilot.api:app \
+ --host "${HOST:-0.0.0.0}" \
+ --port "${PORT:-7860}" \
+ --workers 1 \
+ --timeout-keep-alive 120 \
+ --no-access-log
diff --git a/frontend/.dockerignore b/frontend/.dockerignore
new file mode 100644
index 0000000000000000000000000000000000000000..9da1acab57b3a83f0649dc5deb28b33600fe4ad3
--- /dev/null
+++ b/frontend/.dockerignore
@@ -0,0 +1,39 @@
+# Node
+node_modules/
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+.pnpm-debug.log*
+
+# Build
+dist/
+build/
+
+# Environment
+.env
+.env.local
+.env.development
+.env.test
+.env.production.local
+
+# IDE
+.vscode/
+.idea/
+*.swp
+*.swo
+*~
+
+# OS
+.DS_Store
+Thumbs.db
+
+# Git
+.git
+.gitignore
+
+# Testing
+coverage/
+.nyc_output/
+
+# Misc
+*.log
diff --git a/frontend/.env.example b/frontend/.env.example
new file mode 100644
index 0000000000000000000000000000000000000000..81999b91b4596670b57112acfcda425f8b267fd4
--- /dev/null
+++ b/frontend/.env.example
@@ -0,0 +1,7 @@
+# Frontend Environment Variables
+
+# Backend API URL
+# Leave empty for local development (uses Vite proxy to localhost:8000)
+# Set to your Render backend URL for production deployment on Vercel
+# Example: VITE_BACKEND_URL=https://gitpilot-backend.onrender.com
+VITE_BACKEND_URL=
diff --git a/frontend/.env.production.example b/frontend/.env.production.example
new file mode 100644
index 0000000000000000000000000000000000000000..57ccba24efcbd8fda76f886e361e4e5826e52ac0
--- /dev/null
+++ b/frontend/.env.production.example
@@ -0,0 +1,5 @@
+# Production Environment Variables (Vercel)
+
+# Backend API URL - REQUIRED for production
+# Point this to your Render backend URL
+VITE_BACKEND_URL=https://gitpilot-backend.onrender.com
diff --git a/frontend/App.jsx b/frontend/App.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..fceabed0d25e27e909aab7d0cf771ac0f4046d04
--- /dev/null
+++ b/frontend/App.jsx
@@ -0,0 +1,909 @@
+// frontend/App.jsx
+import React, { useCallback, useEffect, useMemo, useRef, useState } from "react";
+import LoginPage from "./components/LoginPage.jsx";
+import RepoSelector from "./components/RepoSelector.jsx";
+import ProjectContextPanel from "./components/ProjectContextPanel.jsx";
+import ChatPanel from "./components/ChatPanel.jsx";
+import LlmSettings from "./components/LlmSettings.jsx";
+import FlowViewer from "./components/FlowViewer.jsx";
+import Footer from "./components/Footer.jsx";
+import ProjectSettingsModal from "./components/ProjectSettingsModal.jsx";
+import SessionSidebar from "./components/SessionSidebar.jsx";
+import ContextBar from "./components/ContextBar.jsx";
+import AddRepoModal from "./components/AddRepoModal.jsx";
+import { apiUrl, safeFetchJSON, fetchStatus } from "./utils/api.js";
+
+function makeRepoKey(repo) {
+ if (!repo) return null;
+ return repo.full_name || `${repo.owner}/${repo.name}`;
+}
+
+function uniq(arr) {
+ return Array.from(new Set((arr || []).filter(Boolean)));
+}
+
+export default function App() {
+ // ---- Multi-repo context state ----
+ const [contextRepos, setContextRepos] = useState([]);
+ // Each entry: { repoKey: "owner/repo", repo: {...}, branch: "main" }
+ const [activeRepoKey, setActiveRepoKey] = useState(null);
+ const [addRepoOpen, setAddRepoOpen] = useState(false);
+
+ const [activePage, setActivePage] = useState("workspace");
+ const [isAuthenticated, setIsAuthenticated] = useState(false);
+ const [isLoading, setIsLoading] = useState(true);
+ const [userInfo, setUserInfo] = useState(null);
+
+ // Repo + Session State Machine
+ const [repoStateByKey, setRepoStateByKey] = useState({});
+ const [toast, setToast] = useState(null);
+ const [settingsOpen, setSettingsOpen] = useState(false);
+ const [adminTab, setAdminTab] = useState("overview");
+ const [adminStatus, setAdminStatus] = useState(null);
+
+ // Fetch admin status when overview tab is active
+ useEffect(() => {
+ if (activePage === "admin" && adminTab === "overview") {
+ fetchStatus()
+ .then(data => setAdminStatus(data))
+ .catch(() => setAdminStatus(null));
+ }
+ }, [activePage, adminTab]);
+
+ // Claude-Code-on-Web: Session sidebar + Environment state
+ const [activeSessionId, setActiveSessionId] = useState(null);
+ const [activeEnvId, setActiveEnvId] = useState("default");
+ const [sessionRefreshNonce, setSessionRefreshNonce] = useState(0);
+
+  // ---- Derived `repo` — keeps all downstream consumers unchanged ----
+ const repo = useMemo(() => {
+ const entry = contextRepos.find((r) => r.repoKey === activeRepoKey);
+ return entry?.repo || null;
+ }, [contextRepos, activeRepoKey]);
+
+ const repoKey = activeRepoKey;
+
+ // Convenient selectors
+ const currentRepoState = repoKey ? repoStateByKey[repoKey] : null;
+
+ const defaultBranch = currentRepoState?.defaultBranch || repo?.default_branch || "main";
+ const currentBranch = currentRepoState?.currentBranch || defaultBranch;
+ const sessionBranches = currentRepoState?.sessionBranches || [];
+ const lastExecution = currentRepoState?.lastExecution || null;
+ const pulseNonce = currentRepoState?.pulseNonce || 0;
+ const chatByBranch = currentRepoState?.chatByBranch || {};
+
+ // ---------------------------------------------------------------------------
+ // Multi-repo context management
+ // ---------------------------------------------------------------------------
+ const addRepoToContext = useCallback((r) => {
+ const key = makeRepoKey(r);
+ if (!key) return;
+
+ setContextRepos((prev) => {
+ // Don't add duplicates
+ if (prev.some((e) => e.repoKey === key)) {
+ // Already in context β just activate it
+ setActiveRepoKey(key);
+ return prev;
+ }
+ const entry = { repoKey: key, repo: r, branch: r.default_branch || "main" };
+ const next = [...prev, entry];
+ return next;
+ });
+ setActiveRepoKey(key);
+ setAddRepoOpen(false);
+ }, []);
+
+ const removeRepoFromContext = useCallback((key) => {
+ setContextRepos((prev) => {
+ const next = prev.filter((e) => e.repoKey !== key);
+ // Reassign active if we removed the active one
+ setActiveRepoKey((curActive) => {
+ if (curActive === key) {
+ return next.length > 0 ? next[0].repoKey : null;
+ }
+ return curActive;
+ });
+ return next;
+ });
+ }, []);
+
+ const clearAllContext = useCallback(() => {
+ setContextRepos([]);
+ setActiveRepoKey(null);
+ }, []);
+
+ const handleContextBranchChange = useCallback((targetRepoKey, newBranch) => {
+ // Update branch in contextRepos
+ setContextRepos((prev) =>
+ prev.map((e) =>
+ e.repoKey === targetRepoKey ? { ...e, branch: newBranch } : e
+ )
+ );
+ // Update branch in repoStateByKey
+ setRepoStateByKey((prev) => {
+ const cur = prev[targetRepoKey];
+ if (!cur) return prev;
+ return {
+ ...prev,
+ [targetRepoKey]: { ...cur, currentBranch: newBranch },
+ };
+ });
+ }, []);
+
+ // Init / reconcile repo state when active repo changes
+ useEffect(() => {
+ if (!repoKey || !repo) return;
+
+ setRepoStateByKey((prev) => {
+ const existing = prev[repoKey];
+ const d = repo.default_branch || "main";
+
+ if (!existing) {
+ return {
+ ...prev,
+ [repoKey]: {
+ defaultBranch: d,
+ currentBranch: d,
+ sessionBranches: [],
+ lastExecution: null,
+ pulseNonce: 0,
+ chatByBranch: {
+ [d]: { messages: [], plan: null },
+ },
+ },
+ };
+ }
+
+ const next = { ...existing };
+ next.defaultBranch = d;
+
+ if (!next.chatByBranch?.[d]) {
+ next.chatByBranch = {
+ ...(next.chatByBranch || {}),
+ [d]: { messages: [], plan: null },
+ };
+ }
+
+ if (!next.currentBranch) next.currentBranch = d;
+
+ return { ...prev, [repoKey]: next };
+ });
+ }, [repoKey, repo?.id, repo?.default_branch]);
+
+ const showToast = (title, message) => {
+ setToast({ title, message });
+ window.setTimeout(() => setToast(null), 5000);
+ };
+
+  // ---------------------------------------------------------------------------
+  // Session management — every chat is backed by a Session (Claude Code parity)
+  // ---------------------------------------------------------------------------
+
+ // Guard against double-creation during concurrent send() calls
+ const _creatingSessionRef = useRef(false);
+
+  /**
+   * ensureSession — Create a session on-demand (implicit).
+   *
+   * Called by ChatPanel before the first message is sent. If a session
+   * already exists it returns the current ID immediately. Otherwise it
+   * creates one, seeds the initial messages into chatBySession so the
+   * useEffect reset doesn't wipe them, and returns the new ID.
+   *
+   * @param {string} [sessionName] — optional title (first user prompt, truncated)
+   * @param {Array} [seedMessages] — messages to pre-populate into the new session
+   * @returns {Promise} the session ID
+   */
+ const ensureSession = useCallback(async (sessionName, seedMessages) => {
+ if (activeSessionId) return activeSessionId;
+ if (!repo) return null;
+ if (_creatingSessionRef.current) return null; // already in flight
+ _creatingSessionRef.current = true;
+
+ try {
+ const token = localStorage.getItem("github_token");
+ const headers = {
+ "Content-Type": "application/json",
+ ...(token ? { Authorization: `Bearer ${token}` } : {}),
+ };
+ const res = await fetch("/api/sessions", {
+ method: "POST",
+ headers,
+ body: JSON.stringify({
+ repo_full_name: repoKey,
+ branch: currentBranch,
+ name: sessionName || undefined,
+ repos: contextRepos.map((e) => ({
+ full_name: e.repoKey,
+ branch: e.branch,
+ mode: e.repoKey === activeRepoKey ? "write" : "read",
+ })),
+ active_repo: activeRepoKey,
+ }),
+ });
+ if (!res.ok) return null;
+ const data = await res.json();
+ const newId = data.session_id;
+
+ // Seed the session's chat state BEFORE setting activeSessionId so
+ // the ChatPanel useEffect sync picks up the messages instead of []
+ if (seedMessages && seedMessages.length > 0) {
+ setChatBySession((prev) => ({
+ ...prev,
+ [newId]: { messages: seedMessages, plan: null },
+ }));
+ }
+
+ setActiveSessionId(newId);
+ setSessionRefreshNonce((n) => n + 1);
+ return newId;
+ } catch (err) {
+ console.warn("Failed to create session:", err);
+ return null;
+ } finally {
+ _creatingSessionRef.current = false;
+ }
+ }, [activeSessionId, repo, repoKey, currentBranch, contextRepos, activeRepoKey]);
+
+ // Explicit "New Session" button β clears chat and starts fresh
+ const handleNewSession = async () => {
+ // Clear the current session so ensureSession creates a new one
+ setActiveSessionId(null);
+ try {
+ const token = localStorage.getItem("github_token");
+ const headers = {
+ "Content-Type": "application/json",
+ ...(token ? { Authorization: `Bearer ${token}` } : {}),
+ };
+ const res = await fetch("/api/sessions", {
+ method: "POST",
+ headers,
+ body: JSON.stringify({
+ repo_full_name: repoKey,
+ branch: currentBranch,
+ repos: contextRepos.map((e) => ({
+ full_name: e.repoKey,
+ branch: e.branch,
+ mode: e.repoKey === activeRepoKey ? "write" : "read",
+ })),
+ active_repo: activeRepoKey,
+ }),
+ });
+ if (!res.ok) return;
+ const data = await res.json();
+ setActiveSessionId(data.session_id);
+ setSessionRefreshNonce((n) => n + 1);
+ showToast("Session Created", `New session started.`);
+ } catch (err) {
+ console.warn("Failed to create session:", err);
+ }
+ };
+
+ const handleSelectSession = (session) => {
+ setActiveSessionId(session.id);
+ if (session.branch && session.branch !== currentBranch) {
+ handleBranchChange(session.branch);
+ }
+ };
+
+ // When a session is deleted: if it was the active session, clear the
+ // chat so the user returns to a fresh "new conversation" state.
+ // Non-active session deletions only affect the sidebar (handled there).
+ const handleDeleteSession = useCallback((deletedId) => {
+ if (deletedId === activeSessionId) {
+ setActiveSessionId(null);
+ // Clean up the in-memory chat state for the deleted session
+ setChatBySession((prev) => {
+ const next = { ...prev };
+ delete next[deletedId];
+ return next;
+ });
+ // Also clear the branch-keyed chat (the persistence effect may have
+ // written the first user message there before the session was created)
+ if (repoKey) {
+ setRepoStateByKey((prev) => {
+ const cur = prev[repoKey];
+ if (!cur) return prev;
+ const branchKey = cur.currentBranch || cur.defaultBranch || defaultBranch;
+ return {
+ ...prev,
+ [repoKey]: {
+ ...cur,
+ chatByBranch: {
+ ...(cur.chatByBranch || {}),
+ [branchKey]: { messages: [], plan: null },
+ },
+ },
+ };
+ });
+ }
+ }
+ }, [activeSessionId, repoKey, defaultBranch]);
+
+ // ---------------------------------------------------------------------------
+ // Chat persistence helpers
+ // ---------------------------------------------------------------------------
+ const updateChatForCurrentBranch = (patch) => {
+ if (!repoKey) return;
+
+ setRepoStateByKey((prev) => {
+ const cur = prev[repoKey];
+ if (!cur) return prev;
+
+ const branchKey = cur.currentBranch || cur.defaultBranch || defaultBranch;
+
+ const existing = cur.chatByBranch?.[branchKey] || {
+ messages: [],
+ plan: null,
+ };
+
+ return {
+ ...prev,
+ [repoKey]: {
+ ...cur,
+ chatByBranch: {
+ ...(cur.chatByBranch || {}),
+ [branchKey]: { ...existing, ...patch },
+ },
+ },
+ };
+ });
+ };
+
+ const currentChatState = useMemo(() => {
+ const b = currentBranch || defaultBranch;
+ return chatByBranch[b] || { messages: [], plan: null };
+ }, [chatByBranch, currentBranch, defaultBranch]);
+
+ // ---------------------------------------------------------------------------
+ // Session-scoped chat state: isolate messages per (session + branch) instead
+ // of per-branch alone. This prevents session A's messages from leaking into
+ // session B when both sessions share the same branch.
+ // ---------------------------------------------------------------------------
+ const [chatBySession, setChatBySession] = useState({});
+
+ const sessionChatState = useMemo(() => {
+ if (!activeSessionId) {
+ // No session β fall back to legacy branch-keyed chat
+ return currentChatState;
+ }
+ return chatBySession[activeSessionId] || { messages: [], plan: null };
+ }, [activeSessionId, chatBySession, currentChatState]);
+
+ const updateSessionChat = (patch) => {
+ if (activeSessionId) {
+ setChatBySession((prev) => ({
+ ...prev,
+ [activeSessionId]: {
+ ...(prev[activeSessionId] || { messages: [], plan: null }),
+ ...patch,
+ },
+ }));
+ } else {
+ // No active session β use legacy branch-keyed persistence
+ updateChatForCurrentBranch(patch);
+ }
+ };
+
+  // ---------------------------------------------------------------------------
+  // Branch change (manual — for active repo)
+  // ---------------------------------------------------------------------------
+ const handleBranchChange = (nextBranch) => {
+ if (!repoKey) return;
+ if (!nextBranch || nextBranch === currentBranch) return;
+
+ setRepoStateByKey((prev) => {
+ const cur = prev[repoKey];
+ if (!cur) return prev;
+
+ const nextState = { ...cur, currentBranch: nextBranch };
+
+ // If switching BACK to main/default -> clear main chat (new task start)
+ if (nextBranch === cur.defaultBranch) {
+ nextState.chatByBranch = {
+ ...nextState.chatByBranch,
+ [nextBranch]: { messages: [], plan: null },
+ };
+ }
+
+ return { ...prev, [repoKey]: nextState };
+ });
+
+ // Also update contextRepos branch tracking
+ setContextRepos((prev) =>
+ prev.map((e) =>
+ e.repoKey === repoKey ? { ...e, branch: nextBranch } : e
+ )
+ );
+
+ if (nextBranch === defaultBranch) {
+ showToast("New Session", `Switched to ${defaultBranch}. Chat cleared.`);
+ } else {
+ showToast("Context Switched", `Now viewing ${nextBranch}.`);
+ }
+ };
+
+ // ---------------------------------------------------------------------------
+ // Execution complete
+ // ---------------------------------------------------------------------------
+ const handleExecutionComplete = ({
+ branch,
+ mode,
+ commit_url,
+ message,
+ completionMsg,
+ sourceBranch,
+ }) => {
+ if (!repoKey || !branch) return;
+
+ setRepoStateByKey((prev) => {
+ const cur =
+ prev[repoKey] || {
+ defaultBranch,
+ currentBranch: defaultBranch,
+ sessionBranches: [],
+ lastExecution: null,
+ pulseNonce: 0,
+ chatByBranch: { [defaultBranch]: { messages: [], plan: null } },
+ };
+
+ const next = { ...cur };
+ next.lastExecution = { mode, branch, ts: Date.now() };
+
+ if (!next.chatByBranch) next.chatByBranch = {};
+
+ const prevBranchKey =
+ sourceBranch || cur.currentBranch || cur.defaultBranch || defaultBranch;
+
+ const successSystemMsg = {
+ role: "system",
+ isSuccess: true,
+ link: commit_url,
+ content:
+ mode === "hard-switch"
+ ? `π± **Session Started:** Created branch \`${branch}\`.`
+ : `β
**Update Published:** Commits pushed to \`${branch}\`.`,
+ };
+
+ const normalizedCompletion =
+ completionMsg && (completionMsg.answer || completionMsg.content || completionMsg.executionLog)
+ ? {
+ from: completionMsg.from || "ai",
+ role: completionMsg.role || "assistant",
+ answer: completionMsg.answer,
+ content: completionMsg.content,
+ executionLog: completionMsg.executionLog,
+ }
+ : null;
+
+ if (mode === "hard-switch") {
+ next.sessionBranches = uniq([...(next.sessionBranches || []), branch]);
+ next.currentBranch = branch;
+ next.pulseNonce = (next.pulseNonce || 0) + 1;
+
+ const existingTargetChat = next.chatByBranch[branch];
+ const isExistingSession =
+ existingTargetChat && (existingTargetChat.messages || []).length > 0;
+
+ if (isExistingSession) {
+ const appended = [
+ ...(existingTargetChat.messages || []),
+ ...(normalizedCompletion ? [normalizedCompletion] : []),
+ successSystemMsg,
+ ];
+
+ next.chatByBranch[branch] = {
+ ...existingTargetChat,
+ messages: appended,
+ plan: null,
+ };
+ } else {
+ const prevChat =
+ (cur.chatByBranch && cur.chatByBranch[prevBranchKey]) || { messages: [], plan: null };
+
+ next.chatByBranch[branch] = {
+ messages: [
+ ...(prevChat.messages || []),
+ ...(normalizedCompletion ? [normalizedCompletion] : []),
+ successSystemMsg,
+ ],
+ plan: null,
+ };
+ }
+
+ if (!next.chatByBranch[next.defaultBranch]) {
+ next.chatByBranch[next.defaultBranch] = { messages: [], plan: null };
+ }
+ } else if (mode === "sticky") {
+ next.currentBranch = cur.currentBranch || branch;
+
+ const targetChat = next.chatByBranch[branch] || { messages: [], plan: null };
+
+ next.chatByBranch[branch] = {
+ messages: [
+ ...(targetChat.messages || []),
+ ...(normalizedCompletion ? [normalizedCompletion] : []),
+ successSystemMsg,
+ ],
+ plan: null,
+ };
+ }
+
+ return { ...prev, [repoKey]: next };
+ });
+
+ if (mode === "hard-switch") {
+ showToast("Context Switched", `Active on ${branch}.`);
+ } else {
+ showToast("Changes Committed", `Updated ${branch}.`);
+ }
+ };
+
+ // ---------------------------------------------------------------------------
+ // Auth & Render
+ // ---------------------------------------------------------------------------
+ useEffect(() => {
+ checkAuthentication();
+ }, []);
+
+ const checkAuthentication = async () => {
+ const token = localStorage.getItem("github_token");
+ const user = localStorage.getItem("github_user");
+ if (token && user) {
+ try {
+ const data = await safeFetchJSON(apiUrl("/api/auth/validate"), {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({ access_token: token }),
+ });
+ if (data.authenticated) {
+ setIsAuthenticated(true);
+ setUserInfo(JSON.parse(user));
+ setIsLoading(false);
+ return;
+ }
+ } catch (err) {
+ console.error(err);
+ }
+ localStorage.removeItem("github_token");
+ localStorage.removeItem("github_user");
+ }
+ setIsAuthenticated(false);
+ setIsLoading(false);
+ };
+
+ const handleAuthenticated = (session) => {
+ setIsAuthenticated(true);
+ setUserInfo(session.user);
+ };
+
+ const handleLogout = () => {
+ localStorage.removeItem("github_token");
+ localStorage.removeItem("github_user");
+ setIsAuthenticated(false);
+ setUserInfo(null);
+ clearAllContext();
+ };
+
+ if (isLoading)
+ return (
+
+ );
+
+ if (!isAuthenticated) return ;
+
+ const hasContext = contextRepos.length > 0;
+
+ return (
+
+
+
+ {/* ---- Brand ---- */}
+
+
GP
+
+
GitPilot
+
Agentic GitHub Copilot
+
+
+
+ {/* ---- Navigation ---- */}
+
+ setActivePage("workspace")}
+ >
+ Workspace
+
+ setActivePage("flow")}
+ >
+ Agent Workflow
+
+ setActivePage("admin")}
+ >
+ Admin
+
+
+
+ {/* ---- Repository Switcher (shown when no context) ---- */}
+ {!hasContext && (
+ addRepoToContext(r)} />
+ )}
+
+ {/* ---- Sessions ---- */}
+ {repo && (
+
+ )}
+
+ {/* ---- User ---- */}
+ {userInfo && (
+
+
+
+
+
{userInfo.name || userInfo.login}
+
@{userInfo.login}
+
+
+
+ Logout
+
+
+ )}
+
+
+
+ {activePage === "admin" && (
+
+ {/* Admin Navigation */}
+
+ {["overview", "providers", "workspace-modes", "integrations", "sessions", "skills", "security", "advanced"].map(tab => (
+ setAdminTab(tab)}
+ style={{
+ padding: "8px 16px",
+ borderRadius: "6px",
+ border: adminTab === tab ? "1px solid #3B82F6" : "1px solid #333",
+ background: adminTab === tab ? "#1e3a5f" : "#1a1b26",
+ color: adminTab === tab ? "#93c5fd" : "#a0a0b0",
+ cursor: "pointer",
+ fontSize: "13px",
+ textTransform: "capitalize",
+ }}
+ >
+ {tab.replace("-", " ")}
+
+ ))}
+
+
+ {/* Overview */}
+ {adminTab === "overview" && (
+
+
+
Server
+
{adminStatus?.server_ready ? "Connected" : "Checking..."}
+
127.0.0.1:8000
+
+
+
Provider
+
{adminStatus?.provider?.name || "Loading..."}
+
{adminStatus?.provider?.configured ? `${adminStatus.provider.model || "Ready"}` : "Not configured"}
+
+
+
Workspace Modes
+
Folder: {adminStatus?.workspace?.folder_mode_available ? "Yes" : "β"}
+
Local Git: {adminStatus?.workspace?.local_git_available ? "Yes" : "β"}
+
GitHub: {adminStatus?.workspace?.github_mode_available ? "Yes" : "Optional"}
+
+
+
GitHub
+
{adminStatus?.github?.connected ? "Connected" : "Optional"}
+
{adminStatus?.github?.username || "Not linked"}
+
+
+
+
Get Started
+
setAdminTab("providers")} style={{ padding: "6px 12px", background: "#3B82F6", color: "#fff", border: "none", borderRadius: "4px", cursor: "pointer", fontSize: "12px", marginRight: "4px" }}>Configure Provider
+
+
+ )}
+
+ {/* Providers */}
+ {adminTab === "providers" && (
+
+
AI Providers
+
+
+ )}
+
+ {/* Workspace Modes */}
+ {adminTab === "workspace-modes" && (
+
+
+
Folder Mode
+
Work with any local folder. No Git required.
+
Requires: Open folder
+
Enables: Chat, explain, review
+
+
+
Local Git Mode
+
Full repo + branch context for AI assistance.
+
Requires: Git repository
+
Enables: All local features
+
+
+
GitHub Mode
+
PRs, issues, remote workflows via GitHub API.
+
Requires: GitHub token
+
Enables: Full platform features
+
+
+ )}
+
+ {/* Integrations */}
+ {adminTab === "integrations" && (
+
+
GitHub Integration
+
GitHub is optional. Connect to enable PRs, issues, and remote workflows.
+
Connect GitHub
+
+ )}
+
+ {/* Security */}
+ {adminTab === "security" && (
+
+
Security Scanning
+
Run security scans on your workspace to detect vulnerabilities, secrets, and code issues.
+
Scan Workspace
+
+ )}
+
+ {/* Sessions */}
+ {adminTab === "sessions" && (
+
+
Sessions
+
Session management is available in the main workspace view.
+
+ )}
+
+ {/* Skills & Plugins */}
+ {adminTab === "skills" && (
+
+
Skills & Plugins
+
Skills and plugins extend GitPilot capabilities. View and manage them from the main workspace.
+
+ )}
+
+ {/* Advanced */}
+ {adminTab === "advanced" && (
+
+
Advanced Settings
+
Advanced configuration options are available in the Settings modal.
+
setSettingsOpen(true)} style={{ padding: "8px 16px", background: "#3B82F6", color: "#fff", border: "none", borderRadius: "4px", cursor: "pointer", marginTop: "12px" }}>Open Settings
+
+ )}
+
+ )}
+ {activePage === "flow" && }
+ {activePage === "workspace" &&
+ (repo ? (
+
+ {/* ---- Context Bar (single source of truth for repo selection) ---- */}
+
setAddRepoOpen(true)}
+ onBranchChange={handleContextBranchChange}
+ />
+
+
+
+ setSettingsOpen(true)}
+ />
+
+
+
+
+ GitPilot chat
+
+
+
+
+
+
+ ) : (
+
+
π€
+
Select a repository
+
Select a repo to begin agentic workflow.
+
+ ))}
+
+
+
+
+
+ {repo && (
+
setSettingsOpen(false)}
+ activeEnvId={activeEnvId}
+ onEnvChange={setActiveEnvId}
+ />
+ )}
+
+ {/* Add Repo Modal */}
+ setAddRepoOpen(false)}
+ excludeKeys={contextRepos.map((e) => e.repoKey)}
+ />
+
+ {toast && (
+
+
{toast.title}
+
{toast.message}
+
+ )}
+
+
+
+ );
+}
diff --git a/frontend/components/AddRepoModal.jsx b/frontend/components/AddRepoModal.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..7832877ed985bac5ec81f1b4c43978e525dd8bd2
--- /dev/null
+++ b/frontend/components/AddRepoModal.jsx
@@ -0,0 +1,256 @@
+import React, { useCallback, useEffect, useState } from "react";
+import { createPortal } from "react-dom";
+import { authFetch } from "../utils/api.js";
+
+/**
+ * AddRepoModal — lightweight portal modal for adding repos to context.
+ *
+ * Embeds a minimal repo search/list (not the full RepoSelector) to keep
+ * the modal focused. Filters out repos already in context.
+ */
+export default function AddRepoModal({ isOpen, onSelect, onClose, excludeKeys = [] }) {
+ const [query, setQuery] = useState("");
+ const [repos, setRepos] = useState([]);
+ const [loading, setLoading] = useState(false);
+
+ const fetchRepos = useCallback(
+ async (searchQuery) => {
+ setLoading(true);
+ try {
+ const params = new URLSearchParams({ per_page: "50" });
+ if (searchQuery) params.set("query", searchQuery);
+ const res = await authFetch(`/api/repos?${params}`);
+ if (!res.ok) return;
+ const data = await res.json();
+ setRepos(data.repositories || []);
+ } catch (err) {
+ console.warn("AddRepoModal: fetch failed:", err);
+ } finally {
+ setLoading(false);
+ }
+ },
+ []
+ );
+
+ useEffect(() => {
+ if (isOpen) {
+ setQuery("");
+ fetchRepos("");
+ }
+ }, [isOpen, fetchRepos]);
+
+ // Debounced search
+ useEffect(() => {
+ if (!isOpen) return;
+ const t = setTimeout(() => fetchRepos(query), 300);
+ return () => clearTimeout(t);
+ }, [query, isOpen, fetchRepos]);
+
+ const excludeSet = new Set(excludeKeys);
+ const filtered = repos.filter((r) => {
+ const key = r.full_name || `${r.owner}/${r.name}`;
+ return !excludeSet.has(key);
+ });
+
+ if (!isOpen) return null;
+
+ return createPortal(
+ {
+ if (e.target === e.currentTarget) onClose();
+ }}
+ >
+
e.stopPropagation()}>
+
+ Add Repository
+
+ ×
+
+
+
+
+ setQuery(e.target.value)}
+ style={styles.searchInput}
+ autoFocus
+ onKeyDown={(e) => {
+ if (e.key === "Escape") onClose();
+ }}
+ />
+
+
+
+ {loading && filtered.length === 0 && (
+
Loading...
+ )}
+ {!loading && filtered.length === 0 && (
+
+ {excludeKeys.length > 0 && repos.length > 0
+ ? "All matching repos are already in context"
+ : "No repositories found"}
+
+ )}
+ {filtered.map((r) => {
+ const key = r.full_name || `${r.owner}/${r.name}`;
+ return (
+
onSelect(r)}
+ >
+
+ {r.name}
+ {r.owner}
+
+
+ {r.private && Private }
+ {r.default_branch || "main"}
+
+
+ );
+ })}
+ {loading && filtered.length > 0 && (
+
Updating...
+ )}
+
+
+
,
+ document.body
+ );
+}
+
+const styles = {
+ overlay: {
+ position: "fixed",
+ top: 0,
+ left: 0,
+ right: 0,
+ bottom: 0,
+ backgroundColor: "rgba(0, 0, 0, 0.6)",
+ zIndex: 10000,
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "center",
+ },
+ modal: {
+ width: 440,
+ maxHeight: "70vh",
+ backgroundColor: "#131316",
+ border: "1px solid #27272A",
+ borderRadius: 12,
+ display: "flex",
+ flexDirection: "column",
+ overflow: "hidden",
+ boxShadow: "0 12px 40px rgba(0,0,0,0.5)",
+ },
+ header: {
+ display: "flex",
+ justifyContent: "space-between",
+ alignItems: "center",
+ padding: "12px 14px",
+ borderBottom: "1px solid #27272A",
+ backgroundColor: "#18181B",
+ },
+ headerTitle: {
+ fontSize: 14,
+ fontWeight: 600,
+ color: "#E4E4E7",
+ },
+ closeBtn: {
+ width: 26,
+ height: 26,
+ borderRadius: 6,
+ border: "1px solid #3F3F46",
+ background: "transparent",
+ color: "#A1A1AA",
+ fontSize: 16,
+ cursor: "pointer",
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "center",
+ },
+ searchBox: {
+ padding: "10px 12px",
+ borderBottom: "1px solid #27272A",
+ },
+ searchInput: {
+ width: "100%",
+ padding: "8px 10px",
+ borderRadius: 6,
+ border: "1px solid #3F3F46",
+ background: "#18181B",
+ color: "#E4E4E7",
+ fontSize: 13,
+ outline: "none",
+ fontFamily: "monospace",
+ boxSizing: "border-box",
+ },
+ list: {
+ flex: 1,
+ overflowY: "auto",
+ maxHeight: 360,
+ },
+ statusRow: {
+ padding: "16px 12px",
+ textAlign: "center",
+ fontSize: 12,
+ color: "#71717A",
+ },
+ repoRow: {
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "space-between",
+ width: "100%",
+ padding: "10px 14px",
+ border: "none",
+ borderBottom: "1px solid rgba(39, 39, 42, 0.5)",
+ background: "transparent",
+ color: "#E4E4E7",
+ cursor: "pointer",
+ textAlign: "left",
+ transition: "background-color 0.1s",
+ },
+ repoInfo: {
+ display: "flex",
+ flexDirection: "column",
+ gap: 2,
+ minWidth: 0,
+ },
+ repoName: {
+ fontSize: 13,
+ fontWeight: 600,
+ fontFamily: "monospace",
+ overflow: "hidden",
+ textOverflow: "ellipsis",
+ whiteSpace: "nowrap",
+ },
+ repoOwner: {
+ fontSize: 11,
+ color: "#71717A",
+ },
+ repoMeta: {
+ display: "flex",
+ alignItems: "center",
+ gap: 8,
+ flexShrink: 0,
+ },
+ privateBadge: {
+ fontSize: 9,
+ padding: "1px 5px",
+ borderRadius: 8,
+ backgroundColor: "rgba(239, 68, 68, 0.12)",
+ color: "#F87171",
+ fontWeight: 600,
+ textTransform: "uppercase",
+ },
+ branchHint: {
+ fontSize: 10,
+ color: "#52525B",
+ fontFamily: "monospace",
+ },
+};
diff --git a/frontend/components/AssistantMessage.jsx b/frontend/components/AssistantMessage.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..5dd34a1f20c9d1026069ef69d69b92f2582c5d81
--- /dev/null
+++ b/frontend/components/AssistantMessage.jsx
@@ -0,0 +1,116 @@
+import React from "react";
+import PlanView from "./PlanView.jsx";
+
+export default function AssistantMessage({ answer, plan, executionLog }) {
+ const styles = {
+ container: {
+ marginBottom: "20px",
+ padding: "20px",
+ backgroundColor: "#18181B", // Zinc-900
+ borderRadius: "12px",
+ border: "1px solid #27272A", // Zinc-800
+ color: "#F4F4F5", // Zinc-100
+ fontFamily: '-apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif',
+ boxShadow: "0 4px 6px -1px rgba(0, 0, 0, 0.1), 0 2px 4px -1px rgba(0, 0, 0, 0.06)",
+ },
+ section: {
+ marginBottom: "20px",
+ },
+ lastSection: {
+ marginBottom: "0",
+ },
+ header: {
+ display: "flex",
+ alignItems: "center",
+ marginBottom: "12px",
+ paddingBottom: "8px",
+ borderBottom: "1px solid #3F3F46", // Zinc-700
+ },
+ title: {
+ fontSize: "12px",
+ fontWeight: "600",
+ textTransform: "uppercase",
+ letterSpacing: "0.05em",
+ color: "#A1A1AA", // Zinc-400
+ margin: 0,
+ },
+ content: {
+ fontSize: "14px",
+ lineHeight: "1.6",
+ whiteSpace: "pre-wrap",
+ },
+ executionList: {
+ listStyle: "none",
+ padding: 0,
+ margin: 0,
+ display: "flex",
+ flexDirection: "column",
+ gap: "8px",
+ },
+ executionStep: {
+ display: "flex",
+ flexDirection: "column",
+ gap: "4px",
+ padding: "10px",
+ backgroundColor: "#09090B", // Zinc-950
+ borderRadius: "6px",
+ border: "1px solid #27272A",
+ fontSize: "13px",
+ },
+ stepNumber: {
+ fontSize: "11px",
+ fontWeight: "600",
+ color: "#10B981", // Emerald-500
+ textTransform: "uppercase",
+ },
+ stepSummary: {
+ color: "#D4D4D8", // Zinc-300
+ fontFamily: "ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, monospace",
+ },
+ };
+
+ return (
+
+ {/* Answer section */}
+
+
+ {/* Action Plan section */}
+ {plan && (
+
+ )}
+
+ {/* Execution Log section (shown after execution) */}
+ {executionLog && (
+
+
+
+
+ {executionLog.steps.map((s) => (
+
+ Step {s.step_number}
+ {s.summary}
+
+ ))}
+
+
+
+ )}
+
+ );
+}
\ No newline at end of file
diff --git a/frontend/components/BranchPicker.jsx b/frontend/components/BranchPicker.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..e04e04d77dbda1a8532b26c9c2d3fc92fb7d2616
--- /dev/null
+++ b/frontend/components/BranchPicker.jsx
@@ -0,0 +1,398 @@
+import React, { useCallback, useEffect, useRef, useState } from "react";
+import { createPortal } from "react-dom";
+
+/**
+ * BranchPicker β Claude-Code-on-Web parity branch selector.
+ *
+ * Fetches branches from the new /api/repos/{owner}/{repo}/branches endpoint.
+ * Shows search, default branch badge, AI session branch highlighting.
+ *
+ * Fixes applied:
+ * - Dropdown portaled to document.body (avoids overflow:hidden clipping)
+ * - Branches cached per repo (no "No branches found" flash)
+ * - Shows "Loading..." only on first fetch, keeps stale data otherwise
+ */
+
+// Simple per-repo branch cache so reopening the dropdown is instant
+const branchCache = {};
+
+/**
+ * Props:
+ * repo, currentBranch, defaultBranch, sessionBranches, onBranchChange
+ * β standard branch-picker props
+ *
+ * externalAnchorRef (optional) β a React ref pointing to an external DOM
+ * element to anchor the dropdown to. When provided:
+ * - BranchPicker skips rendering its own trigger button
+ * - the dropdown opens immediately on mount
+ * - closing the dropdown calls onClose()
+ *
+ * onClose (optional) β called when the dropdown is dismissed (outside
+ * click or Escape). Only meaningful with externalAnchorRef.
+ */
+export default function BranchPicker({
+ repo,
+ currentBranch,
+ defaultBranch,
+ sessionBranches = [],
+ onBranchChange,
+ externalAnchorRef,
+ onClose,
+}) {
+ const isExternalMode = !!externalAnchorRef;
+ const [open, setOpen] = useState(isExternalMode);
+ const [query, setQuery] = useState("");
+ const [branches, setBranches] = useState([]);
+ const [loading, setLoading] = useState(false);
+ const [error, setError] = useState(null);
+ const triggerRef = useRef(null);
+ const dropdownRef = useRef(null);
+ const inputRef = useRef(null);
+
+ const branch = currentBranch || defaultBranch || "main";
+ const isAiSession = sessionBranches.includes(branch) && branch !== defaultBranch;
+
+ // The element used for dropdown positioning
+ const anchorRef = isExternalMode ? externalAnchorRef : triggerRef;
+
+ const cacheKey = repo ? `${repo.owner}/${repo.name}` : null;
+
+ // Seed from cache on mount / repo change
+ useEffect(() => {
+ if (cacheKey && branchCache[cacheKey]) {
+ setBranches(branchCache[cacheKey]);
+ }
+ }, [cacheKey]);
+
+ // Fetch branches from GitHub via backend
+ const fetchBranches = useCallback(async (searchQuery) => {
+ if (!repo) return;
+ setLoading(true);
+ setError(null);
+ try {
+ const token = localStorage.getItem("github_token");
+ const headers = token ? { Authorization: `Bearer ${token}` } : {};
+ const params = new URLSearchParams({ per_page: "100" });
+ if (searchQuery) params.set("query", searchQuery);
+
+ const res = await fetch(
+ `/api/repos/${repo.owner}/${repo.name}/branches?${params}`,
+ { headers, cache: "no-cache" }
+ );
+ if (!res.ok) {
+ const errData = await res.json().catch(() => ({}));
+ const detail = errData.detail || `HTTP ${res.status}`;
+ console.warn("BranchPicker: fetch failed:", detail);
+ setError(detail);
+ return;
+ }
+ const data = await res.json();
+ const fetched = data.branches || [];
+ setBranches(fetched);
+
+ // Only cache the unfiltered result
+ if (!searchQuery && cacheKey) {
+ branchCache[cacheKey] = fetched;
+ }
+ } catch (err) {
+ console.warn("Failed to fetch branches:", err);
+ } finally {
+ setLoading(false);
+ }
+ }, [repo, cacheKey]);
+
+ // Fetch + focus when opened
+ useEffect(() => {
+ if (open) {
+ fetchBranches(query);
+ setTimeout(() => inputRef.current?.focus(), 50);
+ }
+ }, [open]); // eslint-disable-line react-hooks/exhaustive-deps
+
+ // Debounced search
+ useEffect(() => {
+ if (!open) return;
+ const t = setTimeout(() => fetchBranches(query), 300);
+ return () => clearTimeout(t);
+ }, [query, open, fetchBranches]);
+
+ // Close on outside click
+ useEffect(() => {
+ if (!open) return;
+ const handler = (e) => {
+ const inAnchor = anchorRef.current && anchorRef.current.contains(e.target);
+ const inDropdown = dropdownRef.current && dropdownRef.current.contains(e.target);
+ if (!inAnchor && !inDropdown) {
+ handleClose();
+ }
+ };
+ document.addEventListener("mousedown", handler);
+ return () => document.removeEventListener("mousedown", handler);
+ }, [open]); // eslint-disable-line react-hooks/exhaustive-deps
+
+ const handleClose = useCallback(() => {
+ setOpen(false);
+ setQuery("");
+ onClose?.();
+ }, [onClose]);
+
+ const handleSelect = (branchName) => {
+ handleClose();
+ if (branchName !== branch) {
+ onBranchChange?.(branchName);
+ }
+ };
+
+ // Merge API branches with session branches (AI branches might not show in GitHub API)
+ const allBranches = [...branches];
+ for (const sb of sessionBranches) {
+ if (!allBranches.find((b) => b.name === sb)) {
+ allBranches.push({ name: sb, is_default: false, protected: false });
+ }
+ }
+
+ // Calculate portal position from anchor element
+ const getDropdownPosition = () => {
+ if (!anchorRef.current) return { top: 0, left: 0 };
+ const rect = anchorRef.current.getBoundingClientRect();
+ return {
+ top: rect.bottom + 4,
+ left: rect.left,
+ };
+ };
+
+ const pos = open ? getDropdownPosition() : { top: 0, left: 0 };
+
+ return (
+
+ {/* Trigger button β hidden when using external anchor */}
+ {!isExternalMode && (
+
setOpen((v) => !v)}
+ >
+
+
+
+
+
+
+ {branch}
+
+
+
+
+ )}
+
+ {/* Dropdown β portaled to document.body to escape overflow:hidden */}
+ {open && createPortal(
+
+ {/* Search input */}
+
+ setQuery(e.target.value)}
+ style={styles.searchInput}
+ onKeyDown={(e) => {
+ if (e.key === "Escape") {
+ handleClose();
+ }
+ }}
+ />
+
+
+ {/* Branch list */}
+
+ {loading && allBranches.length === 0 && (
+
Loading...
+ )}
+
+ {!loading && error && (
+
{error}
+ )}
+
+ {!loading && !error && allBranches.length === 0 && (
+
No branches found
+ )}
+
+ {allBranches.map((b) => {
+ const isDefault = b.is_default || b.name === defaultBranch;
+ const isAi = sessionBranches.includes(b.name);
+ const isCurrent = b.name === branch;
+
+ return (
+
handleSelect(b.name)}
+ >
+
+ ✓
+
+
+ {b.name}
+
+ {isDefault && (
+
default
+ )}
+ {isAi && !isDefault && (
+
AI
+ )}
+ {b.protected && (
+
+
+
+
+
+ )}
+
+ );
+ })}
+
+ {/* Subtle loading indicator when refreshing with cached data visible */}
+ {loading && allBranches.length > 0 && (
+
Updating...
+ )}
+
+
,
+ document.body
+ )}
+
+ );
+}
+
+const styles = {
+ container: {
+ position: "relative",
+ },
+ trigger: {
+ display: "flex",
+ alignItems: "center",
+ gap: 6,
+ padding: "4px 8px",
+ borderRadius: 4,
+ border: "1px solid #3F3F46",
+ background: "transparent",
+ fontSize: 13,
+ cursor: "pointer",
+ fontFamily: "monospace",
+ maxWidth: 200,
+ },
+ branchName: {
+ whiteSpace: "nowrap",
+ overflow: "hidden",
+ textOverflow: "ellipsis",
+ maxWidth: 140,
+ },
+ dropdown: {
+ position: "fixed",
+ width: 280,
+ backgroundColor: "#1F1F23",
+ border: "1px solid #27272A",
+ borderRadius: 8,
+ boxShadow: "0 8px 24px rgba(0,0,0,0.6)",
+ zIndex: 9999,
+ overflow: "hidden",
+ },
+ searchBox: {
+ padding: "8px 10px",
+ borderBottom: "1px solid #27272A",
+ },
+ searchInput: {
+ width: "100%",
+ padding: "6px 8px",
+ borderRadius: 4,
+ border: "1px solid #3F3F46",
+ background: "#131316",
+ color: "#E4E4E7",
+ fontSize: 12,
+ outline: "none",
+ fontFamily: "monospace",
+ boxSizing: "border-box",
+ },
+ branchList: {
+ maxHeight: 260,
+ overflowY: "auto",
+ },
+ branchRow: {
+ display: "flex",
+ alignItems: "center",
+ gap: 6,
+ padding: "7px 10px",
+ cursor: "pointer",
+ transition: "background-color 0.1s",
+ borderBottom: "1px solid rgba(39, 39, 42, 0.5)",
+ },
+ loadingRow: {
+ padding: "12px 10px",
+ textAlign: "center",
+ fontSize: 12,
+ color: "#71717A",
+ },
+ errorRow: {
+ padding: "12px 10px",
+ textAlign: "center",
+ fontSize: 11,
+ color: "#F59E0B",
+ },
+ defaultBadge: {
+ fontSize: 9,
+ padding: "1px 5px",
+ borderRadius: 8,
+ backgroundColor: "rgba(16, 185, 129, 0.15)",
+ color: "#10B981",
+ fontWeight: 600,
+ textTransform: "uppercase",
+ letterSpacing: "0.04em",
+ flexShrink: 0,
+ },
+ aiBadge: {
+ fontSize: 9,
+ padding: "1px 5px",
+ borderRadius: 8,
+ backgroundColor: "rgba(59, 130, 246, 0.15)",
+ color: "#60a5fa",
+ fontWeight: 700,
+ flexShrink: 0,
+ },
+ protectedBadge: {
+ color: "#F59E0B",
+ flexShrink: 0,
+ display: "flex",
+ alignItems: "center",
+ },
+};
diff --git a/frontend/components/ChatPanel.jsx b/frontend/components/ChatPanel.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..053690ac940ae2cc215191b8eed1cd6d5c19b030
--- /dev/null
+++ b/frontend/components/ChatPanel.jsx
@@ -0,0 +1,686 @@
+// frontend/components/ChatPanel.jsx
+import React, { useEffect, useRef, useState } from "react";
+import AssistantMessage from "./AssistantMessage.jsx";
+import DiffStats from "./DiffStats.jsx";
+import DiffViewer from "./DiffViewer.jsx";
+import CreatePRButton from "./CreatePRButton.jsx";
+import StreamingMessage from "./StreamingMessage.jsx";
+import { SessionWebSocket } from "../utils/ws.js";
+
+// Helper to get headers (inline safety if utility is missing)
+const getHeaders = () => ({
+ "Content-Type": "application/json",
+ Authorization: `Bearer ${localStorage.getItem("github_token") || ""}`,
+});
+
+export default function ChatPanel({
+ repo,
+ defaultBranch = "main",
+ currentBranch, // do NOT default here; parent must pass the real one
+ onExecutionComplete,
+ sessionChatState,
+ onSessionChatStateChange,
+ sessionId,
+ onEnsureSession,
+ canChat = true, // readiness gate: false disables composer and shows blocker
+ chatBlocker = null, // { message: string, cta?: string, onCta?: () => void }
+}) {
+ // Initialize state from props or defaults
+ const [messages, setMessages] = useState(sessionChatState?.messages || []);
+ const [goal, setGoal] = useState("");
+ const [plan, setPlan] = useState(sessionChatState?.plan || null);
+
+ const [loadingPlan, setLoadingPlan] = useState(false);
+ const [executing, setExecuting] = useState(false);
+ const [status, setStatus] = useState("");
+
+ // Claude-Code-on-Web: WebSocket streaming + diff + PR
+ const [wsConnected, setWsConnected] = useState(false);
+ const [streamingEvents, setStreamingEvents] = useState([]);
+ const [diffData, setDiffData] = useState(null);
+ const [showDiffViewer, setShowDiffViewer] = useState(false);
+ const wsRef = useRef(null);
+
+ // Ref mirrors streamingEvents so WS callbacks avoid stale closures
+ const streamingEventsRef = useRef([]);
+ useEffect(() => { streamingEventsRef.current = streamingEvents; }, [streamingEvents]);
+
+ // Skip the session-sync useEffect reset when we just created a session
+ // (the parent already seeded the messages into chatBySession)
+ const skipNextSyncRef = useRef(false);
+
+ const messagesEndRef = useRef(null);
+ const prevMsgCountRef = useRef((sessionChatState?.messages || []).length);
+
+ // ---------------------------------------------------------------------------
+ // WebSocket connection management
+ // ---------------------------------------------------------------------------
+ useEffect(() => {
+ // Clean up previous connection
+ if (wsRef.current) {
+ wsRef.current.close();
+ wsRef.current = null;
+ setWsConnected(false);
+ }
+
+ if (!sessionId) return;
+
+ const ws = new SessionWebSocket(sessionId, {
+ onConnect: () => setWsConnected(true),
+ onDisconnect: () => setWsConnected(false),
+ onMessage: (data) => {
+ if (data.type === "agent_message") {
+ setStreamingEvents((prev) => [...prev, data]);
+ } else if (data.type === "tool_use" || data.type === "tool_result") {
+ setStreamingEvents((prev) => [...prev, data]);
+ } else if (data.type === "diff_update") {
+ setDiffData(data.stats || data);
+ } else if (data.type === "session_restored") {
+ // Session loaded
+ }
+ },
+ onStatusChange: (newStatus) => {
+ if (newStatus === "waiting") {
+ // Always clear loading state when agent finishes
+ setLoadingPlan(false);
+
+ // Consolidate streaming events into a chat message (use ref to
+ // avoid stale closure β streamingEvents state would be stale here)
+ const events = streamingEventsRef.current;
+ if (events.length > 0) {
+ const textParts = events
+ .filter((e) => e.type === "agent_message")
+ .map((e) => e.content);
+ if (textParts.length > 0) {
+ const consolidated = {
+ from: "ai",
+ role: "assistant",
+ answer: textParts.join(""),
+ content: textParts.join(""),
+ };
+ setMessages((prev) => [...prev, consolidated]);
+ }
+ setStreamingEvents([]);
+ }
+ }
+ },
+ onError: (err) => {
+ console.warn("[ws] Error:", err);
+ setLoadingPlan(false);
+ },
+ });
+
+ ws.connect();
+ wsRef.current = ws;
+
+ return () => {
+ ws.close();
+ };
+ }, [sessionId]); // eslint-disable-line react-hooks/exhaustive-deps
+
+ // ---------------------------------------------------------------------------
+ // 1) SESSION SYNC: Restore chat when branch, repo, OR session changes
+ // IMPORTANT: Do NOT depend on sessionChatState here (prevents prop/state loop)
+ // ---------------------------------------------------------------------------
+ useEffect(() => {
+ // When send() just created a session, the parent seeded the messages
+ // into chatBySession already. Skip the reset so we don't wipe
+ // the optimistic user message that was already rendered.
+ if (skipNextSyncRef.current) {
+ skipNextSyncRef.current = false;
+ return;
+ }
+
+ const nextMessages = sessionChatState?.messages || [];
+ const nextPlan = sessionChatState?.plan || null;
+
+ setMessages(nextMessages);
+ setPlan(nextPlan);
+
+ // Reset transient UI state on branch/repo/session switch
+ setGoal("");
+ setStatus("");
+ setLoadingPlan(false);
+ setExecuting(false);
+ setStreamingEvents([]);
+ setDiffData(null);
+
+ // Update msg count tracker so auto-scroll doesn't "jump" on switch
+ prevMsgCountRef.current = nextMessages.length;
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [currentBranch, repo?.full_name, sessionId]);
+
+ // ---------------------------------------------------------------------------
+ // 2) PERSISTENCE: Save chat to Parent (no loop now because sync only on branch)
+ // ---------------------------------------------------------------------------
+ useEffect(() => {
+ if (typeof onSessionChatStateChange === "function") {
+ // Avoid wiping parent state on mount
+ if (messages.length > 0 || plan) {
+ onSessionChatStateChange({ messages, plan });
+ }
+ }
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [messages, plan]);
+
+ // ---------------------------------------------------------------------------
+ // 3) AUTO-SCROLL: Only scroll when a message is appended (reduces flicker)
+ // ---------------------------------------------------------------------------
+ useEffect(() => {
+ const curCount = messages.length + streamingEvents.length;
+ const prevCount = prevMsgCountRef.current;
+
+ // Only scroll when new messages are added
+ if (curCount > prevCount) {
+ prevMsgCountRef.current = curCount;
+ requestAnimationFrame(() => {
+ messagesEndRef.current?.scrollIntoView({ behavior: "smooth" });
+ });
+ } else {
+ prevMsgCountRef.current = curCount;
+ }
+ }, [messages.length, streamingEvents.length]);
+
+ // ---------------------------------------------------------------------------
+ // HANDLERS
+ // ---------------------------------------------------------------------------
+ // ---------------------------------------------------------------------------
+ // Persist a message to the backend session (fire-and-forget)
+ // ---------------------------------------------------------------------------
+ const persistMessage = (sid, role, content) => {
+ if (!sid) return;
+ fetch(`/api/sessions/${sid}/message`, {
+ method: "POST",
+ headers: getHeaders(),
+ body: JSON.stringify({ role, content }),
+ }).catch(() => {}); // best-effort
+ };
+
+ const send = async () => {
+ if (!repo || !goal.trim()) return;
+
+ const text = goal.trim();
+
+ // Optimistic update (user bubble appears immediately)
+ const userMsg = { from: "user", role: "user", text, content: text };
+ setMessages((prev) => [...prev, userMsg]);
+
+ setLoadingPlan(true);
+ setStatus("");
+ setPlan(null);
+ setStreamingEvents([]);
+
+ // ------- Implicit session creation (Claude Code parity) -------
+ // Every chat must be backed by a session. If none exists yet,
+ // create one on-demand before sending the plan request.
+ let sid = sessionId;
+ if (!sid && typeof onEnsureSession === "function") {
+ // Derive a short title from the first message
+ const sessionName = text.length > 60 ? text.slice(0, 57) + "..." : text;
+
+ // Tell the sync useEffect to skip the reset that would otherwise
+ // wipe the optimistic user message when activeSessionId changes.
+ skipNextSyncRef.current = true;
+
+ sid = await onEnsureSession(sessionName, [userMsg]);
+ if (!sid) {
+ // Session creation failed β continue without session
+ skipNextSyncRef.current = false;
+ }
+ }
+
+ // Persist user message to backend session
+ persistMessage(sid, "user", text);
+
+ // Always use HTTP for plan generation (the original reliable flow).
+ // WebSocket is only used for real-time streaming feedback display.
+ const effectiveBranch = currentBranch || defaultBranch || "HEAD";
+
+ try {
+ const res = await fetch("/api/chat/plan", {
+ method: "POST",
+ headers: getHeaders(),
+ body: JSON.stringify({
+ repo_owner: repo.owner,
+ repo_name: repo.name,
+ goal: text,
+ branch_name: effectiveBranch,
+ }),
+ });
+
+ const data = await res.json();
+ if (!res.ok) throw new Error(data.detail || "Failed to generate plan");
+
+ setPlan(data);
+
+ // Extract summary from nested plan structure or top-level
+ const summary =
+ data.plan?.summary || data.summary || data.message ||
+ "Here is the proposed plan for your request.";
+
+ // Assistant response (Answer + Action Plan)
+ setMessages((prev) => [
+ ...prev,
+ {
+ from: "ai",
+ role: "assistant",
+ answer: summary,
+ content: summary,
+ plan: data,
+ },
+ ]);
+
+ // Persist assistant response to backend session
+ persistMessage(sid, "assistant", summary);
+
+ // Clear input only after success
+ setGoal("");
+ } catch (err) {
+ const msg = String(err?.message || err);
+ console.error(err);
+ setStatus(msg);
+ setMessages((prev) => [
+ ...prev,
+ { from: "ai", role: "system", content: `Error: ${msg}` },
+ ]);
+ } finally {
+ setLoadingPlan(false);
+ }
+ };
+
+ const execute = async () => {
+ if (!repo || !plan) return;
+
+ setExecuting(true);
+ setStatus("");
+
+ try {
+ // Guard: currentBranch might be missing if parent didn't pass it yet
+ const safeCurrent = currentBranch || defaultBranch || "HEAD";
+ const safeDefault = defaultBranch || "main";
+
+ // Sticky vs Hard Switch:
+ // - If on default branch -> undefined (backend creates new branch)
+ // - If already on AI branch -> currentBranch (backend updates existing)
+ const branch_name = safeCurrent === safeDefault ? undefined : safeCurrent;
+
+ const res = await fetch("/api/chat/execute", {
+ method: "POST",
+ headers: getHeaders(),
+ body: JSON.stringify({
+ repo_owner: repo.owner,
+ repo_name: repo.name,
+ plan,
+ branch_name,
+ }),
+ });
+
+ const data = await res.json();
+ if (!res.ok) throw new Error(data.detail || "Execution failed");
+
+ setStatus(data.message || "Execution completed.");
+
+ const completionMsg = {
+ from: "ai",
+ role: "assistant",
+ answer: data.message || "Execution completed.",
+ content: data.message || "Execution completed.",
+ executionLog: data.executionLog,
+ };
+
+ // Show completion immediately (keeps old "Execution Log" section)
+ setMessages((prev) => [...prev, completionMsg]);
+
+ // Clear active plan UI
+ setPlan(null);
+
+ // Pass completionMsg upward for seeding branch history
+ if (typeof onExecutionComplete === "function") {
+ onExecutionComplete({
+ branch: data.branch || data.branch_name,
+ mode: data.mode,
+ commit_url: data.commit_url || data.html_url,
+ message: data.message,
+ completionMsg,
+ sourceBranch: safeCurrent,
+ });
+ }
+ } catch (err) {
+ console.error(err);
+ setStatus(String(err?.message || err));
+ } finally {
+ setExecuting(false);
+ }
+ };
+
+ // ---------------------------------------------------------------------------
+ // RENDER
+ // ---------------------------------------------------------------------------
+ const isOnSessionBranch = currentBranch && currentBranch !== defaultBranch;
+
+ return (
+
+
+
+
+ {messages.map((m, idx) => {
+ // Success message (App.jsx injected)
+ if (m.isSuccess) {
+ return (
+
+ );
+ }
+
+ // User message
+ if (m.from === "user" || m.role === "user") {
+ return (
+
+ {m.text || m.content}
+
+ );
+ }
+
+ // Assistant message (Answer / Plan / Execution Log)
+ return (
+
+
+ {/* Diff stats indicator (Claude-Code-on-Web parity) */}
+ {m.diff && (
+
{
+ setDiffData(m.diff);
+ setShowDiffViewer(true);
+ }} />
+ )}
+
+ );
+ })}
+
+ {/* Streaming events (real-time agent output) */}
+ {streamingEvents.length > 0 && (
+
+
+
+ )}
+
+ {loadingPlan && streamingEvents.length === 0 && (
+
+ Thinking...
+
+ )}
+
+ {!messages.length && !plan && !loadingPlan && streamingEvents.length === 0 && (
+
+
π¬
+
Tell GitPilot what you want to do with this repository.
+
+ It will propose a safe step-by-step plan before any execution.
+
+
+ )}
+
+
+
+
+ {/* Diff stats bar (when agent has made changes) */}
+ {diffData && (
+
+ setShowDiffViewer(true)} />
+
+ )}
+
+
+ {/* Readiness blocker banner */}
+ {!canChat && chatBlocker && (
+
+ {chatBlocker.message || "Chat is not ready yet."}
+ {chatBlocker.cta && chatBlocker.onCta && (
+
+ {chatBlocker.cta}
+
+ )}
+
+ )}
+ {status && (
+
+ {status}
+
+ )}
+
+
+
+
+ {/* WebSocket connection indicator */}
+ {sessionId && (
+
+
+
+ {wsConnected ? "Live" : "Connecting..."}
+
+
+ )}
+
+
+ {/* Diff Viewer overlay */}
+ {showDiffViewer && (
+
setShowDiffViewer(false)}
+ />
+ )}
+
+ );
+}
diff --git a/frontend/components/ContextBar.jsx b/frontend/components/ContextBar.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..be13192a04dbea56e257044078db9e1398a44db8
--- /dev/null
+++ b/frontend/components/ContextBar.jsx
@@ -0,0 +1,156 @@
+import React, { useCallback, useRef, useState } from "react";
+import BranchPicker from "./BranchPicker.jsx";
+
+/**
+ * ContextBar β horizontal repo chip bar for multi-repo workspace context.
+ *
+ * Uses CSS classes for hover-reveal X (Claude-style: subtle by default,
+ * visible on chip hover, red on X hover). Each chip owns its own remove
+ * button β removing one repo never affects the others.
+ */
+export default function ContextBar({
+ contextRepos,
+ activeRepoKey,
+ repoStateByKey,
+ onActivate,
+ onRemove,
+ onAdd,
+ onBranchChange,
+ mode, // workspace mode: "github", "local-git", "folder" (optional)
+}) {
+ if (!contextRepos || contextRepos.length === 0) return null;
+
+ return (
+
+ {/* Workspace mode indicator */}
+ {mode && (
+
+ {mode === "github" ? "GH" : mode === "local-git" ? "Git" : "Dir"}
+
+ )}
+
+ {contextRepos.map((entry) => {
+ const isActive = entry.repoKey === activeRepoKey;
+ return (
+ onActivate(entry.repoKey)}
+ onRemove={() => onRemove(entry.repoKey)}
+ onBranchChange={(newBranch) =>
+ onBranchChange(entry.repoKey, newBranch)
+ }
+ />
+ );
+ })}
+
+
+
+
+
+
+
+
+
+
+ {contextRepos.length} {contextRepos.length === 1 ? "repo" : "repos"}
+
+
+ );
+}
+
+function RepoChip({ entry, isActive, repoState, onActivate, onRemove, onBranchChange }) {
+ const [branchOpen, setBranchOpen] = useState(false);
+ const [hovered, setHovered] = useState(false);
+ const branchBtnRef = useRef(null);
+ const repo = entry.repo;
+ const branch = repoState?.currentBranch || entry.branch || repo?.default_branch || "main";
+ const defaultBranch = repoState?.defaultBranch || repo?.default_branch || "main";
+ const sessionBranches = repoState?.sessionBranches || [];
+ const displayName = repo?.name || entry.repoKey?.split("/")[1] || entry.repoKey;
+
+ const handleChipClick = useCallback(
+ (e) => {
+ if (e.target.closest("[data-chip-action]")) return;
+ onActivate();
+ },
+ [onActivate]
+ );
+
+ return (
+ setHovered(true)}
+ onMouseLeave={() => setHovered(false)}
+ title={isActive ? `Active (write): ${entry.repoKey}` : `Click to activate ${entry.repoKey}`}
+ >
+ {/* Active indicator bar */}
+ {isActive &&
}
+
+ {/* Repo name */}
+
{displayName}
+
+ {/* Separator dot */}
+
+
+ {/* Branch name β single click opens GitHub branch list */}
+
{
+ e.stopPropagation();
+ setBranchOpen((v) => !v);
+ }}
+ >
+ {branch}
+
+
+ {/* Write badge for active repo */}
+ {isActive &&
write }
+
+ {/* Remove button: hidden by default, revealed on hover */}
+
{
+ e.stopPropagation();
+ onRemove();
+ }}
+ title={`Remove ${displayName} from context`}
+ >
+
+
+
+
+
+
+ {/* BranchPicker in external-anchor mode: dropdown opens immediately,
+ positioned from the branch button, fetches all branches from GitHub */}
+ {branchOpen && (
+
{
+ onBranchChange(newBranch);
+ setBranchOpen(false);
+ }}
+ onClose={() => setBranchOpen(false)}
+ />
+ )}
+
+ );
+}
diff --git a/frontend/components/CreatePRButton.jsx b/frontend/components/CreatePRButton.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..559eb9c277ceed34cb8a455eea1347189f52cf56
--- /dev/null
+++ b/frontend/components/CreatePRButton.jsx
@@ -0,0 +1,159 @@
+import React, { useState } from "react";
+
+/**
+ * CreatePRButton β Claude-Code-on-Web parity PR creation action.
+ *
+ * When clicked, pushes session changes to a new branch and opens a PR.
+ * Shows loading state and links to the created PR on GitHub.
+ */
+export default function CreatePRButton({
+ repo,
+ sessionId,
+ branch,
+ defaultBranch,
+ disabled,
+ onPRCreated,
+}) {
+ const [creating, setCreating] = useState(false);
+ const [prUrl, setPrUrl] = useState(null);
+ const [error, setError] = useState(null);
+
+ const handleCreate = async () => {
+ if (!repo || !branch || branch === defaultBranch) return;
+
+ setCreating(true);
+ setError(null);
+
+ try {
+ const token = localStorage.getItem("github_token");
+ const headers = {
+ "Content-Type": "application/json",
+ ...(token ? { Authorization: `Bearer ${token}` } : {}),
+ };
+
+ const owner = repo.full_name?.split("/")[0] || repo.owner;
+ const name = repo.full_name?.split("/")[1] || repo.name;
+
+ const res = await fetch(`/api/repos/${owner}/${name}/pulls`, {
+ method: "POST",
+ headers,
+ body: JSON.stringify({
+ title: `[GitPilot] Changes from session ${sessionId ? sessionId.slice(0, 8) : branch}`,
+ head: branch,
+ base: defaultBranch || "main",
+ body: [
+ "## Summary",
+ "",
+ `Changes created by GitPilot AI assistant on branch \`${branch}\`.`,
+ "",
+ sessionId ? `Session ID: \`${sessionId}\`` : "",
+ "",
+ "---",
+ "*This PR was generated by [GitPilot](https://github.com/ruslanmv/gitpilot).*",
+ ]
+ .filter(Boolean)
+ .join("\n"),
+ }),
+ });
+
+ const data = await res.json();
+ if (!res.ok) throw new Error(data.detail || "Failed to create PR");
+
+ const url = data.html_url || data.url;
+ setPrUrl(url);
+ onPRCreated?.({ pr_url: url, pr_number: data.number, branch });
+ } catch (err) {
+ setError(err.message);
+ } finally {
+ setCreating(false);
+ }
+ };
+
+ if (prUrl) {
+ return (
+
+
+
+
+
+
+
+ View PR on GitHub →
+
+ );
+ }
+
+ return (
+
+
+
+
+
+
+
+
+ {creating ? "Creating PR..." : "Create PR"}
+
+ {error && (
+
{error}
+ )}
+
+ );
+}
+
+const styles = {
+ btn: {
+ display: "flex",
+ alignItems: "center",
+ gap: 6,
+ height: 38,
+ padding: "0 14px",
+ borderRadius: 8,
+ border: "1px solid rgba(16, 185, 129, 0.3)",
+ background: "rgba(16, 185, 129, 0.08)",
+ color: "#10B981",
+ fontSize: 13,
+ fontWeight: 600,
+ cursor: "pointer",
+ whiteSpace: "nowrap",
+ transition: "background-color 0.15s",
+ },
+ prLink: {
+ display: "flex",
+ alignItems: "center",
+ gap: 6,
+ height: 38,
+ padding: "0 14px",
+ borderRadius: 8,
+ background: "rgba(16, 185, 129, 0.10)",
+ color: "#10B981",
+ fontSize: 13,
+ fontWeight: 600,
+ textDecoration: "none",
+ whiteSpace: "nowrap",
+ },
+ error: {
+ fontSize: 11,
+ color: "#EF4444",
+ marginTop: 4,
+ },
+};
diff --git a/frontend/components/DiffStats.jsx b/frontend/components/DiffStats.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..2460e6ef496ccc5e95b2159c698afda556a99734
--- /dev/null
+++ b/frontend/components/DiffStats.jsx
@@ -0,0 +1,59 @@
+import React from "react";
+
+/**
+ * DiffStats β Claude-Code-on-Web parity inline diff indicator.
+ *
+ * Clickable "+N -N in M files" badge that appears in agent messages.
+ * Clicking opens the DiffViewer overlay.
+ */
+export default function DiffStats({ diff, onClick }) {
+ if (!diff || (!diff.additions && !diff.deletions && !diff.files_changed)) {
+ return null;
+ }
+
+ return (
+
+
+
+
+
+ +{diff.additions || 0}
+ -{diff.deletions || 0}
+
+ in {diff.files_changed || (diff.files || []).length} file{(diff.files_changed || (diff.files || []).length) !== 1 ? "s" : ""}
+
+
+
+
+
+ );
+}
+
+const styles = {
+ container: {
+ display: "inline-flex",
+ alignItems: "center",
+ gap: 6,
+ padding: "5px 10px",
+ borderRadius: 6,
+ border: "1px solid #27272A",
+ backgroundColor: "rgba(24, 24, 27, 0.8)",
+ cursor: "pointer",
+ fontSize: 12,
+ fontFamily: "monospace",
+ color: "#A1A1AA",
+ transition: "border-color 0.15s, background-color 0.15s",
+ marginTop: 8,
+ },
+ additions: {
+ color: "#10B981",
+ fontWeight: 600,
+ },
+ deletions: {
+ color: "#EF4444",
+ fontWeight: 600,
+ },
+ files: {
+ color: "#71717A",
+ },
+};
diff --git a/frontend/components/DiffViewer.jsx b/frontend/components/DiffViewer.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..b1a05fbc9d3f4c52b40ab4cd075d42db99522663
--- /dev/null
+++ b/frontend/components/DiffViewer.jsx
@@ -0,0 +1,263 @@
+import React, { useState } from "react";
+
+/**
+ * DiffViewer β Claude-Code-on-Web parity diff overlay.
+ *
+ * Shows a file list on the left and unified diff on the right.
+ * Green = additions, red = deletions. Additive component.
+ */
+export default function DiffViewer({ diff, onClose }) {
+ const [selectedFile, setSelectedFile] = useState(0);
+
+ if (!diff || !diff.files || diff.files.length === 0) {
+ return (
+
+
+
+ Diff Viewer
+
+ ×
+
+
+
No changes to display.
+
+
+ );
+ }
+
+ const files = diff.files || [];
+ const currentFile = files[selectedFile] || files[0];
+
+ return (
+
+
+ {/* Header */}
+
+
+ Diff Viewer
+
+ +{diff.additions || 0}
+ {" "}
+ -{diff.deletions || 0}
+ {" in "}
+ {diff.files_changed || files.length} files
+
+
+
+ ×
+
+
+
+ {/* Body */}
+
+ {/* File list */}
+
+ {files.map((f, idx) => (
+
setSelectedFile(idx)}
+ >
+ {f.path}
+
+ +{f.additions || 0}
+ {" "}
+ -{f.deletions || 0}
+
+
+ ))}
+
+
+ {/* Diff content */}
+
+
{currentFile.path}
+
+ {(currentFile.hunks || []).map((hunk, hi) => (
+
+
{hunk.header || `@@ hunk ${hi + 1} @@`}
+ {(hunk.lines || []).map((line, li) => {
+ let bg = "transparent";
+ let color = "#D4D4D8";
+ if (line.startsWith("+")) {
+ bg = "rgba(16, 185, 129, 0.10)";
+ color = "#6EE7B7";
+ } else if (line.startsWith("-")) {
+ bg = "rgba(239, 68, 68, 0.10)";
+ color = "#FCA5A5";
+ }
+ return (
+
+ {line}
+
+ );
+ })}
+
+ ))}
+
+ {(!currentFile.hunks || currentFile.hunks.length === 0) && (
+
+ Diff content will appear here when the agent modifies files.
+
+ )}
+
+
+
+
+
+ );
+}
+
+const styles = {
+ overlay: {
+ position: "fixed",
+ top: 0,
+ left: 0,
+ right: 0,
+ bottom: 0,
+ backgroundColor: "rgba(0, 0, 0, 0.7)",
+ zIndex: 200,
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "center",
+ },
+ panel: {
+ width: "90vw",
+ maxWidth: 1100,
+ height: "80vh",
+ backgroundColor: "#131316",
+ border: "1px solid #27272A",
+ borderRadius: 12,
+ display: "flex",
+ flexDirection: "column",
+ overflow: "hidden",
+ },
+ header: {
+ display: "flex",
+ justifyContent: "space-between",
+ alignItems: "center",
+ padding: "12px 16px",
+ borderBottom: "1px solid #27272A",
+ backgroundColor: "#18181B",
+ },
+ headerLeft: {
+ display: "flex",
+ alignItems: "center",
+ gap: 12,
+ },
+ headerTitle: {
+ fontSize: 14,
+ fontWeight: 600,
+ color: "#E4E4E7",
+ },
+ statBadge: {
+ fontSize: 12,
+ color: "#A1A1AA",
+ },
+ closeBtn: {
+ width: 28,
+ height: 28,
+ borderRadius: 6,
+ border: "1px solid #3F3F46",
+ background: "transparent",
+ color: "#A1A1AA",
+ fontSize: 18,
+ cursor: "pointer",
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "center",
+ },
+ body: {
+ flex: 1,
+ display: "flex",
+ overflow: "hidden",
+ },
+ fileList: {
+ width: 240,
+ borderRight: "1px solid #27272A",
+ overflowY: "auto",
+ flexShrink: 0,
+ },
+ fileItem: {
+ padding: "8px 10px",
+ cursor: "pointer",
+ borderBottom: "1px solid rgba(39, 39, 42, 0.5)",
+ transition: "background-color 0.1s",
+ },
+ fileName: {
+ display: "block",
+ fontSize: 12,
+ fontFamily: "monospace",
+ color: "#E4E4E7",
+ whiteSpace: "nowrap",
+ overflow: "hidden",
+ textOverflow: "ellipsis",
+ },
+ fileStats: {
+ display: "block",
+ fontSize: 10,
+ marginTop: 2,
+ },
+ diffContent: {
+ flex: 1,
+ overflow: "auto",
+ display: "flex",
+ flexDirection: "column",
+ },
+ diffPath: {
+ padding: "8px 12px",
+ fontSize: 12,
+ fontFamily: "monospace",
+ color: "#A1A1AA",
+ borderBottom: "1px solid #27272A",
+ backgroundColor: "#18181B",
+ position: "sticky",
+ top: 0,
+ zIndex: 1,
+ },
+ diffCode: {
+ padding: "4px 0",
+ fontFamily: "monospace",
+ fontSize: 12,
+ lineHeight: 1.6,
+ },
+ hunkHeader: {
+ padding: "4px 12px",
+ color: "#6B7280",
+ backgroundColor: "rgba(59, 130, 246, 0.05)",
+ fontSize: 11,
+ fontStyle: "italic",
+ },
+ diffLine: {
+ padding: "0 12px",
+ whiteSpace: "pre",
+ },
+ diffPlaceholder: {
+ padding: 20,
+ textAlign: "center",
+ color: "#52525B",
+ fontSize: 13,
+ },
+ emptyState: {
+ flex: 1,
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "center",
+ color: "#52525B",
+ fontSize: 14,
+ },
+};
diff --git a/frontend/components/EnvironmentEditor.jsx b/frontend/components/EnvironmentEditor.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..eb0740eebff0280f3155f478ac7a2f437f251681
--- /dev/null
+++ b/frontend/components/EnvironmentEditor.jsx
@@ -0,0 +1,278 @@
+import React, { useState } from "react";
+import { createPortal } from "react-dom";
+
+/**
+ * EnvironmentEditor β Claude-Code-on-Web parity environment config modal.
+ *
+ * Allows setting name, network access level, and environment variables.
+ */
+export default function EnvironmentEditor({ environment, onSave, onDelete, onClose }) {
+ const [name, setName] = useState(environment?.name || "");
+ const [networkAccess, setNetworkAccess] = useState(environment?.network_access || "limited");
+ const [envVarsText, setEnvVarsText] = useState(
+ environment?.env_vars
+ ? Object.entries(environment.env_vars)
+ .map(([k, v]) => `${k}=${v}`)
+ .join("\n")
+ : ""
+ );
+
+ const handleSave = () => {
+ const envVars = {};
+ envVarsText
+ .split("\n")
+ .map((line) => line.trim())
+ .filter((line) => line && line.includes("="))
+ .forEach((line) => {
+ const idx = line.indexOf("=");
+ const key = line.slice(0, idx).trim();
+ const val = line.slice(idx + 1).trim();
+ if (key) envVars[key] = val;
+ });
+
+ onSave({
+ id: environment?.id || null,
+ name: name.trim() || "Default",
+ network_access: networkAccess,
+ env_vars: envVars,
+ });
+ };
+
+ return createPortal(
+ { if (e.target === e.currentTarget) onClose(); }}>
+
e.stopPropagation()}>
+
+
+ {environment?.id ? "Edit Environment" : "New Environment"}
+
+
+ ×
+
+
+
+
+ {/* Name */}
+
Environment Name
+
setName(e.target.value)}
+ placeholder="e.g. Development, Staging, Production"
+ style={styles.input}
+ />
+
+ {/* Network Access */}
+
Network Access
+
+ {[
+ { value: "limited", label: "Limited", desc: "Allowlisted domains only (package managers, APIs)" },
+ { value: "full", label: "Full", desc: "Unrestricted internet access" },
+ { value: "none", label: "None", desc: "Air-gapped β no external network" },
+ ].map((opt) => (
+
+ setNetworkAccess(e.target.value)}
+ style={{ display: "none" }}
+ />
+
+
+ {opt.label}
+
+
+ {opt.desc}
+
+
+
+ ))}
+
+
+ {/* Environment Variables */}
+
Environment Variables
+
+
+
+ {onDelete && (
+
+ Delete
+
+ )}
+
+
+ Cancel
+
+
+ Save
+
+
+
+
,
+ document.body
+ );
+}
+
+const styles = {
+ overlay: {
+ position: "fixed",
+ top: 0,
+ left: 0,
+ right: 0,
+ bottom: 0,
+ backgroundColor: "rgba(0, 0, 0, 0.6)",
+ zIndex: 10000,
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "center",
+ },
+ modal: {
+ width: 480,
+ maxHeight: "80vh",
+ backgroundColor: "#131316",
+ border: "1px solid #27272A",
+ borderRadius: 12,
+ display: "flex",
+ flexDirection: "column",
+ overflow: "hidden",
+ },
+ header: {
+ display: "flex",
+ justifyContent: "space-between",
+ alignItems: "center",
+ padding: "14px 16px",
+ borderBottom: "1px solid #27272A",
+ backgroundColor: "#18181B",
+ },
+ headerTitle: {
+ fontSize: 14,
+ fontWeight: 600,
+ color: "#E4E4E7",
+ },
+ closeBtn: {
+ width: 26,
+ height: 26,
+ borderRadius: 6,
+ border: "1px solid #3F3F46",
+ background: "transparent",
+ color: "#A1A1AA",
+ fontSize: 16,
+ cursor: "pointer",
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "center",
+ },
+ body: {
+ padding: "16px",
+ overflowY: "auto",
+ flex: 1,
+ },
+ label: {
+ display: "block",
+ fontSize: 12,
+ fontWeight: 600,
+ color: "#A1A1AA",
+ marginBottom: 6,
+ marginTop: 14,
+ },
+ input: {
+ width: "100%",
+ padding: "8px 10px",
+ borderRadius: 6,
+ border: "1px solid #3F3F46",
+ background: "#18181B",
+ color: "#E4E4E7",
+ fontSize: 13,
+ outline: "none",
+ boxSizing: "border-box",
+ },
+ radioGroup: {
+ display: "flex",
+ flexDirection: "column",
+ gap: 6,
+ },
+ radioItem: {
+ display: "flex",
+ alignItems: "flex-start",
+ gap: 10,
+ padding: "8px 10px",
+ borderRadius: 6,
+ border: "1px solid #27272A",
+ cursor: "pointer",
+ transition: "border-color 0.15s, background-color 0.15s",
+ },
+ textarea: {
+ width: "100%",
+ padding: "8px 10px",
+ borderRadius: 6,
+ border: "1px solid #3F3F46",
+ background: "#18181B",
+ color: "#E4E4E7",
+ fontSize: 12,
+ fontFamily: "monospace",
+ outline: "none",
+ resize: "vertical",
+ boxSizing: "border-box",
+ },
+ footer: {
+ display: "flex",
+ alignItems: "center",
+ gap: 8,
+ padding: "12px 16px",
+ borderTop: "1px solid #27272A",
+ },
+ cancelBtn: {
+ padding: "6px 14px",
+ borderRadius: 6,
+ border: "1px solid #3F3F46",
+ background: "transparent",
+ color: "#A1A1AA",
+ fontSize: 12,
+ cursor: "pointer",
+ },
+ saveBtn: {
+ padding: "6px 14px",
+ borderRadius: 6,
+ border: "none",
+ background: "#3B82F6",
+ color: "#fff",
+ fontSize: 12,
+ fontWeight: 600,
+ cursor: "pointer",
+ },
+ deleteBtn: {
+ padding: "6px 14px",
+ borderRadius: 6,
+ border: "1px solid rgba(239, 68, 68, 0.3)",
+ background: "transparent",
+ color: "#EF4444",
+ fontSize: 12,
+ cursor: "pointer",
+ },
+};
diff --git a/frontend/components/EnvironmentSelector.jsx b/frontend/components/EnvironmentSelector.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..c54e475a0c5715cc34ee1523aab9a2dc53bbe889
--- /dev/null
+++ b/frontend/components/EnvironmentSelector.jsx
@@ -0,0 +1,199 @@
+import React, { useEffect, useState } from "react";
+import EnvironmentEditor from "./EnvironmentEditor.jsx";
+
+/**
+ * EnvironmentSelector β Claude-Code-on-Web parity environment dropdown.
+ *
+ * Shows current environment name + gear icon. Gear opens the editor modal.
+ * Fetches environments from /api/environments.
+ */
+export default function EnvironmentSelector({ activeEnvId, onEnvChange }) {
+ const [envs, setEnvs] = useState([]);
+ const [editorOpen, setEditorOpen] = useState(false);
+ const [editingEnv, setEditingEnv] = useState(null);
+
  // Load the environment list from the backend into local state.
  // Best-effort: on a non-OK response the current list is kept unchanged,
  // and network failures are only logged — the selector must never crash
  // the sidebar just because the API is briefly unavailable.
  const fetchEnvs = async () => {
    try {
      const res = await fetch("/api/environments", { cache: "no-cache" });
      if (!res.ok) return; // server-side error: keep whatever we already have
      const data = await res.json();
      setEnvs(data.environments || []);
    } catch (err) {
      console.warn("Failed to fetch environments:", err);
    }
  };
+
+ useEffect(() => {
+ fetchEnvs();
+ }, []);
+
+ const activeEnv =
+ envs.find((e) => e.id === activeEnvId) || envs[0] || { name: "Default", id: "default" };
+
+ const handleSave = async (config) => {
+ try {
+ const method = config.id ? "PUT" : "POST";
+ const url = config.id ? `/api/environments/${config.id}` : "/api/environments";
+ await fetch(url, {
+ method,
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify(config),
+ });
+ await fetchEnvs();
+ setEditorOpen(false);
+ setEditingEnv(null);
+ } catch (err) {
+ console.warn("Failed to save environment:", err);
+ }
+ };
+
+ const handleDelete = async (envId) => {
+ try {
+ await fetch(`/api/environments/${envId}`, { method: "DELETE" });
+ await fetchEnvs();
+ if (activeEnvId === envId) {
+ onEnvChange?.(null);
+ }
+ } catch (err) {
+ console.warn("Failed to delete environment:", err);
+ }
+ };
+
+ return (
+
+
ENVIRONMENT
+
+
+ {/* Env selector */}
+ onEnvChange?.(e.target.value)}
+ style={styles.select}
+ >
+ {envs.map((env) => (
+
+ {env.name}
+
+ ))}
+
+
+ {/* Network badge */}
+
+ {activeEnv.network_access || "limited"}
+
+
+
+ {/* Gear icon */}
+
{
+ setEditingEnv(activeEnv);
+ setEditorOpen(true);
+ }}
+ title="Configure environment"
+ >
+
+
+
+
+
+
+ {/* Add new */}
+
{
+ setEditingEnv(null);
+ setEditorOpen(true);
+ }}
+ title="Add environment"
+ >
+ +
+
+
+
+ {/* Editor modal */}
+ {editorOpen && (
+
handleDelete(editingEnv.id) : null}
+ onClose={() => {
+ setEditorOpen(false);
+ setEditingEnv(null);
+ }}
+ />
+ )}
+
+ );
+}
+
+const styles = {
+ container: {
+ padding: "10px 14px",
+ },
+ label: {
+ fontSize: 10,
+ fontWeight: 700,
+ letterSpacing: "0.08em",
+ color: "#71717A",
+ textTransform: "uppercase",
+ marginBottom: 6,
+ },
+ row: {
+ display: "flex",
+ alignItems: "center",
+ gap: 6,
+ },
+ envCard: {
+ flex: 1,
+ display: "flex",
+ alignItems: "center",
+ gap: 8,
+ padding: "4px 8px",
+ borderRadius: 6,
+ border: "1px solid #27272A",
+ backgroundColor: "#18181B",
+ minWidth: 0,
+ },
+ select: {
+ flex: 1,
+ background: "transparent",
+ border: "none",
+ color: "#E4E4E7",
+ fontSize: 12,
+ fontWeight: 500,
+ outline: "none",
+ cursor: "pointer",
+ minWidth: 0,
+ },
+ networkBadge: {
+ fontSize: 9,
+ fontWeight: 600,
+ textTransform: "uppercase",
+ letterSpacing: "0.04em",
+ flexShrink: 0,
+ },
+ gearBtn: {
+ width: 28,
+ height: 28,
+ borderRadius: 6,
+ border: "1px solid #27272A",
+ background: "transparent",
+ color: "#71717A",
+ cursor: "pointer",
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "center",
+ fontSize: 14,
+ flexShrink: 0,
+ },
+};
diff --git a/frontend/components/FileTree.jsx b/frontend/components/FileTree.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..74352c434ba2a7a26ec6c61c63b32be21504c04a
--- /dev/null
+++ b/frontend/components/FileTree.jsx
@@ -0,0 +1,307 @@
+import React, { useState, useEffect } from "react";
+
+/**
+ * Simple recursive file tree viewer with refresh support
+ * Fetches tree data directly using the API.
+ */
+export default function FileTree({ repo, refreshTrigger, branch }) {
+ const [tree, setTree] = useState([]);
+ const [loading, setLoading] = useState(false);
+ const [isSwitchingBranch, setIsSwitchingBranch] = useState(false);
+ const [error, setError] = useState(null);
+ const [localRefresh, setLocalRefresh] = useState(0);
+
+ useEffect(() => {
+ if (!repo) return;
+
+ // Determine if this is a branch switch (we already have data)
+ const hasExistingData = tree.length > 0;
+ if (hasExistingData) {
+ setIsSwitchingBranch(true);
+ } else {
+ setLoading(true);
+ }
+ setError(null);
+
+ // Construct headers manually
+ let headers = {};
+ try {
+ const token = localStorage.getItem("github_token");
+ if (token) {
+ headers = { Authorization: `Bearer ${token}` };
+ }
+ } catch (e) {
+ console.warn("Unable to read github_token", e);
+ }
+
+ // Add cache busting + selected branch ref
+ const refParam = branch ? `&ref=${encodeURIComponent(branch)}` : "";
+ const cacheBuster = `?_t=${Date.now()}${refParam}`;
+
+ let cancelled = false;
+
+ fetch(`/api/repos/${repo.owner}/${repo.name}/tree${cacheBuster}`, { headers })
+ .then(async (res) => {
+ if (!res.ok) {
+ const errData = await res.json().catch(() => ({}));
+ throw new Error(errData.detail || "Failed to load files");
+ }
+ return res.json();
+ })
+ .then((data) => {
+ if (cancelled) return;
+ if (data.files && Array.isArray(data.files)) {
+ setTree(buildTree(data.files));
+ setError(null);
+ } else {
+ setError("No files found in repository");
+ }
+ })
+ .catch((err) => {
+ if (cancelled) return;
+ setError(err.message);
+ console.error("FileTree error:", err);
+ })
+ .finally(() => {
+ if (cancelled) return;
+ setIsSwitchingBranch(false);
+ setLoading(false);
+ });
+
+ return () => { cancelled = true; };
+ }, [repo, branch, refreshTrigger, localRefresh]); // eslint-disable-line react-hooks/exhaustive-deps
+
+ const handleRefresh = () => {
+ setLocalRefresh(prev => prev + 1);
+ };
+
+ // Theme matching parent component
+ const theme = {
+ border: "#27272A",
+ textPrimary: "#EDEDED",
+ textSecondary: "#A1A1AA",
+ accent: "#D95C3D",
+ warningText: "#F59E0B",
+ warningBg: "rgba(245, 158, 11, 0.1)",
+ warningBorder: "rgba(245, 158, 11, 0.2)",
+ };
+
+ const styles = {
+ header: {
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "space-between",
+ padding: "8px 20px 8px 10px",
+ marginBottom: "8px",
+ borderBottom: `1px solid ${theme.border}`,
+ },
+ headerTitle: {
+ fontSize: "12px",
+ fontWeight: "600",
+ color: theme.textSecondary,
+ textTransform: "uppercase",
+ letterSpacing: "0.5px",
+ },
+ refreshButton: {
+ backgroundColor: "transparent",
+ border: `1px solid ${theme.border}`,
+ color: theme.textSecondary,
+ padding: "4px 8px",
+ borderRadius: "4px",
+ fontSize: "11px",
+ cursor: loading ? "not-allowed" : "pointer",
+ display: "flex",
+ alignItems: "center",
+ gap: "4px",
+ transition: "all 0.2s",
+ opacity: loading ? 0.5 : 1,
+ },
+ switchingBar: {
+ padding: "6px 20px",
+ fontSize: "11px",
+ color: theme.textSecondary,
+ backgroundColor: "rgba(59, 130, 246, 0.06)",
+ borderBottom: `1px solid ${theme.border}`,
+ },
+ loadingText: {
+ padding: "0 20px",
+ color: theme.textSecondary,
+ fontSize: "13px",
+ },
+ errorBox: {
+ padding: "12px 20px",
+ color: theme.warningText,
+ fontSize: "12px",
+ backgroundColor: theme.warningBg,
+ border: `1px solid ${theme.warningBorder}`,
+ borderRadius: "6px",
+ margin: "0 10px",
+ },
+ emptyText: {
+ padding: "0 20px",
+ color: theme.textSecondary,
+ fontSize: "13px",
+ },
+ treeContainer: {
+ fontSize: "13px",
+ color: theme.textSecondary,
+ padding: "0 10px 20px 10px",
+ },
+ };
+
+ return (
+
+ {/* Header with Refresh Button */}
+
+
Files
+
{
+ if (!loading) {
+ e.currentTarget.style.backgroundColor = "rgba(255, 255, 255, 0.05)";
+ }
+ }}
+ onMouseOut={(e) => {
+ e.currentTarget.style.backgroundColor = "transparent";
+ }}
+ >
+
+
+
+ {loading ? "..." : "Refresh"}
+
+
+
+ {/* Branch switch indicator (shown above existing tree, doesn't clear it) */}
+ {isSwitchingBranch && (
+
Loading branch...
+ )}
+
+ {/* Content */}
+ {loading && tree.length === 0 && (
+
Loading files...
+ )}
+
+ {!loading && !isSwitchingBranch && error && (
+
{error}
+ )}
+
+ {!loading && !isSwitchingBranch && !error && tree.length === 0 && (
+
No files found
+ )}
+
+ {tree.length > 0 && (
+
+ {tree.map((node) => (
+
+ ))}
+
+ )}
+
+ );
+}
+
+// Recursive Node Component
+function TreeNode({ node, level }) {
+ const [expanded, setExpanded] = useState(false);
+ const isFolder = node.children && node.children.length > 0;
+
+ const icon = isFolder ? (expanded ? "π" : "π") : "π";
+
+ return (
+
+
isFolder && setExpanded(!expanded)}
+ style={{
+ padding: "4px 0",
+ paddingLeft: `${level * 12}px`,
+ cursor: isFolder ? "pointer" : "default",
+ display: "flex",
+ alignItems: "center",
+ gap: "6px",
+ color: isFolder ? "#EDEDED" : "#A1A1AA",
+ whiteSpace: "nowrap"
+ }}
+ >
+ {icon}
+ {node.name}
+
+
+ {isFolder && expanded && (
+
+ {node.children.map(child => (
+
+ ))}
+
+ )}
+
+ );
+}
+
+// Helper to build tree structure from flat file list
// Build a nested tree from a flat list of { path, type } file records.
//
// Each node is { name, path, type, children }. Intermediate path segments
// become "tree" nodes; the final segment inherits the record's own type.
// The result is sorted recursively: folders (nodes with children) first,
// then alphabetically by name within each group.
function buildTree(files) {
  const root = [];

  for (const file of files) {
    const segments = file.path.split("/");
    let siblings = root;

    segments.forEach((segment, depth) => {
      const isLeaf = depth === segments.length - 1;

      // Reuse an existing node at this level so shared prefixes merge.
      let node = siblings.find((candidate) => candidate.name === segment);
      if (!node) {
        node = {
          name: segment,
          path: segments.slice(0, depth + 1).join("/"),
          type: isLeaf ? file.type : "tree",
          children: [],
        };
        siblings.push(node);
      }

      if (!isLeaf) {
        siblings = node.children;
      }
    });
  }

  // Depth-first sort: folders before files, then locale-aware by name.
  const sortRecursive = (nodes) => {
    nodes.sort((left, right) => {
      const leftIsFolder = left.children.length > 0;
      const rightIsFolder = right.children.length > 0;
      if (leftIsFolder && !rightIsFolder) return -1;
      if (!leftIsFolder && rightIsFolder) return 1;
      return left.name.localeCompare(right.name);
    });
    for (const node of nodes) {
      if (node.children.length > 0) {
        sortRecursive(node.children);
      }
    }
  };

  sortRecursive(root);
  return root;
}
\ No newline at end of file
diff --git a/frontend/components/FlowViewer.jsx b/frontend/components/FlowViewer.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..71ef1e0b71603c85dd38ed37b2e930ebf6405bde
--- /dev/null
+++ b/frontend/components/FlowViewer.jsx
@@ -0,0 +1,659 @@
+import React, { useEffect, useState, useCallback, useRef } from "react";
+import ReactFlow, { Background, Controls, MiniMap } from "reactflow";
+import "reactflow/dist/style.css";
+
+/* ------------------------------------------------------------------ */
+/* Node type β colour mapping */
+/* ------------------------------------------------------------------ */
// Node type → colour mapping for the flow graph. Each entry gives the
// node's border colour and background fill.
const NODE_COLOURS = {
  agent: { border: "#ff7a3c", bg: "#20141a" },
  router: { border: "#6c8cff", bg: "#141828" },
  tool: { border: "#3a3b4d", bg: "#141821" },
  tool_group: { border: "#3a3b4d", bg: "#141821" },
  user: { border: "#4caf88", bg: "#14211a" },
  output: { border: "#9c6cff", bg: "#1a1428" },
};
const DEFAULT_COLOUR = { border: "#3a3b4d", bg: "#141821" };

// Resolve the colour pair for a node type, falling back to DEFAULT_COLOUR
// for unknown types. Object.hasOwn guards against inherited keys: the old
// `NODE_COLOURS[type] ||` lookup returned Object.prototype members (e.g.
// type "constructor" yielded a function, not a colour object).
function colourFor(type) {
  return Object.hasOwn(NODE_COLOURS, type) ? NODE_COLOURS[type] : DEFAULT_COLOUR;
}
+
+const STYLE_COLOURS = {
+ single_task: "#6c8cff",
+ react_loop: "#ff7a3c",
+ crew_pipeline: "#4caf88",
+};
+
+const STYLE_LABELS = {
+ single_task: "Dispatch",
+ react_loop: "ReAct Loop",
+ crew_pipeline: "Pipeline",
+};
+
+/* ------------------------------------------------------------------ */
+/* TopologyCard β single clickable topology card */
+/* ------------------------------------------------------------------ */
+function TopologyCard({ topology, isActive, onClick }) {
+ const styleColor = STYLE_COLOURS[topology.execution_style] || "#9a9bb0";
+ const agentCount = topology.agents_used?.length || 0;
+
+ return (
+
+
+ {topology.icon}
+
+ {STYLE_LABELS[topology.execution_style] || topology.execution_style}
+
+
+
+ {topology.name}
+
+ {topology.description}
+
+ {agentCount} agent{agentCount !== 1 ? "s" : ""}
+
+
+ );
+}
+
+const cardStyles = {
+ card: {
+ display: "flex",
+ flexDirection: "column",
+ gap: 4,
+ padding: "10px 12px",
+ borderRadius: 8,
+ border: "1px solid #1e1f30",
+ cursor: "pointer",
+ textAlign: "left",
+ minWidth: 170,
+ maxWidth: 200,
+ flexShrink: 0,
+ transition: "border-color 0.2s, background-color 0.2s",
+ },
+ cardTop: {
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "space-between",
+ gap: 6,
+ },
+ icon: {
+ fontSize: 18,
+ },
+ styleBadge: {
+ fontSize: 9,
+ fontWeight: 700,
+ textTransform: "uppercase",
+ letterSpacing: "0.05em",
+ padding: "1px 6px",
+ borderRadius: 4,
+ border: "1px solid",
+ },
+ name: {
+ fontSize: 12,
+ fontWeight: 600,
+ lineHeight: 1.3,
+ },
+ desc: {
+ fontSize: 10,
+ color: "#71717A",
+ lineHeight: 1.3,
+ overflow: "hidden",
+ display: "-webkit-box",
+ WebkitLineClamp: 2,
+ WebkitBoxOrient: "vertical",
+ },
+ agentCount: {
+ fontSize: 9,
+ color: "#52525B",
+ fontWeight: 600,
+ marginTop: 2,
+ },
+};
+
+/* ------------------------------------------------------------------ */
+/* TopologyPanel β card grid grouped by category */
+/* ------------------------------------------------------------------ */
+function TopologyPanel({
+ topologies,
+ activeTopology,
+ autoMode,
+ autoResult,
+ onSelect,
+ onToggleAuto,
+}) {
+ const systems = topologies.filter((t) => t.category === "system");
+ const pipelines = topologies.filter((t) => t.category === "pipeline");
+
+ return (
+
+ {/* Auto-detect toggle */}
+
+
+
+
+
+
+ Auto
+
+ {autoMode && autoResult && (
+
+ Detected: {autoResult.icon} {autoResult.name}
+ {autoResult.confidence != null && (
+
+ {" "}({Math.round(autoResult.confidence * 100)}%)
+
+ )}
+
+ )}
+
+
+ {/* System architectures */}
+
+
System Architectures
+
+ {systems.map((t) => (
+ onSelect(t.id)}
+ />
+ ))}
+
+
+
+ {/* Task pipelines */}
+
+
Task Pipelines
+
+ {pipelines.map((t) => (
+ onSelect(t.id)}
+ />
+ ))}
+
+
+
+ );
+}
+
+const panelStyles = {
+ root: {
+ padding: "8px 16px 12px",
+ borderBottom: "1px solid #1e1f30",
+ backgroundColor: "#08090e",
+ },
+ autoRow: {
+ display: "flex",
+ alignItems: "center",
+ gap: 10,
+ marginBottom: 10,
+ },
+ autoBtn: {
+ display: "flex",
+ alignItems: "center",
+ gap: 5,
+ padding: "4px 10px",
+ borderRadius: 6,
+ border: "1px solid #27272A",
+ background: "transparent",
+ fontSize: 11,
+ fontWeight: 600,
+ cursor: "pointer",
+ transition: "border-color 0.15s, color 0.15s",
+ },
+ autoHint: {
+ fontSize: 11,
+ color: "#9a9bb0",
+ },
+ section: {
+ marginBottom: 8,
+ },
+ sectionLabel: {
+ fontSize: 9,
+ fontWeight: 700,
+ textTransform: "uppercase",
+ letterSpacing: "0.08em",
+ color: "#52525B",
+ marginBottom: 6,
+ },
+ cardRow: {
+ display: "flex",
+ gap: 8,
+ overflowX: "auto",
+ scrollbarWidth: "none",
+ paddingBottom: 2,
+ },
+};
+
+/* ------------------------------------------------------------------ */
+/* Main FlowViewer component */
+/* ------------------------------------------------------------------ */
+export default function FlowViewer() {
+ const [nodes, setNodes] = useState([]);
+ const [edges, setEdges] = useState([]);
+ const [loading, setLoading] = useState(false);
+ const [error, setError] = useState("");
+
+ // Topology state
+ const [topologies, setTopologies] = useState([]);
+ const [activeTopology, setActiveTopology] = useState(null);
+ const [topologyMeta, setTopologyMeta] = useState(null);
+
+ // Auto-detection state
+ const [autoMode, setAutoMode] = useState(false);
+ const [autoResult, setAutoResult] = useState(null);
+ const [autoTestMessage, setAutoTestMessage] = useState("");
+
+ const initialLoadDone = useRef(false);
+
+ /* ---------- Load topology list on mount ---------- */
+ useEffect(() => {
+ (async () => {
+ try {
+ const [topoRes, prefRes] = await Promise.all([
+ fetch("/api/flow/topologies"),
+ fetch("/api/settings/topology"),
+ ]);
+ if (topoRes.ok) {
+ const data = await topoRes.json();
+ setTopologies(data);
+ }
+ if (prefRes.ok) {
+ const { topology } = await prefRes.json();
+ if (topology) {
+ setActiveTopology(topology);
+ }
+ }
+ } catch (e) {
+ console.warn("Failed to load topologies:", e);
+ }
+ initialLoadDone.current = true;
+ })();
+ }, []);
+
+ /* ---------- Load graph when topology changes ---------- */
+ const loadGraph = useCallback(async (topologyId) => {
+ setLoading(true);
+ setError("");
+ try {
+ const url = topologyId
+ ? `/api/flow/current?topology=${encodeURIComponent(topologyId)}`
+ : "/api/flow/current";
+ const res = await fetch(url);
+ const data = await res.json();
+ if (!res.ok) throw new Error(data.error || "Failed to load flow");
+
+ // Track topology metadata from response
+ if (data.topology_id) {
+ setTopologyMeta({
+ id: data.topology_id,
+ name: data.topology_name,
+ icon: data.topology_icon,
+ description: data.topology_description,
+ execution_style: data.execution_style,
+ agents_used: topologies.find((t) => t.id === data.topology_id)?.agents_used || [],
+ });
+ }
+
+ // Build ReactFlow nodes
+ const RFnodes = data.nodes.map((n, i) => {
+ const nodeType = n.type || "default";
+ const colour = colourFor(nodeType);
+ const d = n.data || {};
+
+ const label = d.label || n.label || n.id;
+ const description = d.description || n.description || "";
+ const model = d.model;
+ const mode = d.mode;
+
+ const pos = n.position || {
+ x: 50 + (i % 3) * 250,
+ y: 50 + Math.floor(i / 3) * 180,
+ };
+
+ return {
+ id: n.id,
+ data: {
+ label: (
+
+
+ {label}
+
+ {model && (
+
+ {model}
+
+ )}
+ {mode && (
+
+ {mode}
+
+ )}
+
+ {description}
+
+
+ ),
+ },
+ position: pos,
+ type: "default",
+ style: {
+ borderRadius: 12,
+ padding: "12px 16px",
+ border: `2px solid ${colour.border}`,
+ background: colour.bg,
+ color: "#f5f5f7",
+ fontSize: 13,
+ minWidth: 180,
+ maxWidth: 220,
+ },
+ };
+ });
+
+ // Build ReactFlow edges
+ const RFedges = data.edges.map((e) => ({
+ id: e.id,
+ source: e.source,
+ target: e.target,
+ label: e.label,
+ animated: e.animated !== false,
+ style: { stroke: "#7a7b8e", strokeWidth: 2 },
+ labelStyle: { fill: "#c3c5dd", fontSize: 11, fontWeight: 500 },
+ labelBgStyle: { fill: "#101117", fillOpacity: 0.9 },
+ ...(e.type === "bidirectional" && {
+ markerEnd: { type: "arrowclosed", color: "#7a7b8e" },
+ markerStart: { type: "arrowclosed", color: "#7a7b8e" },
+ animated: false,
+ style: { stroke: "#555670", strokeWidth: 1.5, strokeDasharray: "5 5" },
+ }),
+ }));
+
+ setNodes(RFnodes);
+ setEdges(RFedges);
+ } catch (e) {
+ console.error(e);
+ setError(e.message);
+ } finally {
+ setLoading(false);
+ }
+ }, [topologies]);
+
+ // Load graph whenever activeTopology changes
+ useEffect(() => {
+ loadGraph(activeTopology);
+ }, [activeTopology, loadGraph]);
+
+ /* ---------- Topology selection handler ---------- */
  // Manually select a topology: update the active graph, turn off auto-detect
  // (an explicit choice overrides classification), and persist the preference
  // to the backend. The persist is best-effort / fire-and-forget — a failed
  // save is only logged and never blocks the UI switch, which has already
  // happened via setActiveTopology above.
  const handleTopologyChange = useCallback(
    async (newTopologyId) => {
      setActiveTopology(newTopologyId);
      setAutoMode(false); // Manual selection disables auto
      // Persist preference (fire-and-forget)
      try {
        await fetch("/api/settings/topology", {
          method: "POST",
          headers: { "Content-Type": "application/json" },
          body: JSON.stringify({ topology: newTopologyId }),
        });
      } catch (e) {
        console.warn("Failed to save topology preference:", e);
      }
    },
    [] // state setters are stable; no captured props — safe empty deps
  );
+
+ /* ---------- Auto-detection ---------- */
+ const handleToggleAuto = useCallback(() => {
+ setAutoMode((prev) => !prev);
+ if (!autoMode) {
+ setAutoResult(null);
+ }
+ }, [autoMode]);
+
  // Ask the backend to classify a free-text task message into a recommended
  // topology, then adopt that recommendation: store the enriched result
  // (name/icon looked up from the loaded topology list) and switch the
  // active topology to it. Best-effort: non-OK responses are silently
  // ignored and network errors only logged, so typing in the test box can
  // never break the viewer. Re-created whenever `topologies` changes so the
  // name/icon lookup always uses the current list.
  const handleAutoClassify = useCallback(
    async (message) => {
      if (!message.trim()) return; // nothing to classify
      try {
        const res = await fetch("/api/flow/classify", {
          method: "POST",
          headers: { "Content-Type": "application/json" },
          body: JSON.stringify({ message }),
        });
        if (!res.ok) return; // classification unavailable: keep current state
        const data = await res.json();
        const recommendedId = data.recommended_topology;
        // Enrich with display metadata; fall back to the raw id if the
        // recommended topology isn't in the locally loaded list.
        const topo = topologies.find((t) => t.id === recommendedId);
        setAutoResult({
          id: recommendedId,
          name: topo?.name || recommendedId,
          icon: topo?.icon || "",
          confidence: data.confidence,
          alternatives: data.alternatives || [],
        });
        setActiveTopology(recommendedId);
      } catch (e) {
        console.warn("Auto-classify failed:", e);
      }
    },
    [topologies]
  );
+
+ // Debounced auto-classify when test message changes
+ useEffect(() => {
+ if (!autoMode || !autoTestMessage.trim()) return;
+ const t = setTimeout(() => handleAutoClassify(autoTestMessage), 500);
+ return () => clearTimeout(t);
+ }, [autoTestMessage, autoMode, handleAutoClassify]);
+
+ /* ---------- Render ---------- */
+ const activeStyleColor = STYLE_COLOURS[topologyMeta?.execution_style] || "#9a9bb0";
+
+ return (
+
+ {/* Header */}
+
+
+
Agent Workflow
+
+ Visual view of the multi-agent system that GitPilot uses to
+ plan and apply changes to your repositories.
+
+
+
+ {topologyMeta && (
+
+ {topologyMeta.icon}
+ {topologyMeta.name}
+
+ {STYLE_LABELS[topologyMeta.execution_style] || topologyMeta.execution_style}
+
+ {topologyMeta.agents_used?.length || 0} agents
+
+ )}
+ {loading &&
Loading... }
+
+
+
+ {/* Topology selector panel */}
+ {topologies.length > 0 && (
+
+ )}
+
+ {/* Auto-detection test input (shown when auto mode is on) */}
+ {autoMode && (
+
+
+ Test auto-detection: type a task description to see which topology is recommended
+
+
setAutoTestMessage(e.target.value)}
+ style={autoInputStyles.input}
+ />
+ {autoResult && autoResult.alternatives?.length > 0 && (
+
+ Alternatives:
+ {autoResult.alternatives.slice(0, 3).map((alt) => {
+ const altTopo = topologies.find((t) => t.id === alt.id);
+ return (
+ handleTopologyChange(alt.id)}
+ >
+ {altTopo?.icon} {altTopo?.name || alt.id}
+
+ {alt.confidence != null ? ` ${Math.round(alt.confidence * 100)}%` : ""}
+
+
+ );
+ })}
+
+ )}
+
+ )}
+
+ {/* Description bar */}
+ {topologyMeta && topologyMeta.description && !autoMode && (
+
+ {topologyMeta.icon} {topologyMeta.description}
+
+ )}
+
+ {/* ReactFlow canvas */}
+
+ {error ? (
+
+ ) : (
+
+
+ {
+ const border = node.style?.border || "";
+ if (border.includes("#ff7a3c")) return "#ff7a3c";
+ if (border.includes("#6c8cff")) return "#6c8cff";
+ if (border.includes("#4caf88")) return "#4caf88";
+ if (border.includes("#9c6cff")) return "#9c6cff";
+ return "#3a3b4d";
+ }}
+ maskColor="rgba(0, 0, 0, 0.6)"
+ />
+
+
+ )}
+
+
+ );
+}
+
+const autoInputStyles = {
+ wrap: {
+ padding: "8px 16px 10px",
+ borderBottom: "1px solid #1e1f30",
+ backgroundColor: "#0c0d14",
+ },
+ label: {
+ fontSize: 10,
+ color: "#71717A",
+ marginBottom: 6,
+ },
+ input: {
+ width: "100%",
+ padding: "8px 12px",
+ borderRadius: 6,
+ border: "1px solid #27272A",
+ background: "#08090e",
+ color: "#e0e1f0",
+ fontSize: 12,
+ fontFamily: "monospace",
+ outline: "none",
+ boxSizing: "border-box",
+ },
+ altRow: {
+ display: "flex",
+ alignItems: "center",
+ gap: 6,
+ marginTop: 6,
+ flexWrap: "wrap",
+ },
+ altBtn: {
+ padding: "2px 8px",
+ borderRadius: 4,
+ border: "1px solid #27272A",
+ background: "transparent",
+ color: "#9a9bb0",
+ fontSize: 10,
+ cursor: "pointer",
+ },
+};
diff --git a/frontend/components/Footer.jsx b/frontend/components/Footer.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..71e8a0bca926c639c885bde541d6b3f7c9c10fbd
--- /dev/null
+++ b/frontend/components/Footer.jsx
@@ -0,0 +1,48 @@
+import React from "react";
+
+export default function Footer() {
+ return (
+
+ );
+}
\ No newline at end of file
diff --git a/frontend/components/LlmSettings.jsx b/frontend/components/LlmSettings.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..c8c63737c9b57cbebed597d73bca173265b98495
--- /dev/null
+++ b/frontend/components/LlmSettings.jsx
@@ -0,0 +1,775 @@
+import React, { useEffect, useState } from "react";
+import { testProvider } from "../utils/api";
+
// Supported backend ids, in the order they appear in the provider picker.
const PROVIDERS = ["ollabridge", "openai", "claude", "watsonx", "ollama"];

// Human-readable display name for each provider id.
const PROVIDER_LABELS = {
  ollabridge: "OllaBridge Cloud",
  openai: "OpenAI",
  claude: "Claude",
  watsonx: "Watsonx",
  ollama: "Ollama",
};

// OllaBridge authentication modes rendered as tabs.
// Icons are emoji written as UTF-16 surrogate-pair escapes.
const AUTH_MODES = [
  { id: "device", label: "Device Pairing", icon: "\uD83D\uDCF1" },
  { id: "apikey", label: "API Key", icon: "\uD83D\uDD11" },
  { id: "local", label: "Local Trust", icon: "\uD83C\uDFE0" },
];
+
+export default function LlmSettings() {
+ const [settings, setSettings] = useState(null);
+ const [saving, setSaving] = useState(false);
+ const [error, setError] = useState("");
+ const [savedMsg, setSavedMsg] = useState("");
+
+ const [modelsByProvider, setModelsByProvider] = useState({});
+ const [modelsError, setModelsError] = useState("");
+ const [loadingModelsFor, setLoadingModelsFor] = useState("");
+
+ const [testResult, setTestResult] = useState(null);
+ const [testing, setTesting] = useState(false);
+
+ // OllaBridge pairing state
+ const [authMode, setAuthMode] = useState("local");
+ const [pairCode, setPairCode] = useState("");
+ const [pairing, setPairing] = useState(false);
+ const [pairResult, setPairResult] = useState(null);
+
// On mount: fetch the persisted LLM settings from the backend once.
useEffect(() => {
  (async () => {
    try {
      const response = await fetch("/api/settings");
      const payload = await response.json();
      if (!response.ok) {
        throw new Error(payload.error || "Failed to load settings");
      }
      setSettings(payload);
    } catch (err) {
      console.error(err);
      setError(err.message);
    }
  })();
}, []);
+
// Immutably patch a single field inside one provider section of `settings`.
const updateField = (section, field, value) => {
  setSettings((prev) => {
    const patchedSection = { ...prev[section], [field]: value };
    return { ...prev, [section]: patchedSection };
  });
};
+
// Persist the full settings object; the server echoes back the saved state,
// which replaces local state so the UI reflects any server-side normalization.
const handleSave = async () => {
  setSaving(true);
  setError("");
  setSavedMsg("");
  try {
    const response = await fetch("/api/settings/llm", {
      method: "PUT",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(settings),
    });
    const payload = await response.json();
    if (!response.ok) {
      throw new Error(payload.error || "Failed to save settings");
    }
    setSettings(payload);
    setSavedMsg("Settings saved successfully!");
    // Auto-dismiss the confirmation after three seconds.
    setTimeout(() => setSavedMsg(""), 3000);
  } catch (err) {
    console.error(err);
    setError(err.message);
  } finally {
    setSaving(false);
  }
};
+
// Ask the backend to enumerate available models for `provider` and cache
// the list in modelsByProvider (empty list when the server returns none).
const loadModelsForProvider = async (provider) => {
  setModelsError("");
  setLoadingModelsFor(provider);
  try {
    const response = await fetch(`/api/settings/models?provider=${provider}`);
    const payload = await response.json();
    if (!response.ok || payload.error) {
      throw new Error(payload.error || "Failed to load models");
    }
    setModelsByProvider((cache) => ({
      ...cache,
      [provider]: payload.models || [],
    }));
  } catch (err) {
    console.error(err);
    setModelsError(err.message);
  } finally {
    setLoadingModelsFor("");
  }
};
+
// OllaBridge device pairing: exchange a short pairing code for a device
// token. On success the token is stored as the ollabridge api_key.
const handlePair = async () => {
  const code = pairCode.trim();
  if (!code) return;
  setPairing(true);
  setPairResult(null);
  try {
    const baseUrl =
      settings?.ollabridge?.base_url || "https://ruslanmv-ollabridge.hf.space";
    const response = await fetch("/api/ollabridge/pair", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ base_url: baseUrl, code }),
    });
    const payload = await response.json();
    if (payload.success) {
      setPairResult({ ok: true, message: "Paired successfully!" });
      if (payload.token) {
        updateField("ollabridge", "api_key", payload.token);
      }
    } else {
      setPairResult({ ok: false, message: payload.error || "Pairing failed" });
    }
  } catch (err) {
    setPairResult({ ok: false, message: err.message });
  } finally {
    setPairing(false);
  }
};
+
// Probe the currently selected provider using the values in `settings`.
// Builds a minimal config payload containing only the fields the backend
// needs for the active provider, then reports the result via testResult.
//
// The original implementation repeated one if/else branch per provider;
// the field lists are now table-driven so adding a provider cannot drift
// from the payload shape the backend expects.
const handleTestConnection = async () => {
  setTesting(true);
  setTestResult(null);

  // Settings keys forwarded per provider (order preserved from the
  // original per-branch payloads).
  const fieldsByProvider = {
    openai: ["api_key", "base_url", "model"],
    claude: ["api_key", "base_url", "model"],
    watsonx: ["api_key", "project_id", "base_url", "model_id"],
    ollama: ["base_url", "model"],
    ollabridge: ["base_url", "model", "api_key"],
  };

  try {
    const activeProvider = settings?.provider || "ollama";
    const config = { provider: activeProvider };

    // Attach the provider section only when it exists in settings,
    // matching the original guard on each branch.
    const fields = fieldsByProvider[activeProvider];
    const section = settings?.[activeProvider];
    if (fields && section) {
      config[activeProvider] = Object.fromEntries(
        fields.map((key) => [key, section[key]])
      );
    }

    const result = await testProvider(config);
    setTestResult(result);
  } catch (err) {
    setTestResult({ health: "error", warning: err.message || "Test failed" });
  } finally {
    setTesting(false);
  }
};
+
+ if (!settings) {
+ return (
+
+
Admin / LLM Settings
+
Loading current configuration\u2026
+
+ );
+ }
+
+ const { provider } = settings;
+ const availableModels = modelsByProvider[provider] || [];
+
+ return (
+
+
Admin / LLM Settings
+
+ Choose which LLM provider GitPilot should use for planning and agent
+ workflows. Provider settings are stored on the server.
+
+
+ {/* ACTIVE PROVIDER */}
+
+
Active provider
+
+ {PROVIDERS.map((p) => (
+
+ setSettings((prev) => ({ ...prev, provider: p }))
+ }
+ >
+ {PROVIDER_LABELS[p] || p}
+
+ ))}
+
+
+
+ {/* ============================================================ */}
+ {/* OLLABRIDGE CLOUD */}
+ {/* ============================================================ */}
+ {provider === "ollabridge" && (
+
+
OllaBridge Cloud Configuration
+
+ Connect to OllaBridge Cloud or any OllaBridge instance for LLM
+ inference. No API key required for public endpoints.
+
+
+ {/* AUTH MODE TABS */}
+
Authentication Mode
+
+ {AUTH_MODES.map((m) => (
+ setAuthMode(m.id)}
+ >
+ {m.icon}
+ {m.label}
+
+ ))}
+
+
+ {/* DEVICE PAIRING MODE */}
+ {authMode === "device" && (
+
+
+ Enter the pairing code from your OllaBridge console and
+ click Pair.
+
+
+ setPairCode(e.target.value.toUpperCase())}
+ onKeyDown={(e) => e.key === "Enter" && handlePair()}
+ />
+
+ {pairing ? (
+
+ ) : (
+ "\uD83D\uDD17"
+ )}{" "}
+ Pair
+
+
+ {pairResult && (
+
+ {pairResult.message}
+
+ )}
+
+ )}
+
+ {/* API KEY MODE */}
+ {authMode === "apikey" && (
+
+
+ Enter your OllaBridge API key or device token for authenticated
+ access.
+
+
API Key / Device Token
+
+ updateField("ollabridge", "api_key", e.target.value)
+ }
+ />
+
+ )}
+
+ {/* LOCAL TRUST MODE */}
+ {authMode === "local" && (
+
+
+ Connect to a local or trusted OllaBridge instance without
+ authentication. Ideal for local development or pre-configured
+ cloud endpoints.
+
+
+ )}
+
+ {/* BASE URL */}
+
+ Base URL
+
+
+ updateField("ollabridge", "base_url", e.target.value)
+ }
+ />
+
+ Default: https://ruslanmv-ollabridge.hf.space (free, no key
+ needed)
+
+
+ {/* MODEL */}
+
+ Model
+
+
+
+ updateField("ollabridge", "model", e.target.value)
+ }
+ style={{ flex: 1 }}
+ />
+ loadModelsForProvider("ollabridge")}
+ disabled={loadingModelsFor === "ollabridge"}
+ >
+ {loadingModelsFor === "ollabridge" ? (
+
+ ) : (
+ "\uD83D\uDD04"
+ )}{" "}
+ Fetch Models
+
+
+
+ {availableModels.length > 0 && (
+ <>
+
+ Available models
+
+
+ updateField("ollabridge", "model", e.target.value)
+ }
+ >
+ -- select a model --
+ {availableModels.map((m) => (
+
+ {m}
+
+ ))}
+
+ >
+ )}
+
+
+ Examples: qwen2.5:1.5b, llama3, mistral, codellama,
+ deepseek-coder
+
+
+ )}
+
+ {/* OPENAI */}
+ {provider === "openai" && (
+
+
OpenAI Configuration
+
+
API Key
+
updateField("openai", "api_key", e.target.value)}
+ />
+
+
+ Model
+
+
updateField("openai", "model", e.target.value)}
+ />
+
+
loadModelsForProvider("openai")}
+ disabled={loadingModelsFor === "openai"}
+ >
+ {loadingModelsFor === "openai"
+ ? "Loading models\u2026"
+ : "Load available models"}
+
+
+ {availableModels.length > 0 && (
+ <>
+
+ Choose from discovered models
+
+
+ updateField("openai", "model", e.target.value)
+ }
+ >
+ -- select a model --
+ {availableModels.map((m) => (
+
+ {m}
+
+ ))}
+
+ >
+ )}
+
+
+ Base URL (optional)
+
+
updateField("openai", "base_url", e.target.value)}
+ />
+
+ Examples: gpt-4o, gpt-4o-mini, gpt-4.1, gpt-4.1-mini
+
+
+ )}
+
+ {/* CLAUDE */}
+ {provider === "claude" && (
+
+
Claude Configuration
+
+
API Key
+
updateField("claude", "api_key", e.target.value)}
+ />
+
+
+ Model
+
+
updateField("claude", "model", e.target.value)}
+ />
+
+
loadModelsForProvider("claude")}
+ disabled={loadingModelsFor === "claude"}
+ >
+ {loadingModelsFor === "claude"
+ ? "Loading models\u2026"
+ : "Load available models"}
+
+
+ {availableModels.length > 0 && (
+ <>
+
+ Choose from discovered models
+
+
+ updateField("claude", "model", e.target.value)
+ }
+ >
+ -- select a model --
+ {availableModels.map((m) => (
+
+ {m}
+
+ ))}
+
+ >
+ )}
+
+
+ Base URL (optional)
+
+
updateField("claude", "base_url", e.target.value)}
+ />
+
+ Examples: claude-sonnet-4-5, claude-3.7-sonnet, claude-3-opus-20240229
+
+
+ )}
+
+ {/* WATSONX */}
+ {provider === "watsonx" && (
+
+
IBM watsonx.ai Configuration
+
+
API Key
+
updateField("watsonx", "api_key", e.target.value)}
+ />
+
+
+ Project ID
+
+
+ updateField("watsonx", "project_id", e.target.value)
+ }
+ />
+
+
+ Model ID
+
+
+ updateField("watsonx", "model_id", e.target.value)
+ }
+ />
+
+
loadModelsForProvider("watsonx")}
+ disabled={loadingModelsFor === "watsonx"}
+ >
+ {loadingModelsFor === "watsonx"
+ ? "Loading models\u2026"
+ : "Load available models"}
+
+
+ {availableModels.length > 0 && (
+ <>
+
+ Choose from discovered models
+
+
+ updateField("watsonx", "model_id", e.target.value)
+ }
+ >
+ -- select a model --
+ {availableModels.map((m) => (
+
+ {m}
+
+ ))}
+
+ >
+ )}
+
+
+ Base URL
+
+
+ updateField("watsonx", "base_url", e.target.value)
+ }
+ />
+
+ Examples: meta-llama/llama-3-3-70b-instruct, ibm/granite-13b-chat-v2
+
+
+ )}
+
+ {/* OLLAMA */}
+ {provider === "ollama" && (
+
+
Ollama Configuration
+
+
Base URL
+
updateField("ollama", "base_url", e.target.value)}
+ />
+
+
+ Model
+
+
updateField("ollama", "model", e.target.value)}
+ />
+
+
loadModelsForProvider("ollama")}
+ disabled={loadingModelsFor === "ollama"}
+ >
+ {loadingModelsFor === "ollama"
+ ? "Loading models\u2026"
+ : "Load available models"}
+
+
+ {availableModels.length > 0 && (
+ <>
+
+ Choose from discovered models
+
+
+ updateField("ollama", "model", e.target.value)
+ }
+ >
+ -- select a model --
+ {availableModels.map((m) => (
+
+ {m}
+
+ ))}
+
+ >
+ )}
+
+
+ Examples: llama3, mistral, codellama, phi3
+
+
+ )}
+
+ {modelsError && (
+
+ {modelsError}
+
+ )}
+
+
+
+ {testing ? "Testing..." : "Test Connection"}
+
+
+ {saving ? "Saving\u2026" : "Save settings"}
+
+ {savedMsg &&
{savedMsg} }
+ {error &&
{error} }
+ {testResult && (
+
+
+ {testResult.health === "ok" ? "\u2713 Connection successful" : "\u2717 Connection failed"}
+
+ {testResult.warning && (
+
+ {testResult.warning}
+
+ )}
+ {testResult.model && (
+
+ Model: {testResult.model}
+
+ )}
+
+ )}
+
+
+ );
+}
diff --git a/frontend/components/LoginPage.jsx b/frontend/components/LoginPage.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..59675b977c5d1d9224321ab2da3cdc86489c7048
--- /dev/null
+++ b/frontend/components/LoginPage.jsx
@@ -0,0 +1,535 @@
+// frontend/components/LoginPage.jsx
+import React, { useState, useEffect, useRef } from "react";
+import { apiUrl, safeFetchJSON } from "../utils/api.js";
+
+/**
+ * GitPilot β Enterprise Agentic Login
+ * Theme: "Claude Code" / Anthropic Enterprise (Dark + Warm Orange)
+ */
+
+export default function LoginPage({ onAuthenticated }) {
+ // Auth State
+ const [authProcessing, setAuthProcessing] = useState(false);
+ const [error, setError] = useState("");
+
+ // Mode State: 'loading' | 'web' (Has Secret) | 'device' (No Secret)
+ const [mode, setMode] = useState("loading");
+
+ // Device Flow State
+ const [deviceData, setDeviceData] = useState(null);
+ const pollTimer = useRef(null);
+ const stopPolling = useRef(false); // Flag to safely stop async polling
+
+ // Web Flow State
+ const [missingClientId, setMissingClientId] = useState(false);
+
+ // REF FIX: Prevents React StrictMode from running the auth exchange twice
+ const processingRef = useRef(false);
+
// 1. Initialization Effect
// Decides the login UI mode on mount:
//   A. If GitHub redirected back with ?code=..., finish the web OAuth flow.
//   B. Otherwise ask the server which flow it supports (web vs device).
useEffect(() => {
  const params = new URLSearchParams(window.location.search);
  const code = params.get("code");
  const state = params.get("state");

  // A. If returning from GitHub (Web Flow Callback)
  if (code) {
    // processingRef guards against React StrictMode invoking the effect
    // twice and exchanging the one-shot OAuth code twice.
    if (!processingRef.current) {
      processingRef.current = true;
      setMode("web"); // Implicitly web mode if we have a code
      consumeOAuthCallback(code, state);
    }
    // NOTE(review): this early return skips registering the cleanup below,
    // so no polling teardown exists on this path — presumably fine because
    // device polling never starts in web-callback mode; confirm.
    return;
  }

  // B. Otherwise, check Server Capabilities to decide UI Mode
  safeFetchJSON(apiUrl("/api/auth/status"))
    .then((data) => {
      setMode(data.mode === "web" ? "web" : "device");
    })
    .catch((err) => {
      // Device flow is the safe default: it needs no client secret.
      console.warn("Auth status check failed, defaulting to device flow:", err);
      setMode("device");
    });

  // Cleanup polling on unmount
  return () => {
    stopPolling.current = true;
    if (pollTimer.current) clearTimeout(pollTimer.current);
  };
  // eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
+
+ // ===========================================================================
+ // WEB FLOW LOGIC (Standard OAuth2)
+ // ===========================================================================
+
// Finish the web OAuth flow: trade the GitHub `code` for a session token.
// A state mismatch is logged but not fatal (the server re-validates).
async function consumeOAuthCallback(code, state) {
  const expectedState = sessionStorage.getItem("gitpilot_oauth_state");
  const stateMismatch = Boolean(state && expectedState && expectedState !== state);
  if (stateMismatch) {
    console.warn("OAuth state mismatch - proceeding with caution.");
  }

  setAuthProcessing(true);
  setError("");
  // Strip ?code=... from the address bar so a refresh cannot replay it.
  window.history.replaceState({}, document.title, window.location.pathname);

  try {
    const session = await safeFetchJSON(apiUrl("/api/auth/callback"), {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ code, state: state || "" }),
    });
    handleSuccess(session);
  } catch (err) {
    console.error("Login Error:", err);
    setError(err instanceof Error ? err.message : "Login failed.");
    setAuthProcessing(false);
  }
}
+
// Kick off the redirect-based OAuth flow: fetch the authorization URL,
// remember the CSRF state, and navigate the browser to GitHub.
async function handleSignInWithGitHub() {
  setError("");
  setMissingClientId(false);
  setAuthProcessing(true);

  try {
    const data = await safeFetchJSON(apiUrl("/api/auth/url"));
    if (data.state) {
      sessionStorage.setItem("gitpilot_oauth_state", data.state);
    }
    window.location.href = data.authorization_url;
  } catch (err) {
    console.error("Auth Start Error:", err);
    const text = err.message || "";
    // A 404/500 from the URL endpoint usually means the GitHub client id
    // was never configured — show the dedicated config-error screen.
    if (text.includes("404") || text.includes("500")) {
      setMissingClientId(true);
    } else {
      setError(err instanceof Error ? err.message : "Could not start sign-in.");
    }
    setAuthProcessing(false);
  }
}
+
+ // ===========================================================================
+ // DEVICE FLOW LOGIC (No Client Secret Required)
+ // ===========================================================================
+
// Begin the GitHub device flow: request a device/user code pair, show it,
// then start polling for the resulting access token.
const startDeviceFlow = async () => {
  setError("");
  setAuthProcessing(true);
  stopPolling.current = false; // allow a fresh polling loop

  try {
    const data = await safeFetchJSON(apiUrl("/api/auth/device/code"), {
      method: "POST",
    });

    if (data.error) {
      const looksDisabled =
        data.error.includes("400") || data.error.includes("Bad Request");
      if (looksDisabled) {
        throw new Error("Device Flow is disabled in GitHub. Please go to your GitHub App Settings > 'General' > 'Identifying and authorizing users' and check the box 'Enable Device Flow'.");
      }
      throw new Error(data.error);
    }

    if (!data.device_code) throw new Error("Invalid device code response");

    setDeviceData(data);
    setAuthProcessing(false);

    // Recursive-timeout polling; GitHub suggests the interval (default 5s).
    pollDeviceToken(data.device_code, data.interval || 5);
  } catch (err) {
    setError(err.message);
    setAuthProcessing(false);
  }
};
+
// Poll the backend for the device-flow token until success, a terminal
// error, or stopPolling is raised. Uses recursive setTimeout (not
// setInterval) so each wait begins only after the previous response.
const pollDeviceToken = async (deviceCode, interval) => {
  // Cancelled (unmount or user pressed Cancel) — silently stop.
  if (stopPolling.current) return;

  try {
    const response = await fetch(apiUrl("/api/auth/device/poll"), {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ device_code: deviceCode })
    });

    // 1. Success (200)
    if (response.status === 200) {
      const data = await response.json();
      handleSuccess(data);
      return;
    }

    // 2. Pending (202) -> Continue Polling
    if (response.status === 202) {
      // Schedule next poll; timer handle kept so cancel/unmount can clear it.
      pollTimer.current = setTimeout(
        () => pollDeviceToken(deviceCode, interval),
        interval * 1000
      );
      return;
    }

    // 3. Error (4xx/5xx) -> Stop Polling & Show Error
    const errData = await response.json().catch(() => ({ error: "Unknown polling error" }));

    // Special case: If it's just a 'slow_down' warning (sometimes 400), we just wait longer
    // (back off by 5 seconds, per GitHub device-flow semantics).
    if (errData.error === "slow_down") {
      pollTimer.current = setTimeout(
        () => pollDeviceToken(deviceCode, interval + 5),
        (interval + 5) * 1000
      );
      return;
    }

    // Terminal errors
    throw new Error(errData.error || `Polling failed: ${response.status}`);

  } catch (e) {
    console.error("Poll error:", e);
    // Only surface the error if the user hasn't already cancelled.
    if (!stopPolling.current) {
      setError(e.message || "Failed to connect to authentication server.");
      setDeviceData(null); // Return to initial state
    }
  }
};
+
// "Check Status" button: perform one immediate poll. Success completes
// login; a still-pending (202) response flashes feedback on the button.
const handleManualCheck = async () => {
  if (!deviceData?.device_code) return;

  try {
    const response = await fetch(apiUrl("/api/auth/device/poll"), {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ device_code: deviceData.device_code }),
    });

    if (response.status === 200) {
      handleSuccess(await response.json());
      return;
    }

    if (response.status !== 202) return;

    // Pending: show "Still Pending..." on the button for two seconds.
    // NOTE(review): direct DOM mutation bypasses React state — presumably
    // deliberate to avoid re-rendering the polling view; confirm.
    const btn = document.getElementById("manual-check-btn");
    if (!btn) return;
    const originalText = btn.innerText;
    btn.innerText = "Still Pending...";
    btn.disabled = true;
    setTimeout(() => {
      btn.innerText = originalText;
      btn.disabled = false;
    }, 2000);
  } catch (e) {
    console.error("Manual check failed", e);
  }
};
+
// Abort device-flow polling and reset the login card to its initial state.
const handleCancelDeviceFlow = () => {
  stopPolling.current = true;
  const pendingTimer = pollTimer.current;
  if (pendingTimer) clearTimeout(pendingTimer);
  setDeviceData(null);
  setError("");
};
+
+ // ===========================================================================
+ // SHARED HELPERS
+ // ===========================================================================
+
// Shared landing point for both auth flows: stop any polling, persist the
// session to localStorage (best effort), and notify the parent component.
function handleSuccess(data) {
  stopPolling.current = true; // no further polls once we have a session
  if (pollTimer.current) clearTimeout(pollTimer.current);

  const { access_token: accessToken, user } = data;
  if (!accessToken || !user) {
    setError("Server returned incomplete session data.");
    return;
  }

  try {
    localStorage.setItem("github_token", accessToken);
    localStorage.setItem("github_user", JSON.stringify(user));
  } catch (e) {
    // Private browsing / blocked storage: the in-memory session still works.
    console.warn("LocalStorage access denied:", e);
  }

  if (typeof onAuthenticated === "function") {
    onAuthenticated({ access_token: accessToken, user });
  }
}
+
// --- Design Token System ---
// Palette/typography tokens consumed by the `styles` object below.
const theme = {
  bg: "#131316",
  cardBg: "#1C1C1F",
  border: "#27272A",
  accent: "#D95C3D",       // warm orange primary
  accentHover: "#C44F32",
  textPrimary: "#EDEDED",
  textSecondary: "#A1A1AA",
  // NOTE(review): "SΓΆhne" appears mojibake'd — likely meant "Söhne";
  // confirm the file's encoding before changing the literal.
  font: '"SΓΆhne", "Inter", -apple-system, sans-serif',
};
+
// Inline style objects for the login card. Rebuilt each render, so the
// button styles below stay in sync with authProcessing / mode state.
const styles = {
  // Full-viewport centering wrapper.
  container: {
    minHeight: "100vh",
    display: "flex",
    alignItems: "center",
    justifyContent: "center",
    backgroundColor: theme.bg,
    fontFamily: theme.font,
    color: theme.textPrimary,
    letterSpacing: "-0.01em",
  },
  // The login card itself.
  card: {
    backgroundColor: theme.cardBg,
    width: "100%",
    maxWidth: "440px",
    borderRadius: "12px",
    border: `1px solid ${theme.border}`,
    boxShadow: "0 24px 48px -12px rgba(0, 0, 0, 0.6)",
    padding: "48px 40px",
    textAlign: "center",
    position: "relative",
  },
  // "GP" square logo badge at the top of the card.
  logoBadge: {
    width: "48px",
    height: "48px",
    backgroundColor: "rgba(217, 92, 61, 0.15)",
    color: theme.accent,
    borderRadius: "10px",
    display: "flex",
    alignItems: "center",
    justifyContent: "center",
    fontSize: "22px",
    fontWeight: "700",
    margin: "0 auto 32px auto",
    border: "1px solid rgba(217, 92, 61, 0.2)",
  },
  h1: {
    fontSize: "24px",
    fontWeight: "600",
    marginBottom: "12px",
    color: theme.textPrimary,
  },
  p: {
    fontSize: "14px",
    color: theme.textSecondary,
    lineHeight: "1.6",
    marginBottom: "40px",
  },
  // Primary CTA; disabled look while processing or before mode is known.
  button: {
    width: "100%",
    height: "48px",
    backgroundColor: theme.accent,
    color: "#FFFFFF",
    border: "none",
    borderRadius: "8px",
    fontSize: "14px",
    fontWeight: "500",
    cursor: (authProcessing || (mode === 'loading')) ? "not-allowed" : "pointer",
    opacity: (authProcessing || (mode === 'loading')) ? 0.7 : 1,
    transition: "background-color 0.2s ease",
    display: "flex",
    alignItems: "center",
    justifyContent: "center",
    gap: "10px",
    boxShadow: "0 4px 12px rgba(217, 92, 61, 0.25)",
  },
  // Low-emphasis button (Check Status / Cancel).
  secondaryButton: {
    backgroundColor: "transparent",
    color: "#A1A1AA",
    border: "1px solid #3F3F46",
    padding: "8px 16px",
    borderRadius: "6px",
    fontSize: "12px",
    cursor: "pointer",
    marginTop: "16px",
    minWidth: "100px"
  },
  errorBox: {
    backgroundColor: "rgba(185, 28, 28, 0.15)",
    border: "1px solid rgba(185, 28, 28, 0.3)",
    color: "#FCA5A5",
    padding: "12px",
    borderRadius: "8px",
    fontSize: "13px",
    marginBottom: "24px",
    textAlign: "left",
  },
  // Panel holding the device-flow instructions.
  configCard: {
    textAlign: "left",
    backgroundColor: "#111",
    border: "1px solid #333",
    padding: "24px",
    borderRadius: "8px",
    marginBottom: "24px",
  },
  // Click-to-copy pairing-code display.
  codeDisplay: {
    backgroundColor: "#27272A",
    color: theme.accent,
    fontSize: "20px",
    fontWeight: "700",
    padding: "12px",
    borderRadius: "6px",
    textAlign: "center",
    letterSpacing: "2px",
    margin: "12px 0",
    border: `1px dashed ${theme.accent}`,
    cursor: "pointer",
  },
  footer: {
    marginTop: "48px",
    fontSize: "12px",
    color: "#52525B",
  }
};
+
+ // --- RENDER: Device Flow UI ---
+ const renderDeviceFlow = () => {
+ if (!deviceData) {
+ return (
+ !authProcessing && (e.currentTarget.style.backgroundColor = theme.accentHover)}
+ onMouseOut={(e) => !authProcessing && (e.currentTarget.style.backgroundColor = theme.accent)}
+ >
+ {authProcessing ? "Connecting..." : "Sign in with GitHub"}
+
+ );
+ }
+
+ return (
+
+
Authorize Device
+
+ GitPilot needs authorization to access your repositories.
+
+
+
+
1. Copy code:
+
{
+ navigator.clipboard.writeText(deviceData.user_code);
+ }}
+ title="Click to copy"
+ >
+ {deviceData.user_code}
+
+
+
+
+
+
+ β»
+ Waiting for authorization...
+
+
+
+
+
+ Check Status
+
+
+ Cancel
+
+
+
+ );
+ };
+
+ // --- RENDER: Config Error ---
+ if (missingClientId) {
+ return (
+
+
+
β οΈ
+
Configuration Error
+
Could not connect to GitHub Authentication services.
+
setMissingClientId(false)} style={{...styles.button, backgroundColor: "#3F3F46"}}>Retry
+
+
+ );
+ }
+
+ // --- RENDER: Main ---
+ return (
+
+
+
GP
+
+
GitPilot Enterprise
+
+ Agentic AI workflow for your repositories.
+ Secure. Context-aware. Automated.
+
+
+ {error &&
{error}
}
+
+ {mode === "loading" && (
+
Initializing...
+ )}
+
+ {mode === "web" && (
+
!authProcessing && (e.currentTarget.style.backgroundColor = theme.accentHover)}
+ onMouseOut={(e) => !authProcessing && (e.currentTarget.style.backgroundColor = theme.accent)}
+ >
+ {authProcessing ? "Connecting..." : (
+ <>
+
+ Sign in with GitHub
+ >
+ )}
+
+ )}
+
+ {mode === "device" && renderDeviceFlow()}
+
+
+ © {new Date().getFullYear()} GitPilot Inc.
+
+
+
+ );
+}
\ No newline at end of file
diff --git a/frontend/components/PlanView.jsx b/frontend/components/PlanView.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..a67efb237c5204e69cd1cec98248e8702ade6e25
--- /dev/null
+++ b/frontend/components/PlanView.jsx
@@ -0,0 +1,231 @@
+import React from "react";
+
+export default function PlanView({ plan }) {
+ if (!plan) return null;
+
// Tally how many files each action type (CREATE/MODIFY/DELETE) touches
// across every step of the plan.
const totals = { CREATE: 0, MODIFY: 0, DELETE: 0 };
for (const step of plan.steps) {
  for (const file of step.files) {
    totals[file.action] = (totals[file.action] || 0) + 1;
  }
}
+
// Dark palette plus translucent status colors keyed to the three plan
// action types (success=CREATE, warning=MODIFY/risks, danger=DELETE).
const theme = {
  bg: "#18181B",
  border: "#27272A",
  textPrimary: "#EDEDED",
  textSecondary: "#A1A1AA",
  successBg: "rgba(16, 185, 129, 0.1)",
  successText: "#10B981",
  warningBg: "rgba(245, 158, 11, 0.1)",
  warningText: "#F59E0B",
  dangerBg: "rgba(239, 68, 68, 0.1)",
  dangerText: "#EF4444",
};
+
// Inline style objects for the plan view, derived from `theme` above.
const styles = {
  container: {
    display: "flex",
    flexDirection: "column",
    gap: "20px",
    fontFamily: '-apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif',
  },
  // Goal + summary block at the top.
  header: {
    display: "flex",
    flexDirection: "column",
    gap: "8px",
    paddingBottom: "16px",
    borderBottom: `1px solid ${theme.border}`,
  },
  goal: {
    fontSize: "14px",
    fontWeight: "600",
    color: theme.textPrimary,
  },
  summary: {
    fontSize: "13px",
    color: theme.textSecondary,
    lineHeight: "1.5",
  },
  // Row of per-action-type count badges.
  totals: {
    display: "flex",
    gap: "12px",
    flexWrap: "wrap",
  },
  // Base badge look; combined with one of the total* variants below.
  totalBadge: {
    fontSize: "11px",
    fontWeight: "500",
    padding: "4px 8px",
    borderRadius: "4px",
    border: "1px solid transparent",
  },
  totalCreate: {
    backgroundColor: theme.successBg,
    color: theme.successText,
    borderColor: "rgba(16, 185, 129, 0.2)",
  },
  totalModify: {
    backgroundColor: theme.warningBg,
    color: theme.warningText,
    borderColor: "rgba(245, 158, 11, 0.2)",
  },
  totalDelete: {
    backgroundColor: theme.dangerBg,
    color: theme.dangerText,
    borderColor: "rgba(239, 68, 68, 0.2)",
  },
  stepsList: {
    listStyle: "none",
    padding: 0,
    margin: 0,
    display: "flex",
    flexDirection: "column",
    gap: "24px",
  },
  step: {
    display: "flex",
    flexDirection: "column",
    gap: "8px",
    position: "relative",
  },
  stepHeader: {
    display: "flex",
    alignItems: "baseline",
    gap: "8px",
    fontSize: "13px",
    fontWeight: "600",
    color: theme.textPrimary,
  },
  stepNumber: {
    color: theme.textSecondary,
    fontSize: "11px",
    textTransform: "uppercase",
    letterSpacing: "0.05em",
  },
  stepDescription: {
    fontSize: "13px",
    color: theme.textSecondary,
    lineHeight: "1.5",
    margin: 0,
  },
  // Monospace list of touched files within a step.
  fileList: {
    marginTop: "8px",
    display: "flex",
    flexDirection: "column",
    gap: "4px",
    backgroundColor: "#131316",
    padding: "8px 12px",
    borderRadius: "6px",
    border: `1px solid ${theme.border}`,
  },
  fileItem: {
    display: "flex",
    alignItems: "center",
    gap: "10px",
    fontSize: "12px",
    fontFamily: "ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, monospace",
  },
  // Per-file CREATE/MODIFY/DELETE tag; colored via getActionStyle.
  actionBadge: {
    padding: "2px 6px",
    borderRadius: "4px",
    fontSize: "10px",
    fontWeight: "bold",
    textTransform: "uppercase",
    minWidth: "55px",
    textAlign: "center",
    letterSpacing: "0.02em",
  },
  path: {
    color: "#D4D4D8",
    whiteSpace: "nowrap",
    overflow: "hidden",
    textOverflow: "ellipsis",
  },
  // Warning strip shown when a step declares risks.
  risks: {
    marginTop: "8px",
    fontSize: "12px",
    color: theme.warningText,
    backgroundColor: "rgba(245, 158, 11, 0.05)",
    padding: "8px 12px",
    borderRadius: "6px",
    border: "1px solid rgba(245, 158, 11, 0.1)",
    display: "flex",
    gap: "6px",
    alignItems: "flex-start",
  },
};
+
// Map a plan action type to its badge color variant ({} for unknown types).
const getActionStyle = (action) => {
  const variantByAction = {
    CREATE: styles.totalCreate,
    MODIFY: styles.totalModify,
    DELETE: styles.totalDelete,
  };
  return variantByAction[action] ?? {};
};
+
+ return (
+
+ {/* Header & Summary */}
+
+
Goal: {plan.goal}
+
{plan.summary}
+
+
+ {/* Totals Summary */}
+
+ {totals.CREATE > 0 && (
+
+ {totals.CREATE} to create
+
+ )}
+ {totals.MODIFY > 0 && (
+
+ {totals.MODIFY} to modify
+
+ )}
+ {totals.DELETE > 0 && (
+
+ {totals.DELETE} to delete
+
+ )}
+
+
+ {/* Steps List */}
+
+ {plan.steps.map((s) => (
+
+
+ Step {s.step_number}
+ {s.title}
+
+ {s.description}
+
+ {/* Files List */}
+ {s.files && s.files.length > 0 && (
+
+ {s.files.map((file, idx) => (
+
+
+ {file.action}
+
+ {file.path}
+
+ ))}
+
+ )}
+
+ {/* Risks */}
+ {s.risks && (
+
+ β οΈ
+ {s.risks}
+
+ )}
+
+ ))}
+
+
+ );
+}
\ No newline at end of file
diff --git a/frontend/components/ProjectContextPanel.jsx b/frontend/components/ProjectContextPanel.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..0a480e9b6003a3a9e1c4ffbc80c86564d6ccbe55
--- /dev/null
+++ b/frontend/components/ProjectContextPanel.jsx
@@ -0,0 +1,572 @@
+import React, { useEffect, useMemo, useRef, useState } from "react";
+import FileTree from "./FileTree.jsx";
+import BranchPicker from "./BranchPicker.jsx";
+
+// --- INJECTED STYLES FOR ANIMATIONS ---
+const animationStyles = `
+ @keyframes highlight-pulse {
+ 0% { background-color: rgba(59, 130, 246, 0.10); }
+ 50% { background-color: rgba(59, 130, 246, 0.22); }
+ 100% { background-color: transparent; }
+ }
+ .pulse-context {
+ animation: highlight-pulse 1.1s ease-out;
+ }
+`;
+
+/**
+ * ProjectContextPanel (Production-ready)
+ *
+ * Controlled component:
+ * - Branch source of truth is App.jsx:
+ * - defaultBranch (prod)
+ * - currentBranch (what user sees)
+ * - sessionBranches (list of all active AI session branches)
+ *
+ * Responsibilities:
+ * - Show project context + branch dropdown + AI badge/banner
+ * - Fetch access status + file count for the currentBranch
+ * - Trigger visual pulse on pulseNonce (Hard Switch)
+ */
+export default function ProjectContextPanel({
+ repo,
+ defaultBranch,
+ currentBranch,
+ sessionBranch, // Active session branch (optional, for specific highlighting)
+ sessionBranches = [], // List of all AI branches
+ onBranchChange,
+ pulseNonce,
+ onSettingsClick,
+}) {
+ const [appUrl, setAppUrl] = useState("");
+ const [fileCount, setFileCount] = useState(0);
+
+ const [isDropdownOpen, setIsDropdownOpen] = useState(false);
+
+ // Data Loading State
+ const [analyzing, setAnalyzing] = useState(false);
+ const [accessInfo, setAccessInfo] = useState(null);
+ const [treeError, setTreeError] = useState(null);
+
+ // Retry / Refresh Logic
+ const [refreshTrigger, setRefreshTrigger] = useState(0);
+ const [retryCount, setRetryCount] = useState(0);
+ const retryTimeoutRef = useRef(null);
+
+ // UX State
+ const [animateHeader, setAnimateHeader] = useState(false);
+ const [toast, setToast] = useState({ visible: false, title: "", msg: "" });
+
+ // Calculate effective default to prevent 'main' fallback errors
+ const effectiveDefaultBranch = defaultBranch || repo?.default_branch || "main";
+ const branch = currentBranch || effectiveDefaultBranch;
+
+ // Determine if we are currently viewing an AI Session branch
+ const isAiSession = (sessionBranches.includes(branch)) || (sessionBranch === branch && branch !== effectiveDefaultBranch);
+
+ // Fetch App URL on mount
+ useEffect(() => {
+ fetch("/api/auth/app-url")
+ .then((res) => res.json())
+ .then((data) => {
+ if (data.app_url) setAppUrl(data.app_url);
+ })
+ .catch((err) => console.error("Failed to fetch App URL:", err));
+ }, []);
+
+ // Hard Switch pulse: whenever App increments pulseNonce
+ useEffect(() => {
+ if (!pulseNonce) return;
+ setAnimateHeader(true);
+ const t = window.setTimeout(() => setAnimateHeader(false), 1100);
+ return () => window.clearTimeout(t);
+ }, [pulseNonce]);
+
+ // Main data fetcher (Access + Tree stats) for currentBranch
+ // Stale-while-revalidate: keep previous data visible during fetch
+ useEffect(() => {
+ if (!repo) return;
+
+ // Only show full "analyzing" spinner if we have no data yet
+ if (!accessInfo) setAnalyzing(true);
+ setTreeError(null);
+
+ if (retryTimeoutRef.current) {
+ clearTimeout(retryTimeoutRef.current);
+ retryTimeoutRef.current = null;
+ }
+
+ let headers = {};
+ try {
+ const token = localStorage.getItem("github_token");
+ if (token) headers = { Authorization: `Bearer ${token}` };
+ } catch (e) {
+ console.warn("Unable to read github_token:", e);
+ }
+
+ let cancelled = false;
+ const cacheBuster = `&_t=${Date.now()}&retry=${retryCount}`;
+
+ // A) Access Check (with Stale Cache Fix)
+ fetch(`/api/auth/repo-access?owner=${repo.owner}&repo=${repo.name}${cacheBuster}`, {
+ headers,
+ cache: "no-cache",
+ })
+ .then(async (res) => {
+ if (cancelled) return;
+ const data = await res.json().catch(() => ({}));
+
+ if (!res.ok) {
+ setAccessInfo({ can_write: false, app_installed: false, auth_type: "none" });
+ return;
+ }
+
+ setAccessInfo(data);
+
+ // Auto-retry if user has push access but App is not detected yet (Stale Cache)
+ if (data.can_write && !data.app_installed && retryCount === 0) {
+ retryTimeoutRef.current = setTimeout(() => {
+ setRetryCount(1);
+ }, 1000);
+ }
+ })
+ .catch(() => {
+ if (!cancelled) setAccessInfo({ can_write: false, app_installed: false, auth_type: "none" });
+ });
+
+ // B) Tree count for the selected branch
+ // Don't clear fileCount β keep stale value visible until new one arrives
+ const hadFileCount = fileCount > 0;
+ if (!hadFileCount) setAnalyzing(true);
+
+ fetch(`/api/repos/${repo.owner}/${repo.name}/tree?ref=${encodeURIComponent(branch)}&_t=${Date.now()}`, {
+ headers,
+ cache: "no-cache",
+ })
+ .then(async (res) => {
+ if (cancelled) return;
+ const data = await res.json().catch(() => ({}));
+ if (!res.ok) {
+ setTreeError(data.detail || "Failed to load tree");
+ setFileCount(0);
+ return;
+ }
+ setFileCount(Array.isArray(data.files) ? data.files.length : 0);
+ })
+ .catch((err) => {
+ if (cancelled) return;
+ setTreeError(err.message);
+ setFileCount(0);
+ })
+ .finally(() => { if (!cancelled) setAnalyzing(false); });
+
+ return () => {
+ cancelled = true;
+ if (retryTimeoutRef.current) clearTimeout(retryTimeoutRef.current);
+ };
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [repo?.owner, repo?.name, branch, refreshTrigger, retryCount]);
+
+ const showToast = (title, msg) => {
+ setToast({ visible: true, title, msg });
+ setTimeout(() => setToast((prev) => ({ ...prev, visible: false })), 3000);
+ };
+
+ const handleManualSwitch = (targetBranch) => {
+ if (!targetBranch || targetBranch === branch) {
+ setIsDropdownOpen(false);
+ return;
+ }
+
+ // Local UI feedback (App.jsx will handle the actual state change)
+ const goingAi = sessionBranches.includes(targetBranch);
+ showToast(
+ goingAi ? "Context Switched" : "Switched to Production",
+ goingAi ? `Viewing AI Session: ${targetBranch}` : `Viewing ${targetBranch}.`
+ );
+
+ setIsDropdownOpen(false);
+ if (onBranchChange) onBranchChange(targetBranch);
+ };
+
+ const handleRefresh = () => {
+ setAnalyzing(true);
+ setRetryCount(0);
+ setRefreshTrigger((prev) => prev + 1);
+ };
+
+ const handleInstallClick = () => {
+ if (!appUrl) return;
+ const targetUrl = appUrl.endsWith("/") ? `${appUrl}installations/new` : `${appUrl}/installations/new`;
+ window.open(targetUrl, "_blank", "noopener,noreferrer");
+ };
+
+ // --- STYLES ---
+ const theme = useMemo(
+ () => ({
+ bg: "#131316",
+ border: "#27272A",
+ textPrimary: "#EDEDED",
+ textSecondary: "#A1A1AA",
+ accent: "#3b82f6",
+ warningBorder: "rgba(245, 158, 11, 0.2)",
+ warningText: "#F59E0B",
+ successColor: "#10B981",
+ cardBg: "#18181B",
+ aiBg: "rgba(59, 130, 246, 0.10)",
+ aiBorder: "rgba(59, 130, 246, 0.30)",
+ aiText: "#60a5fa",
+ }),
+ []
+ );
+
+ const styles = useMemo(
+ () => ({
+ container: {
+ height: "100%",
+ borderRight: `1px solid ${theme.border}`,
+ backgroundColor: theme.bg,
+ display: "flex",
+ flexDirection: "column",
+ fontFamily: '"SΓΆhne", "Inter", sans-serif',
+ position: "relative",
+ overflow: "hidden",
+ },
+ header: {
+ padding: "16px 20px",
+ borderBottom: `1px solid ${theme.border}`,
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "space-between",
+ transition: "background-color 0.3s ease",
+ },
+ titleGroup: { display: "flex", alignItems: "center", gap: "8px" },
+ title: { fontSize: "13px", fontWeight: "600", color: theme.textPrimary },
+ repoBadge: {
+ backgroundColor: "#27272A",
+ color: theme.textSecondary,
+ fontSize: "11px",
+ padding: "2px 8px",
+ borderRadius: "12px",
+ border: `1px solid ${theme.border}`,
+ fontFamily: "monospace",
+ },
+ aiBadge: {
+ display: "flex",
+ alignItems: "center",
+ gap: "6px",
+ backgroundColor: theme.aiBg,
+ color: theme.aiText,
+ fontSize: "10px",
+ fontWeight: "bold",
+ padding: "2px 8px",
+ borderRadius: "12px",
+ border: `1px solid ${theme.aiBorder}`,
+ textTransform: "uppercase",
+ letterSpacing: "0.5px",
+ },
+ content: {
+ padding: "16px 20px 12px 20px",
+ display: "flex",
+ flexDirection: "column",
+ gap: "12px",
+ },
+ statRow: { display: "flex", justifyContent: "space-between", fontSize: "13px", marginBottom: "4px" },
+ label: { color: theme.textSecondary },
+ value: { color: theme.textPrimary, fontWeight: "500" },
+ dropdownContainer: { position: "relative" },
+ branchButton: {
+ display: "flex",
+ alignItems: "center",
+ gap: "6px",
+ padding: "4px 8px",
+ borderRadius: "4px",
+ border: `1px solid ${isAiSession ? theme.aiBorder : theme.border}`,
+ backgroundColor: isAiSession ? "rgba(59, 130, 246, 0.05)" : "transparent",
+ color: isAiSession ? theme.aiText : theme.textPrimary,
+ fontSize: "13px",
+ cursor: "pointer",
+ fontFamily: "monospace",
+ },
+ dropdownMenu: {
+ position: "absolute",
+ top: "100%",
+ left: 0,
+ marginTop: "4px",
+ width: "240px",
+ backgroundColor: "#1F1F23",
+ border: `1px solid ${theme.border}`,
+ borderRadius: "6px",
+ boxShadow: "0 4px 12px rgba(0,0,0,0.5)",
+ zIndex: 50,
+ display: isDropdownOpen ? "block" : "none",
+ overflow: "hidden",
+ },
+ dropdownItem: {
+ padding: "8px 12px",
+ fontSize: "13px",
+ color: theme.textSecondary,
+ cursor: "pointer",
+ display: "flex",
+ alignItems: "center",
+ gap: "8px",
+ borderBottom: `1px solid ${theme.border}`,
+ },
+ contextBanner: {
+ backgroundColor: theme.aiBg,
+ borderTop: `1px solid ${theme.aiBorder}`,
+ padding: "8px 20px",
+ fontSize: "11px",
+ color: theme.aiText,
+ display: "flex",
+ justifyContent: "space-between",
+ alignItems: "center",
+ },
+ toast: {
+ position: "absolute",
+ top: "16px",
+ right: "16px",
+ backgroundColor: "#18181B",
+ border: `1px solid ${theme.border}`,
+ borderLeft: `3px solid ${theme.accent}`,
+ borderRadius: "6px",
+ padding: "12px",
+ boxShadow: "0 4px 12px rgba(0,0,0,0.5)",
+ zIndex: 100,
+ minWidth: "240px",
+ transition: "all 0.3s cubic-bezier(0.16, 1, 0.3, 1)",
+ transform: toast.visible ? "translateX(0)" : "translateX(120%)",
+ opacity: toast.visible ? 1 : 0,
+ },
+ toastTitle: { fontSize: "13px", fontWeight: "bold", color: theme.textPrimary, marginBottom: "2px" },
+ toastMsg: { fontSize: "11px", color: theme.textSecondary },
+ refreshButton: {
+ marginTop: "8px",
+ height: "32px",
+ padding: "0 12px",
+ backgroundColor: "transparent",
+ color: theme.textSecondary,
+ border: `1px solid ${theme.border}`,
+ borderRadius: "6px",
+ fontSize: "12px",
+ cursor: analyzing ? "not-allowed" : "pointer",
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "center",
+ gap: "6px",
+ },
+ settingsBtn: {
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "center",
+ width: "28px",
+ height: "28px",
+ borderRadius: "6px",
+ border: `1px solid ${theme.border}`,
+ backgroundColor: "transparent",
+ color: theme.textSecondary,
+ cursor: "pointer",
+ padding: 0,
+ transition: "color 0.15s, border-color 0.15s",
+ },
+ treeWrapper: { flex: 1, overflow: "auto", borderTop: `1px solid ${theme.border}` },
+ installCard: {
+ marginTop: "8px",
+ padding: "12px",
+ borderRadius: "8px",
+ backgroundColor: theme.cardBg,
+ border: `1px solid ${theme.warningBorder}`,
+ },
+ installHeader: {
+ display: "flex",
+ alignItems: "center",
+ gap: "10px",
+ fontSize: "14px",
+ fontWeight: "600",
+ color: theme.textPrimary,
+ },
+ installText: {
+ fontSize: "13px",
+ color: theme.textSecondary,
+ lineHeight: "1.5",
+ },
+ }),
+ [analyzing, isAiSession, isDropdownOpen, theme, toast.visible]
+ );
+
+ // Determine status text
+ let statusText = "Checking...";
+ let statusColor = theme.textSecondary;
+ let showInstallCard = false;
+
+ if (!analyzing && accessInfo) {
+ if (accessInfo.app_installed) {
+ statusText = "Write Access β";
+ statusColor = theme.successColor;
+ } else if (accessInfo.can_write && retryCount === 0) {
+ statusText = "Verifying...";
+ } else if (accessInfo.can_write) {
+ statusText = "Push Access (No App)";
+ statusColor = theme.warningText;
+ showInstallCard = true;
+ } else {
+ statusText = "Read Only";
+ statusColor = theme.warningText;
+ showInstallCard = true;
+ }
+ }
+
+ if (!repo) {
+ return (
+
+ );
+ }
+
+ return (
+
+
+
+ {/* TOAST */}
+
+
{toast.title}
+
{toast.msg}
+
+
+ {/* HEADER */}
+
+
+
Project context
+ {isAiSession && (
+
+
+
+
+ AI Session
+
+ )}
+
+
+ {!isAiSession &&
{repo.name} }
+ {onSettingsClick && (
+
+
+
+
+
+
+ )}
+
+
+
+ {/* CONTENT */}
+
+ {/* Branch selector (Claude-Code-on-Web parity β uses BranchPicker with search) */}
+
+ Branch:
+
+
+
+ {/* Stats */}
+
+ Files:
+ {analyzing ? "β¦" : fileCount}
+
+
+
+ Status:
+ {statusText}
+
+
+ {/* Tree error (optional display) */}
+ {treeError && (
+
+ {treeError}
+
+ )}
+
+ {/* Refresh */}
+
+
+
+
+ {analyzing ? "Refreshing..." : "Refresh"}
+
+
+ {/* Install card */}
+ {showInstallCard && (
+
+
+ β‘
+ Enable Write Access
+
+
+ Install the GitPilot App to enable AI agent operations.
+
+
+ Alternatively, use Folder or Local Git mode for local-first workflows without GitHub.
+
+
+ Install App
+
+
+ )}
+
+
+ {/* Context banner */}
+ {isAiSession && (
+
+
+
+
+
+
+
+ You are viewing an AI Session branch.
+
+ handleManualSwitch(effectiveDefaultBranch)}>
+ Return to {effectiveDefaultBranch}
+
+
+ )}
+
+ {/* File tree (branch-aware) */}
+
+
+
+
+ );
+}
\ No newline at end of file
diff --git a/frontend/components/ProjectSettings/ContextTab.jsx b/frontend/components/ProjectSettings/ContextTab.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..5272c846b181fb9c17031215e4056b565a865f09
--- /dev/null
+++ b/frontend/components/ProjectSettings/ContextTab.jsx
@@ -0,0 +1,352 @@
+import React, { useEffect, useMemo, useRef, useState } from "react";
+
+export default function ContextTab({ owner, repo }) {
+ const [assets, setAssets] = useState([]);
+ const [busy, setBusy] = useState(false);
+ const [error, setError] = useState("");
+ const [uploadHint, setUploadHint] = useState("");
+ const inputRef = useRef(null);
+
+ const canUse = useMemo(() => Boolean(owner && repo), [owner, repo]);
+
+ async function loadAssets() {
+ if (!canUse) return;
+ setError("");
+ try {
+ const res = await fetch(`/api/repos/${owner}/${repo}/context/assets`);
+ if (!res.ok) throw new Error(`Failed to list assets (${res.status})`);
+ const data = await res.json();
+ setAssets(data.assets || []);
+ } catch (e) {
+ setError(e?.message || "Failed to load assets");
+ }
+ }
+
+ useEffect(() => {
+ loadAssets();
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [owner, repo]);
+
+ async function uploadFiles(fileList) {
+ if (!canUse) return;
+ const files = Array.from(fileList || []);
+ if (!files.length) return;
+
+ setBusy(true);
+ setError("");
+ setUploadHint(`Uploading ${files.length} file(s)...`);
+
+ try {
+ for (const f of files) {
+ const form = new FormData();
+ form.append("file", f);
+
+ const res = await fetch(
+ `/api/repos/${owner}/${repo}/context/assets/upload`,
+ { method: "POST", body: form }
+ );
+ if (!res.ok) {
+ const txt = await res.text().catch(() => "");
+ throw new Error(`Upload failed (${res.status}) ${txt}`);
+ }
+ }
+ setUploadHint("Upload complete. Refreshing list...");
+ await loadAssets();
+ setUploadHint("");
+ } catch (e) {
+ setError(e?.message || "Upload failed");
+ setUploadHint("");
+ } finally {
+ setBusy(false);
+ if (inputRef.current) inputRef.current.value = "";
+ }
+ }
+
+ async function deleteAsset(assetId) {
+ if (!canUse) return;
+ const ok = window.confirm("Delete this asset? This cannot be undone.");
+ if (!ok) return;
+
+ setBusy(true);
+ setError("");
+ try {
+ const res = await fetch(
+ `/api/repos/${owner}/${repo}/context/assets/${assetId}`,
+ { method: "DELETE" }
+ );
+ if (!res.ok) throw new Error(`Delete failed (${res.status})`);
+ await loadAssets();
+ } catch (e) {
+ setError(e?.message || "Delete failed");
+ } finally {
+ setBusy(false);
+ }
+ }
+
+ function downloadAsset(assetId) {
+ if (!canUse) return;
+ window.open(
+ `/api/repos/${owner}/${repo}/context/assets/${assetId}/download`,
+ "_blank"
+ );
+ }
+
+ const empty = !assets || assets.length === 0;
+
+ return (
+
+
+
+
Project Context
+
+ Upload documents, transcripts, screenshots, etc. (non-destructive,
+ additive).
+
+
+
+
+ uploadFiles(e.target.files)}
+ style={styles.fileInput}
+ />
+ inputRef.current?.click()}
+ >
+ Upload
+
+
+ Refresh
+
+
+
+
+
{
+ e.preventDefault();
+ e.stopPropagation();
+ }}
+ onDrop={(e) => {
+ e.preventDefault();
+ e.stopPropagation();
+ if (busy) return;
+ uploadFiles(e.dataTransfer.files);
+ }}
+ >
+
+ Drag & drop files here, or click Upload .
+
+
+ Tip: For audio/video, upload a transcript file too.
+
+
+
+ {uploadHint ?
{uploadHint}
: null}
+ {error ?
{error}
: null}
+
+
+
+
File
+
Type
+
Size
+
Indexed
+
Actions
+
+
+ {empty ? (
+
+ No context assets yet. Upload docs, transcripts, and screenshots to
+ improve planning quality.
+
+ ) : (
+ assets.map((a) => (
+
+
+
{a.filename}
+
+ Added: {a.created_at || "-"} | Extracted:{" "}
+ {Number(a.extracted_chars || 0).toLocaleString()} chars
+
+
+
+
+ {a.mime || "unknown"}
+
+
+
+ {formatBytes(a.size_bytes || 0)}
+
+
+
+ {a.indexed_chunks || 0} chunks
+
+
+
+ downloadAsset(a.asset_id)}
+ >
+ Download
+
+ deleteAsset(a.asset_id)}
+ >
+ Delete
+
+
+
+ ))
+ )}
+
+
+ );
+}
+
/**
 * Format a byte count as a human-readable size string.
 *
 * Divides by 1024 until the value drops below 1024 (capped at TB).
 * One decimal place is shown only for scaled values under 10; raw byte
 * counts and values >= 10 are shown as integers.
 *
 * @param {number|string|null|undefined} bytes - Raw byte count; falsy or
 *   non-numeric input yields "0 B".
 * @returns {string} e.g. "512 B", "1.5 KB", "10 MB".
 */
function formatBytes(bytes) {
  const UNITS = ["B", "KB", "MB", "GB", "TB"];
  let value = Number(bytes || 0);
  if (!value) return "0 B";

  let unitIndex = 0;
  while (value >= 1024 && unitIndex < UNITS.length - 1) {
    value /= 1024;
    unitIndex += 1;
  }

  const decimals = value >= 10 || unitIndex === 0 ? 0 : 1;
  return `${value.toFixed(decimals)} ${UNITS[unitIndex]}`;
}
+
+const styles = {
+ wrap: { display: "flex", flexDirection: "column", gap: 12 },
+ topRow: {
+ display: "flex",
+ justifyContent: "space-between",
+ gap: 12,
+ alignItems: "flex-start",
+ flexWrap: "wrap",
+ },
+ left: { minWidth: 280 },
+ right: { display: "flex", gap: 8, alignItems: "center", flexWrap: "wrap" },
+ h1: { fontSize: 14, fontWeight: 800, color: "#fff" },
+ h2: { fontSize: 12, color: "rgba(255,255,255,0.65)", marginTop: 4 },
+ fileInput: { display: "none" },
+ btn: {
+ background: "rgba(255,255,255,0.10)",
+ border: "1px solid rgba(255,255,255,0.18)",
+ color: "#fff",
+ borderRadius: 10,
+ padding: "8px 10px",
+ cursor: "pointer",
+ fontSize: 13,
+ },
+ dropzone: {
+ border: "1px dashed rgba(255,255,255,0.22)",
+ borderRadius: 12,
+ padding: 16,
+ background: "rgba(255,255,255,0.03)",
+ },
+ dropText: { color: "rgba(255,255,255,0.85)", fontSize: 13 },
+ dropSub: { color: "rgba(255,255,255,0.55)", fontSize: 12, marginTop: 6 },
+ hint: {
+ color: "rgba(255,255,255,0.75)",
+ fontSize: 12,
+ padding: "8px 10px",
+ border: "1px solid rgba(255,255,255,0.12)",
+ borderRadius: 10,
+ background: "rgba(255,255,255,0.03)",
+ },
+ error: {
+ color: "#ffb3b3",
+ fontSize: 12,
+ padding: "8px 10px",
+ border: "1px solid rgba(255,120,120,0.25)",
+ borderRadius: 10,
+ background: "rgba(255,80,80,0.08)",
+ },
+ tableWrap: {
+ border: "1px solid rgba(255,255,255,0.12)",
+ borderRadius: 12,
+ overflow: "hidden",
+ },
+ tableHeader: {
+ display: "grid",
+ gridTemplateColumns: "1.6fr 1fr 0.6fr 0.6fr 0.8fr",
+ gap: 0,
+ padding: "10px 12px",
+ background: "rgba(255,255,255,0.03)",
+ borderBottom: "1px solid rgba(255,255,255,0.10)",
+ fontSize: 12,
+ color: "rgba(255,255,255,0.65)",
+ },
+ row: {
+ display: "grid",
+ gridTemplateColumns: "1.6fr 1fr 0.6fr 0.6fr 0.8fr",
+ padding: "10px 12px",
+ borderBottom: "1px solid rgba(255,255,255,0.08)",
+ alignItems: "center",
+ },
+ col: { minWidth: 0 },
+ colName: {},
+ colMeta: { color: "rgba(255,255,255,0.75)", fontSize: 12 },
+ colActions: { display: "flex", gap: 8, justifyContent: "flex-end" },
+ fileName: {
+ color: "#fff",
+ fontSize: 13,
+ fontWeight: 700,
+ overflow: "hidden",
+ textOverflow: "ellipsis",
+ whiteSpace: "nowrap",
+ },
+ small: {
+ color: "rgba(255,255,255,0.55)",
+ fontSize: 11,
+ marginTop: 4,
+ overflow: "hidden",
+ textOverflow: "ellipsis",
+ whiteSpace: "nowrap",
+ },
+ badge: {
+ display: "inline-flex",
+ alignItems: "center",
+ padding: "2px 8px",
+ borderRadius: 999,
+ border: "1px solid rgba(255,255,255,0.16)",
+ background: "rgba(255,255,255,0.04)",
+ fontSize: 11,
+ color: "rgba(255,255,255,0.80)",
+ maxWidth: "100%",
+ overflow: "hidden",
+ textOverflow: "ellipsis",
+ whiteSpace: "nowrap",
+ },
+ smallBtn: {
+ background: "rgba(255,255,255,0.08)",
+ border: "1px solid rgba(255,255,255,0.16)",
+ color: "#fff",
+ borderRadius: 10,
+ padding: "6px 8px",
+ cursor: "pointer",
+ fontSize: 12,
+ },
+ dangerBtn: {
+ border: "1px solid rgba(255,90,90,0.35)",
+ background: "rgba(255,90,90,0.10)",
+ },
+ empty: {
+ padding: 14,
+ color: "rgba(255,255,255,0.65)",
+ fontSize: 13,
+ },
+};
diff --git a/frontend/components/ProjectSettings/ConventionsTab.jsx b/frontend/components/ProjectSettings/ConventionsTab.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..d508ccf1a65817f1b5a44dbebcdfc1861c42b8b6
--- /dev/null
+++ b/frontend/components/ProjectSettings/ConventionsTab.jsx
@@ -0,0 +1,151 @@
+import React, { useEffect, useMemo, useState } from "react";
+
+export default function ConventionsTab({ owner, repo }) {
+ const [content, setContent] = useState("");
+ const [busy, setBusy] = useState(false);
+ const [error, setError] = useState("");
+
+ const canUse = useMemo(() => Boolean(owner && repo), [owner, repo]);
+
+ async function load() {
+ if (!canUse) return;
+ setError("");
+ setBusy(true);
+ try {
+ const res = await fetch(`/api/repos/${owner}/${repo}/context`);
+ if (!res.ok) throw new Error(`Failed to load conventions (${res.status})`);
+ const data = await res.json();
+ // backend may return { context: "..."} or { conventions: "..."} depending on implementation
+ setContent(data.context || data.conventions || data.memory || data.text || "");
+ } catch (e) {
+ setError(e?.message || "Failed to load conventions");
+ } finally {
+ setBusy(false);
+ }
+ }
+
+ async function initialize() {
+ if (!canUse) return;
+ setError("");
+ setBusy(true);
+ try {
+ const res = await fetch(`/api/repos/${owner}/${repo}/context/init`, {
+ method: "POST",
+ });
+ if (!res.ok) {
+ const txt = await res.text().catch(() => "");
+ throw new Error(`Init failed (${res.status}) ${txt}`);
+ }
+ await load();
+ } catch (e) {
+ setError(e?.message || "Init failed");
+ } finally {
+ setBusy(false);
+ }
+ }
+
+ useEffect(() => {
+ load();
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [owner, repo]);
+
+ return (
+
+
+
+
Project Conventions
+
+ This is the project memory/conventions file used by GitPilot.
+
+
+
+
+ Refresh
+
+
+ Initialize
+
+
+
+
+ {error ?
{error}
: null}
+
+
+ {content ? (
+
{content}
+ ) : (
+
+ No conventions found yet. Click Initialize to create default
+ project memory if supported.
+
+ )}
+
+
+
+ Editing conventions is intentionally not included here to keep this
+ feature additive/non-destructive. You can extend this later with an
+ explicit "Edit" mode.
+
+
+ );
+}
+
+const styles = {
+ wrap: { display: "flex", flexDirection: "column", gap: 12 },
+ topRow: {
+ display: "flex",
+ justifyContent: "space-between",
+ gap: 12,
+ alignItems: "flex-start",
+ flexWrap: "wrap",
+ },
+ actions: { display: "flex", gap: 8, flexWrap: "wrap" },
+ h1: { fontSize: 14, fontWeight: 800, color: "#fff" },
+ h2: { fontSize: 12, color: "rgba(255,255,255,0.65)", marginTop: 4 },
+ btn: {
+ background: "rgba(255,255,255,0.10)",
+ border: "1px solid rgba(255,255,255,0.18)",
+ color: "#fff",
+ borderRadius: 10,
+ padding: "8px 10px",
+ cursor: "pointer",
+ fontSize: 13,
+ },
+ error: {
+ color: "#ffb3b3",
+ fontSize: 12,
+ padding: "8px 10px",
+ border: "1px solid rgba(255,120,120,0.25)",
+ borderRadius: 10,
+ background: "rgba(255,80,80,0.08)",
+ },
+ box: {
+ border: "1px solid rgba(255,255,255,0.12)",
+ borderRadius: 12,
+ overflow: "hidden",
+ background: "rgba(0,0,0,0.22)",
+ },
+ pre: {
+ margin: 0,
+ padding: 12,
+ color: "rgba(255,255,255,0.85)",
+ fontSize: 12,
+ lineHeight: 1.35,
+ whiteSpace: "pre-wrap",
+ overflow: "auto",
+ maxHeight: 520,
+ },
+ empty: {
+ padding: 12,
+ color: "rgba(255,255,255,0.65)",
+ fontSize: 13,
+ },
+ note: {
+ color: "rgba(255,255,255,0.55)",
+ fontSize: 12,
+ },
+};
diff --git a/frontend/components/ProjectSettings/UseCaseTab.jsx b/frontend/components/ProjectSettings/UseCaseTab.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..b01e9a46d3ac15c42d16decf69b93e0cf7192db2
--- /dev/null
+++ b/frontend/components/ProjectSettings/UseCaseTab.jsx
@@ -0,0 +1,637 @@
+import React, { useEffect, useMemo, useRef, useState } from "react";
+
+export default function UseCaseTab({ owner, repo }) {
+ const [useCases, setUseCases] = useState([]);
+ const [selectedId, setSelectedId] = useState("");
+ const [useCase, setUseCase] = useState(null);
+ const [busy, setBusy] = useState(false);
+ const [error, setError] = useState("");
+ const [draftTitle, setDraftTitle] = useState("New Use Case");
+ const [message, setMessage] = useState("");
+ const messagesEndRef = useRef(null);
+
+ const canUse = useMemo(() => Boolean(owner && repo), [owner, repo]);
+ const spec = useCase?.spec || {};
+
+ function scrollToBottom() {
+ requestAnimationFrame(() => {
+ messagesEndRef.current?.scrollIntoView({ behavior: "smooth" });
+ });
+ }
+
+ async function loadUseCases() {
+ if (!canUse) return;
+ setError("");
+ try {
+ const res = await fetch(`/api/repos/${owner}/${repo}/use-cases`);
+ if (!res.ok) throw new Error(`Failed to list use cases (${res.status})`);
+ const data = await res.json();
+ const list = data.use_cases || [];
+ setUseCases(list);
+
+ // auto select active or first
+ const active = list.find((x) => x.is_active);
+ const nextId = active?.use_case_id || list[0]?.use_case_id || "";
+ if (!selectedId && nextId) setSelectedId(nextId);
+ } catch (e) {
+ setError(e?.message || "Failed to load use cases");
+ }
+ }
+
+ async function loadUseCase(id) {
+ if (!canUse || !id) return;
+ setError("");
+ try {
+ const res = await fetch(`/api/repos/${owner}/${repo}/use-cases/${id}`);
+ if (!res.ok) throw new Error(`Failed to load use case (${res.status})`);
+ const data = await res.json();
+ setUseCase(data.use_case || null);
+ scrollToBottom();
+ } catch (e) {
+ setError(e?.message || "Failed to load use case");
+ }
+ }
+
+ useEffect(() => {
+ loadUseCases();
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [owner, repo]);
+
+ useEffect(() => {
+ if (!selectedId) return;
+ loadUseCase(selectedId);
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [selectedId]);
+
+ async function createUseCase() {
+ if (!canUse) return;
+ setBusy(true);
+ setError("");
+ try {
+ const res = await fetch(`/api/repos/${owner}/${repo}/use-cases`, {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({ title: draftTitle || "New Use Case" }),
+ });
+ if (!res.ok) {
+ const txt = await res.text().catch(() => "");
+ throw new Error(`Create failed (${res.status}) ${txt}`);
+ }
+ const data = await res.json();
+ const id = data?.use_case?.use_case_id;
+ await loadUseCases();
+ if (id) setSelectedId(id);
+ setDraftTitle("New Use Case");
+ } catch (e) {
+ setError(e?.message || "Create failed");
+ } finally {
+ setBusy(false);
+ }
+ }
+
+ async function sendMessage() {
+ if (!canUse || !selectedId) return;
+ const msg = (message || "").trim();
+ if (!msg) return;
+
+ setBusy(true);
+ setError("");
+
+ // optimistic UI: append user message immediately
+ setUseCase((prev) => {
+ if (!prev) return prev;
+ const next = { ...prev };
+ next.messages = Array.isArray(next.messages) ? [...next.messages] : [];
+ next.messages.push({ role: "user", content: msg, ts: new Date().toISOString() });
+ return next;
+ });
+ setMessage("");
+ scrollToBottom();
+
+ try {
+ const res = await fetch(
+ `/api/repos/${owner}/${repo}/use-cases/${selectedId}/chat`,
+ {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({ message: msg }),
+ }
+ );
+ if (!res.ok) {
+ const txt = await res.text().catch(() => "");
+ throw new Error(`Chat failed (${res.status}) ${txt}`);
+ }
+ const data = await res.json();
+ setUseCase(data.use_case || null);
+ await loadUseCases();
+ scrollToBottom();
+ } catch (e) {
+ setError(e?.message || "Chat failed");
+ } finally {
+ setBusy(false);
+ }
+ }
+
+ async function finalizeUseCase() {
+ if (!canUse || !selectedId) return;
+ setBusy(true);
+ setError("");
+ try {
+ const res = await fetch(
+ `/api/repos/${owner}/${repo}/use-cases/${selectedId}/finalize`,
+ { method: "POST" }
+ );
+ if (!res.ok) {
+ const txt = await res.text().catch(() => "");
+ throw new Error(`Finalize failed (${res.status}) ${txt}`);
+ }
+ const data = await res.json();
+ setUseCase(data.use_case || null);
+ await loadUseCases();
+ alert(
+ "Use Case finalized and marked active.\n\nA Markdown export was saved in the repo workspace .gitpilot/context/use_cases/."
+ );
+ } catch (e) {
+ setError(e?.message || "Finalize failed");
+ } finally {
+ setBusy(false);
+ }
+ }
+
+ const activeId = useCases.find((x) => x.is_active)?.use_case_id;
+
+ return (
+
+
+
+
Use Case
+
+ Guided chat to clarify requirements and produce a versioned spec.
+
+
+
+
+ setDraftTitle(e.target.value)}
+ placeholder="New use case title..."
+ style={styles.titleInput}
+ disabled={!canUse || busy}
+ />
+
+ New
+
+
+ Finalize
+
+
+ Refresh
+
+
+
+
+ {error ?
{error}
: null}
+
+
+
+
Use Cases
+
+ {useCases.length === 0 ? (
+
+ No use cases yet. Create one with New .
+
+ ) : (
+ useCases.map((uc) => (
+
setSelectedId(uc.use_case_id)}
+ >
+
+
+ {uc.title || "(untitled)"}
+
+ {uc.use_case_id === activeId ? (
+
ACTIVE
+ ) : null}
+
+
+ Updated: {uc.updated_at || uc.created_at || "-"}
+
+
+ ))
+ )}
+
+
+
+
+
Guided Chat
+
+ {Array.isArray(useCase?.messages) && useCase.messages.length ? (
+ useCase.messages.map((m, idx) => (
+
+
+ {m.role === "user" ? "You" : "Assistant"}
+
+
{m.content}
+
+ ))
+ ) : (
+
+ Select a use case and start chatting. You can paste structured
+ info like:
+
+{`Summary: ...
+Problem: ...
+Users: ...
+Requirements:
+- ...
+Acceptance Criteria:
+- ...`}
+
+
+ )}
+
+
+
+
+
+
+
+
+
Spec Preview
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Finalize will save a Markdown spec and mark it ACTIVE for context.
+
+
+ Finalize Spec
+
+
+
+
+
+ );
+}
+
+function Section({ title, value }) {
+ return (
+
+
{title}
+
+ {String(value || "").trim() ? (
+
{value}
+ ) : (
+
(empty)
+ )}
+
+
+ );
+}
+
+function ListSection({ title, items }) {
+ const list = Array.isArray(items) ? items : [];
+ return (
+
+
{title}
+
+ {list.length ? (
+
+ {list.map((x, i) => (
+
+ {x}
+
+ ))}
+
+ ) : (
+
(empty)
+ )}
+
+
+ );
+}
+
+const styles = {
+ wrap: { display: "flex", flexDirection: "column", gap: 12 },
+ topRow: {
+ display: "flex",
+ justifyContent: "space-between",
+ gap: 12,
+ alignItems: "flex-start",
+ flexWrap: "wrap",
+ },
+ left: { minWidth: 280 },
+ right: { display: "flex", gap: 8, alignItems: "center", flexWrap: "wrap" },
+ h1: { fontSize: 14, fontWeight: 800, color: "#fff" },
+ h2: { fontSize: 12, color: "rgba(255,255,255,0.65)", marginTop: 4 },
+ titleInput: {
+ width: 260,
+ maxWidth: "70vw",
+ padding: "8px 10px",
+ borderRadius: 10,
+ border: "1px solid rgba(255,255,255,0.18)",
+ background: "rgba(0,0,0,0.25)",
+ color: "#fff",
+ fontSize: 13,
+ outline: "none",
+ },
+ btn: {
+ background: "rgba(255,255,255,0.10)",
+ border: "1px solid rgba(255,255,255,0.18)",
+ color: "#fff",
+ borderRadius: 10,
+ padding: "8px 10px",
+ cursor: "pointer",
+ fontSize: 13,
+ },
+ primaryBtn: {
+ background: "rgba(255,255,255,0.12)",
+ border: "1px solid rgba(255,255,255,0.22)",
+ color: "#fff",
+ borderRadius: 10,
+ padding: "8px 12px",
+ cursor: "pointer",
+ fontSize: 13,
+ fontWeight: 700,
+ },
+ error: {
+ color: "#ffb3b3",
+ fontSize: 12,
+ padding: "8px 10px",
+ border: "1px solid rgba(255,120,120,0.25)",
+ borderRadius: 10,
+ background: "rgba(255,80,80,0.08)",
+ },
+ grid: {
+ display: "grid",
+ gridTemplateColumns: "300px 1.2fr 0.9fr",
+ gap: 12,
+ alignItems: "stretch",
+ },
+ sidebar: {
+ border: "1px solid rgba(255,255,255,0.12)",
+ borderRadius: 12,
+ overflow: "hidden",
+ background: "rgba(255,255,255,0.02)",
+ display: "flex",
+ flexDirection: "column",
+ minHeight: 520,
+ },
+ sidebarTitle: {
+ padding: 10,
+ borderBottom: "1px solid rgba(255,255,255,0.10)",
+ fontSize: 12,
+ fontWeight: 800,
+ color: "rgba(255,255,255,0.85)",
+ },
+ sidebarList: {
+ padding: 8,
+ display: "flex",
+ flexDirection: "column",
+ gap: 8,
+ overflow: "auto",
+ },
+ sidebarEmpty: {
+ color: "rgba(255,255,255,0.65)",
+ fontSize: 12,
+ padding: 8,
+ },
+ ucItem: {
+ textAlign: "left",
+ background: "rgba(0,0,0,0.25)",
+ border: "1px solid rgba(255,255,255,0.12)",
+ color: "#fff",
+ borderRadius: 12,
+ padding: 10,
+ cursor: "pointer",
+ },
+ ucItemActive: {
+ border: "1px solid rgba(255,255,255,0.25)",
+ background: "rgba(255,255,255,0.06)",
+ },
+ ucTitleRow: { display: "flex", alignItems: "center", gap: 8 },
+ ucTitle: {
+ fontSize: 13,
+ fontWeight: 800,
+ overflow: "hidden",
+ textOverflow: "ellipsis",
+ whiteSpace: "nowrap",
+ flex: 1,
+ },
+ activePill: {
+ fontSize: 10,
+ fontWeight: 800,
+ padding: "2px 8px",
+ borderRadius: 999,
+ border: "1px solid rgba(120,255,180,0.30)",
+ background: "rgba(120,255,180,0.10)",
+ color: "rgba(200,255,220,0.95)",
+ },
+ ucMeta: {
+ marginTop: 6,
+ fontSize: 11,
+ color: "rgba(255,255,255,0.60)",
+ },
+ chatCol: {
+ border: "1px solid rgba(255,255,255,0.12)",
+ borderRadius: 12,
+ overflow: "hidden",
+ display: "flex",
+ flexDirection: "column",
+ background: "rgba(255,255,255,0.02)",
+ minHeight: 520,
+ },
+ specCol: {
+ border: "1px solid rgba(255,255,255,0.12)",
+ borderRadius: 12,
+ overflow: "hidden",
+ display: "flex",
+ flexDirection: "column",
+ background: "rgba(255,255,255,0.02)",
+ minHeight: 520,
+ },
+ panelTitle: {
+ padding: 10,
+ borderBottom: "1px solid rgba(255,255,255,0.10)",
+ fontSize: 12,
+ fontWeight: 800,
+ color: "rgba(255,255,255,0.85)",
+ },
+ chatBox: {
+ flex: 1,
+ overflow: "auto",
+ padding: 10,
+ display: "flex",
+ flexDirection: "column",
+ gap: 10,
+ },
+ chatEmpty: {
+ color: "rgba(255,255,255,0.65)",
+ fontSize: 12,
+ padding: 6,
+ },
+ pre: {
+ marginTop: 10,
+ padding: 10,
+ borderRadius: 10,
+ border: "1px solid rgba(255,255,255,0.12)",
+ background: "rgba(0,0,0,0.25)",
+ color: "rgba(255,255,255,0.8)",
+ overflow: "auto",
+ fontSize: 11,
+ },
+ msg: {
+ border: "1px solid rgba(255,255,255,0.12)",
+ borderRadius: 12,
+ padding: 10,
+ background: "rgba(0,0,0,0.25)",
+ },
+ msgUser: {
+ border: "1px solid rgba(255,255,255,0.18)",
+ background: "rgba(255,255,255,0.04)",
+ },
+ msgAsst: {},
+ msgRole: {
+ fontSize: 11,
+ fontWeight: 800,
+ color: "rgba(255,255,255,0.70)",
+ marginBottom: 6,
+ },
+ msgContent: {
+ whiteSpace: "pre-wrap",
+ fontSize: 13,
+ color: "rgba(255,255,255,0.90)",
+ lineHeight: 1.35,
+ },
+ composer: {
+ borderTop: "1px solid rgba(255,255,255,0.10)",
+ padding: 10,
+ display: "flex",
+ gap: 10,
+ alignItems: "flex-end",
+ },
+ textarea: {
+ flex: 1,
+ minHeight: 52,
+ maxHeight: 120,
+ resize: "vertical",
+ padding: 10,
+ borderRadius: 12,
+ border: "1px solid rgba(255,255,255,0.18)",
+ background: "rgba(0,0,0,0.25)",
+ color: "#fff",
+ fontSize: 13,
+ outline: "none",
+ },
+ sendBtn: {
+ background: "rgba(255,255,255,0.12)",
+ border: "1px solid rgba(255,255,255,0.22)",
+ color: "#fff",
+ borderRadius: 12,
+ padding: "10px 12px",
+ cursor: "pointer",
+ fontSize: 13,
+ fontWeight: 800,
+ },
+ specBox: {
+ flex: 1,
+ overflow: "auto",
+ padding: 10,
+ display: "flex",
+ flexDirection: "column",
+ gap: 10,
+ },
+ specFooter: {
+ borderTop: "1px solid rgba(255,255,255,0.10)",
+ padding: 10,
+ display: "flex",
+ gap: 10,
+ alignItems: "center",
+ justifyContent: "space-between",
+ },
+ specHint: { fontSize: 12, color: "rgba(255,255,255,0.60)" },
+ section: {
+ border: "1px solid rgba(255,255,255,0.10)",
+ borderRadius: 12,
+ background: "rgba(0,0,0,0.22)",
+ overflow: "hidden",
+ },
+ sectionTitle: {
+ padding: "8px 10px",
+ borderBottom: "1px solid rgba(255,255,255,0.08)",
+ fontSize: 12,
+ fontWeight: 800,
+ color: "rgba(255,255,255,0.80)",
+ background: "rgba(255,255,255,0.02)",
+ },
+ sectionBody: { padding: "8px 10px" },
+ sectionText: {
+ whiteSpace: "pre-wrap",
+ fontSize: 12,
+ color: "rgba(255,255,255,0.90)",
+ lineHeight: 1.35,
+ },
+ sectionEmpty: { fontSize: 12, color: "rgba(255,255,255,0.45)" },
+ ul: { margin: 0, paddingLeft: 18 },
+ li: { color: "rgba(255,255,255,0.90)", fontSize: 12, lineHeight: 1.35 },
+};
diff --git a/frontend/components/ProjectSettingsModal.jsx b/frontend/components/ProjectSettingsModal.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..ff6cc365af5a1c1226d544b425ffa7bbc88698b4
--- /dev/null
+++ b/frontend/components/ProjectSettingsModal.jsx
@@ -0,0 +1,230 @@
+import React, { useEffect, useMemo, useState } from "react";
+import ContextTab from "./ProjectSettings/ContextTab.jsx";
+import UseCaseTab from "./ProjectSettings/UseCaseTab.jsx";
+import ConventionsTab from "./ProjectSettings/ConventionsTab.jsx";
+import EnvironmentSelector from "./EnvironmentSelector.jsx";
+
+export default function ProjectSettingsModal({
+ owner,
+ repo,
+ isOpen,
+ onClose,
+ activeEnvId,
+ onEnvChange,
+}) {
+ const [activeTab, setActiveTab] = useState("context");
+
+ useEffect(() => {
+ if (!isOpen) return;
+ // reset to Context each time opened (safe default)
+ setActiveTab("context");
+ }, [isOpen]);
+
+ const title = useMemo(() => {
+ const repoLabel = owner && repo ? `${owner}/${repo}` : "Project";
+ return `Project Settings β ${repoLabel}`;
+ }, [owner, repo]);
+
+ if (!isOpen) return null;
+
+ return (
+ {
+ // click outside closes
+ if (e.target === e.currentTarget) onClose?.();
+ }}
+ >
+
e.stopPropagation()}>
+
+
+
{title}
+
+ Manage context, use cases, and project conventions (additive only).
+
+
+
+ β
+
+
+
+
+ setActiveTab("context")}
+ />
+ setActiveTab("usecase")}
+ />
+ setActiveTab("conventions")}
+ />
+ setActiveTab("environment")}
+ />
+
+
+
+ {activeTab === "context" &&
}
+ {activeTab === "usecase" &&
}
+ {activeTab === "conventions" && (
+
+ )}
+ {activeTab === "environment" && (
+
+
+ Select and configure the execution environment for agent operations.
+
+
+
+ )}
+
+
+
+
+ Tip: Upload meeting notes/transcripts in Context, then finalize a Use
+ Case spec.
+
+
+ Done
+
+
+
+
+ );
+}
+
+function TabButton({ label, isActive, onClick }) {
+ return (
+
+ {label}
+
+ );
+}
+
+const styles = {
+ backdrop: {
+ position: "fixed",
+ inset: 0,
+ background: "rgba(0,0,0,0.45)",
+ display: "flex",
+ justifyContent: "center",
+ alignItems: "center",
+ zIndex: 9999,
+ padding: 16,
+ },
+ modal: {
+ width: "min(1100px, 96vw)",
+ height: "min(760px, 90vh)",
+ background: "#111",
+ border: "1px solid rgba(255,255,255,0.12)",
+ borderRadius: 12,
+ overflow: "hidden",
+ display: "flex",
+ flexDirection: "column",
+ boxShadow: "0 12px 40px rgba(0,0,0,0.35)",
+ },
+ header: {
+ padding: "14px 14px 10px",
+ display: "flex",
+ gap: 12,
+ alignItems: "flex-start",
+ justifyContent: "space-between",
+ borderBottom: "1px solid rgba(255,255,255,0.10)",
+ background: "linear-gradient(180deg, rgba(255,255,255,0.04), transparent)",
+ },
+ headerLeft: {
+ display: "flex",
+ flexDirection: "column",
+ gap: 4,
+ minWidth: 0,
+ },
+ title: {
+ fontSize: 16,
+ fontWeight: 700,
+ color: "#fff",
+ lineHeight: 1.2,
+ whiteSpace: "nowrap",
+ overflow: "hidden",
+ textOverflow: "ellipsis",
+ maxWidth: "88vw",
+ },
+ subtitle: {
+ fontSize: 12,
+ color: "rgba(255,255,255,0.65)",
+ },
+ closeBtn: {
+ background: "transparent",
+ border: "1px solid rgba(255,255,255,0.18)",
+ color: "rgba(255,255,255,0.85)",
+ borderRadius: 10,
+ padding: "6px 10px",
+ cursor: "pointer",
+ },
+ tabsRow: {
+ display: "flex",
+ gap: 8,
+ padding: 10,
+ borderBottom: "1px solid rgba(255,255,255,0.10)",
+ background: "rgba(255,255,255,0.02)",
+ },
+ tabBtn: {
+ background: "transparent",
+ border: "1px solid rgba(255,255,255,0.14)",
+ color: "rgba(255,255,255,0.75)",
+ borderRadius: 999,
+ padding: "8px 12px",
+ cursor: "pointer",
+ fontSize: 13,
+ },
+ tabBtnActive: {
+ border: "1px solid rgba(255,255,255,0.28)",
+ color: "#fff",
+ background: "rgba(255,255,255,0.06)",
+ },
+ body: {
+ flex: 1,
+ overflow: "auto",
+ padding: 12,
+ },
+ footer: {
+ padding: 12,
+ borderTop: "1px solid rgba(255,255,255,0.10)",
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "space-between",
+ gap: 12,
+ background: "rgba(255,255,255,0.02)",
+ },
+ footerHint: {
+ color: "rgba(255,255,255,0.6)",
+ fontSize: 12,
+ overflow: "hidden",
+ textOverflow: "ellipsis",
+ whiteSpace: "nowrap",
+ },
+ primaryBtn: {
+ background: "rgba(255,255,255,0.10)",
+ border: "1px solid rgba(255,255,255,0.20)",
+ color: "#fff",
+ borderRadius: 10,
+ padding: "8px 12px",
+ cursor: "pointer",
+ },
+};
diff --git a/frontend/components/RepoSelector.jsx b/frontend/components/RepoSelector.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..dd601f4f7cb9b8a19a7858280d5886607a33271c
--- /dev/null
+++ b/frontend/components/RepoSelector.jsx
@@ -0,0 +1,269 @@
+import React, { useEffect, useState, useCallback } from "react";
+import { authFetch } from "../utils/api.js";
+
+export default function RepoSelector({ onSelect }) {
+ const [query, setQuery] = useState("");
+ const [repos, setRepos] = useState([]);
+ const [loading, setLoading] = useState(false);
+ const [loadingMore, setLoadingMore] = useState(false);
+ const [status, setStatus] = useState("");
+ const [page, setPage] = useState(1);
+ const [hasMore, setHasMore] = useState(false);
+ const [totalCount, setTotalCount] = useState(null);
+
+ /**
+ * Fetch repositories with pagination and optional search
+ * @param {number} pageNum - Page number to fetch
+ * @param {boolean} append - Whether to append or replace results
+ * @param {string} searchQuery - Search query (uses current query if not provided)
+ */
+ const fetchRepos = useCallback(async (pageNum = 1, append = false, searchQuery = query) => {
+ // Set appropriate loading state
+ if (pageNum === 1) {
+ setLoading(true);
+ setStatus("");
+ } else {
+ setLoadingMore(true);
+ }
+
+ try {
+ // Build URL with query parameters
+ const params = new URLSearchParams();
+ params.append("page", pageNum);
+ params.append("per_page", "100");
+ if (searchQuery) {
+ params.append("query", searchQuery);
+ }
+
+ const url = `/api/repos?${params.toString()}`;
+ const res = await authFetch(url);
+ const data = await res.json();
+
+ if (!res.ok) {
+ throw new Error(data.detail || data.error || "Failed to load repositories");
+ }
+
+ // Update repositories - append or replace
+ if (append) {
+ setRepos((prev) => [...prev, ...data.repositories]);
+ } else {
+ setRepos(data.repositories);
+ }
+
+ // Update pagination state
+ setPage(pageNum);
+ setHasMore(data.has_more);
+ setTotalCount(data.total_count);
+
+ // Show status if no results
+ if (!append && data.repositories.length === 0) {
+ if (searchQuery) {
+ setStatus(`No repositories matching "${searchQuery}"`);
+ } else {
+ setStatus("No repositories found");
+ }
+ } else {
+ setStatus("");
+ }
+ } catch (err) {
+ console.error("Error fetching repositories:", err);
+ setStatus(err.message || "Failed to load repositories");
+ } finally {
+ setLoading(false);
+ setLoadingMore(false);
+ }
+ }, [query]);
+
+ /**
+ * Load more repositories (next page)
+ */
+ const loadMore = () => {
+ fetchRepos(page + 1, true);
+ };
+
+ /**
+ * Handle search - resets to page 1
+ */
+ const handleSearch = () => {
+ setPage(1);
+ fetchRepos(1, false, query);
+ };
+
+ /**
+ * Handle input change - trigger search on Enter key
+ */
+ const handleKeyDown = (e) => {
+ if (e.key === "Enter") {
+ handleSearch();
+ }
+ };
+
+ /**
+ * Clear search and show all repos
+ */
+ const clearSearch = () => {
+ setQuery("");
+ setPage(1);
+ fetchRepos(1, false, "");
+ };
+
+ // Initial load on mount
+ useEffect(() => {
+ fetchRepos(1, false, "");
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, []);
+
+ /**
+ * Format repository count for display
+ */
+ const getCountText = () => {
+ if (totalCount !== null) {
+ // Search mode - show filtered count
+ return `${repos.length} of ${totalCount} repositories`;
+ } else {
+ // Pagination mode - show loaded count
+ return `${repos.length} ${repos.length === 1 ? "repository" : "repositories"}${hasMore ? "+" : ""}`;
+ }
+ };
+
+ return (
+
+
+ GitHub repos are optional. Use Folder or Local Git mode for local-first workflows.
+
+ {/* Search Header */}
+
+
+ setQuery(e.target.value)}
+ onKeyDown={handleKeyDown}
+ disabled={loading}
+ />
+
+ {loading ? "..." : "Search"}
+
+
+
+ {/* Search Info Bar */}
+ {(query || repos.length > 0) && (
+
+ {getCountText()}
+ {query && (
+
+ Clear search
+
+ )}
+
+ )}
+
+
+ {/* Status Message */}
+ {status && !loading && (
+
+ {status}
+
+ )}
+
+ {/* Repository List */}
+
+ {repos.map((r) => (
+
onSelect(r)}
+ >
+
+ {r.name}
+ {r.owner}
+
+ {r.private && (
+ Private
+ )}
+
+ ))}
+
+ {/* Loading Indicator */}
+ {loading && repos.length === 0 && (
+
+
+
Loading repositories...
+
+ )}
+
+ {/* Load More Button */}
+ {hasMore && !loading && repos.length > 0 && (
+
+ {loadingMore ? (
+ <>
+
+ Loading more...
+ >
+ ) : (
+ <>
+ Load more repositories
+ ({repos.length} loaded)
+ >
+ )}
+
+ )}
+
+ {/* All Loaded Message */}
+ {!hasMore && !loading && repos.length > 0 && (
+
+ β All repositories loaded ({repos.length} total)
+
+ )}
+
+
+ {/* GitHub App Installation Notice */}
+
+
+
+
+
+
+
+ Repository missing?
+
+
+
+
+
+ );
+}
\ No newline at end of file
diff --git a/frontend/components/SessionItem.jsx b/frontend/components/SessionItem.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..acf2ce3fc670698f9198362a5c81d9d871e99027
--- /dev/null
+++ b/frontend/components/SessionItem.jsx
@@ -0,0 +1,183 @@
+import React, { useState } from "react";
+
+/**
+ * SessionItem β a single row in the sessions sidebar.
+ *
+ * Shows status dot (pulsing/static), title, timestamp, message count.
+ * Claude-Code-on-Web parity: active=amber pulse, completed=green,
+ * failed=red, waiting=blue.
+ */
+export default function SessionItem({ session, isActive, onSelect, onDelete }) {
+ const [hovering, setHovering] = useState(false);
+
+ const status = session.status || "active";
+
+ const dotColor = {
+ active: "#F59E0B",
+ completed: "#10B981",
+ failed: "#EF4444",
+ waiting: "#3B82F6",
+ paused: "#6B7280",
+ }[status] || "#6B7280";
+
+ const isPulsing = status === "active";
+
+ const timeAgo = formatTimeAgo(session.updated_at);
+
+ // Prefer name (set from first user prompt) over generic fallback
+ const title =
+ session.name ||
+ (session.branch ? `${session.branch}` : `Session ${session.id?.slice(0, 8)}`);
+
+ return (
+ setHovering(true)}
+ onMouseLeave={() => setHovering(false)}
+ >
+
+
+ {/* Status dot */}
+
+
+ {/* Content */}
+
+
{title}
+
+ {timeAgo}
+ {session.mode && (
+
+ {session.mode === "github" ? "GH" : session.mode === "local-git" ? "Git" : "Dir"}
+
+ )}
+ {session.message_count > 0 && (
+ {session.message_count} msgs
+ )}
+
+
+
+ {/* Delete button (on hover) */}
+ {hovering && (
+
{
+ e.stopPropagation();
+ onDelete?.();
+ }}
+ title="Delete session"
+ >
+ ×
+
+ )}
+
+ );
+}
+
/**
 * Format an ISO timestamp as a short relative-time label.
 *
 * @param {string} isoStr - ISO-8601 timestamp (e.g. session.updated_at).
 * @returns {string} "just now", "Nm ago", "Nh ago", "Nd ago",
 *   or "" when the input is missing or unparseable.
 */
function formatTimeAgo(isoStr) {
  if (!isoStr) return "";
  const date = new Date(isoStr);
  // Fix: an unparseable string yields an Invalid Date (NaN), which fell
  // through every comparison in the original and returned "NaNd ago".
  if (Number.isNaN(date.getTime())) return "";
  const diffMin = Math.floor((Date.now() - date.getTime()) / 60000);
  if (diffMin < 1) return "just now";
  if (diffMin < 60) return `${diffMin}m ago`;
  const diffHr = Math.floor(diffMin / 60);
  if (diffHr < 24) return `${diffHr}h ago`;
  return `${Math.floor(diffHr / 24)}d ago`;
}
+
// Inline styles for SessionItem (dark theme, zinc palette).
const styles = {
  // Outer clickable row; the fade-in keyframes ("session-fade-in")
  // are declared in SessionSidebar's animStyles.
  row: {
    display: "flex",
    alignItems: "center",
    gap: 8,
    padding: "8px 10px",
    borderRadius: 6,
    cursor: "pointer",
    transition: "background-color 0.15s",
    position: "relative",
    marginBottom: 2,
    animation: "session-fade-in 0.25s ease-out",
  },
  // Status indicator; color is supplied per-status via `dotColor`.
  dot: {
    width: 8,
    height: 8,
    borderRadius: "50%",
    flexShrink: 0,
  },
  // Title + meta column; minWidth 0 lets ellipsis work inside flex.
  content: {
    flex: 1,
    minWidth: 0,
    overflow: "hidden",
  },
  title: {
    fontSize: 12,
    fontWeight: 500,
    color: "#E4E4E7",
    whiteSpace: "nowrap",
    overflow: "hidden",
    textOverflow: "ellipsis",
  },
  // Timestamp / mode / message-count row under the title.
  meta: {
    fontSize: 10,
    color: "#71717A",
    marginTop: 2,
    display: "flex",
    alignItems: "center",
    gap: 6,
  },
  // Small pill for the mode label ("GH" / "Git" / "Dir") and msg count.
  badge: {
    fontSize: 9,
    background: "#27272A",
    padding: "1px 5px",
    borderRadius: 8,
    color: "#A1A1AA",
  },
  // Red "×" button, rendered only while hovering the row.
  deleteBtn: {
    position: "absolute",
    right: 6,
    top: 6,
    width: 18,
    height: 18,
    borderRadius: 3,
    border: "none",
    background: "rgba(239, 68, 68, 0.15)",
    color: "#EF4444",
    fontSize: 14,
    cursor: "pointer",
    display: "flex",
    alignItems: "center",
    justifyContent: "center",
    lineHeight: 1,
  },
};
diff --git a/frontend/components/SessionSidebar.jsx b/frontend/components/SessionSidebar.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..fb63c7526850dba78ce9a5e37a4b0efac32d0e8a
--- /dev/null
+++ b/frontend/components/SessionSidebar.jsx
@@ -0,0 +1,181 @@
+import React, { useEffect, useRef, useState } from "react";
+import SessionItem from "./SessionItem.jsx";
+
+/**
+ * SessionSidebar β Claude-Code-on-Web parity.
+ *
+ * Shows a scrollable list of coding sessions with status indicators,
+ * timestamps, and a "New Session" button. Additive β does not modify
+ * any existing component.
+ */
+export default function SessionSidebar({
+ repo,
+ activeSessionId,
+ onSelectSession,
+ onNewSession,
+ onDeleteSession,
+ refreshNonce = 0,
+}) {
+ const [sessions, setSessions] = useState([]);
+ const [loading, setLoading] = useState(false);
+ const pollRef = useRef(null);
+
+ const repoFullName = repo?.full_name || (repo ? `${repo.owner}/${repo.name}` : null);
+
+ // Fetch sessions
+ useEffect(() => {
+ if (!repoFullName) {
+ setSessions([]);
+ return;
+ }
+
+ let cancelled = false;
+
+ const fetchSessions = async () => {
+ setLoading(true);
+ try {
+ const token = localStorage.getItem("github_token");
+ const headers = token ? { Authorization: `Bearer ${token}` } : {};
+ const res = await fetch(`/api/sessions`, { headers, cache: "no-cache" });
+ if (!res.ok) return;
+ const data = await res.json();
+ if (cancelled) return;
+
+ // Filter to current repo
+ const filtered = (data.sessions || []).filter(
+ (s) => s.repo === repoFullName
+ );
+ setSessions(filtered);
+ } catch (err) {
+ console.warn("Failed to fetch sessions:", err);
+ } finally {
+ if (!cancelled) setLoading(false);
+ }
+ };
+
+ fetchSessions();
+
+ // Poll every 15s for status updates
+ pollRef.current = setInterval(fetchSessions, 15000);
+
+ return () => {
+ cancelled = true;
+ if (pollRef.current) clearInterval(pollRef.current);
+ };
+ }, [repoFullName, refreshNonce]);
+
+ const handleDelete = async (sessionId) => {
+ try {
+ const token = localStorage.getItem("github_token");
+ const headers = token ? { Authorization: `Bearer ${token}` } : {};
+ await fetch(`/api/sessions/${sessionId}`, { method: "DELETE", headers });
+ setSessions((prev) => prev.filter((s) => s.id !== sessionId));
+ // Notify parent so it can clear the chat if this was the active session
+ onDeleteSession?.(sessionId);
+ } catch (err) {
+ console.warn("Failed to delete session:", err);
+ }
+ };
+
+ return (
+
+
+
+ {/* Header */}
+
+ SESSIONS
+
+ +
+
+
+
+ {/* Session list */}
+
+ {loading && sessions.length === 0 && (
+
Loading...
+ )}
+
+ {!loading && sessions.length === 0 && (
+
+ No sessions yet.
+
+
+ Your first message will create one automatically.
+
+
+ )}
+
+ {sessions.map((s) => (
+
onSelectSession?.(s)}
+ onDelete={() => handleDelete(s.id)}
+ />
+ ))}
+
+
+ );
+}
+
// CSS keyframes for the fade-in applied to SessionItem rows
// (see `animation: "session-fade-in ..."` in SessionItem's styles).
// NOTE(review): presumably injected via a <style> tag in the render —
// the JSX is not visible here, so confirm at the usage site.
const animStyles = `
  @keyframes session-fade-in {
    from { opacity: 0; transform: translateY(4px); }
    to { opacity: 1; transform: translateY(0); }
  }
`;
+
// Inline styles for SessionSidebar (dark theme, zinc palette).
const styles = {
  // Column filling the remaining sidebar height; minHeight 0 lets the
  // inner list scroll instead of stretching the flex parent.
  container: {
    display: "flex",
    flexDirection: "column",
    borderTop: "1px solid #27272A",
    flex: 1,
    minHeight: 0,
  },
  // "SESSIONS" label row with the "+" new-session button.
  header: {
    display: "flex",
    justifyContent: "space-between",
    alignItems: "center",
    padding: "10px 14px 6px",
  },
  label: {
    fontSize: 10,
    fontWeight: 700,
    letterSpacing: "0.08em",
    color: "#71717A",
    textTransform: "uppercase",
  },
  // Dashed square "+" button that triggers onNewSession.
  newBtn: {
    width: 22,
    height: 22,
    borderRadius: 4,
    border: "1px dashed #3F3F46",
    background: "transparent",
    color: "#A1A1AA",
    fontSize: 14,
    cursor: "pointer",
    display: "flex",
    alignItems: "center",
    justifyContent: "center",
    lineHeight: 1,
  },
  // Scrollable list of SessionItem rows.
  list: {
    flex: 1,
    overflowY: "auto",
    padding: "0 6px 8px",
  },
  // Placeholder shown when there are no sessions yet.
  empty: {
    textAlign: "center",
    color: "#52525B",
    fontSize: 12,
    padding: "20px 8px",
    lineHeight: 1.5,
  },
};
diff --git a/frontend/components/SettingsModal.jsx b/frontend/components/SettingsModal.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..80c40e4edba24b67dd8100a4dff43042bdd3c839
--- /dev/null
+++ b/frontend/components/SettingsModal.jsx
@@ -0,0 +1,270 @@
+import React, { useEffect, useState } from "react";
+
+export default function SettingsModal({ onClose }) {
+ const [settings, setSettings] = useState(null);
+ const [models, setModels] = useState([]);
+ const [modelsError, setModelsError] = useState(null);
+ const [loadingModels, setLoadingModels] = useState(false);
+ const [testResult, setTestResult] = useState(null); // { ok: bool, message: string }
+ const [testing, setTesting] = useState(false);
+
+ const loadSettings = async () => {
+ const res = await fetch("/api/settings");
+ const data = await res.json();
+ setSettings(data);
+ };
+
+ useEffect(() => {
+ loadSettings();
+ }, []);
+
+ const changeProvider = async (provider) => {
+ const res = await fetch("/api/settings/provider", {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({ provider }),
+ });
+ const data = await res.json();
+ setSettings(data);
+
+ // Reset models state when provider changes
+ setModels([]);
+ setModelsError(null);
+ };
+
+ const loadModels = async () => {
+ if (!settings) return;
+ setLoadingModels(true);
+ setModelsError(null);
+ try {
+ const res = await fetch(
+ `/api/settings/models?provider=${settings.provider}`
+ );
+ const data = await res.json();
+ if (data.error) {
+ setModelsError(data.error);
+ setModels([]);
+ } else {
+ setModels(data.models || []);
+ }
+ } catch (err) {
+ console.error(err);
+ setModelsError("Failed to load models");
+ setModels([]);
+ } finally {
+ setLoadingModels(false);
+ }
+ };
+
+ const currentModelForActiveProvider = () => {
+ if (!settings) return "";
+ const p = settings.provider;
+ if (p === "openai") return settings.openai?.model || "";
+ if (p === "claude") return settings.claude?.model || "";
+ if (p === "watsonx") return settings.watsonx?.model_id || "";
+ if (p === "ollama") return settings.ollama?.model || "";
+ return "";
+ };
+
+ const changeModel = async (model) => {
+ if (!settings) return;
+ const provider = settings.provider;
+
+ let payload = {};
+ if (provider === "openai") {
+ payload = {
+ openai: {
+ ...settings.openai,
+ model,
+ },
+ };
+ } else if (provider === "claude") {
+ payload = {
+ claude: {
+ ...settings.claude,
+ model,
+ },
+ };
+ } else if (provider === "watsonx") {
+ payload = {
+ watsonx: {
+ ...settings.watsonx,
+ model_id: model,
+ },
+ };
+ } else if (provider === "ollama") {
+ payload = {
+ ollama: {
+ ...settings.ollama,
+ model,
+ },
+ };
+ }
+
+ const res = await fetch("/api/settings/llm", {
+ method: "PUT",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify(payload),
+ });
+ const data = await res.json();
+ setSettings(data);
+ };
+
+ const testConnection = async () => {
+ if (!settings) return;
+ setTesting(true);
+ setTestResult(null);
+ try {
+ const res = await fetch(`/api/settings/test?provider=${settings.provider}`);
+ const data = await res.json();
+ if (!res.ok || data.error) {
+ setTestResult({ ok: false, message: data.error || data.detail || "Connection failed" });
+ } else {
+ setTestResult({ ok: true, message: data.message || "Connection successful" });
+ }
+ } catch (err) {
+ setTestResult({ ok: false, message: err.message || "Connection test failed" });
+ } finally {
+ setTesting(false);
+ }
+ };
+
+ if (!settings) return null;
+
+ const activeModel = currentModelForActiveProvider();
+
+ return (
+
+
e.stopPropagation()}>
+
+
+
+ Select which LLM provider GitPilot should use for planning and chat.
+
+
+
+ {settings.providers.map((p) => (
+
+
{p}
+
changeProvider(p)}
+ disabled={settings.provider === p}
+ >
+ {settings.provider === p ? "Active" : "Use"}
+
+
+ ))}
+
+
+ {/* Models section */}
+
+
+ Active provider: {settings.provider}
+
+
+
+
+ {testing ? "Testingβ¦" : "Test Connection"}
+
+
+ {loadingModels ? "Loadingβ¦" : "Display models"}
+
+
+ {activeModel && (
+
+ Current model: {activeModel}
+
+ )}
+
+
+ {modelsError && (
+
+ {modelsError}
+
+ )}
+
+ {testResult && (
+
+ {testResult.ok ? "β " : "β "}{testResult.message}
+
+ )}
+
+ {models.length > 0 && (
+
+
+ Select model for {settings.provider}:
+
+ changeModel(e.target.value)}
+ >
+ -- select a model --
+ {models.map((m) => (
+
+ {m}
+
+ ))}
+
+
+ )}
+
+
+
+ );
+}
diff --git a/frontend/components/StreamingMessage.jsx b/frontend/components/StreamingMessage.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..71aaf5918c26d7ae6b097505381767d990357823
--- /dev/null
+++ b/frontend/components/StreamingMessage.jsx
@@ -0,0 +1,182 @@
+import React from "react";
+
+/**
+ * StreamingMessage β Claude-Code-on-Web parity streaming renderer.
+ *
+ * Renders agent messages incrementally as they arrive via WebSocket.
+ * Shows tool use blocks (bash commands + output), explanatory text,
+ * and status indicators.
+ */
+export default function StreamingMessage({ events }) {
+ if (!events || events.length === 0) return null;
+
+ return (
+
+ {events.map((evt, idx) => (
+
+ ))}
+
+ );
+}
+
+function StreamingEvent({ event, isLast }) {
+ const { type } = event;
+
+ if (type === "agent_message") {
+ return (
+
+ {event.content}
+ {isLast && | }
+
+ );
+ }
+
+ if (type === "tool_use") {
+ return (
+
+
+
+
+
+
+
{event.tool || "terminal"}
+
+
+ $ {event.input}
+
+
+ );
+ }
+
+ if (type === "tool_result") {
+ return (
+
+ );
+ }
+
+ if (type === "status_change") {
+ const statusLabels = {
+ active: "Working...",
+ waiting: "Waiting for input",
+ completed: "Completed",
+ failed: "Failed",
+ };
+ return (
+
+
+
{statusLabels[event.status] || event.status}
+
+ );
+ }
+
+ if (type === "diff_update") {
+ return null; // Handled by DiffStats in parent
+ }
+
+ if (type === "error") {
+ return (
+
+ {event.message}
+
+ );
+ }
+
+ return null;
+}
+
// Inline styles for StreamingMessage (dark, terminal-like theme).
const styles = {
  container: {
    display: "flex",
    flexDirection: "column",
    gap: 4,
  },
  // Plain agent text; pre-wrap preserves newlines from the stream.
  textBlock: {
    fontSize: 14,
    lineHeight: 1.6,
    color: "#D4D4D8",
    whiteSpace: "pre-wrap",
    wordBreak: "break-word",
  },
  // Blinking caret appended to the last streaming text block.
  // NOTE(review): relies on a "blink" keyframes rule defined elsewhere —
  // confirm it exists in the global stylesheet.
  cursor: {
    display: "inline-block",
    animation: "blink 1s step-end infinite",
    color: "#3B82F6",
    fontWeight: 700,
  },
  // Bordered card wrapping a tool invocation (header + input + output).
  toolBlock: {
    margin: "4px 0",
    borderRadius: 6,
    border: "1px solid #27272A",
    overflow: "hidden",
  },
  toolHeader: {
    display: "flex",
    alignItems: "center",
    gap: 6,
    padding: "6px 10px",
    backgroundColor: "#18181B",
    fontSize: 11,
    color: "#71717A",
    fontFamily: "monospace",
  },
  toolName: {
    fontWeight: 600,
  },
  // The "$ command" line, styled like a green terminal prompt.
  toolInput: {
    padding: "8px 10px",
    backgroundColor: "#0D0D0F",
    fontFamily: "monospace",
    fontSize: 12,
    color: "#10B981",
    whiteSpace: "pre-wrap",
    wordBreak: "break-all",
  },
  // Scroll-capped container for tool_result output.
  toolOutput: {
    padding: "8px 10px",
    backgroundColor: "#0D0D0F",
    maxHeight: 300,
    overflowY: "auto",
  },
  toolOutputPre: {
    margin: 0,
    fontFamily: "monospace",
    fontSize: 11,
    color: "#A1A1AA",
    whiteSpace: "pre-wrap",
    wordBreak: "break-all",
  },
  // Italic status line for status_change events ("Working...", etc.).
  statusLine: {
    display: "flex",
    alignItems: "center",
    gap: 6,
    padding: "4px 0",
    fontSize: 12,
    color: "#71717A",
    fontStyle: "italic",
  },
  statusDot: {
    width: 6,
    height: 6,
    borderRadius: "50%",
  },
  // Red-tinted block for error events.
  errorBlock: {
    padding: "8px 12px",
    borderRadius: 6,
    backgroundColor: "rgba(239, 68, 68, 0.08)",
    border: "1px solid rgba(239, 68, 68, 0.2)",
    color: "#FCA5A5",
    fontSize: 13,
  },
};
diff --git a/frontend/index.html b/frontend/index.html
new file mode 100644
index 0000000000000000000000000000000000000000..497850b3f10fa590f7e06d95843e28e5ce3fd4b5
--- /dev/null
+++ b/frontend/index.html
@@ -0,0 +1,12 @@
+
+
+
+
+ GitPilot
+
+
+
+
+
+
+
diff --git a/frontend/main.jsx b/frontend/main.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..2c2017cf902ef50f84816577bd83e21af501f43e
--- /dev/null
+++ b/frontend/main.jsx
@@ -0,0 +1,11 @@
+import React from "react";
+import ReactDOM from "react-dom/client";
+import App from "./App.jsx";
+import "./styles.css";
+import "./ollabridge.css";
+
+ReactDOM.createRoot(document.getElementById("root")).render(
+
+
+
+);
diff --git a/frontend/nginx.conf b/frontend/nginx.conf
new file mode 100644
index 0000000000000000000000000000000000000000..455bb91c50c5c97affbe57cf37fe1f7e07572f1d
--- /dev/null
+++ b/frontend/nginx.conf
@@ -0,0 +1,58 @@
server {
    listen 80;
    server_name _;
    root /usr/share/nginx/html;
    index index.html;

    # DNS resolver for dynamic upstream resolution.
    # This allows nginx to start even if backend doesn't exist yet.
    resolver 127.0.0.11 valid=30s ipv6=off;

    # Gzip compression
    gzip on;
    gzip_vary on;
    gzip_min_length 1024;
    gzip_types text/plain text/css text/xml text/javascript application/x-javascript application/xml+rss application/json application/javascript;

    # Security headers
    add_header X-Frame-Options "SAMEORIGIN" always;
    add_header X-Content-Type-Options "nosniff" always;
    add_header X-XSS-Protection "1; mode=block" always;

    # Handle API requests - proxy to backend (docker-compose only).
    # Uses a variable to force runtime DNS resolution instead of at startup.
    location /api/ {
        set $backend "backend:8000";
        proxy_pass http://$backend;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_cache_bypass $http_upgrade;

        # Handle backend connection errors gracefully
        proxy_intercept_errors on;
        error_page 502 503 504 = @backend_unavailable;
    }

    # Fallback for when backend is unavailable.
    # Fix: `add_header Content-Type ...` does not set the MIME type of a
    # `return` body; `default_type` does.
    location @backend_unavailable {
        default_type application/json;
        return 503 '{"error": "Backend service unavailable. Configure VITE_BACKEND_URL in frontend or ensure backend container is running."}';
    }

    # Serve static files (SPA fallback to index.html)
    location / {
        try_files $uri $uri/ /index.html;
    }

    # Cache static assets.
    # Fix: any add_header in a location discards headers inherited from the
    # server block, so the security headers must be repeated here.
    location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg|woff|woff2|ttf|eot)$ {
        expires 1y;
        add_header Cache-Control "public, immutable";
        add_header X-Frame-Options "SAMEORIGIN" always;
        add_header X-Content-Type-Options "nosniff" always;
        add_header X-XSS-Protection "1; mode=block" always;
    }
}
diff --git a/frontend/ollabridge.css b/frontend/ollabridge.css
new file mode 100644
index 0000000000000000000000000000000000000000..26fc57504d5b22bf2e5384bff838314ab94115db
--- /dev/null
+++ b/frontend/ollabridge.css
@@ -0,0 +1,222 @@
+/* ============================================================================
+ OLLABRIDGE CLOUD - Provider Tabs & Pairing UI
+ ============================================================================ */
+
+/* Provider selection tabs (replaces dropdown) */
+.settings-provider-tabs {
+ display: flex;
+ gap: 4px;
+ flex-wrap: wrap;
+ margin-top: 4px;
+}
+
+.settings-provider-tab {
+ border: 1px solid #272832;
+ outline: none;
+ background: #0a0b0f;
+ color: #9a9bb0;
+ border-radius: 8px;
+ padding: 8px 14px;
+ font-size: 13px;
+ font-weight: 500;
+ cursor: pointer;
+ transition: all 0.2s ease;
+ font-family: inherit;
+}
+
+.settings-provider-tab:hover {
+ background: #1a1b26;
+ color: #c3c5dd;
+ border-color: #3a3b4d;
+}
+
+.settings-provider-tab-active {
+ background: rgba(255, 122, 60, 0.12);
+ color: #ff7a3c;
+ border-color: #ff7a3c;
+ font-weight: 600;
+}
+
+.settings-provider-tab-active:hover {
+ background: rgba(255, 122, 60, 0.18);
+ color: #ff8b52;
+}
+
+/* Auth mode tabs (Device Pairing / API Key / Local Trust) */
+.ob-auth-tabs {
+ display: flex;
+ gap: 4px;
+ margin-top: 4px;
+ margin-bottom: 8px;
+}
+
+.ob-auth-tab {
+ display: flex;
+ align-items: center;
+ gap: 6px;
+ border: 1px solid #272832;
+ outline: none;
+ background: #0a0b0f;
+ color: #9a9bb0;
+ border-radius: 8px;
+ padding: 7px 12px;
+ font-size: 12px;
+ font-weight: 500;
+ cursor: pointer;
+ transition: all 0.2s ease;
+ font-family: inherit;
+ white-space: nowrap;
+}
+
+.ob-auth-tab:hover {
+ background: #1a1b26;
+ color: #c3c5dd;
+ border-color: #3a3b4d;
+}
+
+.ob-auth-tab-active {
+ background: rgba(59, 130, 246, 0.1);
+ color: #60a5fa;
+ border-color: #3B82F6;
+ font-weight: 600;
+}
+
+.ob-auth-tab-active:hover {
+ background: rgba(59, 130, 246, 0.15);
+}
+
+.ob-auth-tab-icon {
+ font-size: 14px;
+ line-height: 1;
+}
+
+/* Auth panel (content below tabs) */
+.ob-auth-panel {
+ padding: 12px;
+ background: #0a0b0f;
+ border: 1px solid #1e1f30;
+ border-radius: 8px;
+ margin-bottom: 4px;
+}
+
+.ob-auth-desc {
+ font-size: 12px;
+ color: #9a9bb0;
+ line-height: 1.5;
+ margin-bottom: 10px;
+}
+
+/* Pairing row */
+.ob-pair-row {
+ display: flex;
+ gap: 8px;
+ align-items: center;
+}
+
+.ob-pair-input {
+ flex: 1;
+ font-family: "SF Mono", Monaco, "Cascadia Code", monospace;
+ font-size: 16px !important;
+ letter-spacing: 2px;
+ text-align: center;
+ text-transform: uppercase;
+}
+
+.ob-pair-btn {
+ display: flex;
+ align-items: center;
+ gap: 6px;
+ border: none;
+ outline: none;
+ background: #3B82F6;
+ color: #fff;
+ border-radius: 8px;
+ padding: 9px 16px;
+ font-size: 13px;
+ font-weight: 600;
+ cursor: pointer;
+ transition: all 0.2s ease;
+ white-space: nowrap;
+ font-family: inherit;
+}
+
+.ob-pair-btn:hover:not(:disabled) {
+ background: #4d93f7;
+ transform: translateY(-1px);
+ box-shadow: 0 4px 12px rgba(59, 130, 246, 0.3);
+}
+
+.ob-pair-btn:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+}
+
+/* Pair spinner */
+.ob-pair-spinner {
+ display: inline-block;
+ width: 14px;
+ height: 14px;
+ border: 2px solid rgba(255, 255, 255, 0.3);
+ border-top-color: #fff;
+ border-radius: 50%;
+ animation: spin 0.6s linear infinite;
+}
+
+/* Pair result feedback */
+.ob-pair-result {
+ margin-top: 8px;
+ padding: 8px 12px;
+ border-radius: 6px;
+ font-size: 12px;
+ font-weight: 500;
+ animation: fadeIn 0.3s ease;
+}
+
+.ob-pair-result-ok {
+ background: rgba(76, 175, 136, 0.12);
+ border: 1px solid rgba(76, 175, 136, 0.3);
+ color: #7cffb3;
+}
+
+.ob-pair-result-err {
+ background: rgba(255, 82, 82, 0.1);
+ border: 1px solid rgba(255, 82, 82, 0.3);
+ color: #ff8a8a;
+}
+
+/* Model row (input + fetch button) */
+.ob-model-row {
+ display: flex;
+ gap: 8px;
+ align-items: center;
+}
+
+.ob-fetch-btn {
+ display: flex;
+ align-items: center;
+ gap: 5px;
+ border: 1px solid #272832;
+ outline: none;
+ background: #1a1b26;
+ color: #c3c5dd;
+ border-radius: 8px;
+ padding: 8px 12px;
+ font-size: 12px;
+ font-weight: 500;
+ cursor: pointer;
+ transition: all 0.2s ease;
+ white-space: nowrap;
+ font-family: inherit;
+}
+
+.ob-fetch-btn:hover:not(:disabled) {
+ background: #222335;
+ border-color: #3a3b4d;
+ color: #f5f5f7;
+ transform: translateY(-1px);
+}
+
+.ob-fetch-btn:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+}
diff --git a/frontend/package-lock.json b/frontend/package-lock.json
new file mode 100644
index 0000000000000000000000000000000000000000..c1426f76422011747fc86f0c4d59bf8fb3237129
--- /dev/null
+++ b/frontend/package-lock.json
@@ -0,0 +1,3346 @@
+{
+ "name": "gitpilot-frontend",
+ "version": "0.2.3",
+ "lockfileVersion": 3,
+ "requires": true,
+ "packages": {
+ "": {
+ "name": "gitpilot-frontend",
+ "version": "0.2.3",
+ "dependencies": {
+ "react": "^18.2.0",
+ "react-dom": "^18.2.0",
+ "react-markdown": "^10.1.0",
+ "reactflow": "^11.11.4"
+ },
+ "devDependencies": {
+ "@vitejs/plugin-react": "^4.0.0",
+ "vite": "^5.0.0"
+ }
+ },
+ "node_modules/@babel/code-frame": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz",
+ "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-validator-identifier": "^7.27.1",
+ "js-tokens": "^4.0.0",
+ "picocolors": "^1.1.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/compat-data": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.28.5.tgz",
+ "integrity": "sha512-6uFXyCayocRbqhZOB+6XcuZbkMNimwfVGFji8CTZnCzOHVGvDqzvitu1re2AU5LROliz7eQPhB8CpAMvnx9EjA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/core": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.28.5.tgz",
+ "integrity": "sha512-e7jT4DxYvIDLk1ZHmU/m/mB19rex9sv0c2ftBtjSBv+kVM/902eh0fINUzD7UwLLNR+jU585GxUJ8/EBfAM5fw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/code-frame": "^7.27.1",
+ "@babel/generator": "^7.28.5",
+ "@babel/helper-compilation-targets": "^7.27.2",
+ "@babel/helper-module-transforms": "^7.28.3",
+ "@babel/helpers": "^7.28.4",
+ "@babel/parser": "^7.28.5",
+ "@babel/template": "^7.27.2",
+ "@babel/traverse": "^7.28.5",
+ "@babel/types": "^7.28.5",
+ "@jridgewell/remapping": "^2.3.5",
+ "convert-source-map": "^2.0.0",
+ "debug": "^4.1.0",
+ "gensync": "^1.0.0-beta.2",
+ "json5": "^2.2.3",
+ "semver": "^6.3.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/babel"
+ }
+ },
+ "node_modules/@babel/generator": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.28.5.tgz",
+ "integrity": "sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/parser": "^7.28.5",
+ "@babel/types": "^7.28.5",
+ "@jridgewell/gen-mapping": "^0.3.12",
+ "@jridgewell/trace-mapping": "^0.3.28",
+ "jsesc": "^3.0.2"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-compilation-targets": {
+ "version": "7.27.2",
+ "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.27.2.tgz",
+ "integrity": "sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/compat-data": "^7.27.2",
+ "@babel/helper-validator-option": "^7.27.1",
+ "browserslist": "^4.24.0",
+ "lru-cache": "^5.1.1",
+ "semver": "^6.3.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-globals": {
+ "version": "7.28.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz",
+ "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-module-imports": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz",
+ "integrity": "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/traverse": "^7.27.1",
+ "@babel/types": "^7.27.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-module-transforms": {
+ "version": "7.28.3",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.3.tgz",
+ "integrity": "sha512-gytXUbs8k2sXS9PnQptz5o0QnpLL51SwASIORY6XaBKF88nsOT0Zw9szLqlSGQDP/4TljBAD5y98p2U1fqkdsw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-module-imports": "^7.27.1",
+ "@babel/helper-validator-identifier": "^7.27.1",
+ "@babel/traverse": "^7.28.3"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/helper-plugin-utils": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.27.1.tgz",
+ "integrity": "sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-string-parser": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz",
+ "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-validator-identifier": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz",
+ "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-validator-option": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz",
+ "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helpers": {
+ "version": "7.28.4",
+ "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.4.tgz",
+ "integrity": "sha512-HFN59MmQXGHVyYadKLVumYsA9dBFun/ldYxipEjzA4196jpLZd8UjEEBLkbEkvfYreDqJhZxYAWFPtrfhNpj4w==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/template": "^7.27.2",
+ "@babel/types": "^7.28.4"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/parser": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.5.tgz",
+ "integrity": "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/types": "^7.28.5"
+ },
+ "bin": {
+ "parser": "bin/babel-parser.js"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-react-jsx-self": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.27.1.tgz",
+ "integrity": "sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.27.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-react-jsx-source": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.27.1.tgz",
+ "integrity": "sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.27.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/template": {
+ "version": "7.27.2",
+ "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz",
+ "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/code-frame": "^7.27.1",
+ "@babel/parser": "^7.27.2",
+ "@babel/types": "^7.27.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/traverse": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.28.5.tgz",
+ "integrity": "sha512-TCCj4t55U90khlYkVV/0TfkJkAkUg3jZFA3Neb7unZT8CPok7iiRfaX0F+WnqWqt7OxhOn0uBKXCw4lbL8W0aQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/code-frame": "^7.27.1",
+ "@babel/generator": "^7.28.5",
+ "@babel/helper-globals": "^7.28.0",
+ "@babel/parser": "^7.28.5",
+ "@babel/template": "^7.27.2",
+ "@babel/types": "^7.28.5",
+ "debug": "^4.3.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/types": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.5.tgz",
+ "integrity": "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-string-parser": "^7.27.1",
+ "@babel/helper-validator-identifier": "^7.28.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@esbuild/aix-ppc64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz",
+ "integrity": "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "aix"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/android-arm": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.21.5.tgz",
+ "integrity": "sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/android-arm64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz",
+ "integrity": "sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/android-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.21.5.tgz",
+ "integrity": "sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/darwin-arm64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz",
+ "integrity": "sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/darwin-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz",
+ "integrity": "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/freebsd-arm64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz",
+ "integrity": "sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/freebsd-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz",
+ "integrity": "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-arm": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz",
+ "integrity": "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-arm64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz",
+ "integrity": "sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-ia32": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz",
+ "integrity": "sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-loong64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz",
+ "integrity": "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==",
+ "cpu": [
+ "loong64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-mips64el": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz",
+ "integrity": "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==",
+ "cpu": [
+ "mips64el"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-ppc64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz",
+ "integrity": "sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-riscv64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz",
+ "integrity": "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-s390x": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz",
+ "integrity": "sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==",
+ "cpu": [
+ "s390x"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz",
+ "integrity": "sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/netbsd-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz",
+ "integrity": "sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "netbsd"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/openbsd-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz",
+ "integrity": "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "openbsd"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/sunos-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz",
+ "integrity": "sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "sunos"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/win32-arm64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz",
+ "integrity": "sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/win32-ia32": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz",
+ "integrity": "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/win32-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz",
+ "integrity": "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@jridgewell/gen-mapping": {
+ "version": "0.3.13",
+ "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz",
+ "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/sourcemap-codec": "^1.5.0",
+ "@jridgewell/trace-mapping": "^0.3.24"
+ }
+ },
+ "node_modules/@jridgewell/remapping": {
+ "version": "2.3.5",
+ "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz",
+ "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/gen-mapping": "^0.3.5",
+ "@jridgewell/trace-mapping": "^0.3.24"
+ }
+ },
+ "node_modules/@jridgewell/resolve-uri": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz",
+ "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@jridgewell/sourcemap-codec": {
+ "version": "1.5.5",
+ "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz",
+ "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@jridgewell/trace-mapping": {
+ "version": "0.3.31",
+ "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz",
+ "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/resolve-uri": "^3.1.0",
+ "@jridgewell/sourcemap-codec": "^1.4.14"
+ }
+ },
+ "node_modules/@reactflow/background": {
+ "version": "11.3.14",
+ "resolved": "https://registry.npmjs.org/@reactflow/background/-/background-11.3.14.tgz",
+ "integrity": "sha512-Gewd7blEVT5Lh6jqrvOgd4G6Qk17eGKQfsDXgyRSqM+CTwDqRldG2LsWN4sNeno6sbqVIC2fZ+rAUBFA9ZEUDA==",
+ "license": "MIT",
+ "dependencies": {
+ "@reactflow/core": "11.11.4",
+ "classcat": "^5.0.3",
+ "zustand": "^4.4.1"
+ },
+ "peerDependencies": {
+ "react": ">=17",
+ "react-dom": ">=17"
+ }
+ },
+ "node_modules/@reactflow/controls": {
+ "version": "11.2.14",
+ "resolved": "https://registry.npmjs.org/@reactflow/controls/-/controls-11.2.14.tgz",
+ "integrity": "sha512-MiJp5VldFD7FrqaBNIrQ85dxChrG6ivuZ+dcFhPQUwOK3HfYgX2RHdBua+gx+40p5Vw5It3dVNp/my4Z3jF0dw==",
+ "license": "MIT",
+ "dependencies": {
+ "@reactflow/core": "11.11.4",
+ "classcat": "^5.0.3",
+ "zustand": "^4.4.1"
+ },
+ "peerDependencies": {
+ "react": ">=17",
+ "react-dom": ">=17"
+ }
+ },
+ "node_modules/@reactflow/core": {
+ "version": "11.11.4",
+ "resolved": "https://registry.npmjs.org/@reactflow/core/-/core-11.11.4.tgz",
+ "integrity": "sha512-H4vODklsjAq3AMq6Np4LE12i1I4Ta9PrDHuBR9GmL8uzTt2l2jh4CiQbEMpvMDcp7xi4be0hgXj+Ysodde/i7Q==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3": "^7.4.0",
+ "@types/d3-drag": "^3.0.1",
+ "@types/d3-selection": "^3.0.3",
+ "@types/d3-zoom": "^3.0.1",
+ "classcat": "^5.0.3",
+ "d3-drag": "^3.0.0",
+ "d3-selection": "^3.0.0",
+ "d3-zoom": "^3.0.0",
+ "zustand": "^4.4.1"
+ },
+ "peerDependencies": {
+ "react": ">=17",
+ "react-dom": ">=17"
+ }
+ },
+ "node_modules/@reactflow/minimap": {
+ "version": "11.7.14",
+ "resolved": "https://registry.npmjs.org/@reactflow/minimap/-/minimap-11.7.14.tgz",
+ "integrity": "sha512-mpwLKKrEAofgFJdkhwR5UQ1JYWlcAAL/ZU/bctBkuNTT1yqV+y0buoNVImsRehVYhJwffSWeSHaBR5/GJjlCSQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@reactflow/core": "11.11.4",
+ "@types/d3-selection": "^3.0.3",
+ "@types/d3-zoom": "^3.0.1",
+ "classcat": "^5.0.3",
+ "d3-selection": "^3.0.0",
+ "d3-zoom": "^3.0.0",
+ "zustand": "^4.4.1"
+ },
+ "peerDependencies": {
+ "react": ">=17",
+ "react-dom": ">=17"
+ }
+ },
+ "node_modules/@reactflow/node-resizer": {
+ "version": "2.2.14",
+ "resolved": "https://registry.npmjs.org/@reactflow/node-resizer/-/node-resizer-2.2.14.tgz",
+ "integrity": "sha512-fwqnks83jUlYr6OHcdFEedumWKChTHRGw/kbCxj0oqBd+ekfs+SIp4ddyNU0pdx96JIm5iNFS0oNrmEiJbbSaA==",
+ "license": "MIT",
+ "dependencies": {
+ "@reactflow/core": "11.11.4",
+ "classcat": "^5.0.4",
+ "d3-drag": "^3.0.0",
+ "d3-selection": "^3.0.0",
+ "zustand": "^4.4.1"
+ },
+ "peerDependencies": {
+ "react": ">=17",
+ "react-dom": ">=17"
+ }
+ },
+ "node_modules/@reactflow/node-toolbar": {
+ "version": "1.3.14",
+ "resolved": "https://registry.npmjs.org/@reactflow/node-toolbar/-/node-toolbar-1.3.14.tgz",
+ "integrity": "sha512-rbynXQnH/xFNu4P9H+hVqlEUafDCkEoCy0Dg9mG22Sg+rY/0ck6KkrAQrYrTgXusd+cEJOMK0uOOFCK2/5rSGQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@reactflow/core": "11.11.4",
+ "classcat": "^5.0.3",
+ "zustand": "^4.4.1"
+ },
+ "peerDependencies": {
+ "react": ">=17",
+ "react-dom": ">=17"
+ }
+ },
+ "node_modules/@rolldown/pluginutils": {
+ "version": "1.0.0-beta.27",
+ "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-beta.27.tgz",
+ "integrity": "sha512-+d0F4MKMCbeVUJwG96uQ4SgAznZNSq93I3V+9NHA4OpvqG8mRCpGdKmK8l/dl02h2CCDHwW2FqilnTyDcAnqjA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@rollup/rollup-android-arm-eabi": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.53.2.tgz",
+ "integrity": "sha512-yDPzwsgiFO26RJA4nZo8I+xqzh7sJTZIWQOxn+/XOdPE31lAvLIYCKqjV+lNH/vxE2L2iH3plKxDCRK6i+CwhA==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ]
+ },
+ "node_modules/@rollup/rollup-android-arm64": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.53.2.tgz",
+ "integrity": "sha512-k8FontTxIE7b0/OGKeSN5B6j25EuppBcWM33Z19JoVT7UTXFSo3D9CdU39wGTeb29NO3XxpMNauh09B+Ibw+9g==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ]
+ },
+ "node_modules/@rollup/rollup-darwin-arm64": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.53.2.tgz",
+ "integrity": "sha512-A6s4gJpomNBtJ2yioj8bflM2oogDwzUiMl2yNJ2v9E7++sHrSrsQ29fOfn5DM/iCzpWcebNYEdXpaK4tr2RhfQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ]
+ },
+ "node_modules/@rollup/rollup-darwin-x64": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.53.2.tgz",
+ "integrity": "sha512-e6XqVmXlHrBlG56obu9gDRPW3O3hLxpwHpLsBJvuI8qqnsrtSZ9ERoWUXtPOkY8c78WghyPHZdmPhHLWNdAGEw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ]
+ },
+ "node_modules/@rollup/rollup-freebsd-arm64": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.53.2.tgz",
+ "integrity": "sha512-v0E9lJW8VsrwPux5Qe5CwmH/CF/2mQs6xU1MF3nmUxmZUCHazCjLgYvToOk+YuuUqLQBio1qkkREhxhc656ViA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ]
+ },
+ "node_modules/@rollup/rollup-freebsd-x64": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.53.2.tgz",
+ "integrity": "sha512-ClAmAPx3ZCHtp6ysl4XEhWU69GUB1D+s7G9YjHGhIGCSrsg00nEGRRZHmINYxkdoJehde8VIsDC5t9C0gb6yqA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm-gnueabihf": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.53.2.tgz",
+ "integrity": "sha512-EPlb95nUsz6Dd9Qy13fI5kUPXNSljaG9FiJ4YUGU1O/Q77i5DYFW5KR8g1OzTcdZUqQQ1KdDqsTohdFVwCwjqg==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm-musleabihf": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.53.2.tgz",
+ "integrity": "sha512-BOmnVW+khAUX+YZvNfa0tGTEMVVEerOxN0pDk2E6N6DsEIa2Ctj48FOMfNDdrwinocKaC7YXUZ1pHlKpnkja/Q==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm64-gnu": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.53.2.tgz",
+ "integrity": "sha512-Xt2byDZ+6OVNuREgBXr4+CZDJtrVso5woFtpKdGPhpTPHcNG7D8YXeQzpNbFRxzTVqJf7kvPMCub/pcGUWgBjA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm64-musl": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.53.2.tgz",
+ "integrity": "sha512-+LdZSldy/I9N8+klim/Y1HsKbJ3BbInHav5qE9Iy77dtHC/pibw1SR/fXlWyAk0ThnpRKoODwnAuSjqxFRDHUQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-loong64-gnu": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.53.2.tgz",
+ "integrity": "sha512-8ms8sjmyc1jWJS6WdNSA23rEfdjWB30LH8Wqj0Cqvv7qSHnvw6kgMMXRdop6hkmGPlyYBdRPkjJnj3KCUHV/uQ==",
+ "cpu": [
+ "loong64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-ppc64-gnu": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.53.2.tgz",
+ "integrity": "sha512-3HRQLUQbpBDMmzoxPJYd3W6vrVHOo2cVW8RUo87Xz0JPJcBLBr5kZ1pGcQAhdZgX9VV7NbGNipah1omKKe23/g==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-riscv64-gnu": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.53.2.tgz",
+ "integrity": "sha512-fMjKi+ojnmIvhk34gZP94vjogXNNUKMEYs+EDaB/5TG/wUkoeua7p7VCHnE6T2Tx+iaghAqQX8teQzcvrYpaQA==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-riscv64-musl": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.53.2.tgz",
+ "integrity": "sha512-XuGFGU+VwUUV5kLvoAdi0Wz5Xbh2SrjIxCtZj6Wq8MDp4bflb/+ThZsVxokM7n0pcbkEr2h5/pzqzDYI7cCgLQ==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-s390x-gnu": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.53.2.tgz",
+ "integrity": "sha512-w6yjZF0P+NGzWR3AXWX9zc0DNEGdtvykB03uhonSHMRa+oWA6novflo2WaJr6JZakG2ucsyb+rvhrKac6NIy+w==",
+ "cpu": [
+ "s390x"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-x64-gnu": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.53.2.tgz",
+ "integrity": "sha512-yo8d6tdfdeBArzC7T/PnHd7OypfI9cbuZzPnzLJIyKYFhAQ8SvlkKtKBMbXDxe1h03Rcr7u++nFS7tqXz87Gtw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-x64-musl": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.53.2.tgz",
+ "integrity": "sha512-ah59c1YkCxKExPP8O9PwOvs+XRLKwh/mV+3YdKqQ5AMQ0r4M4ZDuOrpWkUaqO7fzAHdINzV9tEVu8vNw48z0lA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-openharmony-arm64": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.53.2.tgz",
+ "integrity": "sha512-4VEd19Wmhr+Zy7hbUsFZ6YXEiP48hE//KPLCSVNY5RMGX2/7HZ+QkN55a3atM1C/BZCGIgqN+xrVgtdak2S9+A==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "openharmony"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-arm64-msvc": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.53.2.tgz",
+ "integrity": "sha512-IlbHFYc/pQCgew/d5fslcy1KEaYVCJ44G8pajugd8VoOEI8ODhtb/j8XMhLpwHCMB3yk2J07ctup10gpw2nyMA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-ia32-msvc": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.53.2.tgz",
+ "integrity": "sha512-lNlPEGgdUfSzdCWU176ku/dQRnA7W+Gp8d+cWv73jYrb8uT7HTVVxq62DUYxjbaByuf1Yk0RIIAbDzp+CnOTFg==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-x64-gnu": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.53.2.tgz",
+ "integrity": "sha512-S6YojNVrHybQis2lYov1sd+uj7K0Q05NxHcGktuMMdIQ2VixGwAfbJ23NnlvvVV1bdpR2m5MsNBViHJKcA4ADw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-x64-msvc": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.53.2.tgz",
+ "integrity": "sha512-k+/Rkcyx//P6fetPoLMb8pBeqJBNGx81uuf7iljX9++yNBVRDQgD04L+SVXmXmh5ZP4/WOp4mWF0kmi06PW2tA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@types/babel__core": {
+ "version": "7.20.5",
+ "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz",
+ "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/parser": "^7.20.7",
+ "@babel/types": "^7.20.7",
+ "@types/babel__generator": "*",
+ "@types/babel__template": "*",
+ "@types/babel__traverse": "*"
+ }
+ },
+ "node_modules/@types/babel__generator": {
+ "version": "7.27.0",
+ "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz",
+ "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/types": "^7.0.0"
+ }
+ },
+ "node_modules/@types/babel__template": {
+ "version": "7.4.4",
+ "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz",
+ "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/parser": "^7.1.0",
+ "@babel/types": "^7.0.0"
+ }
+ },
+ "node_modules/@types/babel__traverse": {
+ "version": "7.28.0",
+ "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.28.0.tgz",
+ "integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/types": "^7.28.2"
+ }
+ },
+ "node_modules/@types/d3": {
+ "version": "7.4.3",
+ "resolved": "https://registry.npmjs.org/@types/d3/-/d3-7.4.3.tgz",
+ "integrity": "sha512-lZXZ9ckh5R8uiFVt8ogUNf+pIrK4EsWrx2Np75WvF/eTpJ0FMHNhjXk8CKEx/+gpHbNQyJWehbFaTvqmHWB3ww==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-array": "*",
+ "@types/d3-axis": "*",
+ "@types/d3-brush": "*",
+ "@types/d3-chord": "*",
+ "@types/d3-color": "*",
+ "@types/d3-contour": "*",
+ "@types/d3-delaunay": "*",
+ "@types/d3-dispatch": "*",
+ "@types/d3-drag": "*",
+ "@types/d3-dsv": "*",
+ "@types/d3-ease": "*",
+ "@types/d3-fetch": "*",
+ "@types/d3-force": "*",
+ "@types/d3-format": "*",
+ "@types/d3-geo": "*",
+ "@types/d3-hierarchy": "*",
+ "@types/d3-interpolate": "*",
+ "@types/d3-path": "*",
+ "@types/d3-polygon": "*",
+ "@types/d3-quadtree": "*",
+ "@types/d3-random": "*",
+ "@types/d3-scale": "*",
+ "@types/d3-scale-chromatic": "*",
+ "@types/d3-selection": "*",
+ "@types/d3-shape": "*",
+ "@types/d3-time": "*",
+ "@types/d3-time-format": "*",
+ "@types/d3-timer": "*",
+ "@types/d3-transition": "*",
+ "@types/d3-zoom": "*"
+ }
+ },
+ "node_modules/@types/d3-array": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-3.2.2.tgz",
+ "integrity": "sha512-hOLWVbm7uRza0BYXpIIW5pxfrKe0W+D5lrFiAEYR+pb6w3N2SwSMaJbXdUfSEv+dT4MfHBLtn5js0LAWaO6otw==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-axis": {
+ "version": "3.0.6",
+ "resolved": "https://registry.npmjs.org/@types/d3-axis/-/d3-axis-3.0.6.tgz",
+ "integrity": "sha512-pYeijfZuBd87T0hGn0FO1vQ/cgLk6E1ALJjfkC0oJ8cbwkZl3TpgS8bVBLZN+2jjGgg38epgxb2zmoGtSfvgMw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-selection": "*"
+ }
+ },
+ "node_modules/@types/d3-brush": {
+ "version": "3.0.6",
+ "resolved": "https://registry.npmjs.org/@types/d3-brush/-/d3-brush-3.0.6.tgz",
+ "integrity": "sha512-nH60IZNNxEcrh6L1ZSMNA28rj27ut/2ZmI3r96Zd+1jrZD++zD3LsMIjWlvg4AYrHn/Pqz4CF3veCxGjtbqt7A==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-selection": "*"
+ }
+ },
+ "node_modules/@types/d3-chord": {
+ "version": "3.0.6",
+ "resolved": "https://registry.npmjs.org/@types/d3-chord/-/d3-chord-3.0.6.tgz",
+ "integrity": "sha512-LFYWWd8nwfwEmTZG9PfQxd17HbNPksHBiJHaKuY1XeqscXacsS2tyoo6OdRsjf+NQYeB6XrNL3a25E3gH69lcg==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-color": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/@types/d3-color/-/d3-color-3.1.3.tgz",
+ "integrity": "sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-contour": {
+ "version": "3.0.6",
+ "resolved": "https://registry.npmjs.org/@types/d3-contour/-/d3-contour-3.0.6.tgz",
+ "integrity": "sha512-BjzLgXGnCWjUSYGfH1cpdo41/hgdWETu4YxpezoztawmqsvCeep+8QGfiY6YbDvfgHz/DkjeIkkZVJavB4a3rg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-array": "*",
+ "@types/geojson": "*"
+ }
+ },
+ "node_modules/@types/d3-delaunay": {
+ "version": "6.0.4",
+ "resolved": "https://registry.npmjs.org/@types/d3-delaunay/-/d3-delaunay-6.0.4.tgz",
+ "integrity": "sha512-ZMaSKu4THYCU6sV64Lhg6qjf1orxBthaC161plr5KuPHo3CNm8DTHiLw/5Eq2b6TsNP0W0iJrUOFscY6Q450Hw==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-dispatch": {
+ "version": "3.0.7",
+ "resolved": "https://registry.npmjs.org/@types/d3-dispatch/-/d3-dispatch-3.0.7.tgz",
+ "integrity": "sha512-5o9OIAdKkhN1QItV2oqaE5KMIiXAvDWBDPrD85e58Qlz1c1kI/J0NcqbEG88CoTwJrYe7ntUCVfeUl2UJKbWgA==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-drag": {
+ "version": "3.0.7",
+ "resolved": "https://registry.npmjs.org/@types/d3-drag/-/d3-drag-3.0.7.tgz",
+ "integrity": "sha512-HE3jVKlzU9AaMazNufooRJ5ZpWmLIoc90A37WU2JMmeq28w1FQqCZswHZ3xR+SuxYftzHq6WU6KJHvqxKzTxxQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-selection": "*"
+ }
+ },
+ "node_modules/@types/d3-dsv": {
+ "version": "3.0.7",
+ "resolved": "https://registry.npmjs.org/@types/d3-dsv/-/d3-dsv-3.0.7.tgz",
+ "integrity": "sha512-n6QBF9/+XASqcKK6waudgL0pf/S5XHPPI8APyMLLUHd8NqouBGLsU8MgtO7NINGtPBtk9Kko/W4ea0oAspwh9g==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-ease": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/@types/d3-ease/-/d3-ease-3.0.2.tgz",
+ "integrity": "sha512-NcV1JjO5oDzoK26oMzbILE6HW7uVXOHLQvHshBUW4UMdZGfiY6v5BeQwh9a9tCzv+CeefZQHJt5SRgK154RtiA==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-fetch": {
+ "version": "3.0.7",
+ "resolved": "https://registry.npmjs.org/@types/d3-fetch/-/d3-fetch-3.0.7.tgz",
+ "integrity": "sha512-fTAfNmxSb9SOWNB9IoG5c8Hg6R+AzUHDRlsXsDZsNp6sxAEOP0tkP3gKkNSO/qmHPoBFTxNrjDprVHDQDvo5aA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-dsv": "*"
+ }
+ },
+ "node_modules/@types/d3-force": {
+ "version": "3.0.10",
+ "resolved": "https://registry.npmjs.org/@types/d3-force/-/d3-force-3.0.10.tgz",
+ "integrity": "sha512-ZYeSaCF3p73RdOKcjj+swRlZfnYpK1EbaDiYICEEp5Q6sUiqFaFQ9qgoshp5CzIyyb/yD09kD9o2zEltCexlgw==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-format": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/d3-format/-/d3-format-3.0.4.tgz",
+ "integrity": "sha512-fALi2aI6shfg7vM5KiR1wNJnZ7r6UuggVqtDA+xiEdPZQwy/trcQaHnwShLuLdta2rTymCNpxYTiMZX/e09F4g==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-geo": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-geo/-/d3-geo-3.1.0.tgz",
+ "integrity": "sha512-856sckF0oP/diXtS4jNsiQw/UuK5fQG8l/a9VVLeSouf1/PPbBE1i1W852zVwKwYCBkFJJB7nCFTbk6UMEXBOQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/geojson": "*"
+ }
+ },
+ "node_modules/@types/d3-hierarchy": {
+ "version": "3.1.7",
+ "resolved": "https://registry.npmjs.org/@types/d3-hierarchy/-/d3-hierarchy-3.1.7.tgz",
+ "integrity": "sha512-tJFtNoYBtRtkNysX1Xq4sxtjK8YgoWUNpIiUee0/jHGRwqvzYxkq0hGVbbOGSz+JgFxxRu4K8nb3YpG3CMARtg==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-interpolate": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-3.0.4.tgz",
+ "integrity": "sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-color": "*"
+ }
+ },
+ "node_modules/@types/d3-path": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/@types/d3-path/-/d3-path-3.1.1.tgz",
+ "integrity": "sha512-VMZBYyQvbGmWyWVea0EHs/BwLgxc+MKi1zLDCONksozI4YJMcTt8ZEuIR4Sb1MMTE8MMW49v0IwI5+b7RmfWlg==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-polygon": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/@types/d3-polygon/-/d3-polygon-3.0.2.tgz",
+ "integrity": "sha512-ZuWOtMaHCkN9xoeEMr1ubW2nGWsp4nIql+OPQRstu4ypeZ+zk3YKqQT0CXVe/PYqrKpZAi+J9mTs05TKwjXSRA==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-quadtree": {
+ "version": "3.0.6",
+ "resolved": "https://registry.npmjs.org/@types/d3-quadtree/-/d3-quadtree-3.0.6.tgz",
+ "integrity": "sha512-oUzyO1/Zm6rsxKRHA1vH0NEDG58HrT5icx/azi9MF1TWdtttWl0UIUsjEQBBh+SIkrpd21ZjEv7ptxWys1ncsg==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-random": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/@types/d3-random/-/d3-random-3.0.3.tgz",
+ "integrity": "sha512-Imagg1vJ3y76Y2ea0871wpabqp613+8/r0mCLEBfdtqC7xMSfj9idOnmBYyMoULfHePJyxMAw3nWhJxzc+LFwQ==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-scale": {
+ "version": "4.0.9",
+ "resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-4.0.9.tgz",
+ "integrity": "sha512-dLmtwB8zkAeO/juAMfnV+sItKjlsw2lKdZVVy6LRr0cBmegxSABiLEpGVmSJJ8O08i4+sGR6qQtb6WtuwJdvVw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-time": "*"
+ }
+ },
+ "node_modules/@types/d3-scale-chromatic": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-scale-chromatic/-/d3-scale-chromatic-3.1.0.tgz",
+ "integrity": "sha512-iWMJgwkK7yTRmWqRB5plb1kadXyQ5Sj8V/zYlFGMUBbIPKQScw+Dku9cAAMgJG+z5GYDoMjWGLVOvjghDEFnKQ==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-selection": {
+ "version": "3.0.11",
+ "resolved": "https://registry.npmjs.org/@types/d3-selection/-/d3-selection-3.0.11.tgz",
+ "integrity": "sha512-bhAXu23DJWsrI45xafYpkQ4NtcKMwWnAC/vKrd2l+nxMFuvOT3XMYTIj2opv8vq8AO5Yh7Qac/nSeP/3zjTK0w==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-shape": {
+ "version": "3.1.7",
+ "resolved": "https://registry.npmjs.org/@types/d3-shape/-/d3-shape-3.1.7.tgz",
+ "integrity": "sha512-VLvUQ33C+3J+8p+Daf+nYSOsjB4GXp19/S/aGo60m9h1v6XaxjiT82lKVWJCfzhtuZ3yD7i/TPeC/fuKLLOSmg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-path": "*"
+ }
+ },
+ "node_modules/@types/d3-time": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-3.0.4.tgz",
+ "integrity": "sha512-yuzZug1nkAAaBlBBikKZTgzCeA+k1uy4ZFwWANOfKw5z5LRhV0gNA7gNkKm7HoK+HRN0wX3EkxGk0fpbWhmB7g==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-time-format": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/@types/d3-time-format/-/d3-time-format-4.0.3.tgz",
+ "integrity": "sha512-5xg9rC+wWL8kdDj153qZcsJ0FWiFt0J5RB6LYUNZjwSnesfblqrI/bJ1wBdJ8OQfncgbJG5+2F+qfqnqyzYxyg==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-timer": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/@types/d3-timer/-/d3-timer-3.0.2.tgz",
+ "integrity": "sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-transition": {
+ "version": "3.0.9",
+ "resolved": "https://registry.npmjs.org/@types/d3-transition/-/d3-transition-3.0.9.tgz",
+ "integrity": "sha512-uZS5shfxzO3rGlu0cC3bjmMFKsXv+SmZZcgp0KD22ts4uGXp5EVYGzu/0YdwZeKmddhcAccYtREJKkPfXkZuCg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-selection": "*"
+ }
+ },
+ "node_modules/@types/d3-zoom": {
+ "version": "3.0.8",
+ "resolved": "https://registry.npmjs.org/@types/d3-zoom/-/d3-zoom-3.0.8.tgz",
+ "integrity": "sha512-iqMC4/YlFCSlO8+2Ii1GGGliCAY4XdeG748w5vQUbevlbDu0zSjH/+jojorQVBK/se0j6DUFNPBGSqD3YWYnDw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-interpolate": "*",
+ "@types/d3-selection": "*"
+ }
+ },
+ "node_modules/@types/debug": {
+ "version": "4.1.12",
+ "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz",
+ "integrity": "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/ms": "*"
+ }
+ },
+ "node_modules/@types/estree": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz",
+ "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==",
+ "license": "MIT"
+ },
+ "node_modules/@types/estree-jsx": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/@types/estree-jsx/-/estree-jsx-1.0.5.tgz",
+ "integrity": "sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree": "*"
+ }
+ },
+ "node_modules/@types/geojson": {
+ "version": "7946.0.16",
+ "resolved": "https://registry.npmjs.org/@types/geojson/-/geojson-7946.0.16.tgz",
+ "integrity": "sha512-6C8nqWur3j98U6+lXDfTUWIfgvZU+EumvpHKcYjujKH7woYyLj2sUmff0tRhrqM7BohUw7Pz3ZB1jj2gW9Fvmg==",
+ "license": "MIT"
+ },
+ "node_modules/@types/hast": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz",
+ "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "*"
+ }
+ },
+ "node_modules/@types/mdast": {
+ "version": "4.0.4",
+ "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-4.0.4.tgz",
+ "integrity": "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "*"
+ }
+ },
+ "node_modules/@types/ms": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/@types/ms/-/ms-2.1.0.tgz",
+ "integrity": "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==",
+ "license": "MIT"
+ },
+ "node_modules/@types/react": {
+ "version": "19.2.7",
+ "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.7.tgz",
+ "integrity": "sha512-MWtvHrGZLFttgeEj28VXHxpmwYbor/ATPYbBfSFZEIRK0ecCFLl2Qo55z52Hss+UV9CRN7trSeq1zbgx7YDWWg==",
+ "license": "MIT",
+ "peer": true,
+ "dependencies": {
+ "csstype": "^3.2.2"
+ }
+ },
+ "node_modules/@types/unist": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz",
+ "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==",
+ "license": "MIT"
+ },
+ "node_modules/@ungap/structured-clone": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.3.0.tgz",
+ "integrity": "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==",
+ "license": "ISC"
+ },
+ "node_modules/@vitejs/plugin-react": {
+ "version": "4.7.0",
+ "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-4.7.0.tgz",
+ "integrity": "sha512-gUu9hwfWvvEDBBmgtAowQCojwZmJ5mcLn3aufeCsitijs3+f2NsrPtlAWIR6OPiqljl96GVCUbLe0HyqIpVaoA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/core": "^7.28.0",
+ "@babel/plugin-transform-react-jsx-self": "^7.27.1",
+ "@babel/plugin-transform-react-jsx-source": "^7.27.1",
+ "@rolldown/pluginutils": "1.0.0-beta.27",
+ "@types/babel__core": "^7.20.5",
+ "react-refresh": "^0.17.0"
+ },
+ "engines": {
+ "node": "^14.18.0 || >=16.0.0"
+ },
+ "peerDependencies": {
+ "vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0"
+ }
+ },
+ "node_modules/bail": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
+ "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/baseline-browser-mapping": {
+ "version": "2.8.28",
+ "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.8.28.tgz",
+ "integrity": "sha512-gYjt7OIqdM0PcttNYP2aVrr2G0bMALkBaoehD4BuRGjAOtipg0b6wHg1yNL+s5zSnLZZrGHOw4IrND8CD+3oIQ==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "bin": {
+ "baseline-browser-mapping": "dist/cli.js"
+ }
+ },
+ "node_modules/browserslist": {
+ "version": "4.28.0",
+ "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.0.tgz",
+ "integrity": "sha512-tbydkR/CxfMwelN0vwdP/pLkDwyAASZ+VfWm4EOwlB6SWhx1sYnWLqo8N5j0rAzPfzfRaxt0mM/4wPU/Su84RQ==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/browserslist"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "baseline-browser-mapping": "^2.8.25",
+ "caniuse-lite": "^1.0.30001754",
+ "electron-to-chromium": "^1.5.249",
+ "node-releases": "^2.0.27",
+ "update-browserslist-db": "^1.1.4"
+ },
+ "bin": {
+ "browserslist": "cli.js"
+ },
+ "engines": {
+ "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7"
+ }
+ },
+ "node_modules/caniuse-lite": {
+ "version": "1.0.30001754",
+ "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001754.tgz",
+ "integrity": "sha512-x6OeBXueoAceOmotzx3PO4Zpt4rzpeIFsSr6AAePTZxSkXiYDUmpypEl7e2+8NCd9bD7bXjqyef8CJYPC1jfxg==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/caniuse-lite"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "CC-BY-4.0"
+ },
+ "node_modules/ccount": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/ccount/-/ccount-2.0.1.tgz",
+ "integrity": "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/character-entities": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz",
+ "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/character-entities-html4": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/character-entities-html4/-/character-entities-html4-2.1.0.tgz",
+ "integrity": "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/character-entities-legacy": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz",
+ "integrity": "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/character-reference-invalid": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz",
+ "integrity": "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/classcat": {
+ "version": "5.0.5",
+ "resolved": "https://registry.npmjs.org/classcat/-/classcat-5.0.5.tgz",
+ "integrity": "sha512-JhZUT7JFcQy/EzW605k/ktHtncoo9vnyW/2GspNYwFlN1C/WmjuV/xtS04e9SOkL2sTdw0VAZ2UGCcQ9lR6p6w==",
+ "license": "MIT"
+ },
+ "node_modules/comma-separated-tokens": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz",
+ "integrity": "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/convert-source-map": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz",
+ "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/csstype": {
+ "version": "3.2.3",
+ "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz",
+ "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==",
+ "license": "MIT",
+ "peer": true
+ },
+ "node_modules/d3-color": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz",
+ "integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-dispatch": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-dispatch/-/d3-dispatch-3.0.1.tgz",
+ "integrity": "sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-drag": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/d3-drag/-/d3-drag-3.0.0.tgz",
+ "integrity": "sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-dispatch": "1 - 3",
+ "d3-selection": "3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-ease": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-3.0.1.tgz",
+ "integrity": "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==",
+ "license": "BSD-3-Clause",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-interpolate": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz",
+ "integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-color": "1 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-selection": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz",
+ "integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-timer": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz",
+ "integrity": "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-transition": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-transition/-/d3-transition-3.0.1.tgz",
+ "integrity": "sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-color": "1 - 3",
+ "d3-dispatch": "1 - 3",
+ "d3-ease": "1 - 3",
+ "d3-interpolate": "1 - 3",
+ "d3-timer": "1 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "peerDependencies": {
+ "d3-selection": "2 - 3"
+ }
+ },
+ "node_modules/d3-zoom": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/d3-zoom/-/d3-zoom-3.0.0.tgz",
+ "integrity": "sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-dispatch": "1 - 3",
+ "d3-drag": "2 - 3",
+ "d3-interpolate": "1 - 3",
+ "d3-selection": "2 - 3",
+ "d3-transition": "2 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/debug": {
+ "version": "4.4.3",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz",
+ "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==",
+ "license": "MIT",
+ "dependencies": {
+ "ms": "^2.1.3"
+ },
+ "engines": {
+ "node": ">=6.0"
+ },
+ "peerDependenciesMeta": {
+ "supports-color": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/decode-named-character-reference": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.2.0.tgz",
+ "integrity": "sha512-c6fcElNV6ShtZXmsgNgFFV5tVX2PaV4g+MOAkb8eXHvn6sryJBrZa9r0zV6+dtTyoCKxtDy5tyQ5ZwQuidtd+Q==",
+ "license": "MIT",
+ "dependencies": {
+ "character-entities": "^2.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/dequal": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz",
+ "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/devlop": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz",
+ "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==",
+ "license": "MIT",
+ "dependencies": {
+ "dequal": "^2.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/electron-to-chromium": {
+ "version": "1.5.252",
+ "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.252.tgz",
+ "integrity": "sha512-53uTpjtRgS7gjIxZ4qCgFdNO2q+wJt/Z8+xAvxbCqXPJrY6h7ighUkadQmNMXH96crtpa6gPFNP7BF4UBGDuaA==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/esbuild": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.21.5.tgz",
+ "integrity": "sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==",
+ "dev": true,
+ "hasInstallScript": true,
+ "license": "MIT",
+ "bin": {
+ "esbuild": "bin/esbuild"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "optionalDependencies": {
+ "@esbuild/aix-ppc64": "0.21.5",
+ "@esbuild/android-arm": "0.21.5",
+ "@esbuild/android-arm64": "0.21.5",
+ "@esbuild/android-x64": "0.21.5",
+ "@esbuild/darwin-arm64": "0.21.5",
+ "@esbuild/darwin-x64": "0.21.5",
+ "@esbuild/freebsd-arm64": "0.21.5",
+ "@esbuild/freebsd-x64": "0.21.5",
+ "@esbuild/linux-arm": "0.21.5",
+ "@esbuild/linux-arm64": "0.21.5",
+ "@esbuild/linux-ia32": "0.21.5",
+ "@esbuild/linux-loong64": "0.21.5",
+ "@esbuild/linux-mips64el": "0.21.5",
+ "@esbuild/linux-ppc64": "0.21.5",
+ "@esbuild/linux-riscv64": "0.21.5",
+ "@esbuild/linux-s390x": "0.21.5",
+ "@esbuild/linux-x64": "0.21.5",
+ "@esbuild/netbsd-x64": "0.21.5",
+ "@esbuild/openbsd-x64": "0.21.5",
+ "@esbuild/sunos-x64": "0.21.5",
+ "@esbuild/win32-arm64": "0.21.5",
+ "@esbuild/win32-ia32": "0.21.5",
+ "@esbuild/win32-x64": "0.21.5"
+ }
+ },
+ "node_modules/escalade": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz",
+ "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/estree-util-is-identifier-name": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/estree-util-is-identifier-name/-/estree-util-is-identifier-name-3.0.0.tgz",
+ "integrity": "sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg==",
+ "license": "MIT",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/extend": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz",
+ "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==",
+ "license": "MIT"
+ },
+ "node_modules/fsevents": {
+ "version": "2.3.3",
+ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
+ "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==",
+ "dev": true,
+ "hasInstallScript": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": "^8.16.0 || ^10.6.0 || >=11.0.0"
+ }
+ },
+ "node_modules/gensync": {
+ "version": "1.0.0-beta.2",
+ "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz",
+ "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/hast-util-to-jsx-runtime": {
+ "version": "2.3.6",
+ "resolved": "https://registry.npmjs.org/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.6.tgz",
+ "integrity": "sha512-zl6s8LwNyo1P9uw+XJGvZtdFF1GdAkOg8ujOw+4Pyb76874fLps4ueHXDhXWdk6YHQ6OgUtinliG7RsYvCbbBg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree": "^1.0.0",
+ "@types/hast": "^3.0.0",
+ "@types/unist": "^3.0.0",
+ "comma-separated-tokens": "^2.0.0",
+ "devlop": "^1.0.0",
+ "estree-util-is-identifier-name": "^3.0.0",
+ "hast-util-whitespace": "^3.0.0",
+ "mdast-util-mdx-expression": "^2.0.0",
+ "mdast-util-mdx-jsx": "^3.0.0",
+ "mdast-util-mdxjs-esm": "^2.0.0",
+ "property-information": "^7.0.0",
+ "space-separated-tokens": "^2.0.0",
+ "style-to-js": "^1.0.0",
+ "unist-util-position": "^5.0.0",
+ "vfile-message": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-whitespace": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-3.0.0.tgz",
+ "integrity": "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/hast": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/html-url-attributes": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/html-url-attributes/-/html-url-attributes-3.0.1.tgz",
+ "integrity": "sha512-ol6UPyBWqsrO6EJySPz2O7ZSr856WDrEzM5zMqp+FJJLGMW35cLYmmZnl0vztAZxRUoNZJFTCohfjuIJ8I4QBQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/inline-style-parser": {
+ "version": "0.2.7",
+ "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.7.tgz",
+ "integrity": "sha512-Nb2ctOyNR8DqQoR0OwRG95uNWIC0C1lCgf5Naz5H6Ji72KZ8OcFZLz2P5sNgwlyoJ8Yif11oMuYs5pBQa86csA==",
+ "license": "MIT"
+ },
+ "node_modules/is-alphabetical": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz",
+ "integrity": "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/is-alphanumerical": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz",
+ "integrity": "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==",
+ "license": "MIT",
+ "dependencies": {
+ "is-alphabetical": "^2.0.0",
+ "is-decimal": "^2.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/is-decimal": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-2.0.1.tgz",
+ "integrity": "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/is-hexadecimal": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz",
+ "integrity": "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/is-plain-obj": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
+ "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/js-tokens": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
+ "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==",
+ "license": "MIT"
+ },
+ "node_modules/jsesc": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz",
+ "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==",
+ "dev": true,
+ "license": "MIT",
+ "bin": {
+ "jsesc": "bin/jsesc"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/json5": {
+ "version": "2.2.3",
+ "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz",
+ "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==",
+ "dev": true,
+ "license": "MIT",
+ "bin": {
+ "json5": "lib/cli.js"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/longest-streak": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz",
+ "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/loose-envify": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz",
+ "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==",
+ "license": "MIT",
+ "dependencies": {
+ "js-tokens": "^3.0.0 || ^4.0.0"
+ },
+ "bin": {
+ "loose-envify": "cli.js"
+ }
+ },
+ "node_modules/lru-cache": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
+ "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "yallist": "^3.0.2"
+ }
+ },
+ "node_modules/mdast-util-from-markdown": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.2.tgz",
+ "integrity": "sha512-uZhTV/8NBuw0WHkPTrCqDOl0zVe1BIng5ZtHoDk49ME1qqcjYmmLmOf0gELgcRMxN4w2iuIeVso5/6QymSrgmA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "@types/unist": "^3.0.0",
+ "decode-named-character-reference": "^1.0.0",
+ "devlop": "^1.0.0",
+ "mdast-util-to-string": "^4.0.0",
+ "micromark": "^4.0.0",
+ "micromark-util-decode-numeric-character-reference": "^2.0.0",
+ "micromark-util-decode-string": "^2.0.0",
+ "micromark-util-normalize-identifier": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0",
+ "unist-util-stringify-position": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-mdx-expression": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.1.tgz",
+ "integrity": "sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree-jsx": "^1.0.0",
+ "@types/hast": "^3.0.0",
+ "@types/mdast": "^4.0.0",
+ "devlop": "^1.0.0",
+ "mdast-util-from-markdown": "^2.0.0",
+ "mdast-util-to-markdown": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-mdx-jsx": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.2.0.tgz",
+ "integrity": "sha512-lj/z8v0r6ZtsN/cGNNtemmmfoLAFZnjMbNyLzBafjzikOM+glrjNHPlf6lQDOTccj9n5b0PPihEBbhneMyGs1Q==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree-jsx": "^1.0.0",
+ "@types/hast": "^3.0.0",
+ "@types/mdast": "^4.0.0",
+ "@types/unist": "^3.0.0",
+ "ccount": "^2.0.0",
+ "devlop": "^1.1.0",
+ "mdast-util-from-markdown": "^2.0.0",
+ "mdast-util-to-markdown": "^2.0.0",
+ "parse-entities": "^4.0.0",
+ "stringify-entities": "^4.0.0",
+ "unist-util-stringify-position": "^4.0.0",
+ "vfile-message": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-mdxjs-esm": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-2.0.1.tgz",
+ "integrity": "sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree-jsx": "^1.0.0",
+ "@types/hast": "^3.0.0",
+ "@types/mdast": "^4.0.0",
+ "devlop": "^1.0.0",
+ "mdast-util-from-markdown": "^2.0.0",
+ "mdast-util-to-markdown": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-phrasing": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-4.1.0.tgz",
+ "integrity": "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "unist-util-is": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-to-hast": {
+ "version": "13.2.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.1.tgz",
+ "integrity": "sha512-cctsq2wp5vTsLIcaymblUriiTcZd0CwWtCbLvrOzYCDZoWyMNV8sZ7krj09FSnsiJi3WVsHLM4k6Dq/yaPyCXA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "@types/mdast": "^4.0.0",
+ "@ungap/structured-clone": "^1.0.0",
+ "devlop": "^1.0.0",
+ "micromark-util-sanitize-uri": "^2.0.0",
+ "trim-lines": "^3.0.0",
+ "unist-util-position": "^5.0.0",
+ "unist-util-visit": "^5.0.0",
+ "vfile": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-to-markdown": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.2.tgz",
+ "integrity": "sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "@types/unist": "^3.0.0",
+ "longest-streak": "^3.0.0",
+ "mdast-util-phrasing": "^4.0.0",
+ "mdast-util-to-string": "^4.0.0",
+ "micromark-util-classify-character": "^2.0.0",
+ "micromark-util-decode-string": "^2.0.0",
+ "unist-util-visit": "^5.0.0",
+ "zwitch": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-to-string": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz",
+ "integrity": "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/micromark/-/micromark-4.0.2.tgz",
+ "integrity": "sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "@types/debug": "^4.0.0",
+ "debug": "^4.0.0",
+ "decode-named-character-reference": "^1.0.0",
+ "devlop": "^1.0.0",
+ "micromark-core-commonmark": "^2.0.0",
+ "micromark-factory-space": "^2.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-chunked": "^2.0.0",
+ "micromark-util-combine-extensions": "^2.0.0",
+ "micromark-util-decode-numeric-character-reference": "^2.0.0",
+ "micromark-util-encode": "^2.0.0",
+ "micromark-util-normalize-identifier": "^2.0.0",
+ "micromark-util-resolve-all": "^2.0.0",
+ "micromark-util-sanitize-uri": "^2.0.0",
+ "micromark-util-subtokenize": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-core-commonmark": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-2.0.3.tgz",
+ "integrity": "sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "decode-named-character-reference": "^1.0.0",
+ "devlop": "^1.0.0",
+ "micromark-factory-destination": "^2.0.0",
+ "micromark-factory-label": "^2.0.0",
+ "micromark-factory-space": "^2.0.0",
+ "micromark-factory-title": "^2.0.0",
+ "micromark-factory-whitespace": "^2.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-chunked": "^2.0.0",
+ "micromark-util-classify-character": "^2.0.0",
+ "micromark-util-html-tag-name": "^2.0.0",
+ "micromark-util-normalize-identifier": "^2.0.0",
+ "micromark-util-resolve-all": "^2.0.0",
+ "micromark-util-subtokenize": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-factory-destination": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-2.0.1.tgz",
+ "integrity": "sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-factory-label": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-2.0.1.tgz",
+ "integrity": "sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "devlop": "^1.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-factory-space": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz",
+ "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-factory-title": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-2.0.1.tgz",
+ "integrity": "sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-factory-space": "^2.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-factory-whitespace": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.1.tgz",
+ "integrity": "sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-factory-space": "^2.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-character": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz",
+ "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-chunked": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-2.0.1.tgz",
+ "integrity": "sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-symbol": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-classify-character": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-2.0.1.tgz",
+ "integrity": "sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-combine-extensions": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.1.tgz",
+ "integrity": "sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-chunked": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-decode-numeric-character-reference": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.2.tgz",
+ "integrity": "sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-symbol": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-decode-string": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-2.0.1.tgz",
+ "integrity": "sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "decode-named-character-reference": "^1.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-decode-numeric-character-reference": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-encode": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-2.0.1.tgz",
+ "integrity": "sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT"
+ },
+ "node_modules/micromark-util-html-tag-name": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.1.tgz",
+ "integrity": "sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT"
+ },
+ "node_modules/micromark-util-normalize-identifier": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.1.tgz",
+ "integrity": "sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-symbol": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-resolve-all": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.1.tgz",
+ "integrity": "sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-sanitize-uri": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.1.tgz",
+ "integrity": "sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-encode": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-subtokenize": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-2.1.0.tgz",
+ "integrity": "sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "devlop": "^1.0.0",
+ "micromark-util-chunked": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-symbol": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz",
+ "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT"
+ },
+ "node_modules/micromark-util-types": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-2.0.2.tgz",
+ "integrity": "sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT"
+ },
+ "node_modules/ms": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
+ "license": "MIT"
+ },
+ "node_modules/nanoid": {
+ "version": "3.3.11",
+ "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz",
+ "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "bin": {
+ "nanoid": "bin/nanoid.cjs"
+ },
+ "engines": {
+ "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1"
+ }
+ },
+ "node_modules/node-releases": {
+ "version": "2.0.27",
+ "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz",
+ "integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/parse-entities": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-4.0.2.tgz",
+ "integrity": "sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "character-entities-legacy": "^3.0.0",
+ "character-reference-invalid": "^2.0.0",
+ "decode-named-character-reference": "^1.0.0",
+ "is-alphanumerical": "^2.0.0",
+ "is-decimal": "^2.0.0",
+ "is-hexadecimal": "^2.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/parse-entities/node_modules/@types/unist": {
+ "version": "2.0.11",
+ "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz",
+ "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==",
+ "license": "MIT"
+ },
+ "node_modules/picocolors": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz",
+ "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/postcss": {
+ "version": "8.5.6",
+ "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz",
+ "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/postcss"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "nanoid": "^3.3.11",
+ "picocolors": "^1.1.1",
+ "source-map-js": "^1.2.1"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14"
+ }
+ },
+ "node_modules/property-information": {
+ "version": "7.1.0",
+ "resolved": "https://registry.npmjs.org/property-information/-/property-information-7.1.0.tgz",
+ "integrity": "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/react": {
+ "version": "18.3.1",
+ "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz",
+ "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==",
+ "license": "MIT",
+ "dependencies": {
+ "loose-envify": "^1.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/react-dom": {
+ "version": "18.3.1",
+ "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz",
+ "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==",
+ "license": "MIT",
+ "dependencies": {
+ "loose-envify": "^1.1.0",
+ "scheduler": "^0.23.2"
+ },
+ "peerDependencies": {
+ "react": "^18.3.1"
+ }
+ },
+ "node_modules/react-markdown": {
+ "version": "10.1.0",
+ "resolved": "https://registry.npmjs.org/react-markdown/-/react-markdown-10.1.0.tgz",
+ "integrity": "sha512-qKxVopLT/TyA6BX3Ue5NwabOsAzm0Q7kAPwq6L+wWDwisYs7R8vZ0nRXqq6rkueboxpkjvLGU9fWifiX/ZZFxQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "@types/mdast": "^4.0.0",
+ "devlop": "^1.0.0",
+ "hast-util-to-jsx-runtime": "^2.0.0",
+ "html-url-attributes": "^3.0.0",
+ "mdast-util-to-hast": "^13.0.0",
+ "remark-parse": "^11.0.0",
+ "remark-rehype": "^11.0.0",
+ "unified": "^11.0.0",
+ "unist-util-visit": "^5.0.0",
+ "vfile": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ },
+ "peerDependencies": {
+ "@types/react": ">=18",
+ "react": ">=18"
+ }
+ },
+ "node_modules/react-refresh": {
+ "version": "0.17.0",
+ "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.17.0.tgz",
+ "integrity": "sha512-z6F7K9bV85EfseRCp2bzrpyQ0Gkw1uLoCel9XBVWPg/TjRj94SkJzUTGfOa4bs7iJvBWtQG0Wq7wnI0syw3EBQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/reactflow": {
+ "version": "11.11.4",
+ "resolved": "https://registry.npmjs.org/reactflow/-/reactflow-11.11.4.tgz",
+ "integrity": "sha512-70FOtJkUWH3BAOsN+LU9lCrKoKbtOPnz2uq0CV2PLdNSwxTXOhCbsZr50GmZ+Rtw3jx8Uv7/vBFtCGixLfd4Og==",
+ "license": "MIT",
+ "dependencies": {
+ "@reactflow/background": "11.3.14",
+ "@reactflow/controls": "11.2.14",
+ "@reactflow/core": "11.11.4",
+ "@reactflow/minimap": "11.7.14",
+ "@reactflow/node-resizer": "2.2.14",
+ "@reactflow/node-toolbar": "1.3.14"
+ },
+ "peerDependencies": {
+ "react": ">=17",
+ "react-dom": ">=17"
+ }
+ },
+ "node_modules/remark-parse": {
+ "version": "11.0.0",
+ "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-11.0.0.tgz",
+ "integrity": "sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "mdast-util-from-markdown": "^2.0.0",
+ "micromark-util-types": "^2.0.0",
+ "unified": "^11.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-rehype": {
+ "version": "11.1.2",
+ "resolved": "https://registry.npmjs.org/remark-rehype/-/remark-rehype-11.1.2.tgz",
+ "integrity": "sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "@types/mdast": "^4.0.0",
+ "mdast-util-to-hast": "^13.0.0",
+ "unified": "^11.0.0",
+ "vfile": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/rollup": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.53.2.tgz",
+ "integrity": "sha512-MHngMYwGJVi6Fmnk6ISmnk7JAHRNF0UkuucA0CUW3N3a4KnONPEZz+vUanQP/ZC/iY1Qkf3bwPWzyY84wEks1g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree": "1.0.8"
+ },
+ "bin": {
+ "rollup": "dist/bin/rollup"
+ },
+ "engines": {
+ "node": ">=18.0.0",
+ "npm": ">=8.0.0"
+ },
+ "optionalDependencies": {
+ "@rollup/rollup-android-arm-eabi": "4.53.2",
+ "@rollup/rollup-android-arm64": "4.53.2",
+ "@rollup/rollup-darwin-arm64": "4.53.2",
+ "@rollup/rollup-darwin-x64": "4.53.2",
+ "@rollup/rollup-freebsd-arm64": "4.53.2",
+ "@rollup/rollup-freebsd-x64": "4.53.2",
+ "@rollup/rollup-linux-arm-gnueabihf": "4.53.2",
+ "@rollup/rollup-linux-arm-musleabihf": "4.53.2",
+ "@rollup/rollup-linux-arm64-gnu": "4.53.2",
+ "@rollup/rollup-linux-arm64-musl": "4.53.2",
+ "@rollup/rollup-linux-loong64-gnu": "4.53.2",
+ "@rollup/rollup-linux-ppc64-gnu": "4.53.2",
+ "@rollup/rollup-linux-riscv64-gnu": "4.53.2",
+ "@rollup/rollup-linux-riscv64-musl": "4.53.2",
+ "@rollup/rollup-linux-s390x-gnu": "4.53.2",
+ "@rollup/rollup-linux-x64-gnu": "4.53.2",
+ "@rollup/rollup-linux-x64-musl": "4.53.2",
+ "@rollup/rollup-openharmony-arm64": "4.53.2",
+ "@rollup/rollup-win32-arm64-msvc": "4.53.2",
+ "@rollup/rollup-win32-ia32-msvc": "4.53.2",
+ "@rollup/rollup-win32-x64-gnu": "4.53.2",
+ "@rollup/rollup-win32-x64-msvc": "4.53.2",
+ "fsevents": "~2.3.2"
+ }
+ },
+ "node_modules/scheduler": {
+ "version": "0.23.2",
+ "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.2.tgz",
+ "integrity": "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==",
+ "license": "MIT",
+ "dependencies": {
+ "loose-envify": "^1.1.0"
+ }
+ },
+ "node_modules/semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
+ "dev": true,
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ }
+ },
+ "node_modules/source-map-js": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz",
+ "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/space-separated-tokens": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz",
+ "integrity": "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/stringify-entities": {
+ "version": "4.0.4",
+ "resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.4.tgz",
+ "integrity": "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==",
+ "license": "MIT",
+ "dependencies": {
+ "character-entities-html4": "^2.0.0",
+ "character-entities-legacy": "^3.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/style-to-js": {
+ "version": "1.1.21",
+ "resolved": "https://registry.npmjs.org/style-to-js/-/style-to-js-1.1.21.tgz",
+ "integrity": "sha512-RjQetxJrrUJLQPHbLku6U/ocGtzyjbJMP9lCNK7Ag0CNh690nSH8woqWH9u16nMjYBAok+i7JO1NP2pOy8IsPQ==",
+ "license": "MIT",
+ "dependencies": {
+ "style-to-object": "1.0.14"
+ }
+ },
+ "node_modules/style-to-object": {
+ "version": "1.0.14",
+ "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.14.tgz",
+ "integrity": "sha512-LIN7rULI0jBscWQYaSswptyderlarFkjQ+t79nzty8tcIAceVomEVlLzH5VP4Cmsv6MtKhs7qaAiwlcp+Mgaxw==",
+ "license": "MIT",
+ "dependencies": {
+ "inline-style-parser": "0.2.7"
+ }
+ },
+ "node_modules/trim-lines": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz",
+ "integrity": "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/trough": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/trough/-/trough-2.2.0.tgz",
+ "integrity": "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/unified": {
+ "version": "11.0.5",
+ "resolved": "https://registry.npmjs.org/unified/-/unified-11.0.5.tgz",
+ "integrity": "sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "bail": "^2.0.0",
+ "devlop": "^1.0.0",
+ "extend": "^3.0.0",
+ "is-plain-obj": "^4.0.0",
+ "trough": "^2.0.0",
+ "vfile": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-is": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.1.tgz",
+ "integrity": "sha512-LsiILbtBETkDz8I9p1dQ0uyRUWuaQzd/cuEeS1hoRSyW5E5XGmTzlwY1OrNzzakGowI9Dr/I8HVaw4hTtnxy8g==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-position": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-5.0.0.tgz",
+ "integrity": "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-stringify-position": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz",
+ "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-visit": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-5.0.0.tgz",
+ "integrity": "sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "unist-util-is": "^6.0.0",
+ "unist-util-visit-parents": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-visit-parents": {
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-6.0.2.tgz",
+ "integrity": "sha512-goh1s1TBrqSqukSc8wrjwWhL0hiJxgA8m4kFxGlQ+8FYQ3C/m11FcTs4YYem7V664AhHVvgoQLk890Ssdsr2IQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "unist-util-is": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/update-browserslist-db": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.4.tgz",
+ "integrity": "sha512-q0SPT4xyU84saUX+tomz1WLkxUbuaJnR1xWt17M7fJtEJigJeWUNGUqrauFXsHnqev9y9JTRGwk13tFBuKby4A==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/browserslist"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "escalade": "^3.2.0",
+ "picocolors": "^1.1.1"
+ },
+ "bin": {
+ "update-browserslist-db": "cli.js"
+ },
+ "peerDependencies": {
+ "browserslist": ">= 4.21.0"
+ }
+ },
+ "node_modules/use-sync-external-store": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.6.0.tgz",
+ "integrity": "sha512-Pp6GSwGP/NrPIrxVFAIkOQeyw8lFenOHijQWkUTrDvrF4ALqylP2C/KCkeS9dpUM3KvYRQhna5vt7IL95+ZQ9w==",
+ "license": "MIT",
+ "peerDependencies": {
+ "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0"
+ }
+ },
+ "node_modules/vfile": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz",
+ "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "vfile-message": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/vfile-message": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-4.0.3.tgz",
+ "integrity": "sha512-QTHzsGd1EhbZs4AsQ20JX1rC3cOlt/IWJruk893DfLRr57lcnOeMaWG4K0JrRta4mIJZKth2Au3mM3u03/JWKw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "unist-util-stringify-position": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/vite": {
+ "version": "5.4.21",
+ "resolved": "https://registry.npmjs.org/vite/-/vite-5.4.21.tgz",
+ "integrity": "sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "esbuild": "^0.21.3",
+ "postcss": "^8.4.43",
+ "rollup": "^4.20.0"
+ },
+ "bin": {
+ "vite": "bin/vite.js"
+ },
+ "engines": {
+ "node": "^18.0.0 || >=20.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/vitejs/vite?sponsor=1"
+ },
+ "optionalDependencies": {
+ "fsevents": "~2.3.3"
+ },
+ "peerDependencies": {
+ "@types/node": "^18.0.0 || >=20.0.0",
+ "less": "*",
+ "lightningcss": "^1.21.0",
+ "sass": "*",
+ "sass-embedded": "*",
+ "stylus": "*",
+ "sugarss": "*",
+ "terser": "^5.4.0"
+ },
+ "peerDependenciesMeta": {
+ "@types/node": {
+ "optional": true
+ },
+ "less": {
+ "optional": true
+ },
+ "lightningcss": {
+ "optional": true
+ },
+ "sass": {
+ "optional": true
+ },
+ "sass-embedded": {
+ "optional": true
+ },
+ "stylus": {
+ "optional": true
+ },
+ "sugarss": {
+ "optional": true
+ },
+ "terser": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/yallist": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz",
+ "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/zustand": {
+ "version": "4.5.7",
+ "resolved": "https://registry.npmjs.org/zustand/-/zustand-4.5.7.tgz",
+ "integrity": "sha512-CHOUy7mu3lbD6o6LJLfllpjkzhHXSBlX8B9+qPddUsIfeF5S/UZ5q0kmCsnRqT1UHFQZchNFDDzMbQsuesHWlw==",
+ "license": "MIT",
+ "dependencies": {
+ "use-sync-external-store": "^1.2.2"
+ },
+ "engines": {
+ "node": ">=12.7.0"
+ },
+ "peerDependencies": {
+ "@types/react": ">=16.8",
+ "immer": ">=9.0.6",
+ "react": ">=16.8"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ },
+ "immer": {
+ "optional": true
+ },
+ "react": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/zwitch": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz",
+ "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ }
+ }
+}
diff --git a/frontend/package.json b/frontend/package.json
new file mode 100644
index 0000000000000000000000000000000000000000..34b9741baf823844a1ee45405e7f8f34f040f363
--- /dev/null
+++ b/frontend/package.json
@@ -0,0 +1,21 @@
+{
+ "name": "gitpilot-frontend",
+ "version": "0.2.3",
+ "private": true,
+ "scripts": {
+ "dev": "vite --host",
+ "build": "vite build",
+ "vercel-build": "vite build",
+ "preview": "vite preview"
+ },
+ "dependencies": {
+ "react": "^18.2.0",
+ "react-dom": "^18.2.0",
+ "react-markdown": "^10.1.0",
+ "reactflow": "^11.11.4"
+ },
+ "devDependencies": {
+ "@vitejs/plugin-react": "^4.0.0",
+ "vite": "^5.0.0"
+ }
+}
diff --git a/frontend/styles.css b/frontend/styles.css
new file mode 100644
index 0000000000000000000000000000000000000000..8159f0a2a2d447bfb617d46e3a3e715ed83e0e03
--- /dev/null
+++ b/frontend/styles.css
@@ -0,0 +1,2825 @@
+:root {
+ color-scheme: dark;
+ font-family: system-ui, -apple-system, BlinkMacSystemFont, "SF Pro Text",
+ sans-serif;
+ background: #050608;
+ color: #f5f5f7;
+}
+
+*,
+*::before,
+*::after {
+ box-sizing: border-box;
+}
+
+body {
+ margin: 0;
+ overflow: hidden;
+}
+
+/* Custom scrollbar styling - Claude Code style */
+::-webkit-scrollbar {
+ width: 8px;
+ height: 8px;
+}
+
+::-webkit-scrollbar-track {
+ background: transparent;
+}
+
+::-webkit-scrollbar-thumb {
+ background: #272832;
+ border-radius: 4px;
+ transition: background 0.2s ease;
+}
+
+::-webkit-scrollbar-thumb:hover {
+ background: #3a3b4d;
+}
+
+/* App Root - Fixed height with footer accommodation */
+.app-root {
+ display: flex;
+ flex-direction: column;
+ height: 100vh;
+ background: radial-gradient(circle at top, #171823 0, #050608 55%);
+ color: #f5f5f7;
+ overflow: hidden;
+}
+
+/* Main content wrapper (sidebar + workspace) */
+.main-wrapper {
+ display: flex;
+ flex: 1;
+ min-height: 0;
+ overflow: hidden;
+}
+
+/* Sidebar */
+.sidebar {
+ width: 320px;
+ padding: 16px 14px;
+ border-right: 1px solid #272832;
+ background: linear-gradient(180deg, #101117 0, #050608 100%);
+ display: flex;
+ flex-direction: column;
+ gap: 16px;
+ overflow-y: auto;
+ overflow-x: hidden;
+}
+
+/* User Profile Section */
+.user-profile {
+ margin-top: auto;
+ padding-top: 16px;
+ border-top: 1px solid #272832;
+ display: flex;
+ flex-direction: column;
+ gap: 12px;
+ animation: fadeIn 0.3s ease;
+}
+
+.user-profile-header {
+ display: flex;
+ align-items: center;
+ gap: 10px;
+}
+
+.user-avatar {
+ width: 40px;
+ height: 40px;
+ border-radius: 10px;
+ border: 2px solid #272832;
+ transition: all 0.2s ease;
+}
+
+.user-avatar:hover {
+ border-color: #ff7a3c;
+ transform: scale(1.05);
+}
+
+.user-info {
+ flex: 1;
+ min-width: 0;
+}
+
+.user-name {
+ font-size: 13px;
+ font-weight: 600;
+ color: #f5f5f7;
+ white-space: nowrap;
+ overflow: hidden;
+ text-overflow: ellipsis;
+}
+
+.user-login {
+ font-size: 11px;
+ color: #9a9bb0;
+ white-space: nowrap;
+ overflow: hidden;
+ text-overflow: ellipsis;
+}
+
+.btn-logout {
+ border: none;
+ outline: none;
+ background: #1a1b26;
+ color: #c3c5dd;
+ border-radius: 8px;
+ padding: 8px 12px;
+ font-size: 12px;
+ font-weight: 500;
+ cursor: pointer;
+ transition: all 0.2s ease;
+ border: 1px solid #272832;
+}
+
+.btn-logout:hover {
+ background: #2a2b3c;
+ border-color: #ff7a3c;
+ color: #ff7a3c;
+ transform: translateY(-1px);
+}
+
+.logo-row {
+ display: flex;
+ align-items: center;
+ gap: 10px;
+ animation: fadeIn 0.3s ease;
+}
+
+@keyframes fadeIn {
+ from {
+ opacity: 0;
+ transform: translateY(-10px);
+ }
+ to {
+ opacity: 1;
+ transform: translateY(0);
+ }
+}
+
+.logo-square {
+ width: 32px;
+ height: 32px;
+ border-radius: 8px;
+ background: #ff7a3c;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ font-weight: 700;
+ color: #050608;
+ transition: transform 0.2s ease;
+}
+
+.logo-square:hover {
+ transform: scale(1.05);
+}
+
+.logo-title {
+ font-size: 16px;
+ font-weight: 600;
+}
+
+.logo-subtitle {
+ font-size: 12px;
+ color: #a1a2b3;
+}
+
+/* Active context card */
+.sidebar-context-card {
+ padding: 10px 12px;
+ border-radius: 10px;
+ background: #151622;
+ border: 1px solid #272832;
+ display: flex;
+ flex-direction: column;
+ gap: 6px;
+ animation: slideIn 0.3s ease;
+}
+
+.sidebar-context-header {
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+}
+
+.sidebar-context-close {
+ width: 22px;
+ height: 22px;
+ border-radius: 4px;
+ border: none;
+ background: transparent;
+ color: #71717a;
+ cursor: pointer;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ padding: 0;
+ transition: all 0.15s ease;
+}
+
+.sidebar-context-close:hover {
+ background: #272832;
+ color: #f5f5f7;
+}
+
+.sidebar-section-label {
+ font-size: 10px;
+ font-weight: 700;
+ letter-spacing: 0.08em;
+ color: #71717a;
+ text-transform: uppercase;
+}
+
+.sidebar-context-body {
+ display: flex;
+ flex-direction: column;
+ gap: 2px;
+}
+
+.sidebar-context-repo {
+ font-size: 13px;
+ font-weight: 600;
+ color: #f5f5f7;
+ white-space: nowrap;
+ overflow: hidden;
+ text-overflow: ellipsis;
+}
+
+.sidebar-context-meta {
+ font-size: 11px;
+ color: #9a9bb0;
+ display: flex;
+ align-items: center;
+ gap: 6px;
+}
+
+.sidebar-context-dot {
+ width: 3px;
+ height: 3px;
+ border-radius: 50%;
+ background: #4a4b5e;
+ display: inline-block;
+}
+
+.sidebar-context-actions {
+ display: flex;
+ gap: 6px;
+ margin-top: 2px;
+}
+
+.sidebar-context-btn {
+ border: none;
+ outline: none;
+ background: #1a1b26;
+ color: #9a9bb0;
+ border-radius: 6px;
+ padding: 4px 10px;
+ font-size: 11px;
+ font-weight: 500;
+ cursor: pointer;
+ transition: all 0.15s ease;
+ border: 1px solid #272832;
+}
+
+.sidebar-context-btn:hover {
+ background: #222335;
+ color: #c3c5dd;
+ border-color: #3a3b4d;
+}
+
+/* Per-repo chip list in sidebar context card */
+.sidebar-repo-chips {
+ display: flex;
+ flex-direction: column;
+ gap: 3px;
+}
+
+.sidebar-repo-chip {
+ display: flex;
+ align-items: center;
+ gap: 5px;
+ padding: 5px 6px 5px 8px;
+ border-radius: 6px;
+ border: 1px solid #272832;
+ background: #111220;
+ cursor: pointer;
+ white-space: nowrap;
+ overflow: hidden;
+ transition: border-color 0.15s, background-color 0.15s;
+}
+
+.sidebar-repo-chip:hover {
+ border-color: #3a3b4d;
+ background: #1a1b2e;
+}
+
+.sidebar-repo-chip-active {
+ border-color: #3B82F6;
+ background: rgba(59, 130, 246, 0.06);
+}
+
+.sidebar-chip-name {
+ font-size: 12px;
+ font-weight: 600;
+ color: #c3c5dd;
+ font-family: monospace;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ flex: 1;
+ min-width: 0;
+}
+
+.sidebar-repo-chip-active .sidebar-chip-name {
+ color: #f5f5f7;
+}
+
+.sidebar-chip-dot {
+ width: 2px;
+ height: 2px;
+ border-radius: 50%;
+ background: #4a4b5e;
+ flex-shrink: 0;
+}
+
+.sidebar-chip-branch {
+ font-size: 10px;
+ color: #71717a;
+ font-family: monospace;
+ flex-shrink: 0;
+}
+
+.sidebar-repo-chip-active .sidebar-chip-branch {
+ color: #60a5fa;
+}
+
+.sidebar-chip-write-badge {
+ font-size: 8px;
+ font-weight: 700;
+ text-transform: uppercase;
+ letter-spacing: 0.06em;
+ color: #4caf88;
+ padding: 0 4px;
+ border-radius: 3px;
+ border: 1px solid rgba(76, 175, 136, 0.25);
+ flex-shrink: 0;
+}
+
+/* Per-chip remove button: subtle by default, visible on hover */
+.sidebar-chip-remove {
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ width: 16px;
+ height: 16px;
+ border-radius: 3px;
+ border: none;
+ background: transparent;
+ color: #52525B;
+ cursor: pointer;
+ flex-shrink: 0;
+ padding: 0;
+ opacity: 0;
+ transition: opacity 0.15s, color 0.15s, background 0.15s;
+}
+
+.sidebar-repo-chip:hover .sidebar-chip-remove {
+ opacity: 1;
+}
+
+.sidebar-chip-remove:hover {
+ color: #f87171;
+ background: rgba(248, 113, 113, 0.1);
+}
+
+/* "clear all" link-style button */
+.sidebar-clear-all {
+ font-size: 9px;
+ color: #52525B;
+ width: auto;
+ height: auto;
+ padding: 2px 6px;
+ font-weight: 600;
+ text-transform: uppercase;
+ letter-spacing: 0.04em;
+}
+
+.sidebar-clear-all:hover {
+ color: #f87171;
+ background: rgba(248, 113, 113, 0.08);
+}
+
+@keyframes slideIn {
+ from {
+ opacity: 0;
+ transform: translateX(-10px);
+ }
+ to {
+ opacity: 1;
+ transform: translateX(0);
+ }
+}
+
+/* ContextBar — horizontal chip bar above workspace */
+.ctxbar {
+ display: flex;
+ align-items: center;
+ gap: 8px;
+ padding: 6px 12px;
+ border-bottom: 1px solid #1E1F23;
+ background-color: #0D0D10;
+ min-height: 40px;
+}
+
+.ctxbar-scroll {
+ display: flex;
+ align-items: center;
+ gap: 6px;
+ flex: 1;
+ overflow-x: auto;
+ scrollbar-width: none;
+}
+
+.ctxbar-scroll::-webkit-scrollbar {
+ display: none;
+}
+
+.ctxbar-chip {
+ display: flex;
+ align-items: center;
+ gap: 5px;
+ padding: 4px 6px 4px 8px;
+ border-radius: 6px;
+ border: 1px solid #27272A;
+ background: #18181B;
+ cursor: pointer;
+ white-space: nowrap;
+ position: relative;
+ flex-shrink: 0;
+ transition: border-color 0.15s, background-color 0.15s;
+}
+
+.ctxbar-chip:hover {
+ border-color: #3a3b4d;
+ background: #1e1f30;
+}
+
+.ctxbar-chip-active {
+ border-color: #3B82F6;
+ background: rgba(59, 130, 246, 0.08);
+}
+
+.ctxbar-chip-indicator {
+ position: absolute;
+ left: 0;
+ top: 25%;
+ bottom: 25%;
+ width: 2px;
+ border-radius: 1px;
+ background-color: #3B82F6;
+}
+
+.ctxbar-chip-name {
+ font-size: 12px;
+ font-weight: 600;
+ font-family: monospace;
+ color: #A1A1AA;
+ max-width: 120px;
+ overflow: hidden;
+ text-overflow: ellipsis;
+}
+
+.ctxbar-chip-active .ctxbar-chip-name {
+ color: #E4E4E7;
+}
+
+.ctxbar-chip-dot {
+ width: 2px;
+ height: 2px;
+ border-radius: 50%;
+ background: #4a4b5e;
+ flex-shrink: 0;
+}
+
+.ctxbar-chip-branch {
+ font-size: 10px;
+ font-family: monospace;
+ background: none;
+ border: 1px solid transparent;
+ border-radius: 3px;
+ padding: 1px 4px;
+ cursor: pointer;
+ color: #71717A;
+ transition: border-color 0.15s, color 0.15s;
+}
+
+.ctxbar-chip-branch:hover {
+ border-color: #3a3b4d;
+}
+
+.ctxbar-chip-branch-active {
+ color: #60a5fa;
+}
+
+.ctxbar-chip-write {
+ font-size: 8px;
+ font-weight: 700;
+ text-transform: uppercase;
+ letter-spacing: 0.06em;
+ color: #4caf88;
+ padding: 0 4px;
+ border-radius: 3px;
+ border: 1px solid rgba(76, 175, 136, 0.25);
+ flex-shrink: 0;
+}
+
+/* Hover-reveal remove button (Claude-style: hidden → visible on chip hover → red on X hover) */
+.ctxbar-chip-remove {
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ width: 16px;
+ height: 16px;
+ border-radius: 3px;
+ border: none;
+ background: transparent;
+ color: #52525B;
+ cursor: pointer;
+ flex-shrink: 0;
+ padding: 0;
+ opacity: 0;
+ transition: opacity 0.15s, color 0.15s, background 0.15s;
+}
+
+.ctxbar-chip-remove-visible,
+.ctxbar-chip:hover .ctxbar-chip-remove {
+ opacity: 1;
+}
+
+.ctxbar-chip-remove:hover {
+ color: #f87171;
+ background: rgba(248, 113, 113, 0.1);
+}
+
+.ctxbar-add {
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ width: 28px;
+ height: 28px;
+ border-radius: 6px;
+ border: 1px dashed #3F3F46;
+ background: transparent;
+ color: #71717A;
+ cursor: pointer;
+ flex-shrink: 0;
+ transition: border-color 0.15s, color 0.15s;
+}
+
+.ctxbar-add:hover {
+ border-color: #60a5fa;
+ color: #60a5fa;
+}
+
+.ctxbar-meta {
+ font-size: 10px;
+ color: #52525B;
+ white-space: nowrap;
+ flex-shrink: 0;
+ font-weight: 600;
+ text-transform: uppercase;
+ letter-spacing: 0.04em;
+}
+
+.ctxbar-branch-picker {
+ position: absolute;
+ top: 100%;
+ left: 0;
+ z-index: 100;
+ margin-top: 4px;
+}
+
+/* Legacy compat — kept for other uses */
+.sidebar-repo-info {
+ padding: 10px 12px;
+ border-radius: 10px;
+ background: #151622;
+ border: 1px solid #272832;
+ animation: slideIn 0.3s ease;
+}
+
+.sidebar-repo-name {
+ font-size: 13px;
+ font-weight: 500;
+}
+
+.sidebar-repo-meta {
+ font-size: 11px;
+ color: #9a9bb0;
+ margin-top: 2px;
+}
+
+.settings-button {
+ border: none;
+ outline: none;
+ background: #1a1b26;
+ color: #f5f5f7;
+ border-radius: 8px;
+ padding: 8px 10px;
+ cursor: pointer;
+ font-size: 13px;
+ transition: all 0.2s ease;
+}
+
+.settings-button:hover {
+ background: #222335;
+ transform: translateY(-1px);
+}
+
+/* Repo search */
+.repo-search-box {
+ border-radius: 12px;
+ background: #101117;
+ border: 1px solid #272832;
+ padding: 8px;
+ display: flex;
+ flex-direction: column;
+ gap: 8px;
+}
+
+/* Search header wrapper */
+.repo-search-header {
+ display: flex;
+ flex-direction: column;
+ gap: 8px;
+}
+
+/* Search row with input and button */
+.repo-search-row {
+ display: flex;
+ gap: 6px;
+ align-items: center;
+}
+
+/* Search input */
+.repo-search-input {
+ flex: 1;
+ border-radius: 7px;
+ padding: 8px 10px;
+ border: 1px solid #272832;
+ background: #050608;
+ color: #f5f5f7;
+ font-size: 13px;
+ transition: all 0.2s ease;
+}
+
+.repo-search-input:focus {
+ outline: none;
+ border-color: #ff7a3c;
+ background: #0a0b0f;
+ box-shadow: 0 0 0 3px rgba(255, 122, 60, 0.08);
+}
+
+.repo-search-input::placeholder {
+ color: #676883;
+}
+
+.repo-search-input:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+}
+
+/* Search button */
+.repo-search-btn {
+ border-radius: 7px;
+ border: none;
+ outline: none;
+ padding: 8px 14px;
+ background: #1a1b26;
+ color: #f5f5f7;
+ cursor: pointer;
+ font-size: 13px;
+ font-weight: 500;
+ transition: all 0.2s ease;
+ white-space: nowrap;
+}
+
+.repo-search-btn:hover:not(:disabled) {
+ background: #222335;
+ transform: translateY(-1px);
+}
+
+.repo-search-btn:active:not(:disabled) {
+ transform: translateY(0);
+}
+
+.repo-search-btn:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+}
+
+/* Info bar (shows count and clear button) */
+.repo-info-bar {
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ padding: 6px 10px;
+ background: #0a0b0f;
+ border: 1px solid #272832;
+ border-radius: 7px;
+ font-size: 11px;
+}
+
+.repo-count {
+ color: #9a9bb0;
+ font-weight: 500;
+}
+
+.repo-clear-btn {
+ padding: 3px 10px;
+ background: transparent;
+ border: 1px solid #272832;
+ border-radius: 5px;
+ color: #9a9bb0;
+ font-size: 11px;
+ font-weight: 500;
+ cursor: pointer;
+ transition: all 0.2s ease;
+}
+
+.repo-clear-btn:hover:not(:disabled) {
+ background: #1a1b26;
+ color: #c3c5dd;
+ border-color: #3a3b4d;
+}
+
+.repo-clear-btn:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+}
+
+/* Status message */
+.repo-status {
+ padding: 8px 10px;
+ background: #1a1b26;
+ border: 1px solid #272832;
+ border-radius: 7px;
+ color: #9a9bb0;
+ font-size: 11px;
+ text-align: center;
+}
+
+/* Repository list */
+.repo-list {
+ max-height: 220px;
+ overflow-y: auto;
+ overflow-x: hidden;
+ padding-right: 2px;
+ display: flex;
+ flex-direction: column;
+ gap: 4px;
+}
+
+/* Custom scrollbar for repo list */
+.repo-list::-webkit-scrollbar {
+ width: 6px;
+}
+
+.repo-list::-webkit-scrollbar-track {
+ background: transparent;
+}
+
+.repo-list::-webkit-scrollbar-thumb {
+ background: #272832;
+ border-radius: 3px;
+}
+
+.repo-list::-webkit-scrollbar-thumb:hover {
+ background: #3a3b4d;
+}
+
+/* Repository item */
+.repo-item {
+ width: 100%;
+ text-align: left;
+ border: none;
+ outline: none;
+ background: transparent;
+ color: #f5f5f7;
+ padding: 8px 8px;
+ border-radius: 7px;
+ cursor: pointer;
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+ gap: 8px;
+ transition: all 0.15s ease;
+ border: 1px solid transparent;
+}
+
+.repo-item:hover {
+ background: #1a1b26;
+ border-color: #272832;
+ transform: translateX(2px);
+}
+
+.repo-item-content {
+ display: flex;
+ flex-direction: column;
+ gap: 2px;
+ flex: 1;
+ min-width: 0;
+}
+
+.repo-name {
+ font-size: 13px;
+ font-weight: 500;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ white-space: nowrap;
+}
+
+.repo-owner {
+ font-size: 11px;
+ color: #8e8fac;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ white-space: nowrap;
+}
+
+/* Private badge */
+.repo-badge-private {
+ padding: 2px 6px;
+ background: #1a1b26;
+ border: 1px solid #3a3b4d;
+ border-radius: 4px;
+ color: #9a9bb0;
+ font-size: 9px;
+ font-weight: 600;
+ text-transform: uppercase;
+ letter-spacing: 0.3px;
+ white-space: nowrap;
+ flex-shrink: 0;
+}
+
+/* Loading states */
+.repo-loading {
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ gap: 10px;
+ padding: 30px 20px;
+ color: #9a9bb0;
+ font-size: 12px;
+}
+
+.repo-loading-spinner {
+ width: 24px;
+ height: 24px;
+ border: 2px solid #272832;
+ border-top-color: #ff7a3c;
+ border-radius: 50%;
+ animation: repo-spin 0.8s linear infinite;
+}
+
+.repo-loading-spinner-small {
+ width: 14px;
+ height: 14px;
+ border: 2px solid rgba(255, 122, 60, 0.3);
+ border-top-color: #ff7a3c;
+ border-radius: 50%;
+ animation: repo-spin 0.8s linear infinite;
+}
+
+@keyframes repo-spin {
+ to {
+ transform: rotate(360deg);
+ }
+}
+
+/* Load more button */
+.repo-load-more {
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ gap: 8px;
+ width: 100%;
+ padding: 10px 12px;
+ margin: 4px 0;
+ background: #0a0b0f;
+ border: 1px solid #272832;
+ border-radius: 7px;
+ color: #c3c5dd;
+ font-size: 12px;
+ font-weight: 500;
+ cursor: pointer;
+ transition: all 0.2s ease;
+}
+
+.repo-load-more:hover:not(:disabled) {
+ background: #1a1b26;
+ border-color: #3a3b4d;
+ transform: translateY(-1px);
+}
+
+.repo-load-more:active:not(:disabled) {
+ transform: translateY(0);
+}
+
+.repo-load-more:disabled {
+ opacity: 0.6;
+ cursor: not-allowed;
+}
+
+.repo-load-more-count {
+ color: #7779a0;
+ font-weight: 400;
+}
+
+/* All loaded message */
+.repo-all-loaded {
+ padding: 10px 12px;
+ margin: 4px 0;
+ background: rgba(124, 255, 179, 0.08);
+ border: 1px solid rgba(124, 255, 179, 0.2);
+ border-radius: 7px;
+ color: #7cffb3;
+ font-size: 11px;
+ text-align: center;
+ font-weight: 500;
+}
+
+/* GitHub App installation notice */
+.repo-github-notice {
+ display: flex;
+ align-items: flex-start;
+ gap: 10px;
+ padding: 10px 12px;
+ background: #0a0b0f;
+ border: 1px solid #272832;
+ border-radius: 7px;
+ font-size: 11px;
+ line-height: 1.5;
+ margin-top: 4px;
+}
+
+.repo-github-icon {
+ flex-shrink: 0;
+ margin-top: 1px;
+ opacity: 0.6;
+ color: #9a9bb0;
+ width: 16px;
+ height: 16px;
+}
+
+.repo-github-notice-content {
+ flex: 1;
+ display: flex;
+ flex-direction: column;
+ gap: 3px;
+}
+
+.repo-github-notice-title {
+ color: #c3c5dd;
+ font-weight: 600;
+ font-size: 11px;
+}
+
+.repo-github-notice-text {
+ color: #9a9bb0;
+}
+
+.repo-github-link {
+ color: #ff7a3c;
+ text-decoration: none;
+ font-weight: 500;
+ transition: color 0.2s ease;
+}
+
+.repo-github-link:hover {
+ color: #ff8b52;
+ text-decoration: underline;
+}
+
+/* Focus visible for accessibility */
+.repo-item:focus-visible,
+.repo-search-btn:focus-visible,
+.repo-load-more:focus-visible,
+.repo-clear-btn:focus-visible {
+ outline: 2px solid #ff7a3c;
+ outline-offset: 2px;
+}
+
+/* Reduced motion support */
+@media (prefers-reduced-motion: reduce) {
+ .repo-item,
+ .repo-search-btn,
+ .repo-load-more,
+ .repo-clear-btn {
+ transition: none;
+ }
+
+ .repo-loading-spinner,
+ .repo-loading-spinner-small {
+ animation: none;
+ }
+}
+
+/* Mobile responsive adjustments */
+@media (max-width: 768px) {
+ .repo-search-input {
+ font-size: 16px; /* Prevents zoom on iOS */
+ }
+
+ .repo-item {
+ padding: 7px 7px;
+ }
+
+ .repo-name {
+ font-size: 12px;
+ }
+
+ .repo-owner {
+ font-size: 10px;
+ }
+}
+
+/* Workspace */
+.workspace {
+ flex: 1;
+ display: flex;
+ flex-direction: column;
+ position: relative;
+ overflow: hidden;
+ min-height: 0;
+}
+
+.empty-state {
+ margin: auto;
+ max-width: 420px;
+ text-align: center;
+ color: #c3c5dd;
+ animation: fadeIn 0.5s ease;
+}
+
+.empty-bot {
+ font-size: 36px;
+ margin-bottom: 12px;
+ animation: bounce 2s ease infinite;
+}
+
+@keyframes bounce {
+ 0%, 100% {
+ transform: translateY(0);
+ }
+ 50% {
+ transform: translateY(-10px);
+ }
+}
+
+.empty-state h1 {
+ font-size: 24px;
+ margin-bottom: 6px;
+}
+
+.empty-state p {
+ font-size: 14px;
+ color: #9a9bb0;
+}
+
+/* Workspace grid - Properly constrained */
+.workspace-grid {
+ display: grid;
+ grid-template-columns: 320px minmax(340px, 1fr);
+ height: 100%;
+ overflow: hidden;
+ flex: 1;
+ min-height: 0;
+}
+
+/* Panels */
+.panel-header {
+ height: 40px;
+ padding: 0 16px;
+ border-bottom: 1px solid #272832;
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+ font-size: 13px;
+ font-weight: 500;
+ color: #c3c5dd;
+ background: #0a0b0f;
+ flex-shrink: 0;
+}
+
+.badge {
+ padding: 2px 6px;
+ border-radius: 999px;
+ border: 1px solid #3a3b4d;
+ font-size: 10px;
+}
+
+/* Files */
+.files-panel {
+ border-right: 1px solid #272832;
+ background: #101117;
+ display: flex;
+ flex-direction: column;
+ overflow: hidden;
+}
+
+.files-list {
+ flex: 1;
+ overflow-y: auto;
+ overflow-x: hidden;
+ padding: 6px 4px;
+ min-height: 0;
+}
+
+.files-item {
+ border: none;
+ outline: none;
+ width: 100%;
+ background: transparent;
+ color: #f5f5f7;
+ display: flex;
+ align-items: center;
+ gap: 8px;
+ padding: 4px 8px;
+ border-radius: 6px;
+ cursor: pointer;
+ font-size: 12px;
+ transition: all 0.15s ease;
+}
+
+.files-item:hover {
+ background: #1a1b26;
+ transform: translateX(2px);
+}
+
+.files-item-active {
+ background: #2a2b3c;
+}
+
+.file-icon {
+ width: 16px;
+ flex-shrink: 0;
+}
+
+.file-path {
+ white-space: nowrap;
+ overflow: hidden;
+ text-overflow: ellipsis;
+}
+
+.files-empty {
+ padding: 10px 12px;
+ font-size: 12px;
+ color: #9a9bb0;
+}
+
+/* Chat panel */
+.editor-panel {
+ display: flex;
+ flex-direction: column;
+ background: #050608;
+}
+
+.chat-container {
+ display: flex;
+ flex-direction: column;
+ flex: 1;
+ min-height: 0;
+ overflow: hidden;
+}
+
+.chat-messages {
+ flex: 1;
+ padding: 12px 16px;
+ overflow-y: auto;
+ overflow-x: hidden;
+ font-size: 13px;
+ min-height: 0;
+ scroll-behavior: smooth;
+}
+
+.chat-message-user {
+ margin-bottom: 16px;
+ animation: slideInRight 0.3s ease;
+}
+
+@keyframes slideInRight {
+ from {
+ opacity: 0;
+ transform: translateX(20px);
+ }
+ to {
+ opacity: 1;
+ transform: translateX(0);
+ }
+}
+
+.chat-message-ai {
+ margin-bottom: 16px;
+ animation: slideInLeft 0.3s ease;
+}
+
+@keyframes slideInLeft {
+ from {
+ opacity: 0;
+ transform: translateX(-20px);
+ }
+ to {
+ opacity: 1;
+ transform: translateX(0);
+ }
+}
+
/* AI message bubble: boxed, padded, width-capped. (Merged from two duplicate
   `.chat-message-ai span` rules that were previously split around the user
   rule — equal specificity, non-overlapping properties, so combining them is
   cascade-safe.) */
.chat-message-ai span {
  display: inline-block;
  padding: 10px 14px;
  border-radius: 12px;
  max-width: 80%;
  line-height: 1.5;
  background: #151622;
  border: 1px solid #272832;
}

/* User message: plain inline text; explicitly resets every bubble property
   so user messages never pick up bubble styling. */
.chat-message-user span {
  display: inline;
  padding: 0;
  border-radius: 0;
  background: transparent;
  border: none;
  max-width: none;
  line-height: inherit;
}
+
+.chat-empty-state {
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ justify-content: center;
+ min-height: 300px;
+ padding: 40px 20px;
+ text-align: center;
+}
+
+.chat-empty-icon {
+ font-size: 48px;
+ margin-bottom: 16px;
+ opacity: 0.6;
+ animation: pulse 2s ease infinite;
+}
+
+@keyframes pulse {
+ 0%, 100% {
+ opacity: 0.6;
+ }
+ 50% {
+ opacity: 0.8;
+ }
+}
+
+.chat-empty-state p {
+ margin: 0;
+ font-size: 13px;
+ color: #9a9bb0;
+ max-width: 400px;
+}
+
+.chat-input-box {
+ padding: 12px 16px;
+ border-top: 1px solid #272832;
+ display: flex;
+ flex-direction: column;
+ gap: 10px;
+ background: #050608;
+ flex-shrink: 0;
+ min-height: fit-content;
+ box-shadow: 0 -2px 10px rgba(0, 0, 0, 0.3);
+}
+
+.chat-input-row {
+ display: flex;
+ gap: 10px;
+ align-items: center;
+ flex-wrap: wrap;
+}
+
+.chat-input {
+ flex: 1;
+ min-width: 200px;
+ border-radius: 8px;
+ padding: 10px 12px;
+ border: 1px solid #272832;
+ background: #0a0b0f;
+ color: #f5f5f7;
+ font-size: 13px;
+ line-height: 1.5;
+ transition: all 0.2s ease;
+}
+
+.chat-input:focus {
+ outline: none;
+ border-color: #ff7a3c;
+ background: #101117;
+ box-shadow: 0 0 0 3px rgba(255, 122, 60, 0.1);
+}
+
+.chat-input::placeholder {
+ color: #676883;
+}
+
+.chat-btn {
+ border-radius: 8px;
+ border: none;
+ outline: none;
+ padding: 10px 16px;
+ background: #ff7a3c;
+ color: #050608;
+ cursor: pointer;
+ font-size: 13px;
+ font-weight: 600;
+ transition: all 0.2s ease;
+ white-space: nowrap;
+ min-height: 40px;
+}
+
.chat-btn:hover:not(:disabled) {
  /* #ff8b52 matches the hover shade used by .settings-save-btn:hover,
     .repo-github-link:hover and .form-link:hover elsewhere in this sheet;
     the previous #ff8c52 appears to have been a one-off typo. */
  background: #ff8b52;
  transform: translateY(-1px);
  box-shadow: 0 4px 12px rgba(255, 122, 60, 0.3);
}
+
+.chat-btn:active:not(:disabled) {
+ transform: translateY(0);
+}
+
+.chat-btn.secondary {
+ background: #1a1b26;
+ color: #f5f5f7;
+ border: 1px solid #272832;
+}
+
+.chat-btn.secondary:hover:not(:disabled) {
+ background: #222335;
+ border-color: #3a3b4d;
+}
+
+.chat-btn:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+}
+
+/* Plan rendering */
+.plan-card {
+ border-radius: 12px;
+ background: #101117;
+ border: 1px solid #272832;
+ padding: 10px 12px;
+ margin-top: 6px;
+ animation: fadeIn 0.3s ease;
+}
+
+.plan-steps {
+ margin: 6px 0 0;
+ padding-left: 18px;
+ font-size: 12px;
+}
+
+.plan-steps li {
+ margin-bottom: 4px;
+}
+
+/* Modal */
+.modal-backdrop {
+ position: fixed;
+ inset: 0;
+ background: rgba(0, 0, 0, 0.55);
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ z-index: 20;
+ animation: fadeIn 0.2s ease;
+}
+
+.modal {
+ background: #101117;
+ border-radius: 16px;
+ border: 1px solid #272832;
+ padding: 16px 18px;
+ width: 360px;
+ animation: scaleIn 0.3s ease;
+}
+
+@keyframes scaleIn {
+ from {
+ opacity: 0;
+ transform: scale(0.9);
+ }
+ to {
+ opacity: 1;
+ transform: scale(1);
+ }
+}
+
+.modal-header {
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ margin-bottom: 10px;
+}
+
+.modal-title {
+ font-size: 15px;
+ font-weight: 600;
+}
+
+.modal-close {
+ border: none;
+ outline: none;
+ background: transparent;
+ color: #9a9bb0;
+ cursor: pointer;
+ transition: color 0.2s ease;
+}
+
+.modal-close:hover {
+ color: #ff7a3c;
+}
+
+.provider-list {
+ display: flex;
+ flex-direction: column;
+ gap: 6px;
+ margin-top: 8px;
+}
+
+.provider-item {
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+ padding: 6px 8px;
+ border-radius: 8px;
+ background: #151622;
+ border: 1px solid #272832;
+ font-size: 13px;
+ transition: all 0.2s ease;
+}
+
+.provider-item:hover {
+ border-color: #3a3b4d;
+}
+
+.provider-item.active {
+ border-color: #ff7a3c;
+ background: rgba(255, 122, 60, 0.1);
+}
+
+.provider-name {
+ font-weight: 500;
+}
+
+.provider-badge {
+ font-size: 11px;
+ color: #9a9bb0;
+}
+
+/* Navigation */
+.main-nav {
+ display: flex;
+ flex-direction: column;
+ gap: 2px;
+ margin-top: 10px;
+ margin-bottom: 10px;
+}
+
+.nav-btn {
+ border: none;
+ outline: none;
+ background: transparent;
+ color: #9a9bb0;
+ border-radius: 8px;
+ font-size: 13px;
+ font-weight: 500;
+ padding: 8px 12px;
+ text-align: left;
+ cursor: pointer;
+ transition: all 0.15s ease;
+}
+
+.nav-btn:hover {
+ background: #1a1b26;
+ color: #c3c5dd;
+}
+
+.nav-btn-active {
+ background: #1a1b26;
+ color: #f5f5f7;
+ font-weight: 600;
+ border-left: 2px solid #ff7a3c;
+ padding-left: 10px;
+}
+
+/* Settings page */
+.settings-root {
+ padding: 20px 24px;
+ overflow-y: auto;
+ max-width: 800px;
+}
+
+.settings-root h1 {
+ margin-top: 0;
+ font-size: 24px;
+ margin-bottom: 8px;
+}
+
+.settings-muted {
+ font-size: 13px;
+ color: #9a9bb0;
+ margin-bottom: 20px;
+ line-height: 1.5;
+}
+
+.settings-card {
+ background: #101117;
+ border-radius: 12px;
+ border: 1px solid #272832;
+ padding: 14px 16px;
+ margin-bottom: 14px;
+ display: flex;
+ flex-direction: column;
+ gap: 8px;
+ transition: all 0.2s ease;
+}
+
+.settings-card:hover {
+ border-color: #3a3b4d;
+}
+
+.settings-title {
+ font-size: 15px;
+ font-weight: 600;
+ margin-bottom: 4px;
+}
+
+.settings-label {
+ font-size: 12px;
+ color: #9a9bb0;
+ font-weight: 500;
+ margin-top: 4px;
+}
+
+.settings-input,
+.settings-select {
+ background: #050608;
+ border-radius: 8px;
+ border: 1px solid #272832;
+ padding: 8px 10px;
+ color: #f5f5f7;
+ font-size: 13px;
+ font-family: inherit;
+ transition: all 0.2s ease;
+}
+
+.settings-input:focus,
+.settings-select:focus {
+ outline: none;
+ border-color: #ff7a3c;
+ box-shadow: 0 0 0 3px rgba(255, 122, 60, 0.1);
+}
+
+.settings-input::placeholder {
+ color: #676883;
+}
+
+.settings-hint {
+ font-size: 11px;
+ color: #7a7b8e;
+ margin-top: -2px;
+}
+
+.settings-actions {
+ margin-top: 12px;
+ display: flex;
+ align-items: center;
+ gap: 12px;
+}
+
+.settings-save-btn {
+ background: #ff7a3c;
+ border-radius: 999px;
+ border: none;
+ outline: none;
+ padding: 9px 18px;
+ font-size: 13px;
+ cursor: pointer;
+ color: #050608;
+ font-weight: 600;
+ transition: all 0.2s ease;
+}
+
+.settings-save-btn:hover {
+ background: #ff8b52;
+ transform: translateY(-1px);
+ box-shadow: 0 4px 12px rgba(255, 122, 60, 0.3);
+}
+
+.settings-save-btn:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+ transform: none;
+}
+
+.settings-success {
+ font-size: 12px;
+ color: #7cffb3;
+ font-weight: 500;
+}
+
+.settings-error {
+ font-size: 12px;
+ color: #ff8a8a;
+ font-weight: 500;
+}
+
+/* Flow viewer */
+.flow-root {
+ display: flex;
+ flex-direction: column;
+ height: 100%;
+ overflow: hidden;
+}
+
+.flow-header {
+ padding: 16px 20px;
+ border-bottom: 1px solid #272832;
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+}
+
+.flow-header h1 {
+ margin: 0;
+ font-size: 22px;
+ margin-bottom: 4px;
+}
+
+.flow-header p {
+ margin: 0;
+ font-size: 12px;
+ color: #9a9bb0;
+ max-width: 600px;
+ line-height: 1.5;
+}
+
+.flow-canvas {
+ flex: 1;
+ background: #050608;
+ position: relative;
+}
+
+.flow-error {
+ position: absolute;
+ inset: 0;
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ justify-content: center;
+ gap: 12px;
+}
+
+.error-icon {
+ font-size: 48px;
+}
+
+.error-text {
+ font-size: 14px;
+ color: #ff8a8a;
+}
+
+/* Assistant Message Sections */
+.gp-section {
+ margin-bottom: 16px;
+ border-radius: 12px;
+ background: #101117;
+ border: 1px solid #272832;
+ overflow: hidden;
+ animation: fadeIn 0.3s ease;
+}
+
+.gp-section-header {
+ padding: 8px 12px;
+ background: #151622;
+ border-bottom: 1px solid #272832;
+}
+
+.gp-section-header h3 {
+ margin: 0;
+ font-size: 13px;
+ font-weight: 600;
+ color: #c3c5dd;
+}
+
+.gp-section-content {
+ padding: 12px;
+}
+
+.gp-section-answer .gp-section-content p {
+ margin: 0;
+ font-size: 13px;
+ line-height: 1.6;
+ color: #f5f5f7;
+}
+
+.gp-section-plan {
+ background: #0a0b0f;
+}
+
+/* Plan View Enhanced */
+.plan-header {
+ margin-bottom: 12px;
+}
+
+.plan-goal {
+ font-size: 13px;
+ font-weight: 600;
+ margin-bottom: 4px;
+ color: #f5f5f7;
+}
+
+.plan-summary {
+ font-size: 12px;
+ color: #c3c5dd;
+ line-height: 1.5;
+}
+
+.plan-totals {
+ display: flex;
+ gap: 8px;
+ margin-bottom: 12px;
+ flex-wrap: wrap;
+}
+
+.plan-total {
+ padding: 4px 8px;
+ border-radius: 6px;
+ font-size: 11px;
+ font-weight: 500;
+ animation: fadeIn 0.3s ease;
+}
+
+.plan-total-create {
+ background: rgba(76, 175, 80, 0.15);
+ color: #81c784;
+ border: 1px solid rgba(76, 175, 80, 0.3);
+}
+
+.plan-total-modify {
+ background: rgba(33, 150, 243, 0.15);
+ color: #64b5f6;
+ border: 1px solid rgba(33, 150, 243, 0.3);
+}
+
+.plan-total-delete {
+ background: rgba(244, 67, 54, 0.15);
+ color: #e57373;
+ border: 1px solid rgba(244, 67, 54, 0.3);
+}
+
+.plan-step {
+ margin-bottom: 12px;
+ padding-bottom: 12px;
+ border-bottom: 1px solid #1a1b26;
+}
+
+.plan-step:last-child {
+ border-bottom: none;
+ padding-bottom: 0;
+ margin-bottom: 0;
+}
+
+.plan-step-header {
+ margin-bottom: 6px;
+}
+
+.plan-step-description {
+ font-size: 12px;
+ color: #9a9bb0;
+ margin-bottom: 8px;
+}
+
+.plan-files {
+ list-style: none;
+ padding: 0;
+ margin: 8px 0;
+}
+
+.plan-file {
+ display: flex;
+ align-items: center;
+ gap: 8px;
+ padding: 4px 0;
+}
+
+.gp-pill {
+ padding: 2px 6px;
+ border-radius: 4px;
+ font-size: 10px;
+ font-weight: 600;
+ text-transform: uppercase;
+ letter-spacing: 0.3px;
+}
+
+.gp-pill-create {
+ background: rgba(76, 175, 80, 0.2);
+ color: #81c784;
+ border: 1px solid rgba(76, 175, 80, 0.4);
+}
+
+.gp-pill-modify {
+ background: rgba(33, 150, 243, 0.2);
+ color: #64b5f6;
+ border: 1px solid rgba(33, 150, 243, 0.4);
+}
+
+.gp-pill-delete {
+ background: rgba(244, 67, 54, 0.2);
+ color: #e57373;
+ border: 1px solid rgba(244, 67, 54, 0.4);
+}
+
+.plan-file-path {
+ font-size: 11px;
+ color: #c3c5dd;
+ font-family: "SF Mono", Monaco, "Cascadia Code", monospace;
+ background: #0a0b0f;
+ padding: 2px 6px;
+ border-radius: 4px;
+}
+
+.plan-step-risks {
+ margin-top: 8px;
+ padding: 6px 8px;
+ background: rgba(255, 152, 0, 0.1);
+ border-left: 2px solid #ff9800;
+ border-radius: 4px;
+ font-size: 11px;
+ color: #ffb74d;
+}
+
+.plan-risk-label {
+ font-weight: 600;
+}
+
+/* Execution Log */
+.execution-steps {
+ list-style: none;
+ padding: 0;
+ margin: 0;
+}
+
+.execution-step {
+ padding: 8px;
+ margin-bottom: 6px;
+ background: #0a0b0f;
+ border-radius: 6px;
+ font-size: 11px;
+ font-family: "SF Mono", Monaco, "Cascadia Code", monospace;
+ white-space: pre-wrap;
+}
+
+.execution-step-number {
+ color: #ff7a3c;
+ font-weight: 600;
+ margin-right: 8px;
+}
+
+.execution-step-summary {
+ color: #c3c5dd;
+}
+
+/* Project Context Panel - Properly constrained */
+.gp-context {
+ padding: 12px;
+ height: 100%;
+ overflow: hidden;
+ display: flex;
+ flex-direction: column;
+}
+
+.gp-context-column {
+ background: #0a0b0f;
+ border-right: 1px solid #272832;
+ overflow: hidden;
+ display: flex;
+ flex-direction: column;
+}
+
+.gp-chat-column {
+ display: flex;
+ flex-direction: column;
+ background: #050608;
+ height: 100%;
+ min-width: 0;
+ overflow: hidden;
+}
+
+.gp-card {
+ background: #101117;
+ border-radius: 12px;
+ border: 1px solid #272832;
+ overflow: hidden;
+ display: flex;
+ flex-direction: column;
+ height: 100%;
+ min-height: 0;
+}
+
+.gp-card-header {
+ padding: 10px 12px;
+ background: #151622;
+ border-bottom: 1px solid #272832;
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+ flex-shrink: 0;
+}
+
+.gp-card-header h2 {
+ margin: 0;
+ font-size: 14px;
+ font-weight: 600;
+ color: #f5f5f7;
+}
+
+.gp-badge {
+ padding: 3px 8px;
+ border-radius: 999px;
+ background: #2a2b3c;
+ border: 1px solid #3a3b4d;
+ font-size: 11px;
+ color: #c3c5dd;
+ font-weight: 500;
+ transition: all 0.2s ease;
+}
+
+.gp-badge:hover {
+ border-color: #ff7a3c;
+}
+
+.gp-context-meta {
+ padding: 12px;
+ display: flex;
+ flex-direction: column;
+ gap: 6px;
+ border-bottom: 1px solid #272832;
+ flex-shrink: 0;
+ background: #0a0b0f;
+}
+
+.gp-context-meta-item {
+ display: flex;
+ align-items: center;
+ gap: 6px;
+ font-size: 12px;
+}
+
+.gp-context-meta-label {
+ color: #9a9bb0;
+ min-width: 60px;
+}
+
+.gp-context-meta-item strong {
+ color: #f5f5f7;
+ font-weight: 500;
+}
+
+/* File tree - Properly scrollable */
+.gp-context-tree {
+ flex: 1;
+ overflow-y: auto;
+ overflow-x: hidden;
+ min-height: 0;
+ padding: 4px;
+}
+
+.gp-context-empty {
+ padding: 20px 12px;
+ text-align: center;
+ color: #9a9bb0;
+ font-size: 12px;
+}
+
+/* Footer - Fixed at bottom */
+.gp-footer {
+ position: fixed;
+ bottom: 0;
+ left: 0;
+ right: 0;
+ border-top: 1px solid #272832;
+ padding: 8px 20px;
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ font-size: 11px;
+ color: #9a9bb0;
+ background: #0a0b0f;
+ backdrop-filter: blur(10px);
+ z-index: 10;
+ box-shadow: 0 -2px 10px rgba(0, 0, 0, 0.2);
+}
+
+.gp-footer-left {
+ display: flex;
+ align-items: center;
+ gap: 6px;
+ font-weight: 500;
+ color: #c3c5dd;
+}
+
+.gp-footer-right {
+ display: flex;
+ align-items: center;
+ gap: 12px;
+}
+
+.gp-footer-right a {
+ color: #9a9bb0;
+ text-decoration: none;
+ transition: all 0.2s ease;
+}
+
+.gp-footer-right a:hover {
+ color: #ff7a3c;
+ transform: translateY(-1px);
+}
+
+/* Adjust app-root to account for fixed footer */
+.app-root > .main-wrapper {
+ padding-bottom: 32px; /* Space for fixed footer */
+}
+
+/* ============================================================================
+ LOGIN PAGE - Enterprise GitHub Authentication
+ ============================================================================ */
+
+.login-page {
+ min-height: 100vh;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ background: radial-gradient(circle at center, #171823 0%, #050608 70%);
+ padding: 20px;
+ animation: fadeIn 0.4s ease;
+}
+
+.login-container {
+ width: 100%;
+ max-width: 480px;
+ background: #101117;
+ border: 1px solid #272832;
+ border-radius: 24px;
+ padding: 40px 36px;
+ box-shadow: 0 20px 60px rgba(0, 0, 0, 0.4);
+ animation: slideUp 0.5s ease;
+}
+
+@keyframes slideUp {
+ from {
+ opacity: 0;
+ transform: translateY(20px);
+ }
+ to {
+ opacity: 1;
+ transform: translateY(0);
+ }
+}
+
+/* Header */
+.login-header {
+ text-align: center;
+ margin-bottom: 32px;
+}
+
+.login-logo {
+ display: flex;
+ justify-content: center;
+ margin-bottom: 16px;
+}
+
+.logo-icon {
+ width: 64px;
+ height: 64px;
+ border-radius: 16px;
+ background: linear-gradient(135deg, #ff7a3c 0%, #ff6b2b 100%);
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ font-weight: 700;
+ font-size: 28px;
+ color: #050608;
+ box-shadow: 0 8px 24px rgba(255, 122, 60, 0.3);
+ transition: transform 0.3s ease;
+}
+
+.logo-icon:hover {
+ transform: scale(1.05) rotate(3deg);
+}
+
+.login-title {
+ margin: 0;
+ font-size: 28px;
+ font-weight: 700;
+ color: #f5f5f7;
+ margin-bottom: 8px;
+ letter-spacing: -0.5px;
+}
+
+.login-subtitle {
+ margin: 0;
+ font-size: 14px;
+ color: #9a9bb0;
+ font-weight: 500;
+}
+
+/* Welcome Section */
+.login-welcome {
+ margin-bottom: 28px;
+ padding-bottom: 28px;
+ border-bottom: 1px solid #272832;
+}
+
+.login-welcome h2 {
+ margin: 0 0 12px 0;
+ font-size: 20px;
+ font-weight: 600;
+ color: #f5f5f7;
+}
+
+.login-welcome p {
+ margin: 0;
+ font-size: 14px;
+ line-height: 1.6;
+ color: #c3c5dd;
+}
+
+/* Error Message */
+.login-error {
+ display: flex;
+ align-items: center;
+ gap: 10px;
+ padding: 12px 14px;
+ background: rgba(255, 82, 82, 0.1);
+ border: 1px solid rgba(255, 82, 82, 0.3);
+ border-radius: 10px;
+ color: #ff8a8a;
+ font-size: 13px;
+ margin-bottom: 20px;
+ animation: shake 0.4s ease;
+}
+
+@keyframes shake {
+ 0%, 100% { transform: translateX(0); }
+ 25% { transform: translateX(-5px); }
+ 75% { transform: translateX(5px); }
+}
+
+.login-error svg {
+ flex-shrink: 0;
+}
+
+/* Login Actions */
+.login-actions {
+ display: flex;
+ flex-direction: column;
+ gap: 14px;
+ margin-bottom: 28px;
+}
+
+/* Buttons */
+.btn-primary,
+.btn-secondary,
+.btn-text {
+ border: none;
+ outline: none;
+ cursor: pointer;
+ font-family: inherit;
+ font-weight: 600;
+ transition: all 0.2s ease;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ gap: 10px;
+}
+
+.btn-large {
+ padding: 14px 24px;
+ font-size: 15px;
+ border-radius: 12px;
+}
+
+.btn-primary {
+ background: linear-gradient(135deg, #ff7a3c 0%, #ff6b2b 100%);
+ color: #fff;
+ box-shadow: 0 4px 12px rgba(255, 122, 60, 0.25);
+}
+
+.btn-primary:hover:not(:disabled) {
+ transform: translateY(-2px);
+ box-shadow: 0 8px 20px rgba(255, 122, 60, 0.35);
+}
+
+.btn-primary:active:not(:disabled) {
+ transform: translateY(0);
+}
+
+.btn-primary:disabled {
+ opacity: 0.6;
+ cursor: not-allowed;
+}
+
+.btn-secondary {
+ background: #1a1b26;
+ color: #f5f5f7;
+ border: 1px solid #3a3b4d;
+}
+
+.btn-secondary:hover {
+ background: #2a2b3c;
+ border-color: #4a4b5d;
+ transform: translateY(-1px);
+}
+
+.btn-text {
+ background: transparent;
+ color: #9a9bb0;
+ padding: 10px;
+ font-size: 14px;
+ font-weight: 500;
+}
+
+.btn-text:hover {
+ color: #ff7a3c;
+}
+
+/* Button Spinner */
+.btn-spinner {
+ width: 16px;
+ height: 16px;
+ border: 2px solid rgba(255, 255, 255, 0.3);
+ border-top-color: #fff;
+ border-radius: 50%;
+ animation: spin 0.6s linear infinite;
+}
+
+@keyframes spin {
+ to { transform: rotate(360deg); }
+}
+
+/* Loading Spinner (Page) */
+.loading-spinner {
+ width: 48px;
+ height: 48px;
+ border: 4px solid #272832;
+ border-top-color: #ff7a3c;
+ border-radius: 50%;
+ animation: spin 0.8s linear infinite;
+ margin: 0 auto;
+}
+
+/* Divider */
+.login-divider {
+ position: relative;
+ text-align: center;
+ margin: 8px 0;
+}
+
+.login-divider::before {
+ content: '';
+ position: absolute;
+ top: 50%;
+ left: 0;
+ right: 0;
+ height: 1px;
+ background: #272832;
+}
+
+.login-divider span {
+ position: relative;
+ display: inline-block;
+ padding: 0 16px;
+ background: #101117;
+ color: #9a9bb0;
+ font-size: 12px;
+ font-weight: 500;
+}
+
+/* Form */
+.login-form {
+ display: flex;
+ flex-direction: column;
+ gap: 18px;
+ margin-bottom: 28px;
+}
+
+.form-group {
+ display: flex;
+ flex-direction: column;
+ gap: 8px;
+}
+
+.form-group label {
+ font-size: 13px;
+ font-weight: 600;
+ color: #f5f5f7;
+}
+
+.form-input {
+ background: #0a0b0f;
+ border: 1px solid #272832;
+ border-radius: 10px;
+ padding: 12px 14px;
+ color: #f5f5f7;
+ font-size: 14px;
+ font-family: "SF Mono", Monaco, monospace;
+ transition: all 0.2s ease;
+}
+
+.form-input:focus {
+ outline: none;
+ border-color: #ff7a3c;
+ box-shadow: 0 0 0 4px rgba(255, 122, 60, 0.1);
+}
+
+.form-input:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+}
+
+.form-input::placeholder {
+ color: #676883;
+}
+
+.form-hint {
+ font-size: 12px;
+ color: #9a9bb0;
+ line-height: 1.5;
+ margin: 0;
+}
+
+.form-link {
+ color: #ff7a3c;
+ text-decoration: none;
+ font-weight: 500;
+ transition: color 0.2s ease;
+}
+
+.form-link:hover {
+ color: #ff8b52;
+ text-decoration: underline;
+}
+
+.form-hint code {
+ background: #1a1b26;
+ padding: 2px 6px;
+ border-radius: 4px;
+ font-family: "SF Mono", Monaco, monospace;
+ font-size: 11px;
+ color: #ff7a3c;
+}
+
+/* Notice (for no auth configured) */
+.login-notice {
+ padding: 20px;
+ background: rgba(255, 152, 0, 0.1);
+ border: 1px solid rgba(255, 152, 0, 0.3);
+ border-radius: 12px;
+ margin-bottom: 28px;
+}
+
+.login-notice h3 {
+ margin: 0 0 12px 0;
+ font-size: 16px;
+ color: #ffb74d;
+}
+
+.login-notice p {
+ margin: 0 0 12px 0;
+ font-size: 13px;
+ color: #c3c5dd;
+ line-height: 1.6;
+}
+
+.login-notice ul {
+ margin: 0;
+ padding-left: 20px;
+ font-size: 13px;
+ color: #c3c5dd;
+ line-height: 1.8;
+}
+
+.login-notice code {
+ background: #1a1b26;
+ padding: 2px 6px;
+ border-radius: 4px;
+ font-family: "SF Mono", Monaco, monospace;
+ font-size: 12px;
+ color: #ff7a3c;
+}
+
+/* Features List */
+.login-features {
+ display: flex;
+ flex-direction: column;
+ gap: 12px;
+ padding: 20px 0;
+ border-top: 1px solid #272832;
+ border-bottom: 1px solid #272832;
+ margin-bottom: 20px;
+}
+
+.feature-item {
+ display: flex;
+ align-items: flex-start;
+ gap: 12px;
+}
+
+.feature-icon {
+ flex-shrink: 0;
+ width: 20px;
+ height: 20px;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ color: #7cffb3;
+}
+
+.feature-text {
+ display: flex;
+ flex-direction: column;
+ gap: 2px;
+}
+
+.feature-text strong {
+ font-size: 13px;
+ font-weight: 600;
+ color: #f5f5f7;
+}
+
+.feature-text span {
+ font-size: 12px;
+ color: #9a9bb0;
+}
+
+/* Footer */
+.login-footer {
+ text-align: center;
+}
+
.login-footer p {
  margin: 0;
  font-size: 11px;
  color: #7a7b8e;
  line-height: 1.6;
}

/* ============================================================================
   INSTALLATION MODAL - Claude Code Style
   ============================================================================ */
+
+.install-modal-backdrop {
+ position: fixed;
+ inset: 0;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ background: rgba(0, 0, 0, 0.7);
+ backdrop-filter: blur(8px);
+ z-index: 9999;
+ animation: fadeIn 0.2s ease;
+}
+
+.install-modal {
+ width: 480px;
+ max-width: 90vw;
+ background: #101117;
+ border: 1px solid #272832;
+ border-radius: 16px;
+ box-shadow: 0 20px 60px rgba(0, 0, 0, 0.5);
+ animation: modalSlideIn 0.3s ease;
+ overflow: hidden;
+}
+
+@keyframes modalSlideIn {
+ from {
+ opacity: 0;
+ transform: translateY(-20px) scale(0.95);
+ }
+ to {
+ opacity: 1;
+ transform: translateY(0) scale(1);
+ }
+}
+
+/* Modal Header */
+.install-modal-header {
+ padding: 32px 32px 24px;
+ text-align: center;
+ border-bottom: 1px solid #272832;
+}
+
+.install-modal-logo {
+ display: flex;
+ justify-content: center;
+ margin-bottom: 16px;
+}
+
+.logo-icon-large {
+ width: 56px;
+ height: 56px;
+ border-radius: 12px;
+ background: linear-gradient(135deg, #ff7a3c 0%, #ff6b2b 100%);
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ font-weight: 700;
+ font-size: 24px;
+ color: #050608;
+ box-shadow: 0 4px 16px rgba(255, 122, 60, 0.3);
+}
+
+.install-modal-title {
+ margin: 0 0 8px 0;
+ font-size: 20px;
+ font-weight: 600;
+ color: #f5f5f7;
+}
+
+.install-modal-subtitle {
+ margin: 0;
+ font-size: 13px;
+ color: #9a9bb0;
+ line-height: 1.5;
+}
+
+/* Status Indicator */
+.install-status {
+ display: flex;
+ align-items: center;
+ gap: 10px;
+ padding: 12px 16px;
+ margin: 16px 24px;
+ border-radius: 8px;
+ font-size: 13px;
+ transition: all 0.2s ease;
+}
+
+.install-status-error {
+ background: rgba(255, 82, 82, 0.1);
+ border: 1px solid rgba(255, 82, 82, 0.3);
+ color: #ff8a8a;
+}
+
+.install-status-pending {
+ background: rgba(255, 152, 0, 0.1);
+ border: 1px solid rgba(255, 152, 0, 0.3);
+ color: #ffb74d;
+}
+
+.status-icon {
+ flex-shrink: 0;
+}
+
+.status-spinner {
+ width: 16px;
+ height: 16px;
+ border: 2px solid rgba(255, 180, 77, 0.3);
+ border-top-color: #ffb74d;
+ border-radius: 50%;
+ animation: spin 0.6s linear infinite;
+}
+
+/* Installation Steps */
+.install-steps {
+ padding: 24px 32px;
+ display: flex;
+ flex-direction: column;
+ gap: 16px;
+}
+
+.install-step {
+ display: flex;
+ align-items: flex-start;
+ gap: 12px;
+}
+
+.step-number {
+ flex-shrink: 0;
+ width: 28px;
+ height: 28px;
+ border-radius: 8px;
+ background: #1a1b26;
+ border: 1px solid #3a3b4d;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ font-size: 13px;
+ font-weight: 600;
+ color: #ff7a3c;
+}
+
+.step-content h3 {
+ margin: 0 0 4px 0;
+ font-size: 14px;
+ font-weight: 600;
+ color: #f5f5f7;
+}
+
+.step-content p {
+ margin: 0;
+ font-size: 12px;
+ color: #9a9bb0;
+ line-height: 1.5;
+}
+
+/* Action Buttons */
+.install-modal-actions {
+ display: flex;
+ align-items: center;
+ justify-content: flex-end;
+ gap: 10px;
+ padding: 16px 24px;
+ border-top: 1px solid #272832;
+ background: #0a0b0f;
+}
+
+.btn-install-primary {
+ border: none;
+ outline: none;
+ background: #000;
+ color: #fff;
+ padding: 10px 18px;
+ border-radius: 8px;
+ font-size: 13px;
+ font-weight: 600;
+ cursor: pointer;
+ display: flex;
+ align-items: center;
+ gap: 8px;
+ transition: all 0.2s ease;
+}
+
+.btn-install-primary:hover:not(:disabled) {
+ background: #1a1a1a;
+ transform: translateY(-1px);
+}
+
+.btn-install-primary:active:not(:disabled) {
+ transform: translateY(0);
+}
+
+.btn-install-primary:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+}
+
+.btn-check-status {
+ border: 1px solid #3a3b4d;
+ outline: none;
+ background: #1a1b26;
+ color: #f5f5f7;
+ padding: 10px 18px;
+ border-radius: 8px;
+ font-size: 13px;
+ font-weight: 500;
+ cursor: pointer;
+ display: flex;
+ align-items: center;
+ gap: 8px;
+ transition: all 0.2s ease;
+}
+
+.btn-check-status:hover:not(:disabled) {
+ background: #2a2b3c;
+ border-color: #4a4b5d;
+}
+
+.btn-check-status:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+}
+
+.btn-install-secondary {
+ border: 1px solid #3a3b4d;
+ outline: none;
+ background: transparent;
+ color: #c3c5dd;
+ padding: 10px 18px;
+ border-radius: 8px;
+ font-size: 13px;
+ font-weight: 500;
+ cursor: pointer;
+ display: flex;
+ align-items: center;
+ gap: 8px;
+ transition: all 0.2s ease;
+}
+
+.btn-install-secondary:hover:not(:disabled) {
+ background: #1a1b26;
+ border-color: #4a4b5d;
+}
+
+.btn-install-secondary:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+}
+
+/* Footer */
+.install-modal-footer {
+ padding: 16px 32px 24px;
+ text-align: center;
+}
+
+.install-modal-footer p {
+ margin: 0;
+ font-size: 12px;
+ color: #7a7b8e;
+ line-height: 1.6;
+}
+
+.install-modal-footer strong {
+ color: #c3c5dd;
+ font-weight: 600;
+}
+
/* Button spinner
   NOTE(review): `.btn-spinner` is also declared earlier in this sheet with
   width/height 16px; because this later declaration has equal specificity,
   the 14px size below wins everywhere (including the login page) — confirm
   that shrinking all button spinners to 14px is intended, or scope one of
   the two selectors. `@keyframes spin` below likewise duplicates an earlier
   identical definition; harmless, but one copy could be removed. */
.btn-spinner {
  width: 14px;
  height: 14px;
  border: 2px solid rgba(255, 255, 255, 0.3);
  border-top-color: #fff;
  border-radius: 50%;
  animation: spin 0.6s linear infinite;
}

@keyframes spin {
  to { transform: rotate(360deg); }
}
+
+/* Secondary primary-style button for "Load available models" */
+.settings-load-btn {
+ margin-top: 8px;
+
+ /* Make it hug the text, not full width */
+ display: inline-flex;
+ align-items: center;
+ justify-content: center;
+ width: auto !important;
+ min-width: 0;
+ align-self: flex-start;
+
+ /* Size: slightly smaller than Save but same family */
+ padding: 7px 14px;
+ border-radius: 999px;
+
+ font-size: 12px;
+ font-weight: 600;
+ letter-spacing: 0.01em;
+
+ border: none;
+ outline: none;
+ cursor: pointer;
+
+ /* Match Save button color palette */
+ background: #ff7a3c;
+ color: #050608;
+
+ transition:
+ background 0.2s ease,
+ box-shadow 0.2s ease,
+ transform 0.15s ease,
+ opacity 0.2s ease;
+}
+
+.settings-load-btn:hover {
+ background: #ff8b52;
+ transform: translateY(-1px);
+ box-shadow: 0 3px 10px rgba(255, 122, 60, 0.28);
+}
+
+.settings-load-btn:active {
+ transform: translateY(0);
+ box-shadow: 0 1px 4px rgba(255, 122, 60, 0.25);
+}
+
+.settings-load-btn:disabled {
+ opacity: 0.55;
+ cursor: not-allowed;
+ transform: none;
+ box-shadow: none;
+}
diff --git a/frontend/utils/api.js b/frontend/utils/api.js
new file mode 100644
index 0000000000000000000000000000000000000000..c9a0a870981961b3c8c0aeb6b9c7b01f07879028
--- /dev/null
+++ b/frontend/utils/api.js
@@ -0,0 +1,212 @@
+/**
+ * API utilities for authenticated requests
+ */
+
+/**
+ * Get backend URL from environment or use relative path (for local dev)
+ * - Production (Vercel): Uses VITE_BACKEND_URL env var (e.g., https://gitpilot-backend.onrender.com)
+ * - Development (local): Uses relative paths (proxied by Vite to localhost:8000)
+ */
+const BACKEND_URL = import.meta.env.VITE_BACKEND_URL || '';
+
+/**
+ * Check if backend URL is configured
+ * @returns {boolean} True if backend URL is set
+ */
+export function isBackendConfigured() {
+ return BACKEND_URL !== '' && BACKEND_URL !== undefined;
+}
+
+/**
+ * Get the configured backend URL.
+ * Returns '' in local development, where relative paths are proxied by Vite.
+ * @returns {string} Backend URL or empty string
+ */
+export function getBackendUrl() {
+  return BACKEND_URL;
+}
+
+/**
+ * Construct full API URL
+ * @param {string} path - API endpoint path (e.g., '/api/chat/plan')
+ * @returns {string} Full URL to API endpoint
+ */
+export function apiUrl(path) {
+ // Ensure path starts with /
+ const cleanPath = path.startsWith('/') ? path : `/${path}`;
+ return `${BACKEND_URL}${cleanPath}`;
+}
+
+/**
+ * Enhanced fetch with better error handling for JSON parsing
+ * @param {string} url - URL to fetch
+ * @param {Object} options - Fetch options
+ * @returns {Promise} Parsed JSON response
+ */
+export async function safeFetchJSON(url, options = {}) {
+ try {
+ const response = await fetch(url, options);
+ const contentType = response.headers.get('content-type');
+
+ // Check if response is actually JSON
+ if (!contentType || !contentType.includes('application/json')) {
+ // If not JSON, it might be an HTML error page
+ const text = await response.text();
+
+ // Check if it looks like HTML (starts with } Fetch response
+ */
+export async function authFetch(url, options = {}) {
+ const headers = {
+ ...getAuthHeaders(),
+ ...options.headers,
+ };
+
+ return fetch(url, {
+ ...options,
+ headers,
+ });
+}
+
+/**
+ * Make an authenticated JSON request
+ * @param {string} url - API endpoint URL
+ * @param {Object} options - Fetch options
+ * @returns {Promise} Parsed JSON response
+ */
+export async function authFetchJSON(url, options = {}) {
+ const headers = {
+ 'Content-Type': 'application/json',
+ ...getAuthHeaders(),
+ ...options.headers,
+ };
+
+ const response = await fetch(url, {
+ ...options,
+ headers,
+ });
+
+ if (!response.ok) {
+ const error = await response.json().catch(() => ({ detail: 'Request failed' }));
+ throw new Error(error.detail || error.message || 'Request failed');
+ }
+
+ return response.json();
+}
+
+// βββ Redesigned API Endpoints ββββββββββββββββββββββββββββ
+
+/**
+ * Get normalized server status
+ */
+export async function fetchStatus() {
+ return safeFetchJSON(apiUrl("/api/status"));
+}
+
+/**
+ * Get detailed provider status
+ */
+export async function fetchProviderStatus() {
+ return safeFetchJSON(apiUrl("/api/providers/status"));
+}
+
+/**
+ * Test a provider configuration
+ */
+export async function testProvider(providerConfig) {
+ return safeFetchJSON(apiUrl("/api/providers/test"), {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify(providerConfig),
+ });
+}
+
+/**
+ * Start a session by mode
+ */
+export async function startSession(sessionConfig) {
+ return safeFetchJSON(apiUrl("/api/session/start"), {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify(sessionConfig),
+ });
+}
+
+/**
+ * Send a chat message (redesigned endpoint)
+ */
+export async function sendChatMessage(messageConfig) {
+ return safeFetchJSON(apiUrl("/api/chat/send"), {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify(messageConfig),
+ });
+}
+
+/**
+ * Get workspace summary
+ */
+export async function fetchWorkspaceSummary(folderPath) {
+ const query = folderPath ? `?folder_path=${encodeURIComponent(folderPath)}` : "";
+ return safeFetchJSON(apiUrl(`/api/workspace/summary${query}`));
+}
+
+/**
+ * Run security scan on workspace
+ */
+export async function scanWorkspace(path) {
+ const query = path ? `?path=${encodeURIComponent(path)}` : "";
+ return safeFetchJSON(apiUrl(`/api/security/scan-workspace${query}`));
+}
\ No newline at end of file
diff --git a/frontend/utils/ws.js b/frontend/utils/ws.js
new file mode 100644
index 0000000000000000000000000000000000000000..ad1ec4d2652ff52b17acf774558cadf37ca624e4
--- /dev/null
+++ b/frontend/utils/ws.js
@@ -0,0 +1,157 @@
+/**
+ * WebSocket client for real-time session streaming.
+ *
+ * Provides auto-reconnection, heartbeat, and event dispatching.
+ * Falls back gracefully β callers should always have an HTTP fallback.
+ */
+
+const WS_RECONNECT_DELAYS = [1000, 2000, 4000, 8000, 16000];
+const HEARTBEAT_INTERVAL = 30000;
+const MAX_RECONNECT_ATTEMPTS = 5;
+// If a connection dies within this window it counts as unstable
+const MIN_STABLE_DURATION_MS = 3000;
+
+export class SessionWebSocket {
+ constructor(sessionId, { onMessage, onStatusChange, onError, onConnect, onDisconnect } = {}) {
+ this._sessionId = sessionId;
+ this._handlers = { onMessage, onStatusChange, onError, onConnect, onDisconnect };
+ this._ws = null;
+ this._reconnectAttempt = 0;
+ this._heartbeatTimer = null;
+ this._closed = false;
+ this._connectTime = 0;
+ }
+
+ connect() {
+ if (this._closed) return;
+
+ const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
+ const backendUrl = import.meta.env.VITE_BACKEND_URL || '';
+ let wsUrl;
+
+ if (backendUrl) {
+ // Production: replace http(s) with ws(s)
+ wsUrl = backendUrl.replace(/^http/, 'ws') + `/ws/sessions/${this._sessionId}`;
+ } else {
+ // Dev: same host
+ wsUrl = `${protocol}//${window.location.host}/ws/sessions/${this._sessionId}`;
+ }
+
+ this._ws = new WebSocket(wsUrl);
+
+ this._ws.onopen = () => {
+ this._connectTime = Date.now();
+ this._reconnectAttempt = 0;
+ this._startHeartbeat();
+ this._handlers.onConnect?.();
+ };
+
+ this._ws.onmessage = (event) => {
+ try {
+ const data = JSON.parse(event.data);
+ this._dispatch(data);
+ } catch (e) {
+ console.warn('[ws] Failed to parse message:', e);
+ }
+ };
+
+ this._ws.onclose = (event) => {
+ this._stopHeartbeat();
+ this._handlers.onDisconnect?.(event);
+
+ if (!this._closed) {
+ // If connection died very quickly, count it as unstable
+ const lived = Date.now() - (this._connectTime || 0);
+ if (lived < MIN_STABLE_DURATION_MS) {
+ this._reconnectAttempt++;
+ }
+
+ if (this._reconnectAttempt < MAX_RECONNECT_ATTEMPTS) {
+ this._scheduleReconnect();
+ } else {
+ console.warn('[ws] Max reconnect attempts reached, giving up.');
+ }
+ }
+ };
+
+ this._ws.onerror = (error) => {
+ this._handlers.onError?.(error);
+ };
+ }
+
+ send(data) {
+ if (this._ws?.readyState === WebSocket.OPEN) {
+ this._ws.send(JSON.stringify(data));
+ return true;
+ }
+ return false;
+ }
+
+ sendMessage(content) {
+ return this.send({ type: 'user_message', content });
+ }
+
+ cancel() {
+ return this.send({ type: 'cancel' });
+ }
+
+ close() {
+ this._closed = true;
+ this._stopHeartbeat();
+ if (this._ws) {
+ this._ws.close();
+ this._ws = null;
+ }
+ }
+
+ get connected() {
+ return this._ws?.readyState === WebSocket.OPEN;
+ }
+
+ _dispatch(data) {
+ const { type } = data;
+
+ switch (type) {
+ case 'agent_message':
+ case 'tool_use':
+ case 'tool_result':
+ case 'diff_update':
+ case 'session_restored':
+ case 'message_received':
+ this._handlers.onMessage?.(data);
+ break;
+ case 'status_change':
+ this._handlers.onStatusChange?.(data.status);
+ break;
+ case 'error':
+ this._handlers.onError?.(new Error(data.message));
+ break;
+ case 'pong':
+ break;
+ default:
+ this._handlers.onMessage?.(data);
+ }
+ }
+
+ _startHeartbeat() {
+ this._stopHeartbeat();
+ this._heartbeatTimer = setInterval(() => {
+ this.send({ type: 'ping' });
+ }, HEARTBEAT_INTERVAL);
+ }
+
+ _stopHeartbeat() {
+ if (this._heartbeatTimer) {
+ clearInterval(this._heartbeatTimer);
+ this._heartbeatTimer = null;
+ }
+ }
+
+ _scheduleReconnect() {
+ const delay = WS_RECONNECT_DELAYS[
+ Math.min(this._reconnectAttempt, WS_RECONNECT_DELAYS.length - 1)
+ ];
+ this._reconnectAttempt++;
+ setTimeout(() => this.connect(), delay);
+ }
+}
diff --git a/frontend/vite.config.js b/frontend/vite.config.js
new file mode 100644
index 0000000000000000000000000000000000000000..c957ef7e1678614e70468a54299a709a9b2e0dce
--- /dev/null
+++ b/frontend/vite.config.js
@@ -0,0 +1,22 @@
+// frontend/vite.config.js
+import { defineConfig } from "vite";
+import react from "@vitejs/plugin-react";
+
+export default defineConfig({
+  plugins: [react()],
+  server: {
+    // Bind to 0.0.0.0 so the dev server is reachable from outside the
+    // VM/container (e.g. from the Windows host when running inside WSL).
+    port: 5173,
+    host: true,
+    // Only proxy API requests when NOT running in Vercel dev
+    // (Vercel dev handles API routing to serverless functions)
+    proxy: process.env.VERCEL ? undefined : {
+      "/api": "http://localhost:8000",
+      "/ws": {
+        target: "ws://localhost:8000",
+        ws: true
+      }
+    }
+  }
+});
\ No newline at end of file
diff --git a/gitpilot/__init__.py b/gitpilot/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..febd3bec09bbdd4c708d9dde1a6513130281818e
--- /dev/null
+++ b/gitpilot/__init__.py
@@ -0,0 +1,5 @@
+"""GitPilot package.
+
+Re-exports the package version (``from gitpilot import __version__``).
+"""
+
+from .version import __version__
+
+__all__ = ["__version__"]
diff --git a/gitpilot/__main__.py b/gitpilot/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..46c73041d1fd9224b478b6fe032b704a206b565b
--- /dev/null
+++ b/gitpilot/__main__.py
@@ -0,0 +1,5 @@
+"""Allow running gitpilot as a module: python -m gitpilot"""
+from .cli import main
+
+# Delegate straight to the CLI entry point.
+if __name__ == "__main__":
+    main()
diff --git a/gitpilot/_api_core.py b/gitpilot/_api_core.py
new file mode 100644
index 0000000000000000000000000000000000000000..417df1c3425efa2ada512c2611c7d13b3b8cce8a
--- /dev/null
+++ b/gitpilot/_api_core.py
@@ -0,0 +1,2382 @@
+# gitpilot/_api_core.py -- Original API module (re-exported by api.py)
+from __future__ import annotations
+
+from pathlib import Path
+from typing import List, Optional
+
+from fastapi import FastAPI, Query, Path as FPath, Header, HTTPException, UploadFile, File
+from fastapi.responses import FileResponse, JSONResponse
+from fastapi.staticfiles import StaticFiles
+from fastapi.middleware.cors import CORSMiddleware
+from pydantic import BaseModel, Field
+
+from .version import __version__
+from .github_api import (
+ list_user_repos,
+ list_user_repos_paginated, # Pagination support
+ search_user_repos, # Search across all repos
+ get_repo_tree,
+ get_file,
+ put_file,
+ execution_context,
+ github_request,
+)
+from .github_app import check_repo_write_access
+from .settings import AppSettings, get_settings, set_provider, update_settings, LLMProvider
+from .agentic import (
+ generate_plan,
+ execute_plan,
+ PlanResult,
+ get_flow_definition,
+ dispatch_request,
+ create_pr_after_execution,
+)
+from .agent_router import route as route_request
+from . import github_issues
+from . import github_pulls
+from . import github_search
+from .session import SessionManager, Session
+from .hooks import HookManager, HookEvent
+from .permissions import PermissionManager, PermissionMode
+from .memory import MemoryManager
+from .context_vault import ContextVault
+from .use_case import UseCaseManager
+from .mcp_client import MCPClient
+from .plugins import PluginManager
+from .skills import SkillManager
+from .smart_model_router import ModelRouter, ModelRouterConfig
+from .topology_registry import (
+ list_topologies as _list_topologies,
+ get_topology_graph as _get_topology_graph,
+ classify_message as _classify_message,
+ get_saved_topology_preference,
+ save_topology_preference,
+)
+from .agent_teams import AgentTeam
+from .learning import LearningEngine
+from .cross_repo import CrossRepoAnalyzer
+from .predictions import PredictiveEngine
+from .security import SecurityScanner
+from .nl_database import NLQueryEngine, QueryDialect, SafetyLevel, TableSchema
+from .github_oauth import (
+ generate_authorization_url,
+ exchange_code_for_token,
+ validate_token,
+ initiate_device_flow,
+ poll_device_token,
+ AuthSession,
+ GitHubUser,
+)
+import os
+import logging
+from .model_catalog import list_models_for_provider
+
+# Optional A2A adapter (MCP ContextForge)
+from .a2a_adapter import router as a2a_router
+
+logger = logging.getLogger(__name__)
+
+# --- Phase 1 singletons ---
+# Core session/hook/permission plumbing shared by all requests.
+_session_mgr = SessionManager()
+_hook_mgr = HookManager()
+_perm_mgr = PermissionManager()
+
+# --- Phase 2 singletons ---
+# Extensibility layer: MCP client, plugins, skills, and model routing.
+_mcp_client = MCPClient()
+_plugin_mgr = PluginManager()
+_skill_mgr = SkillManager()
+_model_router = ModelRouter()
+
+# --- Phase 3 singletons ---
+# Analysis engines: agent teams, learning, cross-repo analysis,
+# predictions, security scanning, natural-language DB queries.
+_agent_team = AgentTeam()
+_learning_engine = LearningEngine()
+_cross_repo = CrossRepoAnalyzer()
+_predictive_engine = PredictiveEngine()
+_security_scanner = SecurityScanner()
+_nl_engine = NLQueryEngine()
+
+# FastAPI application instance; version mirrors gitpilot.version.__version__.
+app = FastAPI(
+    title="GitPilot API",
+    version=__version__,
+    description="Agentic AI assistant for GitHub repositories.",
+)
+
+# ==========================================================================
+# Optional A2A Adapter (MCP ContextForge)
+# ==========================================================================
+# This is feature-flagged and does not affect the existing UI/REST API unless
+# explicitly enabled.
+def _env_bool(name: str, default: bool) -> bool:
+ raw = os.getenv(name)
+ if raw is None:
+ return default
+ return raw.strip().lower() in {"1", "true", "yes", "y", "on"}
+
+
+if _env_bool("GITPILOT_ENABLE_A2A", False):
+ logger.info("A2A adapter enabled (mounting /a2a/* endpoints)")
+ app.include_router(a2a_router)
+else:
+ logger.info("A2A adapter disabled (set GITPILOT_ENABLE_A2A=true to enable)")
+
+# ============================================================================
+# CORS Configuration
+# ============================================================================
+# Enable CORS to allow frontend (local dev or Vercel) to connect to backend.
+# CORS_ORIGINS is a comma-separated list; defaults to the Vite dev server.
+allowed_origins_str = os.getenv("CORS_ORIGINS", "http://localhost:5173")
+allowed_origins = [origin.strip() for origin in allowed_origins_str.split(",")]
+
+logger.info(f"CORS enabled for origins: {allowed_origins}")
+
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=allowed_origins,
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+
+
+def get_github_token(authorization: Optional[str] = Header(None)) -> Optional[str]:
+    """
+    Extract GitHub token from Authorization header.
+
+    Supports formats (placeholders reconstructed -- the originals were lost
+    when '<'-delimited text was stripped from this docstring):
+        - "Bearer <token>"
+        - "token <token>"
+        - "<token>" (raw token with no scheme)
+    """
+    if not authorization:
+        return None
+
+    if authorization.startswith("Bearer "):
+        return authorization[7:]
+    elif authorization.startswith("token "):
+        return authorization[6:]
+    else:
+        return authorization
+
+
+# --- FIXED: Added default_branch to model ---
+class RepoSummary(BaseModel):
+    """Minimal repository descriptor returned to the frontend."""
+    id: int
+    name: str
+    full_name: str
+    private: bool
+    owner: str
+    # Defaults to "main" but may be "master", "dev", etc.; the frontend needs
+    # this to request the correct ref for trees/files.
+    default_branch: str = "main"
+
+
+class PaginatedReposResponse(BaseModel):
+    """Response model for paginated repository listing."""
+    repositories: List[RepoSummary]
+    page: int
+    per_page: int
+    total_count: Optional[int] = None
+    has_more: bool
+    query: Optional[str] = None  # Echo of the search query, if any
+
+
+class FileEntry(BaseModel):
+    """A single entry in a repository file tree."""
+    path: str
+    type: str  # tree entry type (presumably "blob"/"tree") -- confirm in get_repo_tree
+
+
+class FileTreeResponse(BaseModel):
+    """File tree for a repository."""
+    files: List[FileEntry] = Field(default_factory=list)
+
+
+class FileContent(BaseModel):
+    """Contents of a single file."""
+    path: str
+    encoding: str = "utf-8"
+    content: str
+
+
+class CommitRequest(BaseModel):
+    """Request to create/update a file with a commit message."""
+    path: str
+    content: str
+    message: str
+
+
+class CommitResponse(BaseModel):
+    """Result of committing a file change."""
+    path: str
+    commit_sha: str
+    commit_url: Optional[str] = None
+
+
+class SettingsResponse(BaseModel):
+    """Current application settings, including per-provider configs."""
+    provider: LLMProvider
+    providers: List[LLMProvider]  # all selectable providers
+    openai: dict
+    claude: dict
+    watsonx: dict
+    ollama: dict
+    langflow_url: str
+    has_langflow_plan_flow: bool
+
+
+class ProviderModelsResponse(BaseModel):
+    """Models available for a provider, plus an optional error message."""
+    provider: LLMProvider
+    models: List[str] = Field(default_factory=list)
+    error: Optional[str] = None
+
+
+class ProviderUpdate(BaseModel):
+    """Request body for switching the active LLM provider."""
+    provider: LLMProvider
+
+
+class ChatPlanRequest(BaseModel):
+    """Request body for generating a plan against a repository."""
+    repo_owner: str
+    repo_name: str
+    goal: str
+    branch_name: Optional[str] = None
+
+
+class ExecutePlanRequest(BaseModel):
+    """Request body for executing a previously generated plan."""
+    repo_owner: str
+    repo_name: str
+    plan: PlanResult
+    branch_name: Optional[str] = None
+
+
+class AuthUrlResponse(BaseModel):
+    """OAuth authorization URL plus the state value."""
+    authorization_url: str
+    state: str
+
+
+class AuthCallbackRequest(BaseModel):
+    """OAuth callback payload (authorization code + state)."""
+    code: str
+    state: str
+
+
+class TokenValidationRequest(BaseModel):
+    """Request body for validating a GitHub access token."""
+    access_token: str
+
+
+class UserInfoResponse(BaseModel):
+    """Authenticated GitHub user information."""
+    user: GitHubUser
+    authenticated: bool
+
+
+class RepoAccessResponse(BaseModel):
+    """Write-access information for a repository."""
+    can_write: bool
+    app_installed: bool
+    auth_type: str
+
+
+# --- v2 Request/Response models ---
+
+class ChatRequest(BaseModel):
+    """Unified chat request for the conversational dispatcher."""
+    repo_owner: str
+    repo_name: str
+    message: str
+    branch_name: Optional[str] = None
+    auto_pr: bool = False
+    topology_id: Optional[str] = None  # Override topology for this request
+
+
+class IssueCreateRequest(BaseModel):
+    """Request body for creating an issue."""
+    title: str
+    body: Optional[str] = None
+    labels: Optional[List[str]] = None
+    assignees: Optional[List[str]] = None
+    milestone: Optional[int] = None
+
+
+class IssueUpdateRequest(BaseModel):
+    """Request body for updating an issue."""
+    title: Optional[str] = None
+    body: Optional[str] = None
+    state: Optional[str] = None
+    labels: Optional[List[str]] = None
+    assignees: Optional[List[str]] = None
+    milestone: Optional[int] = None
+
+
+class IssueCommentRequest(BaseModel):
+    """Request body for commenting on an issue."""
+    body: str
+
+
+class PRCreateRequest(BaseModel):
+    """Request body for opening a pull request."""
+    title: str
+    head: str
+    base: str
+    body: Optional[str] = None
+    draft: bool = False
+
+
+class PRMergeRequest(BaseModel):
+    """Request body for merging a pull request."""
+    merge_method: str = "merge"
+    commit_title: Optional[str] = None
+    commit_message: Optional[str] = None
+
+
+class SearchRequest(BaseModel):
+    """Generic search request with pagination."""
+    query: str
+    per_page: int = 30
+    page: int = 1
+
+
+# ============================================================================
+# Repository Endpoints - Enterprise Grade with Pagination & Search
+# ============================================================================
+
+@app.get("/api/repos", response_model=PaginatedReposResponse)
+async def api_list_repos(
+ query: Optional[str] = Query(None, description="Search query (searches across ALL repositories)"),
+ page: int = Query(1, ge=1, description="Page number (starts at 1)"),
+ per_page: int = Query(100, ge=1, le=100, description="Results per page (max 100)"),
+ authorization: Optional[str] = Header(None),
+):
+ """
+ List user repositories with enterprise-grade pagination and search.
+ Includes default_branch information for correct frontend routing.
+ """
+ token = get_github_token(authorization)
+
+ try:
+ if query:
+ # SEARCH MODE: Search across ALL repositories
+ result = await search_user_repos(
+ query=query,
+ page=page,
+ per_page=per_page,
+ token=token
+ )
+ else:
+ # PAGINATION MODE: Return repos page by page
+ result = await list_user_repos_paginated(
+ page=page,
+ per_page=per_page,
+ token=token
+ )
+
+ # --- FIXED: Mapping default_branch ---
+ repos = [
+ RepoSummary(
+ id=r["id"],
+ name=r["name"],
+ full_name=r["full_name"],
+ private=r["private"],
+ owner=r["owner"],
+ default_branch=r.get("default_branch", "main"), # <--- CRITICAL FIX
+ )
+ for r in result["repositories"]
+ ]
+
+ return PaginatedReposResponse(
+ repositories=repos,
+ page=result["page"],
+ per_page=result["per_page"],
+ total_count=result.get("total_count"),
+ has_more=result["has_more"],
+ query=query,
+ )
+
+ except Exception as e:
+ logging.exception("Error fetching repositories")
+ return JSONResponse(
+ content={
+ "error": f"Failed to fetch repositories: {str(e)}",
+ "repositories": [],
+ "page": page,
+ "per_page": per_page,
+ "has_more": False,
+ },
+ status_code=500
+ )
+
+
+@app.get("/api/repos/all")
+async def api_list_all_repos(
+ query: Optional[str] = Query(None, description="Search query"),
+ authorization: Optional[str] = Header(None),
+):
+ """
+ Fetch ALL user repositories at once (no pagination).
+ Useful for quick searches, but paginated endpoint is preferred.
+ """
+ token = get_github_token(authorization)
+
+ try:
+ # Fetch all repositories (this will make multiple API calls)
+ all_repos = []
+ page = 1
+ max_pages = 15 # Safety limit: 1500 repos max (15 * 100)
+
+ while page <= max_pages:
+ result = await list_user_repos_paginated(
+ page=page,
+ per_page=100,
+ token=token
+ )
+
+ all_repos.extend(result["repositories"])
+
+ if not result["has_more"]:
+ break
+
+ page += 1
+
+ # Filter by query if provided
+ if query:
+ query_lower = query.lower()
+ all_repos = [
+ r for r in all_repos
+ if query_lower in r["name"].lower() or query_lower in r["full_name"].lower()
+ ]
+
+ # --- FIXED: Mapping default_branch ---
+ repos = [
+ RepoSummary(
+ id=r["id"],
+ name=r["name"],
+ full_name=r["full_name"],
+ private=r["private"],
+ owner=r["owner"],
+ default_branch=r.get("default_branch", "main"), # <--- CRITICAL FIX
+ )
+ for r in all_repos
+ ]
+
+ return {
+ "repositories": repos,
+ "total_count": len(repos),
+ "query": query,
+ }
+
+ except Exception as e:
+ logging.exception("Error fetching all repositories")
+ return JSONResponse(
+ content={"error": f"Failed to fetch repositories: {str(e)}"},
+ status_code=500
+ )
+
+
+@app.get("/api/repos/{owner}/{repo}/tree", response_model=FileTreeResponse)
+async def api_repo_tree(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ ref: Optional[str] = Query(
+ None,
+ description="Git reference (branch, tag, or commit SHA). If omitted, defaults to HEAD.",
+ ),
+ authorization: Optional[str] = Header(None),
+):
+ """
+ Get the file tree for a repository.
+ Handles 'main' vs 'master' discrepancies and empty repositories gracefully.
+ """
+ token = get_github_token(authorization)
+
+ # Keep legacy behavior: missing/empty ref behaves like HEAD.
+ ref_value = (ref or "").strip() or "HEAD"
+
+ try:
+ tree = await get_repo_tree(owner, repo, token=token, ref=ref_value)
+ return FileTreeResponse(files=[FileEntry(**f) for f in tree])
+
+ except HTTPException as e:
+ if e.status_code == 409:
+ return FileTreeResponse(files=[])
+
+ if e.status_code == 404:
+ return JSONResponse(
+ status_code=404,
+ content={
+ "detail": f"Ref '{ref_value}' not found. The repository might be using a different default branch (e.g., 'master')."
+ }
+ )
+
+ raise e
+
+
+@app.get("/api/repos/{owner}/{repo}/file", response_model=FileContent)
+async def api_get_file(
+    owner: str = FPath(...),
+    repo: str = FPath(...),
+    path: str = Query(...),
+    authorization: Optional[str] = Header(None),
+):
+    """Fetch the contents of a single file from a repository."""
+    token = get_github_token(authorization)
+    content = await get_file(owner, repo, path, token=token)
+    return FileContent(path=path, content=content)
+
+
+@app.post("/api/repos/{owner}/{repo}/file", response_model=CommitResponse)
+async def api_put_file(
+    owner: str = FPath(...),
+    repo: str = FPath(...),
+    payload: CommitRequest = ...,
+    authorization: Optional[str] = Header(None),
+):
+    """Create or update a file in a repository via a commit."""
+    token = get_github_token(authorization)
+    result = await put_file(
+        owner, repo, payload.path, payload.content, payload.message, token=token
+    )
+    return CommitResponse(**result)
+
+
+# ============================================================================
+# Settings Endpoints
+# ============================================================================
+
+@app.get("/api/settings", response_model=SettingsResponse)
+async def api_get_settings():
+ s: AppSettings = get_settings()
+ return SettingsResponse(
+ provider=s.provider,
+ providers=[LLMProvider.openai, LLMProvider.claude, LLMProvider.watsonx, LLMProvider.ollama],
+ openai=s.openai.model_dump(),
+ claude=s.claude.model_dump(),
+ watsonx=s.watsonx.model_dump(),
+ ollama=s.ollama.model_dump(),
+ langflow_url=s.langflow_url,
+ has_langflow_plan_flow=bool(s.langflow_plan_flow_id),
+ )
+
+
+@app.get("/api/settings/models", response_model=ProviderModelsResponse)
+async def api_list_models(provider: Optional[LLMProvider] = Query(None)):
+ """
+ Return the list of LLM models available for a provider.
+
+ If 'provider' is not given, use the currently active provider from settings.
+ """
+ s: AppSettings = get_settings()
+ effective_provider = provider or s.provider
+
+ models, error = list_models_for_provider(effective_provider, s)
+
+ return ProviderModelsResponse(
+ provider=effective_provider,
+ models=models,
+ error=error,
+ )
+
+
+@app.post("/api/settings/provider", response_model=SettingsResponse)
+async def api_set_provider(update: ProviderUpdate):
+ s = set_provider(update.provider)
+ return SettingsResponse(
+ provider=s.provider,
+ providers=[LLMProvider.openai, LLMProvider.claude, LLMProvider.watsonx, LLMProvider.ollama],
+ openai=s.openai.model_dump(),
+ claude=s.claude.model_dump(),
+ watsonx=s.watsonx.model_dump(),
+ ollama=s.ollama.model_dump(),
+ langflow_url=s.langflow_url,
+ has_langflow_plan_flow=bool(s.langflow_plan_flow_id),
+ )
+
+
+@app.put("/api/settings/llm", response_model=SettingsResponse)
+async def api_update_llm_settings(updates: dict):
+ """Update full LLM settings including provider-specific configs."""
+ s = update_settings(updates)
+ return SettingsResponse(
+ provider=s.provider,
+ providers=[LLMProvider.openai, LLMProvider.claude, LLMProvider.watsonx, LLMProvider.ollama],
+ openai=s.openai.model_dump(),
+ claude=s.claude.model_dump(),
+ watsonx=s.watsonx.model_dump(),
+ ollama=s.ollama.model_dump(),
+ langflow_url=s.langflow_url,
+ has_langflow_plan_flow=bool(s.langflow_plan_flow_id),
+ )
+
+
+# ============================================================================
+# Chat Endpoints
+# ============================================================================
+
+@app.post("/api/chat/plan", response_model=PlanResult)
+async def api_chat_plan(req: ChatPlanRequest, authorization: Optional[str] = Header(None)):
+ token = get_github_token(authorization)
+
+ # β
Added logging for branch_name received
+ logger.info(
+ "PLAN REQUEST: %s/%s | branch_name=%r",
+ req.repo_owner,
+ req.repo_name,
+ req.branch_name,
+ )
+
+ with execution_context(token, ref=req.branch_name): # β
set ref context
+ full_name = f"{req.repo_owner}/{req.repo_name}"
+ plan = await generate_plan(req.goal, full_name, token=token, branch_name=req.branch_name)
+ return plan
+
+
+@app.post("/api/chat/execute")
+async def api_chat_execute(
+ req: ExecutePlanRequest,
+ authorization: Optional[str] = Header(None)
+):
+ token = get_github_token(authorization)
+
+ # β
FIX: use execution_context(token, ref=req.branch_name) so tool calls that rely on context
+ # never accidentally run on HEAD/default when branch_name is provided.
+ with execution_context(token, ref=req.branch_name):
+ full_name = f"{req.repo_owner}/{req.repo_name}"
+ result = await execute_plan(
+ req.plan, full_name, token=token, branch_name=req.branch_name
+ )
+ if isinstance(result, dict):
+ result.setdefault(
+ "mode",
+ "sticky" if req.branch_name else "hard-switch",
+ )
+ return result
+
+
+@app.get("/api/flow/current")
+async def api_get_flow(topology: Optional[str] = Query(None)):
+    """Return the agent flow definition as a graph.
+
+    If ``topology`` query param is provided, returns the graph for that
+    topology. Otherwise falls back to the user's saved preference, and
+    finally to the legacy ``get_flow_definition()`` output for full
+    backward compatibility.
+    """
+    tid = topology or get_saved_topology_preference()
+    if tid:
+        return _get_topology_graph(tid)
+    # Legacy path - returns the original hardcoded graph
+    flow = await get_flow_definition()
+    return flow
+
+
+# ============================================================================
+# Topology Registry Endpoints (additive - no existing behaviour changed)
+# ============================================================================
+
+@app.get("/api/flow/topologies")
+async def api_list_topologies():
+    """Return lightweight summaries of all available topology presets."""
+    return _list_topologies()
+
+
+@app.get("/api/flow/topology/{topology_id}")
+async def api_get_topology(topology_id: str):
+    """Return the full flow graph for a specific topology."""
+    return _get_topology_graph(topology_id)
+
+
+class ClassifyRequest(BaseModel):
+    """Request body for topology auto-classification."""
+    message: str
+
+
+@app.post("/api/flow/classify")
+async def api_classify_message(req: ClassifyRequest):
+    """Auto-detect the best topology for a given user message.
+
+    Returns the recommended topology, confidence score, and up to 4
+    alternatives ranked by relevance.
+    """
+    result = _classify_message(req.message)
+    return result.to_dict()
+
+
+class TopologyPrefRequest(BaseModel):
+    """Request body for saving a topology preference."""
+    topology: str
+
+
+@app.get("/api/settings/topology")
+async def api_get_topology_pref():
+    """Return the user's saved topology preference (or null)."""
+    pref = get_saved_topology_preference()
+    return {"topology": pref}
+
+
+@app.post("/api/settings/topology")
+async def api_set_topology_pref(req: TopologyPrefRequest):
+    """Save the user's preferred topology."""
+    save_topology_preference(req.topology)
+    return {"status": "ok", "topology": req.topology}
+
+
+# ============================================================================
+# Conversational Chat Endpoint (v2 upgrade)
+# ============================================================================
+
+@app.post("/api/chat/message")
+async def api_chat_message(req: ChatRequest, authorization: Optional[str] = Header(None)):
+    """
+    Unified conversational endpoint. The router analyses the message and
+    dispatches to the appropriate agent (issue, PR, search, review, learning,
+    or the existing plan+execute pipeline).
+    """
+    token = get_github_token(authorization)
+
+    logger.info(
+        "CHAT MESSAGE: %s/%s | message=%r | branch=%r",
+        req.repo_owner,
+        req.repo_name,
+        req.message[:80],
+        req.branch_name,
+    )
+
+    with execution_context(token, ref=req.branch_name):
+        full_name = f"{req.repo_owner}/{req.repo_name}"
+        result = await dispatch_request(
+            req.message, full_name, token=token, branch_name=req.branch_name,
+            topology_id=req.topology_id,
+        )
+
+        # If auto_pr is requested and execution completed, create PR.
+        # Here only a plan is produced; the actual PR is created by the
+        # execute-with-pr endpoint after execution.
+        if (
+            req.auto_pr
+            and isinstance(result, dict)
+            and result.get("category") == "plan_execute"
+            and result.get("plan")
+        ):
+            result["auto_pr_hint"] = (
+                "Plan generated. Execute it first, then auto-PR will be created."
+            )
+
+        return result
+
+
+@app.post("/api/chat/execute-with-pr")
+async def api_chat_execute_with_pr(
+    req: ExecutePlanRequest,
+    authorization: Optional[str] = Header(None),
+):
+    """Execute a plan AND automatically create a pull request afterwards.
+
+    NOTE(review): block nesting reconstructed from a whitespace-mangled
+    diff; confirm against the original source.
+    """
+    token = get_github_token(authorization)
+
+    with execution_context(token, ref=req.branch_name):
+        full_name = f"{req.repo_owner}/{req.repo_name}"
+        result = await execute_plan(
+            req.plan, full_name, token=token, branch_name=req.branch_name,
+        )
+
+        # Only open a PR when execution finished and a branch is known.
+        if isinstance(result, dict) and result.get("status") == "completed":
+            branch = result.get("branch", req.branch_name)
+            if branch:
+                pr = await create_pr_after_execution(
+                    full_name,
+                    branch,
+                    req.plan.goal,
+                    result.get("executionLog", {}),
+                    token=token,
+                )
+                if pr:
+                    result["pull_request"] = {
+                        "number": pr.get("number"),
+                        "url": pr.get("html_url"),
+                        "title": pr.get("title"),
+                    }
+
+            result.setdefault(
+                "mode",
+                "sticky" if req.branch_name else "hard-switch",
+            )
+
+        return result
+
+
+# ============================================================================
+# Issue Endpoints (v2 upgrade)
+# ============================================================================
+
@app.get("/api/repos/{owner}/{repo}/issues")
async def api_list_issues(
    owner: str = FPath(...),
    repo: str = FPath(...),
    state: str = Query("open"),
    labels: Optional[str] = Query(None),
    per_page: int = Query(30, ge=1, le=100),
    page: int = Query(1, ge=1),
    authorization: Optional[str] = Header(None),
):
    """Return one page of issues for ``owner/repo``, filtered by state and labels."""
    gh_token = get_github_token(authorization)
    issue_list = await github_issues.list_issues(
        owner,
        repo,
        state=state,
        labels=labels,
        per_page=per_page,
        page=page,
        token=gh_token,
    )
    return {"issues": issue_list, "page": page, "per_page": per_page}
+
+
@app.get("/api/repos/{owner}/{repo}/issues/{issue_number}")
async def api_get_issue(
    owner: str = FPath(...),
    repo: str = FPath(...),
    issue_number: int = FPath(...),
    authorization: Optional[str] = Header(None),
):
    """Fetch a single issue by its number."""
    gh_token = get_github_token(authorization)
    issue = await github_issues.get_issue(owner, repo, issue_number, token=gh_token)
    return issue
+
+
@app.post("/api/repos/{owner}/{repo}/issues")
async def api_create_issue(
    owner: str = FPath(...),
    repo: str = FPath(...),
    payload: IssueCreateRequest = ...,
    authorization: Optional[str] = Header(None),
):
    """Open a new issue on the repository from the request payload."""
    gh_token = get_github_token(authorization)
    return await github_issues.create_issue(
        owner,
        repo,
        payload.title,
        body=payload.body,
        labels=payload.labels,
        assignees=payload.assignees,
        milestone=payload.milestone,
        token=gh_token,
    )
+
+
@app.patch("/api/repos/{owner}/{repo}/issues/{issue_number}")
async def api_update_issue(
    owner: str = FPath(...),
    repo: str = FPath(...),
    issue_number: int = FPath(...),
    payload: IssueUpdateRequest = ...,
    authorization: Optional[str] = Header(None),
):
    """Apply partial updates (title, body, state, labels, assignees, milestone)."""
    gh_token = get_github_token(authorization)
    return await github_issues.update_issue(
        owner,
        repo,
        issue_number,
        title=payload.title,
        body=payload.body,
        state=payload.state,
        labels=payload.labels,
        assignees=payload.assignees,
        milestone=payload.milestone,
        token=gh_token,
    )
+
+
@app.get("/api/repos/{owner}/{repo}/issues/{issue_number}/comments")
async def api_list_issue_comments(
    owner: str = FPath(...),
    repo: str = FPath(...),
    issue_number: int = FPath(...),
    authorization: Optional[str] = Header(None),
):
    """Return all comments posted on the given issue."""
    gh_token = get_github_token(authorization)
    comments = await github_issues.list_issue_comments(
        owner, repo, issue_number, token=gh_token
    )
    return comments
+
+
@app.post("/api/repos/{owner}/{repo}/issues/{issue_number}/comments")
async def api_add_issue_comment(
    owner: str = FPath(...),
    repo: str = FPath(...),
    issue_number: int = FPath(...),
    payload: IssueCommentRequest = ...,
    authorization: Optional[str] = Header(None),
):
    """Post a new comment on the given issue."""
    gh_token = get_github_token(authorization)
    return await github_issues.add_issue_comment(
        owner,
        repo,
        issue_number,
        payload.body,
        token=gh_token,
    )
+
+
+# ============================================================================
+# Pull Request Endpoints (v2 upgrade)
+# ============================================================================
+
@app.get("/api/repos/{owner}/{repo}/pulls")
async def api_list_pulls(
    owner: str = FPath(...),
    repo: str = FPath(...),
    state: str = Query("open"),
    per_page: int = Query(30, ge=1, le=100),
    page: int = Query(1, ge=1),
    authorization: Optional[str] = Header(None),
):
    """Return one page of pull requests for the repository."""
    gh_token = get_github_token(authorization)
    pr_list = await github_pulls.list_pull_requests(
        owner,
        repo,
        state=state,
        per_page=per_page,
        page=page,
        token=gh_token,
    )
    return {"pull_requests": pr_list, "page": page, "per_page": per_page}
+
+
@app.get("/api/repos/{owner}/{repo}/pulls/{pull_number}")
async def api_get_pull(
    owner: str = FPath(...),
    repo: str = FPath(...),
    pull_number: int = FPath(...),
    authorization: Optional[str] = Header(None),
):
    """Fetch a single pull request by number."""
    gh_token = get_github_token(authorization)
    pr = await github_pulls.get_pull_request(owner, repo, pull_number, token=gh_token)
    return pr
+
+
@app.post("/api/repos/{owner}/{repo}/pulls")
async def api_create_pull(
    owner: str = FPath(...),
    repo: str = FPath(...),
    payload: PRCreateRequest = ...,
    authorization: Optional[str] = Header(None),
):
    """Open a new pull request from ``payload.head`` into ``payload.base``."""
    gh_token = get_github_token(authorization)
    return await github_pulls.create_pull_request(
        owner,
        repo,
        title=payload.title,
        head=payload.head,
        base=payload.base,
        body=payload.body,
        draft=payload.draft,
        token=gh_token,
    )
+
+
@app.put("/api/repos/{owner}/{repo}/pulls/{pull_number}/merge")
async def api_merge_pull(
    owner: str = FPath(...),
    repo: str = FPath(...),
    pull_number: int = FPath(...),
    payload: PRMergeRequest = ...,
    authorization: Optional[str] = Header(None),
):
    """Merge a pull request using the requested merge method and commit text."""
    gh_token = get_github_token(authorization)
    return await github_pulls.merge_pull_request(
        owner,
        repo,
        pull_number,
        merge_method=payload.merge_method,
        commit_title=payload.commit_title,
        commit_message=payload.commit_message,
        token=gh_token,
    )
+
+
@app.get("/api/repos/{owner}/{repo}/pulls/{pull_number}/files")
async def api_list_pr_files(
    owner: str = FPath(...),
    repo: str = FPath(...),
    pull_number: int = FPath(...),
    authorization: Optional[str] = Header(None),
):
    """Return the list of files changed by the pull request."""
    gh_token = get_github_token(authorization)
    changed = await github_pulls.list_pr_files(owner, repo, pull_number, token=gh_token)
    return changed
+
+
+# ============================================================================
+# Search Endpoints (v2 upgrade)
+# ============================================================================
+
@app.get("/api/search/code")
async def api_search_code(
    q: str = Query(..., description="Search query"),
    owner: Optional[str] = Query(None),
    repo: Optional[str] = Query(None),
    language: Optional[str] = Query(None),
    per_page: int = Query(30, ge=1, le=100),
    page: int = Query(1, ge=1),
    authorization: Optional[str] = Header(None),
):
    """Search GitHub code, optionally scoped to an owner, repo, or language."""
    gh_token = get_github_token(authorization)
    return await github_search.search_code(
        q,
        owner=owner,
        repo=repo,
        language=language,
        per_page=per_page,
        page=page,
        token=gh_token,
    )
+
+
@app.get("/api/search/issues")
async def api_search_issues(
    q: str = Query(..., description="Search query"),
    owner: Optional[str] = Query(None),
    repo: Optional[str] = Query(None),
    state: Optional[str] = Query(None),
    label: Optional[str] = Query(None),
    per_page: int = Query(30, ge=1, le=100),
    page: int = Query(1, ge=1),
    authorization: Optional[str] = Header(None),
):
    """Search GitHub issues and pull requests with optional scoping filters."""
    gh_token = get_github_token(authorization)
    return await github_search.search_issues(
        q,
        owner=owner,
        repo=repo,
        state=state,
        label=label,
        per_page=per_page,
        page=page,
        token=gh_token,
    )
+
+
@app.get("/api/search/repositories")
async def api_search_repositories(
    q: str = Query(..., description="Search query"),
    language: Optional[str] = Query(None),
    sort: Optional[str] = Query(None),
    per_page: int = Query(30, ge=1, le=100),
    page: int = Query(1, ge=1),
    authorization: Optional[str] = Header(None),
):
    """Search GitHub repositories, optionally filtered by language and sort order."""
    gh_token = get_github_token(authorization)
    return await github_search.search_repositories(
        q,
        language=language,
        sort=sort,
        per_page=per_page,
        page=page,
        token=gh_token,
    )
+
+
@app.get("/api/search/users")
async def api_search_users(
    q: str = Query(..., description="Search query"),
    type_filter: Optional[str] = Query(None, alias="type"),
    location: Optional[str] = Query(None),
    language: Optional[str] = Query(None),
    per_page: int = Query(30, ge=1, le=100),
    page: int = Query(1, ge=1),
    authorization: Optional[str] = Header(None),
):
    """Search GitHub users and organizations with optional filters."""
    gh_token = get_github_token(authorization)
    return await github_search.search_users(
        q,
        type_filter=type_filter,
        location=location,
        language=language,
        per_page=per_page,
        page=page,
        token=gh_token,
    )
+
+
+# ============================================================================
+# Route Analysis Endpoint (v2 upgrade)
+# ============================================================================
+
@app.post("/api/chat/route")
async def api_chat_route(payload: dict):
    """Preview how a message would be routed without executing it.

    Lets the frontend display which agent(s) will handle the request
    before anything runs.
    """
    msg = payload.get("message", "")
    if not msg:
        return JSONResponse({"error": "message is required"}, status_code=400)

    wf = route_request(msg)
    return {
        "category": wf.category.value,
        "agents": [agent.value for agent in wf.agents],
        "description": wf.description,
        "requires_repo_context": wf.requires_repo_context,
        "entity_number": wf.entity_number,
        "metadata": wf.metadata,
    }
+
+
+# ============================================================================
+# Authentication Endpoints (Web Flow + Device Flow)
+# ============================================================================
+
@app.get("/api/auth/url", response_model=AuthUrlResponse)
async def api_get_auth_url():
    """Build the GitHub OAuth authorization URL (Web Flow).

    Only usable when a client secret is configured.
    """
    url, csrf_state = generate_authorization_url()
    return AuthUrlResponse(authorization_url=url, state=csrf_state)
+
+
@app.post("/api/auth/callback", response_model=AuthSession)
async def api_auth_callback(request: AuthCallbackRequest):
    """Handle the GitHub OAuth callback (Web Flow).

    Exchanges the authorization code for an access token; returns a
    400 response when the exchange is rejected.
    """
    try:
        return await exchange_code_for_token(request.code, request.state)
    except ValueError as exc:
        return JSONResponse({"error": str(exc)}, status_code=400)
+
+
@app.post("/api/auth/validate", response_model=UserInfoResponse)
async def api_validate_token(request: TokenValidationRequest):
    """Validate a GitHub access token and return the associated user info.

    On failure an empty placeholder user is returned with
    ``authenticated=False`` rather than an error status.
    """
    user = await validate_token(request.access_token)
    if not user:
        return UserInfoResponse(
            user=GitHubUser(login="", id=0, avatar_url=""),
            authenticated=False,
        )
    return UserInfoResponse(user=user, authenticated=True)
+
+
@app.post("/api/auth/device/code")
async def api_device_code():
    """Start the device login flow (Step 1).

    Does NOT require a client secret; any provider failure is surfaced
    as a 500 JSON error.
    """
    try:
        return await initiate_device_flow()
    except Exception as exc:
        return JSONResponse({"error": str(exc)}, status_code=500)
+
+
@app.post("/api/auth/device/poll")
async def api_device_poll(payload: dict):
    """Poll GitHub to check if the user authorized the device (Step 2).

    Returns the session on success, 202 while authorization is pending,
    and 400 for missing/invalid device codes.
    """
    device_code = payload.get("device_code")
    if not device_code:
        return JSONResponse({"error": "Missing device_code"}, status_code=400)

    try:
        session = await poll_device_token(device_code)
    except ValueError as exc:
        return JSONResponse({"error": str(exc)}, status_code=400)

    if session:
        return session
    return JSONResponse({"status": "pending"}, status_code=202)
+
+
@app.get("/api/auth/status")
async def api_auth_status():
    """
    Smart check: Do we have a secret (Web Flow) or just ID (Device Flow)?
    This tells the frontend which UI to render.
    """
    has_secret = bool(os.getenv("GITHUB_CLIENT_SECRET"))
    # NOTE(review): the non-empty hard-coded fallback makes has_id always True,
    # even when GITHUB_CLIENT_ID is unset — so "configured" below can never be
    # False. Confirm whether shipping a baked-in default client ID is intended.
    has_id = bool(os.getenv("GITHUB_CLIENT_ID", "Iv23litmRp80Z6wmlyRn"))

    return {
        "mode": "web" if has_secret else "device",
        "configured": has_id,
        "oauth_configured": has_secret,
        # True when either supported PAT environment variable is set.
        "pat_configured": bool(os.getenv("GITPILOT_GITHUB_TOKEN") or os.getenv("GITHUB_TOKEN")),
    }
+
+
@app.get("/api/auth/app-url")
async def api_get_app_url():
    """Return the GitHub App installation URL and its slug."""
    slug = os.getenv("GITHUB_APP_SLUG", "gitpilota")
    return {
        "app_url": f"https://github.com/apps/{slug}",
        "app_slug": slug,
    }
+
+
@app.get("/api/auth/installation-status")
async def api_check_installation_status():
    """Check if GitHub App is installed for the current user."""
    # A validated PAT short-circuits the GitHub App check entirely.
    pat_token = os.getenv("GITPILOT_GITHUB_TOKEN") or os.getenv("GITHUB_TOKEN")

    if pat_token:
        user = await validate_token(pat_token)
        if user:
            return {
                "installed": True,
                "access_token": pat_token,
                "user": user,
                "auth_type": "pat",
            }

    # NOTE(review): the non-empty fallback "2313985" makes github_app_id always
    # truthy, so the "not configured" branch below is unreachable dead code.
    # Confirm whether the baked-in app id is intentional or the fallback
    # should be dropped so the branch can fire.
    github_app_id = os.getenv("GITHUB_APP_ID", "2313985")
    if not github_app_id:
        return {
            "installed": False,
            "message": "GitHub authentication not configured.",
            "auth_type": "none",
        }

    return {
        "installed": False,
        "message": "GitHub App not installed.",
        "auth_type": "github_app",
    }
+
+
@app.get("/api/auth/repo-access", response_model=RepoAccessResponse)
async def api_check_repo_access(
    owner: str = Query(...),
    repo: str = Query(...),
    authorization: Optional[str] = Header(None),
):
    """
    Check if we have write access to a repository via User token or GitHub App.

    This endpoint helps the frontend determine if it should show
    installation prompts or if the user already has sufficient permissions.
    """
    gh_token = get_github_token(authorization)
    info = await check_repo_write_access(owner, repo, user_token=gh_token)
    return RepoAccessResponse(
        can_write=info["can_write"],
        app_installed=info["app_installed"],
        auth_type=info["auth_type"],
    )
+
+
+# ============================================================================
+# Session Endpoints (Phase 1)
+# ============================================================================
+
@app.get("/api/sessions")
async def api_list_sessions():
    """Return all saved sessions."""
    all_sessions = _session_mgr.list_sessions()
    return {"sessions": all_sessions}
+
+
@app.post("/api/sessions")
async def api_create_session(payload: dict):
    """Create a new session.

    Accepts either legacy single-repo or multi-repo format:
        Legacy: {"repo_full_name": "owner/repo", "branch": "main"}
        Multi:  {"repos": [{full_name, branch, mode}], "active_repo": "owner/repo"}
    """
    repo = payload.get("repo_full_name", "")
    branch = payload.get("branch")
    name = payload.get("name")  # optional — derived from first user prompt
    session = _session_mgr.create(repo_full_name=repo, branch=branch, name=name)

    # Multi-repo context takes precedence; fall back to legacy single-repo.
    repos = payload.get("repos")
    if repos:
        session.repos = repos
        session.active_repo = payload.get("active_repo", repo)
    elif repo:
        session.repos = [
            {"full_name": repo, "branch": branch or "main", "mode": "write"}
        ]
        session.active_repo = repo

    _session_mgr.save(session)
    return {"session_id": session.id, "status": session.status}
+
+
@app.get("/api/sessions/{session_id}")
async def api_get_session(session_id: str):
    """Return a summary of one session (metadata plus message/checkpoint counts)."""
    session = _session_mgr.load(session_id)
    if not session:
        raise HTTPException(status_code=404, detail="Session not found")

    summary = {
        "id": session.id,
        "status": session.status,
        "repo_full_name": session.repo_full_name,
        "branch": session.branch,
        "created_at": session.created_at,
        "message_count": len(session.messages),
        "checkpoint_count": len(session.checkpoints),
        "repos": session.repos,
        "active_repo": session.active_repo,
    }
    return summary
+
+
@app.delete("/api/sessions/{session_id}")
async def api_delete_session(session_id: str):
    """Delete a session; 404 when no such session exists."""
    if not _session_mgr.delete(session_id):
        raise HTTPException(status_code=404, detail="Session not found")
    return {"deleted": True}
+
+
@app.patch("/api/sessions/{session_id}/context")
async def api_update_session_context(session_id: str, payload: dict):
    """Add, remove, or activate repos in a session's multi-repo context.

    Actions:
        {"action": "add", "repo_full_name": "owner/repo", "branch": "main"}
        {"action": "remove", "repo_full_name": "owner/repo"}
        {"action": "set_active", "repo_full_name": "owner/repo"}
    """
    session = _session_mgr.load(session_id)
    if not session:
        raise HTTPException(status_code=404, detail="Session not found")

    action = payload.get("action")
    repo_name = payload.get("repo_full_name")
    if not action or not repo_name:
        raise HTTPException(status_code=400, detail="action and repo_full_name required")

    if action == "add":
        already_tracked = any(
            entry.get("full_name") == repo_name for entry in session.repos
        )
        if not already_tracked:
            session.repos.append({
                "full_name": repo_name,
                "branch": payload.get("branch", "main"),
                "mode": "read",
            })
        if not session.active_repo:
            session.active_repo = repo_name
    elif action == "remove":
        session.repos = [
            entry for entry in session.repos if entry.get("full_name") != repo_name
        ]
        if session.active_repo == repo_name:
            # Fall back to the first remaining repo, or clear when none are left.
            session.active_repo = (
                session.repos[0]["full_name"] if session.repos else None
            )
    elif action == "set_active":
        tracked = any(entry.get("full_name") == repo_name for entry in session.repos)
        if not tracked:
            raise HTTPException(status_code=400, detail="Repo not in session context")
        # Exactly one repo holds write mode at a time; all others become read.
        for entry in session.repos:
            entry["mode"] = "write" if entry.get("full_name") == repo_name else "read"
        session.active_repo = repo_name
    else:
        raise HTTPException(status_code=400, detail=f"Unknown action: {action}")

    _session_mgr.save(session)
    return {"repos": session.repos, "active_repo": session.active_repo}
+
+
@app.post("/api/sessions/{session_id}/checkpoint")
async def api_create_checkpoint(session_id: str, payload: dict):
    """Create a labelled checkpoint for a session."""
    session = _session_mgr.load(session_id)
    if not session:
        raise HTTPException(status_code=404, detail="Session not found")
    cp = _session_mgr.create_checkpoint(
        session, label=payload.get("label", "checkpoint")
    )
    return {"checkpoint_id": cp.id, "label": cp.label, "created_at": cp.created_at}
+
+
+# ============================================================================
+# Hooks Endpoints (Phase 1)
+# ============================================================================
+
@app.get("/api/hooks")
async def api_list_hooks():
    """Return all registered hooks."""
    registered = _hook_mgr.list_hooks()
    return {"hooks": registered}
+
+
@app.post("/api/hooks")
async def api_register_hook(payload: dict):
    """Register a new hook.

    Expects ``event`` and ``name`` (required) plus optional ``command``,
    ``blocking``, and ``timeout`` keys. Returns 400 on a missing key or
    an unrecognized event value.
    """
    from .hooks import HookDefinition
    try:
        hook = HookDefinition(
            event=HookEvent(payload["event"]),
            name=payload["name"],
            command=payload.get("command"),
            blocking=payload.get("blocking", False),
            timeout=payload.get("timeout", 30),
        )
        _hook_mgr.register(hook)
        return {"registered": True, "name": hook.name, "event": hook.event.value}
    except (KeyError, ValueError) as e:
        # Chain the original exception so logs/tracebacks keep the root cause.
        raise HTTPException(status_code=400, detail=str(e)) from e
+
+
@app.delete("/api/hooks/{event}/{name}")
async def api_unregister_hook(event: str, name: str):
    """Unregister a hook by event and name.

    Returns 400 when the event string is not a valid HookEvent or the
    manager rejects the removal.
    """
    try:
        _hook_mgr.unregister(HookEvent(event), name)
        return {"unregistered": True}
    except ValueError as e:
        # Chain the original exception so the root cause is preserved.
        raise HTTPException(status_code=400, detail=str(e)) from e
+
+
+# ============================================================================
+# Permissions Endpoints (Phase 1)
+# ============================================================================
+
@app.get("/api/permissions")
async def api_get_permissions():
    """Return the current permission policy as a plain dict."""
    policy = _perm_mgr.to_dict()
    return policy
+
+
@app.put("/api/permissions/mode")
async def api_set_permission_mode(payload: dict):
    """Set the permission mode (normal, plan, auto).

    Returns 400 when the supplied mode string is not a valid PermissionMode.
    """
    mode_str = payload.get("mode", "normal")
    try:
        _perm_mgr.policy.mode = PermissionMode(mode_str)
    except ValueError as e:
        # Chain the original exception so the invalid-enum cause is preserved.
        raise HTTPException(status_code=400, detail=f"Invalid mode: {mode_str}") from e
    return {"mode": _perm_mgr.policy.mode.value}
+
+
+# ============================================================================
+# Project Context / Memory Endpoints (Phase 1)
+# ============================================================================
+
@app.get("/api/repos/{owner}/{repo}/context")
async def api_get_project_context(
    owner: str = FPath(...),
    repo: str = FPath(...),
):
    """Get project conventions and memory for a repository workspace.

    Returns an empty-context payload when the workspace has not been
    initialized on disk yet.
    """
    # Consistency: reuse the file's shared _workspace_path helper instead of
    # re-deriving Path.home()/".gitpilot"/"workspaces"/... inline.
    workspace_path = _workspace_path(owner, repo)
    if not workspace_path.exists():
        return {"conventions": "", "rules": [], "auto_memory": {}, "system_prompt": ""}
    mgr = MemoryManager(workspace_path)
    ctx = mgr.load_context()
    return {
        "conventions": ctx.conventions,
        "rules": ctx.rules,
        "auto_memory": ctx.auto_memory,
        "system_prompt": ctx.to_system_prompt(),
    }
+
+
@app.post("/api/repos/{owner}/{repo}/context/init")
async def api_init_project_context(
    owner: str = FPath(...),
    repo: str = FPath(...),
):
    """Initialize .gitpilot/ directory with template GITPILOT.md.

    Creates the workspace directory on demand.
    """
    # Consistency: reuse the file's shared _workspace_path helper instead of
    # re-deriving the workspace path inline.
    workspace_path = _workspace_path(owner, repo)
    workspace_path.mkdir(parents=True, exist_ok=True)
    mgr = MemoryManager(workspace_path)
    md_path = mgr.init_project()
    return {"initialized": True, "path": str(md_path)}
+
+
@app.post("/api/repos/{owner}/{repo}/context/pattern")
async def api_add_learned_pattern(
    owner: str = FPath(...),
    repo: str = FPath(...),
    payload: dict = ...,
):
    """Add a learned pattern to auto-memory.

    Requires a non-empty "pattern" key in the payload; creates the
    workspace directory on demand.
    """
    pattern = payload.get("pattern", "")
    if not pattern:
        raise HTTPException(status_code=400, detail="pattern is required")
    # Consistency: reuse the file's shared _workspace_path helper instead of
    # re-deriving the workspace path inline.
    workspace_path = _workspace_path(owner, repo)
    workspace_path.mkdir(parents=True, exist_ok=True)
    mgr = MemoryManager(workspace_path)
    mgr.add_learned_pattern(pattern)
    return {"added": True, "pattern": pattern}
+
+
+# ============================================================================
+# Context Vault Endpoints (additive β Context + Use Case system)
+# ============================================================================
+
+def _workspace_path(owner: str, repo: str) -> Path:
+ """Resolve the local workspace path for a repo."""
+ return Path.home() / ".gitpilot" / "workspaces" / owner / repo
+
+
@app.get("/api/repos/{owner}/{repo}/context/assets")
async def api_list_context_assets(
    owner: str = FPath(...),
    repo: str = FPath(...),
):
    """Return every uploaded context asset for the repository."""
    vault = ContextVault(_workspace_path(owner, repo))
    return {"assets": [asset.to_dict() for asset in vault.list_assets()]}
+
+
@app.post("/api/repos/{owner}/{repo}/context/assets/upload")
async def api_upload_context_asset(
    owner: str = FPath(...),
    repo: str = FPath(...),
    file: UploadFile = File(...),
):
    """Upload a file to the project context vault.

    Missing filename/content-type fall back to "upload" and "".
    Returns 400 when the vault rejects the asset.
    """
    vault = ContextVault(_workspace_path(owner, repo))
    content = await file.read()
    mime = file.content_type or ""
    filename = file.filename or "upload"

    try:
        meta = vault.upload_asset(filename, content, mime=mime)
    except ValueError as e:
        # Chain the original exception so the root cause is preserved.
        raise HTTPException(status_code=400, detail=str(e)) from e
    return {"asset": meta.to_dict()}
+
+
@app.delete("/api/repos/{owner}/{repo}/context/assets/{asset_id}")
async def api_delete_context_asset(
    owner: str = FPath(...),
    repo: str = FPath(...),
    asset_id: str = FPath(...),
):
    """Remove one asset from the repository's context vault."""
    ContextVault(_workspace_path(owner, repo)).delete_asset(asset_id)
    return {"deleted": True, "asset_id": asset_id}
+
+
@app.get("/api/repos/{owner}/{repo}/context/assets/{asset_id}/download")
async def api_download_context_asset(
    owner: str = FPath(...),
    repo: str = FPath(...),
    asset_id: str = FPath(...),
):
    """Stream a raw context asset file back to the client; 404 if unknown."""
    vault = ContextVault(_workspace_path(owner, repo))
    asset_path = vault.get_asset_path(asset_id)
    if not asset_path:
        raise HTTPException(status_code=404, detail="Asset not found")
    return FileResponse(asset_path, filename=vault.get_asset_filename(asset_id))
+
+
+# ============================================================================
+# Use Case Endpoints (additive β guided requirement clarification)
+# ============================================================================
+
@app.get("/api/repos/{owner}/{repo}/use-cases")
async def api_list_use_cases(
    owner: str = FPath(...),
    repo: str = FPath(...),
):
    """Return every use case recorded for the repository."""
    mgr = UseCaseManager(_workspace_path(owner, repo))
    cases = mgr.list_use_cases()
    return {"use_cases": cases}
+
+
@app.post("/api/repos/{owner}/{repo}/use-cases")
async def api_create_use_case(
    owner: str = FPath(...),
    repo: str = FPath(...),
    payload: dict = ...,
):
    """Create a new use case from an optional title and initial notes."""
    mgr = UseCaseManager(_workspace_path(owner, repo))
    uc = mgr.create_use_case(
        title=payload.get("title", "New Use Case"),
        initial_notes=payload.get("initial_notes", ""),
    )
    return {"use_case": uc.to_dict()}
+
+
@app.get("/api/repos/{owner}/{repo}/use-cases/{use_case_id}")
async def api_get_use_case(
    owner: str = FPath(...),
    repo: str = FPath(...),
    use_case_id: str = FPath(...),
):
    """Fetch a single use case (messages and spec included); 404 if unknown."""
    mgr = UseCaseManager(_workspace_path(owner, repo))
    uc = mgr.get_use_case(use_case_id)
    if not uc:
        raise HTTPException(status_code=404, detail="Use case not found")
    return {"use_case": uc.to_dict()}
+
+
@app.post("/api/repos/{owner}/{repo}/use-cases/{use_case_id}/chat")
async def api_use_case_chat(
    owner: str = FPath(...),
    repo: str = FPath(...),
    use_case_id: str = FPath(...),
    payload: dict = ...,
):
    """Relay a guided chat message; returns the updated use case and spec."""
    msg = payload.get("message", "")
    if not msg:
        raise HTTPException(status_code=400, detail="message is required")
    mgr = UseCaseManager(_workspace_path(owner, repo))
    uc = mgr.chat(use_case_id, msg)
    if not uc:
        raise HTTPException(status_code=404, detail="Use case not found")
    return {"use_case": uc.to_dict()}
+
+
@app.post("/api/repos/{owner}/{repo}/use-cases/{use_case_id}/finalize")
async def api_finalize_use_case(
    owner: str = FPath(...),
    repo: str = FPath(...),
    use_case_id: str = FPath(...),
):
    """Finalize a use case: mark it active and export its markdown spec."""
    mgr = UseCaseManager(_workspace_path(owner, repo))
    uc = mgr.finalize(use_case_id)
    if not uc:
        raise HTTPException(status_code=404, detail="Use case not found")
    return {"use_case": uc.to_dict()}
+
+
+# ============================================================================
+# MCP Endpoints (Phase 2)
+# ============================================================================
+
@app.get("/api/mcp/servers")
async def api_mcp_list_servers():
    """Return configured MCP servers plus their connection status."""
    snapshot = _mcp_client.to_dict()
    return snapshot
+
+
@app.post("/api/mcp/connect/{server_name}")
async def api_mcp_connect(server_name: str):
    """Connect to a named MCP server.

    Returns the server's advertised tools on success; any connection
    failure is surfaced as a 400 with the error message.
    """
    try:
        conn = await _mcp_client.connect(server_name)
    except Exception as e:
        # Chain the original exception so the connection failure is traceable.
        raise HTTPException(status_code=400, detail=str(e)) from e
    return {
        "connected": True,
        "server": server_name,
        "tools": [{"name": t.name, "description": t.description} for t in conn.tools],
    }
+
+
@app.post("/api/mcp/disconnect/{server_name}")
async def api_mcp_disconnect(server_name: str):
    """Disconnect from a named MCP server (no error if not connected)."""
    await _mcp_client.disconnect(server_name)
    return {"disconnected": True, "server": server_name}
+
+
@app.post("/api/mcp/call")
async def api_mcp_call_tool(payload: dict):
    """Call a tool on a connected MCP server.

    Requires "server" and "tool" keys; "params" defaults to {}.
    400 on missing keys, 404 when not connected, 500 on tool failure.
    """
    server = payload.get("server", "")
    tool_name = payload.get("tool", "")
    params = payload.get("params", {})
    if not server or not tool_name:
        raise HTTPException(status_code=400, detail="server and tool are required")
    # NOTE(review): reaches into the client's private _connections mapping —
    # consider exposing a public lookup on the MCP client instead.
    conn = _mcp_client._connections.get(server)
    if not conn:
        raise HTTPException(status_code=404, detail=f"Not connected to server: {server}")
    try:
        result = await _mcp_client.call_tool(conn, tool_name, params)
    except Exception as e:
        # Chain the original exception so the tool failure is traceable.
        raise HTTPException(status_code=500, detail=str(e)) from e
    return {"result": result}
+
+
+# ============================================================================
+# Plugin Endpoints (Phase 2)
+# ============================================================================
+
@app.get("/api/plugins")
async def api_list_plugins():
    """Return all installed plugins as dicts."""
    return {"plugins": [p.to_dict() for p in _plugin_mgr.list_installed()]}
+
+
@app.post("/api/plugins/install")
async def api_install_plugin(payload: dict):
    """Install a plugin from a git URL or local path.

    Requires a non-empty "source" key; installation failures return 400.
    """
    source = payload.get("source", "")
    if not source:
        raise HTTPException(status_code=400, detail="source is required")
    try:
        info = _plugin_mgr.install(source)
    except Exception as e:
        # Chain the original exception so the install failure is traceable.
        raise HTTPException(status_code=400, detail=str(e)) from e
    return {"installed": True, "plugin": info.to_dict()}
+
+
@app.delete("/api/plugins/{name}")
async def api_uninstall_plugin(name: str):
    """Uninstall a plugin by name; 404 when it is not installed."""
    if not _plugin_mgr.uninstall(name):
        raise HTTPException(status_code=404, detail=f"Plugin not found: {name}")
    return {"uninstalled": True, "name": name}
+
+
+# ============================================================================
+# Skills Endpoints (Phase 2)
+# ============================================================================
+
@app.get("/api/skills")
async def api_list_skills():
    """Return every available skill."""
    available = _skill_mgr.list_skills()
    return {"skills": available}
+
+
@app.post("/api/skills/invoke")
async def api_invoke_skill(payload: dict):
    """Invoke a skill by name with an optional context dict."""
    name = payload.get("name", "")
    if not name:
        raise HTTPException(status_code=400, detail="name is required")
    rendered = _skill_mgr.invoke(name, payload.get("context", {}))
    if rendered is None:
        raise HTTPException(status_code=404, detail=f"Skill not found: {name}")
    return {"skill": name, "rendered_prompt": rendered}
+
+
@app.post("/api/skills/reload")
async def api_reload_skills():
    """Reload skills from all configured sources and report the count."""
    loaded = _skill_mgr.load_all()
    return {"reloaded": True, "count": loaded}
+
+
+# ============================================================================
+# Vision Endpoints (Phase 2)
+# ============================================================================
+
@app.post("/api/vision/analyze")
async def api_vision_analyze(payload: dict):
    """Analyze an image with a text prompt.

    Requires "image_path"; "prompt" defaults to a generic description
    request and "provider" to "openai". Analysis failures return 400.
    """
    from .vision import VisionAnalyzer
    image_path = payload.get("image_path", "")
    prompt = payload.get("prompt", "Describe this image.")
    provider = payload.get("provider", "openai")
    if not image_path:
        raise HTTPException(status_code=400, detail="image_path is required")
    try:
        analyzer = VisionAnalyzer(provider=provider)
        result = await analyzer.analyze_image(Path(image_path), prompt)
        return result.to_dict()
    except Exception as e:
        # Chain the original exception so the analysis failure is traceable.
        raise HTTPException(status_code=400, detail=str(e)) from e
+
+
+# ============================================================================
+# Model Router Endpoints (Phase 2)
+# ============================================================================
+
@app.post("/api/model-router/select")
async def api_model_select(payload: dict):
    """Preview which model the router would pick for a request."""
    request = payload.get("request", "")
    if not request:
        raise HTTPException(status_code=400, detail="request is required")
    choice = _model_router.select(request, payload.get("category"))
    return {
        "model": choice.model,
        "tier": choice.tier.value,
        "complexity": choice.complexity.value,
        "provider": choice.provider,
        "reason": choice.reason,
    }
+
+
@app.get("/api/model-router/usage")
async def api_model_usage():
    """Return the model usage summary and budget status."""
    usage = _model_router.get_usage_summary()
    return usage
+
+
+# ============================================================================
+# Agent Teams Endpoints (Phase 3)
+# ============================================================================
+
@app.post("/api/agent-teams/plan")
async def api_team_plan(payload: dict):
    """Split a complex task into parallel subtasks (planning only)."""
    task = payload.get("task", "")
    if not task:
        raise HTTPException(status_code=400, detail="task is required")
    pieces = _agent_team.plan_and_split(task)
    return {
        "subtasks": [
            {"id": s.id, "title": s.title, "description": s.description}
            for s in pieces
        ]
    }
+
+
@app.post("/api/agent-teams/execute")
async def api_team_execute(payload: dict):
    """Plan a task, execute the resulting subtasks in parallel, merge results."""
    task = payload.get("task", "")
    if not task:
        raise HTTPException(status_code=400, detail="task is required")
    planned = _agent_team.plan_and_split(task)
    merged = await _agent_team.execute_parallel(planned)
    return merged.to_dict()
+
+
+# ============================================================================
+# Learning Engine Endpoints (Phase 3)
+# ============================================================================
+
@app.post("/api/learning/evaluate")
async def api_learning_evaluate(payload: dict):
    """Evaluate an action outcome for learning.

    Body: {"action": str, "outcome": dict, "repo": str}.
    """
    action = payload.get("action", "")
    if not action:
        raise HTTPException(status_code=400, detail="action is required")
    evaluation = _learning_engine.evaluate_outcome(
        action,
        payload.get("outcome", {}),
        repo=payload.get("repo", ""),
    )
    return {
        "action": evaluation.action,
        "success": evaluation.success,
        "score": evaluation.score,
        "feedback": evaluation.feedback,
    }
+
+
@app.get("/api/learning/insights/{owner}/{repo}")
async def api_learning_insights(owner: str = FPath(...), repo: str = FPath(...)):
    """Get learned insights for a repository."""
    repo_name = f"{owner}/{repo}"
    insights = _learning_engine.get_repo_insights(repo_name)
    # Preserve the response key order: repo first, then the insight fields.
    result = {"repo": repo_name}
    for field in ("patterns", "preferred_style", "success_rate", "total_evaluations"):
        result[field] = getattr(insights, field)
    return result
+
+
@app.post("/api/learning/style")
async def api_learning_set_style(payload: dict):
    """Set preferred coding style for a repository."""
    repo_name = payload.get("repo", "")
    if not repo_name:
        raise HTTPException(status_code=400, detail="repo is required")
    style_prefs = payload.get("style", {})
    _learning_engine.set_preferred_style(repo_name, style_prefs)
    return {"repo": repo_name, "style": style_prefs}
+
+
+# ============================================================================
+# Cross-Repo Intelligence Endpoints (Phase 3)
+# ============================================================================
+
@app.post("/api/cross-repo/dependencies")
async def api_cross_repo_dependencies(payload: dict):
    """Analyze dependencies from provided file contents.

    Body: {"files": {filename: content, ...}}.
    """
    file_map = payload.get("files", {})
    if not file_map:
        raise HTTPException(status_code=400, detail="files dict is required (filename -> content)")
    return _cross_repo.analyze_dependencies_from_files(file_map).to_dict()
+
+
@app.post("/api/cross-repo/impact")
async def api_cross_repo_impact(payload: dict):
    """Analyze impact of updating a package across the provided files."""
    package_name = payload.get("package", "")
    if not package_name:
        raise HTTPException(status_code=400, detail="package is required")
    file_map = payload.get("files", {})
    graph = _cross_repo.analyze_dependencies_from_files(file_map)
    report = _cross_repo.impact_analysis(graph, package_name, payload.get("new_version"))
    return report.to_dict()
+
+
+# ============================================================================
+# Predictions Endpoints (Phase 3)
+# ============================================================================
+
@app.post("/api/predictions/suggest")
async def api_predictions_suggest(payload: dict):
    """Get proactive suggestions based on context."""
    context = payload.get("context", "")
    if not context:
        raise HTTPException(status_code=400, detail="context is required")
    predicted = _predictive_engine.predict(context)
    return {"suggestions": [item.to_dict() for item in predicted]}
+
+
@app.get("/api/predictions/rules")
async def api_predictions_rules():
    """List all prediction rules currently registered with the engine."""
    rules = _predictive_engine.list_rules()
    return {"rules": rules}
+
+
+# ============================================================================
+# Security Scanner Endpoints (Phase 3)
+# ============================================================================
+
@app.post("/api/security/scan-file")
async def api_security_scan_file(payload: dict):
    """Scan a single file for security issues."""
    file_path = payload.get("file_path", "")
    if not file_path:
        raise HTTPException(status_code=400, detail="file_path is required")
    serialized = [finding.to_dict() for finding in _security_scanner.scan_file(file_path)]
    return {"findings": serialized, "count": len(serialized)}
+
+
@app.post("/api/security/scan-directory")
async def api_security_scan_directory(payload: dict):
    """Recursively scan a directory for security issues."""
    directory = payload.get("directory", "")
    if not directory:
        raise HTTPException(status_code=400, detail="directory is required")
    return _security_scanner.scan_directory(directory).to_dict()
+
+
@app.post("/api/security/scan-diff")
async def api_security_scan_diff(payload: dict):
    """Scan a git diff for security issues in added lines."""
    diff_text = payload.get("diff", "")
    if not diff_text:
        raise HTTPException(status_code=400, detail="diff is required")
    serialized = [finding.to_dict() for finding in _security_scanner.scan_diff(diff_text)]
    return {"findings": serialized, "count": len(serialized)}
+
+
+# ============================================================================
+# Natural Language Database Endpoints (Phase 3)
+# ============================================================================
+
@app.post("/api/nl-database/translate")
async def api_nl_translate(payload: dict):
    """Translate natural language to SQL.

    Body: {"question": str, "dialect": str, "tables": [{"name", "columns",
    "primary_key"}, ...]}. The generated SQL is validated before returning.
    """
    question = payload.get("question", "")
    if not question:
        raise HTTPException(status_code=400, detail="question is required")
    dialect = payload.get("dialect", "postgresql")
    engine = NLQueryEngine(dialect=QueryDialect(dialect))
    for table_spec in payload.get("tables", []):
        schema = TableSchema(
            name=table_spec["name"],
            columns=table_spec.get("columns", []),
            primary_key=table_spec.get("primary_key"),
        )
        engine.add_table(schema)
    sql = engine.translate(question)
    error = engine.validate_query(sql)
    return {"question": question, "sql": sql, "valid": error is None, "error": error}
+
+
@app.post("/api/nl-database/explain")
async def api_nl_explain(payload: dict):
    """Explain what a SQL query does in plain English."""
    sql = payload.get("sql", "")
    if not sql:
        raise HTTPException(status_code=400, detail="sql is required")
    return {"sql": sql, "explanation": _nl_engine.explain(sql)}
+
+
+# ============================================================================
+# Branch Listing Endpoint (Claude-Code-on-Web Parity)
+# ============================================================================
+
class BranchInfo(BaseModel):
    """A single repository branch (subset of the GitHub branches payload)."""

    name: str
    is_default: bool = False  # True when name matches the repo's default branch
    protected: bool = False  # GitHub branch-protection flag
    commit_sha: Optional[str] = None  # head commit SHA, when provided by GitHub
+
+
class BranchListResponse(BaseModel):
    """Response body for GET /api/repos/{owner}/{repo}/branches."""

    repository: str  # "owner/repo"
    default_branch: str
    page: int  # starting page that was requested
    per_page: int
    has_more: bool  # more branches exist beyond what was aggregated
    branches: List[BranchInfo]
+
+
@app.get("/api/repos/{owner}/{repo}/branches", response_model=BranchListResponse)
async def api_list_branches(
    owner: str = FPath(...),
    repo: str = FPath(...),
    page: int = Query(1, ge=1),
    per_page: int = Query(100, ge=1, le=100),
    query: Optional[str] = Query(None, description="Substring filter"),
    authorization: Optional[str] = Header(None),
):
    """List branches for a repository with optional search filtering.

    Aggregates up to 10 pages of the GitHub branches API starting at
    ``page``, applies the optional ``query`` substring filter, and sorts
    the default branch first. ``has_more`` is True only when the 10-page
    safety cap was hit while GitHub still advertised a next page.

    Raises:
        HTTPException: 401 when no GitHub token is available; otherwise the
            upstream GitHub status code on lookup/list failures.
    """
    import httpx as _httpx

    token = get_github_token(authorization)
    if not token:
        raise HTTPException(status_code=401, detail="GitHub token required")

    headers = {
        "Authorization": f"Bearer {token}",
        "Accept": "application/vnd.github+json",
        "User-Agent": "gitpilot",
    }
    timeout = _httpx.Timeout(connect=10.0, read=30.0, write=30.0, pool=10.0)

    async with _httpx.AsyncClient(
        base_url="https://api.github.com", headers=headers, timeout=timeout
    ) as client:
        # Fetch repo info for default_branch
        repo_resp = await client.get(f"/repos/{owner}/{repo}")
        if repo_resp.status_code >= 400:
            logging.warning(
                "branches: repo lookup failed %s/%s β %s %s",
                owner, repo, repo_resp.status_code, repo_resp.text[:200],
            )
            raise HTTPException(
                status_code=repo_resp.status_code,
                detail=f"Cannot access repository: {repo_resp.status_code}",
            )

        repo_data = repo_resp.json()
        default_branch_name = repo_data.get("default_branch", "main")

        # Fetch ALL branch pages (GitHub caps at 100 per page)
        all_raw = []
        current_page = page
        truncated = False  # set when the safety cap stops pagination early
        while True:
            branch_resp = await client.get(
                f"/repos/{owner}/{repo}/branches",
                params={"page": current_page, "per_page": per_page},
            )
            if branch_resp.status_code >= 400:
                logging.warning(
                    "branches: list failed %s/%s page=%s β %s %s",
                    owner, repo, current_page, branch_resp.status_code, branch_resp.text[:200],
                )
                raise HTTPException(
                    status_code=branch_resp.status_code,
                    detail=f"Failed to list branches: {branch_resp.status_code}",
                )

            # FIX: parse the JSON body once instead of calling .json() twice.
            body = branch_resp.json()
            page_data = body if isinstance(body, list) else []
            all_raw.extend(page_data)

            # Check if there are more pages via the RFC 5988 Link header.
            link_header = branch_resp.headers.get("Link", "") or ""
            if 'rel="next"' not in link_header or len(page_data) < per_page:
                break
            current_page += 1
            # Safety: cap at 10 pages (1000 branches)
            if current_page - page >= 10:
                # FIX: record truncation so has_more is not always False.
                truncated = True
                break

    q = (query or "").strip().lower()

    branches = []
    for b in all_raw:
        name = (b.get("name") or "").strip()
        if not name:
            continue
        if q and q not in name.lower():
            continue
        branches.append(BranchInfo(
            name=name,
            is_default=(name == default_branch_name),
            protected=bool(b.get("protected", False)),
            commit_sha=(b.get("commit") or {}).get("sha"),
        ))

    # Sort: default branch first, then alphabetical
    branches.sort(key=lambda x: (0 if x.is_default else 1, x.name.lower()))

    return BranchListResponse(
        repository=f"{owner}/{repo}",
        default_branch=default_branch_name,
        page=page,
        per_page=per_page,
        has_more=truncated,
        branches=branches,
    )
+
+
+# ============================================================================
+# Environment Configuration Endpoints (Claude-Code-on-Web Parity)
+# ============================================================================
+
import json as _json
# On-disk root for environment configurations; each environment is persisted
# as a single "<id>.json" file under ~/.gitpilot/environments.
_ENV_ROOT = Path.home() / ".gitpilot" / "environments"
+
+
class EnvironmentConfig(BaseModel):
    """An environment configuration, persisted as <id>.json under _ENV_ROOT."""

    id: Optional[str] = None  # assigned on create when the client omits it
    name: str = "Default"
    network_access: str = Field("limited", description="limited | full | none")
    env_vars: dict = Field(default_factory=dict)  # arbitrary key/value pairs
+
+
class EnvironmentListResponse(BaseModel):
    """Response wrapper for GET /api/environments."""

    environments: List[EnvironmentConfig]
+
+
@app.get("/api/environments", response_model=EnvironmentListResponse)
async def api_list_environments():
    """List all environment configurations stored on disk."""
    _ENV_ROOT.mkdir(parents=True, exist_ok=True)
    configs = []
    for candidate in sorted(_ENV_ROOT.glob("*.json")):
        try:
            configs.append(EnvironmentConfig(**_json.loads(candidate.read_text())))
        except Exception:
            # Skip unreadable/corrupt files rather than failing the listing.
            continue
    if not configs:
        # Always expose at least a default environment.
        configs.append(EnvironmentConfig(id="default", name="Default", network_access="limited"))
    return EnvironmentListResponse(environments=configs)
+
+
@app.post("/api/environments")
async def api_create_environment(config: EnvironmentConfig):
    """Create a new environment configuration; assigns an id when omitted."""
    import uuid
    _ENV_ROOT.mkdir(parents=True, exist_ok=True)
    if not config.id:
        config.id = uuid.uuid4().hex[:12]
    serialized = config.model_dump()
    (_ENV_ROOT / f"{config.id}.json").write_text(_json.dumps(serialized, indent=2))
    return serialized
+
+
@app.put("/api/environments/{env_id}")
async def api_update_environment(env_id: str, config: EnvironmentConfig):
    """Update (or create) an environment configuration under env_id."""
    _ENV_ROOT.mkdir(parents=True, exist_ok=True)
    # The path parameter wins over any id in the body.
    config.id = env_id
    serialized = config.model_dump()
    (_ENV_ROOT / f"{env_id}.json").write_text(_json.dumps(serialized, indent=2))
    return serialized
+
+
@app.delete("/api/environments/{env_id}")
async def api_delete_environment(env_id: str):
    """Delete an environment configuration; 404 when it does not exist."""
    target = _ENV_ROOT / f"{env_id}.json"
    if not target.exists():
        raise HTTPException(status_code=404, detail="Environment not found")
    target.unlink()
    return {"deleted": True}
+
+
+# ============================================================================
+# Session Messages + Diff Endpoints (Claude-Code-on-Web Parity)
+# ============================================================================
+
@app.post("/api/sessions/{session_id}/message")
async def api_add_session_message(session_id: str, payload: dict):
    """Add a message to a session's conversation history."""
    try:
        session = _session_mgr.load(session_id)
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail="Session not found")
    session.add_message(
        payload.get("role", "user"),
        payload.get("content", ""),
        **payload.get("metadata", {}),
    )
    _session_mgr.save(session)
    return {"message_count": len(session.messages)}
+
+
@app.get("/api/sessions/{session_id}/messages")
async def api_get_session_messages(session_id: str):
    """Get all messages for a session, in stored order."""
    try:
        session = _session_mgr.load(session_id)
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail="Session not found")
    history = []
    for msg in session.messages:
        history.append({
            "role": msg.role,
            "content": msg.content,
            "timestamp": msg.timestamp,
            "metadata": msg.metadata,
        })
    return {"session_id": session.id, "messages": history}
+
+
@app.get("/api/sessions/{session_id}/diff")
async def api_get_session_diff(session_id: str):
    """Get diff stats for a session (placeholder for sandbox integration)."""
    try:
        session = _session_mgr.load(session_id)
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail="Session not found")
    # Empty-diff shape used when no diff has been recorded on the session.
    empty_diff = {"files_changed": 0, "additions": 0, "deletions": 0, "files": []}
    return {"session_id": session.id, "diff": session.metadata.get("diff", empty_diff)}
+
+
@app.post("/api/sessions/{session_id}/status")
async def api_update_session_status(session_id: str, payload: dict):
    """Update session status (active, paused, completed, failed, waiting)."""
    try:
        session = _session_mgr.load(session_id)
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail="Session not found")
    new_status = payload.get("status", "active")
    allowed = {"active", "paused", "completed", "failed", "waiting"}
    if new_status not in allowed:
        raise HTTPException(status_code=400, detail="Invalid status")
    session.status = new_status
    _session_mgr.save(session)
    return {"session_id": session.id, "status": session.status}
+
+
+# ============================================================================
+# WebSocket Streaming Endpoint (Claude-Code-on-Web Parity)
+# ============================================================================
+
+from fastapi import WebSocket, WebSocketDisconnect
+
+
@app.websocket("/ws/sessions/{session_id}")
async def session_websocket(websocket: WebSocket, session_id: str):
    """
    Real-time bidirectional communication for a coding session.

    Server events:
        { type: "agent_message", content: "..." }
        { type: "tool_use", tool: "bash", input: "npm test" }
        { type: "tool_result", tool: "bash", output: "All tests passed" }
        { type: "diff_update", stats: { additions: N, deletions: N, files: N } }
        { type: "status_change", status: "completed" }
        { type: "error", message: "..." }

    Client events:
        { type: "user_message", content: "..." }
        { type: "cancel" }
    """
    await websocket.accept()

    # Verify session exists; a missing session closes the socket immediately
    # after reporting the error to the client.
    try:
        session = _session_mgr.load(session_id)
    except FileNotFoundError:
        await websocket.send_json({"type": "error", "message": "Session not found"})
        await websocket.close()
        return

    # Send session history summary on connect so the client can restore state.
    await websocket.send_json({
        "type": "session_restored",
        "session_id": session.id,
        "status": session.status,
        "message_count": len(session.messages),
    })

    try:
        # Receive loop: one JSON event per iteration until disconnect.
        while True:
            data = await websocket.receive_json()
            event_type = data.get("type", "")

            if event_type == "user_message":
                # Persist the user's message before dispatching to the agent.
                content = data.get("content", "")
                session.add_message("user", content)
                _session_mgr.save(session)

                # Acknowledge receipt
                await websocket.send_json({
                    "type": "message_received",
                    "message_index": len(session.messages) - 1,
                })

                # Stream agent response (integration point for agentic.py)
                await websocket.send_json({
                    "type": "status_change",
                    "status": "active",
                })

                # Agent processing hook -- when the agent orchestrator is wired,
                # replace this with actual streaming from agentic.py
                try:
                    repo_full = session.repo_full_name or ""
                    parts = repo_full.split("/", 1)
                    if len(parts) == 2 and content.strip():
                        # Use canonical dispatcher signature
                        result = await dispatch_request(
                            user_request=content,
                            repo_full_name=f"{parts[0]}/{parts[1]}",
                            branch_name=session.branch,
                        )
                        # Best-effort extraction of a displayable answer from
                        # whichever key the dispatcher populated.
                        answer = ""
                        if isinstance(result, dict):
                            answer = (
                                result.get("result")
                                or result.get("answer")
                                or result.get("message")
                                or result.get("summary")
                                or (result.get("plan", {}) or {}).get("summary")
                                or str(result)
                            )
                        else:
                            answer = str(result)

                        # Stream the response
                        await websocket.send_json({
                            "type": "agent_message",
                            "content": answer,
                        })

                        session.add_message("assistant", answer)
                        _session_mgr.save(session)
                    else:
                        await websocket.send_json({
                            "type": "agent_message",
                            "content": "Session is not connected to a repository.",
                        })
                except Exception as agent_err:
                    # Agent failures are reported to the client but do not
                    # terminate the socket.
                    logger.error(f"Agent error in WS session {session_id}: {agent_err}")
                    await websocket.send_json({
                        "type": "error",
                        "message": str(agent_err),
                    })

                await websocket.send_json({
                    "type": "status_change",
                    "status": "waiting",
                })

            elif event_type == "cancel":
                # NOTE(review): no actual cancellation is wired up yet; this
                # only reports the session as idle again.
                await websocket.send_json({
                    "type": "status_change",
                    "status": "waiting",
                })

            elif event_type == "ping":
                # Lightweight keepalive.
                await websocket.send_json({"type": "pong"})

    except WebSocketDisconnect:
        logger.info(f"WebSocket disconnected for session {session_id}")
    except Exception as e:
        logger.error(f"WebSocket error for session {session_id}: {e}")
        try:
            # The socket may already be closed; ignore secondary failures.
            await websocket.send_json({"type": "error", "message": str(e)})
        except Exception:
            pass
+
+
+# ============================================================================
+# Static Files & Frontend Serving (SPA Support)
+# ============================================================================
+
# Location of the built frontend that ships alongside this module (./web).
# Mounted only when present so the API can also run without a built UI.
STATIC_DIR = Path(__file__).resolve().parent / "web"
ASSETS_DIR = STATIC_DIR / "assets"

# Files under web/assets are served at /assets.
if ASSETS_DIR.exists():
    app.mount("/assets", StaticFiles(directory=ASSETS_DIR), name="assets")

# The whole web/ directory is also reachable under /static.
if STATIC_DIR.exists():
    app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
+
+
@app.get("/api/health")
async def health_check():
    """Health check endpoint for monitoring and diagnostics."""
    report = {"status": "healthy", "service": "gitpilot-backend"}
    return report
+
+
@app.get("/healthz")
async def healthz():
    """Health check endpoint (Render/Kubernetes standard)."""
    report = {"status": "healthy", "service": "gitpilot-backend"}
    return report
+
+
@app.get("/", include_in_schema=False)
async def index():
    """Serve the React App entry point."""
    index_file = STATIC_DIR / "index.html"
    if not index_file.exists():
        return JSONResponse(
            {"message": "GitPilot UI not built. The static files directory is missing."},
            status_code=500,
        )
    return FileResponse(index_file)
+
+
@app.get("/{full_path:path}", include_in_schema=False)
async def catch_all_spa_routes(full_path: str):
    """
    Catch-all route to serve index.html for frontend routing.
    Excludes '/api' paths to ensure genuine API 404s are returned as JSON.
    """
    if full_path.startswith("api/"):
        return JSONResponse({"detail": "Not Found"}, status_code=404)

    spa_entry = STATIC_DIR / "index.html"
    if not spa_entry.exists():
        return JSONResponse(
            {"message": "GitPilot UI not built. The static files directory is missing."},
            status_code=500,
        )
    return FileResponse(spa_entry)
diff --git a/gitpilot/a2a_adapter.py b/gitpilot/a2a_adapter.py
new file mode 100644
index 0000000000000000000000000000000000000000..46ca4659db601f09477b71755cca8309c4f678b0
--- /dev/null
+++ b/gitpilot/a2a_adapter.py
@@ -0,0 +1,560 @@
"""Optional A2A adapter for GitPilot (MCP ContextForge compatible).

This module is feature-flagged. Nothing changes in GitPilot unless the main app
mounts this router when GITPILOT_ENABLE_A2A=true.

Supported protocols
- JSON-RPC 2.0 (preferred)
- ContextForge custom A2A envelope (fallback)

Security model (recommended)
- Gateway injects a shared secret:
    X-A2A-Secret: <shared-secret>
  or
    Authorization: Bearer <shared-secret>

- GitHub token (if needed) should be provided via:
    X-Github-Token: <github-token>
  (avoid passing tokens in JSON bodies to reduce leak risk in logs)

Environment
- GITPILOT_A2A_REQUIRE_AUTH=true
- GITPILOT_A2A_SHARED_SECRET=<shared-secret>
- GITPILOT_A2A_MAX_BODY_MB=2
- GITPILOT_A2A_ALLOW_GITHUB_TOKEN_IN_PARAMS=false
"""
+
+from __future__ import annotations
+
+import os
+import time
+import uuid
+from typing import Any, Dict, Optional, Tuple
+
+from fastapi import APIRouter, Header, HTTPException, Request
+from fastapi.responses import JSONResponse
+
+from .agentic import PlanResult, execute_plan, generate_plan, dispatch_request
+from .github_api import get_file, get_repo_tree, github_request, put_file
+from . import github_issues
+from . import github_pulls
+from . import github_search
+
+router = APIRouter(tags=["a2a"])
+
+
+def _env_bool(name: str, default: bool) -> bool:
+ raw = os.getenv(name)
+ if raw is None:
+ return default
+ return raw.strip().lower() in {"1", "true", "yes", "y", "on"}
+
+
+def _env_int(name: str, default: int) -> int:
+ raw = os.getenv(name)
+ if raw is None:
+ return default
+ try:
+ return int(raw.strip())
+ except Exception:
+ return default
+
+
+def _extract_bearer(value: Optional[str]) -> Optional[str]:
+ if not value:
+ return None
+ if value.startswith("Bearer "):
+ return value[7:]
+ if value.startswith("token "):
+ return value[6:]
+ return value
+
+
+def _get_trace_id(x_request_id: Optional[str]) -> str:
+ return (x_request_id or "").strip() or str(uuid.uuid4())
+
+
+def _require_gateway_secret(authorization: Optional[str], x_a2a_secret: Optional[str]) -> None:
+ require_auth = _env_bool("GITPILOT_A2A_REQUIRE_AUTH", True)
+ if not require_auth:
+ return
+
+ expected = os.getenv("GITPILOT_A2A_SHARED_SECRET", "").strip()
+ if not expected:
+ raise HTTPException(
+ status_code=500,
+ detail="A2A is enabled but GITPILOT_A2A_SHARED_SECRET is not set",
+ )
+
+ candidate = _extract_bearer(authorization) or (x_a2a_secret or "").strip()
+ if not candidate or candidate != expected:
+ raise HTTPException(status_code=401, detail="Unauthorized")
+
+
+def _split_full_name(repo_full_name: str) -> Tuple[str, str]:
+ if not repo_full_name or "/" not in repo_full_name:
+ raise HTTPException(status_code=400, detail="repo_full_name must be 'owner/repo'")
+ owner, repo = repo_full_name.split("/", 1)
+ owner, repo = owner.strip(), repo.strip()
+ if not owner or not repo:
+ raise HTTPException(status_code=400, detail="repo_full_name must be 'owner/repo'")
+ return owner, repo
+
+
+def _jsonrpc_error(id_value: Any, code: int, message: str, data: Any = None) -> Dict[str, Any]:
+ err: Dict[str, Any] = {"code": code, "message": message}
+ if data is not None:
+ err["data"] = data
+ return {"jsonrpc": "2.0", "error": err, "id": id_value}
+
+
+def _jsonrpc_result(id_value: Any, result: Any) -> Dict[str, Any]:
+ return {"jsonrpc": "2.0", "result": result, "id": id_value}
+
+
async def _dispatch(method: str, params: Dict[str, Any], github_token: Optional[str]) -> Any:
    """Route a dotted A2A method name to the matching GitPilot action.

    Args:
        method: Dotted method name, e.g. "repo.read", "issue.create".
        params: Method parameters as a plain dict.
        github_token: Optional GitHub token forwarded to every GitHub call.

    Returns:
        A JSON-serializable result for the method.

    Raises:
        HTTPException: 400 for missing/invalid params, 404 for unknown methods.
    """
    if method == "repo.connect":
        repo_full_name = params.get("repo_full_name")
        owner, repo = _split_full_name(str(repo_full_name))
        info = await github_request(f"/repos/{owner}/{repo}", token=github_token)
        # Return a trimmed repo summary rather than the full GitHub payload.
        return {
            "repo": {
                "id": info.get("id"),
                "full_name": info.get("full_name"),
                "private": info.get("private"),
                "html_url": info.get("html_url"),
            },
            "default_branch": info.get("default_branch"),
            "permissions": info.get("permissions"),
        }

    if method == "repo.tree":
        repo_full_name = params.get("repo_full_name")
        ref = (params.get("ref") or "").strip() or "HEAD"
        owner, repo = _split_full_name(str(repo_full_name))
        tree = await get_repo_tree(owner, repo, token=github_token, ref=ref)
        return {"entries": tree, "ref": ref}

    if method == "repo.read":
        repo_full_name = params.get("repo_full_name")
        path = params.get("path")
        if not path:
            raise HTTPException(status_code=400, detail="Missing required param: path")
        owner, repo = _split_full_name(str(repo_full_name))
        # NOTE: current get_file() reads from default branch/ref in this repo.
        # You can extend github_api.get_file to accept ref and pass it here later.
        content = await get_file(owner, repo, str(path), token=github_token)
        return {"path": str(path), "content": content, "encoding": "utf-8"}

    if method == "repo.write":
        repo_full_name = params.get("repo_full_name")
        path = params.get("path")
        content = params.get("content")
        message = params.get("message") or "Update via GitPilot A2A"
        branch = params.get("branch") or params.get("branch_name")
        if not path:
            raise HTTPException(status_code=400, detail="Missing required param: path")
        if content is None:
            raise HTTPException(status_code=400, detail="Missing required param: content")
        owner, repo = _split_full_name(str(repo_full_name))
        result = await put_file(
            owner,
            repo,
            str(path),
            str(content),
            str(message),
            token=github_token,
            branch=branch,
        )
        return result

    if method == "plan.generate":
        repo_full_name = params.get("repo_full_name")
        goal = params.get("goal")
        branch_name = params.get("branch") or params.get("branch_name")
        if not goal:
            raise HTTPException(status_code=400, detail="Missing required param: goal")
        if not repo_full_name:
            raise HTTPException(status_code=400, detail="Missing required param: repo_full_name")
        plan = await generate_plan(str(goal), str(repo_full_name), token=github_token, branch_name=branch_name)
        # Serialize via pydantic when available; otherwise return as-is.
        return plan.model_dump() if hasattr(plan, "model_dump") else plan

    if method == "plan.execute":
        repo_full_name = params.get("repo_full_name")
        branch_name = params.get("branch") or params.get("branch_name")
        plan_raw = params.get("plan")
        if not repo_full_name:
            raise HTTPException(status_code=400, detail="Missing required param: repo_full_name")
        if plan_raw is None:
            raise HTTPException(status_code=400, detail="Missing required param: plan")
        # Accept an already-constructed PlanResult or a raw dict from JSON.
        if isinstance(plan_raw, PlanResult):
            plan_obj = plan_raw
        else:
            try:
                plan_obj = PlanResult.model_validate(plan_raw)  # pydantic v2
            except Exception:
                plan_obj = PlanResult.parse_obj(plan_raw)  # pydantic v1
        result = await execute_plan(plan_obj, str(repo_full_name), token=github_token, branch_name=branch_name)
        return result

    if method == "repo.search":
        query = params.get("query")
        if not query:
            raise HTTPException(status_code=400, detail="Missing required param: query")
        result = await github_request(
            "/search/repositories",
            params={"q": str(query), "per_page": 20},
            token=github_token,
        )
        # Defensive: tolerate a non-dict response shape.
        items = (result or {}).get("items", []) if isinstance(result, dict) else []
        return {
            "repos": [
                {
                    "full_name": i.get("full_name"),
                    "private": i.get("private"),
                    "html_url": i.get("html_url"),
                    "description": i.get("description"),
                    "default_branch": i.get("default_branch"),
                }
                for i in items
            ]
        }

    # --- v2 methods: issues, pulls, search, chat --------------------------

    if method == "issue.list":
        repo_full_name = params.get("repo_full_name")
        owner, repo = _split_full_name(str(repo_full_name))
        issues = await github_issues.list_issues(
            owner, repo, state=params.get("state", "open"),
            labels=params.get("labels"), per_page=params.get("per_page", 30),
            token=github_token,
        )
        return {"issues": issues}

    if method == "issue.get":
        repo_full_name = params.get("repo_full_name")
        issue_number = params.get("issue_number")
        if not issue_number:
            raise HTTPException(status_code=400, detail="Missing required param: issue_number")
        owner, repo = _split_full_name(str(repo_full_name))
        return await github_issues.get_issue(owner, repo, int(issue_number), token=github_token)

    if method == "issue.create":
        repo_full_name = params.get("repo_full_name")
        title = params.get("title")
        if not title:
            raise HTTPException(status_code=400, detail="Missing required param: title")
        owner, repo = _split_full_name(str(repo_full_name))
        return await github_issues.create_issue(
            owner, repo, str(title),
            body=params.get("body"), labels=params.get("labels"),
            assignees=params.get("assignees"), token=github_token,
        )

    if method == "issue.update":
        repo_full_name = params.get("repo_full_name")
        issue_number = params.get("issue_number")
        if not issue_number:
            raise HTTPException(status_code=400, detail="Missing required param: issue_number")
        owner, repo = _split_full_name(str(repo_full_name))
        return await github_issues.update_issue(
            owner, repo, int(issue_number),
            title=params.get("title"), body=params.get("body"),
            state=params.get("state"), labels=params.get("labels"),
            assignees=params.get("assignees"), token=github_token,
        )

    if method == "issue.comment":
        repo_full_name = params.get("repo_full_name")
        issue_number = params.get("issue_number")
        body = params.get("body")
        if not issue_number:
            raise HTTPException(status_code=400, detail="Missing required param: issue_number")
        if not body:
            raise HTTPException(status_code=400, detail="Missing required param: body")
        owner, repo = _split_full_name(str(repo_full_name))
        return await github_issues.add_issue_comment(
            owner, repo, int(issue_number), str(body), token=github_token,
        )

    if method == "pr.list":
        repo_full_name = params.get("repo_full_name")
        owner, repo = _split_full_name(str(repo_full_name))
        return await github_pulls.list_pull_requests(
            owner, repo, state=params.get("state", "open"),
            per_page=params.get("per_page", 30), token=github_token,
        )

    if method == "pr.create":
        repo_full_name = params.get("repo_full_name")
        title = params.get("title")
        head = params.get("head")
        base = params.get("base")
        if not title or not head or not base:
            raise HTTPException(status_code=400, detail="Missing required params: title, head, base")
        owner, repo = _split_full_name(str(repo_full_name))
        return await github_pulls.create_pull_request(
            owner, repo, title=str(title), head=str(head), base=str(base),
            body=params.get("body"), token=github_token,
        )

    if method == "pr.merge":
        repo_full_name = params.get("repo_full_name")
        pull_number = params.get("pull_number")
        if not pull_number:
            raise HTTPException(status_code=400, detail="Missing required param: pull_number")
        owner, repo = _split_full_name(str(repo_full_name))
        return await github_pulls.merge_pull_request(
            owner, repo, int(pull_number),
            merge_method=params.get("merge_method", "merge"),
            token=github_token,
        )

    if method == "search.code":
        query = params.get("query")
        if not query:
            raise HTTPException(status_code=400, detail="Missing required param: query")
        return await github_search.search_code(
            str(query), owner=params.get("owner"), repo=params.get("repo"),
            language=params.get("language"), per_page=params.get("per_page", 20),
            token=github_token,
        )

    if method == "search.issues":
        query = params.get("query")
        if not query:
            raise HTTPException(status_code=400, detail="Missing required param: query")
        return await github_search.search_issues(
            str(query), owner=params.get("owner"), repo=params.get("repo"),
            state=params.get("state"), label=params.get("label"),
            per_page=params.get("per_page", 20), token=github_token,
        )

    if method == "search.users":
        query = params.get("query")
        if not query:
            raise HTTPException(status_code=400, detail="Missing required param: query")
        return await github_search.search_users(
            str(query), type_filter=params.get("type"),
            location=params.get("location"), language=params.get("language"),
            per_page=params.get("per_page", 20), token=github_token,
        )

    if method == "chat.message":
        repo_full_name = params.get("repo_full_name")
        message = params.get("message")
        if not message:
            raise HTTPException(status_code=400, detail="Missing required param: message")
        if not repo_full_name:
            raise HTTPException(status_code=400, detail="Missing required param: repo_full_name")
        return await dispatch_request(
            str(message), str(repo_full_name),
            token=github_token,
            branch_name=params.get("branch") or params.get("branch_name"),
        )

    # No handler matched the requested method name.
    raise HTTPException(status_code=404, detail=f"Unknown method: {method}")
+
+
+@router.get("/a2a/health")
+async def a2a_health() -> Dict[str, Any]:
+ return {"status": "ok", "ts": int(time.time())}
+
+
+@router.get("/a2a/manifest")
+async def a2a_manifest() -> Dict[str, Any]:
+ # Best-effort schemas (kept intentionally simple and stable)
+ return {
+ "name": "gitpilot",
+ "a2a_version": "1.0",
+ "protocols": ["jsonrpc-2.0", "a2a-envelope-1.0"],
+ "auth": {"type": "shared_secret", "header": "X-A2A-Secret"},
+ "rate_limits": {"hint": "apply gateway rate limiting; server enforces body size"},
+ "methods": {
+ "repo.connect": {
+ "params": {"repo_full_name": "string"},
+ "result": {"repo": "object", "default_branch": "string", "permissions": "object?"},
+ },
+ "repo.tree": {
+ "params": {"repo_full_name": "string", "ref": "string?"},
+ "result": {"entries": "array", "ref": "string"},
+ },
+ "repo.read": {
+ "params": {"repo_full_name": "string", "path": "string"},
+ "result": {"path": "string", "content": "string"},
+ },
+ "repo.write": {
+ "params": {
+ "repo_full_name": "string",
+ "path": "string",
+ "content": "string",
+ "message": "string?",
+ "branch": "string?",
+ },
+ "result": "object",
+ },
+ "plan.generate": {
+ "params": {"repo_full_name": "string", "goal": "string", "branch": "string?"},
+ "result": "PlanResult",
+ },
+ "plan.execute": {
+ "params": {"repo_full_name": "string", "plan": "PlanResult", "branch": "string?"},
+ "result": "object",
+ },
+ "repo.search": {
+ "params": {"query": "string"},
+ "result": {"repos": "array"},
+ },
+ # v2 methods
+ "issue.list": {
+ "params": {"repo_full_name": "string", "state": "string?", "labels": "string?"},
+ "result": {"issues": "array"},
+ },
+ "issue.get": {
+ "params": {"repo_full_name": "string", "issue_number": "integer"},
+ "result": "object",
+ },
+ "issue.create": {
+ "params": {"repo_full_name": "string", "title": "string", "body": "string?", "labels": "array?", "assignees": "array?"},
+ "result": "object",
+ },
+ "issue.update": {
+ "params": {"repo_full_name": "string", "issue_number": "integer", "title": "string?", "body": "string?", "state": "string?"},
+ "result": "object",
+ },
+ "issue.comment": {
+ "params": {"repo_full_name": "string", "issue_number": "integer", "body": "string"},
+ "result": "object",
+ },
+ "pr.list": {
+ "params": {"repo_full_name": "string", "state": "string?"},
+ "result": "array",
+ },
+ "pr.create": {
+ "params": {"repo_full_name": "string", "title": "string", "head": "string", "base": "string", "body": "string?"},
+ "result": "object",
+ },
+ "pr.merge": {
+ "params": {"repo_full_name": "string", "pull_number": "integer", "merge_method": "string?"},
+ "result": "object",
+ },
+ "search.code": {
+ "params": {"query": "string", "owner": "string?", "repo": "string?", "language": "string?"},
+ "result": {"total_count": "integer", "items": "array"},
+ },
+ "search.issues": {
+ "params": {"query": "string", "owner": "string?", "repo": "string?", "state": "string?"},
+ "result": {"total_count": "integer", "items": "array"},
+ },
+ "search.users": {
+ "params": {"query": "string", "type": "string?", "location": "string?"},
+ "result": {"total_count": "integer", "items": "array"},
+ },
+ "chat.message": {
+ "params": {"repo_full_name": "string", "message": "string", "branch": "string?"},
+ "result": "object",
+ },
+ },
+ }
+
+
+async def _handle_invoke(
+ request: Request,
+ authorization: Optional[str],
+ x_a2a_secret: Optional[str],
+ x_github_token: Optional[str],
+ x_request_id: Optional[str],
+) -> JSONResponse:
+ trace_id = _get_trace_id(x_request_id)
+ _require_gateway_secret(authorization=authorization, x_a2a_secret=x_a2a_secret)
+
+ # Body size guard (helps protect from abuse)
+ max_mb = _env_int("GITPILOT_A2A_MAX_BODY_MB", 2)
+ cl = request.headers.get("content-length")
+ if cl:
+ try:
+ if int(cl) > max_mb * 1024 * 1024:
+ raise HTTPException(status_code=413, detail="Request entity too large")
+ except ValueError:
+ pass
+
+ started = time.time()
+ payload = await request.json()
+
+ github_token = _extract_bearer(x_github_token) or None
+ if not github_token:
+ github_token = _extract_bearer(authorization)
+
+ # JSON-RPC mode
+ if isinstance(payload, dict) and payload.get("jsonrpc") == "2.0" and "method" in payload:
+ rpc_id = payload.get("id")
+ method = payload.get("method")
+ params = payload.get("params") or {}
+ if not isinstance(params, dict):
+ return JSONResponse(_jsonrpc_error(rpc_id, -32602, "Invalid params"), status_code=400)
+
+ allow_in_params = _env_bool("GITPILOT_A2A_ALLOW_GITHUB_TOKEN_IN_PARAMS", False)
+ if allow_in_params and not github_token:
+ github_token = _extract_bearer(params.get("github_token"))
+
+ try:
+ result = await _dispatch(str(method), params, github_token)
+ resp = _jsonrpc_result(rpc_id, result)
+ return JSONResponse(resp, headers={"X-Trace-Id": trace_id})
+ except HTTPException as e:
+ resp = _jsonrpc_error(rpc_id, e.status_code, str(e.detail), {"trace_id": trace_id})
+ return JSONResponse(resp, status_code=200, headers={"X-Trace-Id": trace_id})
+ except Exception as e:
+ resp = _jsonrpc_error(rpc_id, -32000, "Server error", {"trace_id": trace_id, "error": str(e)})
+ return JSONResponse(resp, status_code=200, headers={"X-Trace-Id": trace_id})
+ finally:
+ _ = time.time() - started
+
+ # Custom envelope fallback
+ if isinstance(payload, dict) and payload.get("interaction_type"):
+ interaction_type = str(payload.get("interaction_type"))
+ parameters = payload.get("parameters") or {}
+ if not isinstance(parameters, dict):
+ raise HTTPException(status_code=400, detail="Invalid parameters")
+
+ if interaction_type == "query":
+ repo_full_name = parameters.get("repo_full_name")
+ goal = parameters.get("query") or parameters.get("goal")
+ params = {
+ "repo_full_name": repo_full_name,
+ "goal": goal,
+ "branch": parameters.get("branch") or parameters.get("branch_name"),
+ }
+ result = await _dispatch("plan.generate", params, github_token)
+ return JSONResponse(
+ {"response": result, "protocol_version": payload.get("protocol_version", "1.0")},
+ headers={"X-Trace-Id": trace_id},
+ )
+
+ raise HTTPException(status_code=404, detail=f"Unsupported interaction_type: {interaction_type}")
+
+ raise HTTPException(status_code=400, detail=f"Invalid A2A payload (trace_id={trace_id})")
+
+
+@router.post("/a2a/invoke")
+async def a2a_invoke(
+ request: Request,
+ authorization: Optional[str] = Header(None),
+ x_a2a_secret: Optional[str] = Header(None, alias="X-A2A-Secret"),
+ x_github_token: Optional[str] = Header(None, alias="X-Github-Token"),
+ x_request_id: Optional[str] = Header(None, alias="X-Request-Id"),
+) -> JSONResponse:
+ return await _handle_invoke(request, authorization, x_a2a_secret, x_github_token, x_request_id)
+
+
+@router.post("/a2a/v1/invoke")
+async def a2a_v1_invoke(
+ request: Request,
+ authorization: Optional[str] = Header(None),
+ x_a2a_secret: Optional[str] = Header(None, alias="X-A2A-Secret"),
+ x_github_token: Optional[str] = Header(None, alias="X-Github-Token"),
+ x_request_id: Optional[str] = Header(None, alias="X-Request-Id"),
+) -> JSONResponse:
+ # Alias for versioned clients. Keep behavior identical to /a2a/invoke.
+ return await _handle_invoke(request, authorization, x_a2a_secret, x_github_token, x_request_id)
diff --git a/gitpilot/agent_router.py b/gitpilot/agent_router.py
new file mode 100644
index 0000000000000000000000000000000000000000..4639bf2c47a9fb100c9b296e093e0fbcf45daade
--- /dev/null
+++ b/gitpilot/agent_router.py
@@ -0,0 +1,284 @@
+# gitpilot/agent_router.py
+"""Intelligent Agent Router for GitPilot.
+
+Classifies user requests and delegates them to the appropriate specialised
+agent (or a pipeline of agents). The router itself does **not** use an LLM;
+it relies on lightweight keyword / pattern matching so that routing is
+instantaneous and deterministic.
+
+The router returns a *WorkflowPlan* describing which agents should run and
+in what order. The actual agent execution is handled by the orchestrator
+in ``agentic.py``.
+"""
+from __future__ import annotations
+
+import re
+from dataclasses import dataclass, field
+from enum import Enum
+from typing import List, Optional
+
+
+class AgentType(str, Enum):
+    """Available specialised agents.
+
+    String-valued so members serialise directly in JSON payloads.
+    """
+
+    EXPLORER = "explorer"            # reads the repo to build context
+    PLANNER = "planner"              # produces a structured change plan
+    CODE_WRITER = "code_writer"      # applies planned code changes
+    CODE_REVIEWER = "code_reviewer"  # reviews code quality / PR diffs
+    ISSUE_MANAGER = "issue_manager"  # GitHub issue CRUD + comments
+    PR_MANAGER = "pr_manager"        # pull request create/list/merge/review
+    SEARCH = "search"                # code / user / repository search
+    LEARNING = "learning"            # guidance and how-to answers
+    LOCAL_EDITOR = "local_editor"  # Phase 1: local file editing + shell
+    TERMINAL = "terminal"  # Phase 1: dedicated terminal agent
+
+
+class RequestCategory(str, Enum):
+    """High-level intent category inferred from the user request.
+
+    One category per routing branch in :func:`route`; string-valued so it
+    serialises directly in JSON payloads.
+    """
+
+    PLAN_EXECUTE = "plan_execute"  # Existing explore -> plan -> execute workflow
+    ISSUE_MANAGEMENT = "issue_management"
+    PR_MANAGEMENT = "pr_management"
+    CODE_SEARCH = "code_search"
+    CODE_REVIEW = "code_review"
+    LEARNING = "learning"
+    CONVERSATIONAL = "conversational"  # Free-form chat / Q&A about the repo
+    LOCAL_EDIT = "local_edit"  # Phase 1: direct file editing with verification
+    TERMINAL = "terminal"  # Phase 1: shell command execution
+
+
+@dataclass
+class WorkflowPlan:
+    """Describes which agents to invoke and in what order."""
+
+    # Intent category the request was classified into.
+    category: RequestCategory
+    # Agents to run, in execution order.
+    agents: List[AgentType]
+    # Short human-readable summary of what this plan does.
+    description: str
+    # False for requests that don't need a checked-out repo (e.g. user search).
+    requires_repo_context: bool = True
+    # If the request mentions a specific issue/PR number, capture it.
+    entity_number: Optional[int] = None
+    # Additional metadata extracted from the request.
+    metadata: dict = field(default_factory=dict)
+
+
+# ---------------------------------------------------------------------------
+# Pattern definitions (order matters -- first match wins)
+# ---------------------------------------------------------------------------
+# Each regex pairs an action verb with a subject noun ("create ... issue"),
+# so plain mentions of a noun alone do not trigger a branch.
+
+_ISSUE_CREATE_RE = re.compile(
+    r"\b(create|open|new|file|add)\b.*\bissue\b", re.IGNORECASE
+)
+_ISSUE_UPDATE_RE = re.compile(
+    r"\b(update|modify|edit|change|close|reopen|label|assign|milestone)\b.*\bissue\b",
+    re.IGNORECASE,
+)
+_ISSUE_LIST_RE = re.compile(
+    r"\b(list|show|get|find|search)\b.*\bissues?\b", re.IGNORECASE
+)
+_ISSUE_COMMENT_RE = re.compile(
+    r"\b(comment|reply|respond)\b.*\bissue\b", re.IGNORECASE
+)
+# "#123"-style references (also used by _extract_issue_number).
+_ISSUE_NUMBER_RE = re.compile(r"#(\d+)")
+
+_PR_CREATE_RE = re.compile(
+    r"\b(create|open|new|make)\b.*\b(pull request|pr|pull)\b", re.IGNORECASE
+)
+_PR_MERGE_RE = re.compile(
+    r"\b(merge|squash|rebase)\b.*\b(pull request|pr|pull)\b", re.IGNORECASE
+)
+_PR_REVIEW_RE = re.compile(
+    r"\b(review|approve|request changes)\b.*\b(pull request|pr|pull)\b",
+    re.IGNORECASE,
+)
+_PR_LIST_RE = re.compile(
+    r"\b(list|show|get|find)\b.*\b(pull requests?|prs?|pulls?)\b", re.IGNORECASE
+)
+
+_SEARCH_CODE_RE = re.compile(
+    r"\b(search|find|locate|grep|look for)\b.*\b(code|function|class|symbol|pattern|file)\b",
+    re.IGNORECASE,
+)
+_SEARCH_USER_RE = re.compile(
+    r"\b(search|find|who)\b.*\b(user|developer|org|organization|contributor)\b",
+    re.IGNORECASE,
+)
+_SEARCH_REPO_RE = re.compile(
+    r"\b(search|find|discover)\b.*\b(repo|repository|project)\b", re.IGNORECASE
+)
+
+# "run"/"execute" followed by a tool or task name => shell execution intent.
+_TERMINAL_RE = re.compile(
+    r"\b(run|execute|launch)\b.*\b(command|test|tests|script|build|lint|npm|pip|make|docker|pytest|cargo|go)\b",
+    re.IGNORECASE,
+)
+_LOCAL_EDIT_RE = re.compile(
+    r"\b(edit|modify|change|update|fix|write|rewrite|patch)\b.*\b(file|code|function|class|method|module|line|lines)\b",
+    re.IGNORECASE,
+)
+
+_REVIEW_RE = re.compile(
+    r"\b(review|analyze|audit|check|inspect)\b.*\b(code|quality|security|performance)\b",
+    re.IGNORECASE,
+)
+
+# Question-style phrasing ("how do I", "what is") => guidance request.
+_LEARNING_RE = re.compile(
+    r"\b(how (do|can|to)|explain|what is|guide|tutorial|best practice|help with)\b",
+    re.IGNORECASE,
+)
+# GitHub platform topics that also route to the learning agent.
+_GITHUB_TOPICS_RE = re.compile(
+    r"\b(actions?|workflow|ci/?cd|pages?|packages?|discussions?|authentication|deploy|release)\b",
+    re.IGNORECASE,
+)
+
+
+def _extract_issue_number(text: str) -> Optional[int]:
+ m = _ISSUE_NUMBER_RE.search(text)
+ if m:
+ return int(m.group(1))
+ # Also try "issue 42" / "issue number 42"
+ m2 = re.search(r"\bissue\s*(?:number\s*)?(\d+)\b", text, re.IGNORECASE)
+ return int(m2.group(1)) if m2 else None
+
+
+def _extract_pr_number(text: str) -> Optional[int]:
+ m = re.search(r"\b(?:pr|pull request|pull)\s*#?(\d+)\b", text, re.IGNORECASE)
+ return int(m.group(1)) if m else None
+
+
+# ---------------------------------------------------------------------------
+# Public API
+# ---------------------------------------------------------------------------
+
+def route(user_request: str) -> WorkflowPlan:
+    """Classify *user_request* and return a ``WorkflowPlan``.
+
+    Matching is deterministic and first-match-wins, mirroring the branch
+    order below: issues, PRs, search, terminal, local edits, code review,
+    learning. Anything unmatched falls through to the generic
+    explore -> plan -> execute workflow, so reordering branches changes
+    routing behaviour.
+    """
+    text = user_request.strip()
+
+    # --- Issue management ------------------------------------------------
+    if _ISSUE_CREATE_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.ISSUE_MANAGEMENT,
+            agents=[AgentType.ISSUE_MANAGER],
+            description="Create a new GitHub issue",
+            entity_number=_extract_issue_number(text),
+            metadata={"action": "create"},
+        )
+    if _ISSUE_COMMENT_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.ISSUE_MANAGEMENT,
+            agents=[AgentType.ISSUE_MANAGER],
+            description="Comment on an issue",
+            entity_number=_extract_issue_number(text),
+            metadata={"action": "comment"},
+        )
+    if _ISSUE_UPDATE_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.ISSUE_MANAGEMENT,
+            agents=[AgentType.ISSUE_MANAGER],
+            description="Update an existing issue",
+            entity_number=_extract_issue_number(text),
+            metadata={"action": "update"},
+        )
+    if _ISSUE_LIST_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.ISSUE_MANAGEMENT,
+            agents=[AgentType.ISSUE_MANAGER],
+            description="List or search issues",
+            metadata={"action": "list"},
+        )
+
+    # --- PR management ---------------------------------------------------
+    if _PR_CREATE_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.PR_MANAGEMENT,
+            agents=[AgentType.PR_MANAGER],
+            description="Create a pull request",
+            metadata={"action": "create"},
+        )
+    if _PR_MERGE_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.PR_MANAGEMENT,
+            agents=[AgentType.PR_MANAGER],
+            description="Merge a pull request",
+            entity_number=_extract_pr_number(text),
+            metadata={"action": "merge"},
+        )
+    if _PR_REVIEW_RE.search(text):
+        # Reviewer runs first so the PR manager can act on its findings.
+        return WorkflowPlan(
+            category=RequestCategory.PR_MANAGEMENT,
+            agents=[AgentType.CODE_REVIEWER, AgentType.PR_MANAGER],
+            description="Review a pull request",
+            entity_number=_extract_pr_number(text),
+            metadata={"action": "review"},
+        )
+    if _PR_LIST_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.PR_MANAGEMENT,
+            agents=[AgentType.PR_MANAGER],
+            description="List pull requests",
+            metadata={"action": "list"},
+        )
+
+    # --- Code search -----------------------------------------------------
+    # User/repo search need no checked-out repository; code search does.
+    if _SEARCH_USER_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.CODE_SEARCH,
+            agents=[AgentType.SEARCH],
+            description="Search for GitHub users or organisations",
+            requires_repo_context=False,
+            metadata={"search_type": "users"},
+        )
+    if _SEARCH_REPO_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.CODE_SEARCH,
+            agents=[AgentType.SEARCH],
+            description="Search for repositories",
+            requires_repo_context=False,
+            metadata={"search_type": "repositories"},
+        )
+    if _SEARCH_CODE_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.CODE_SEARCH,
+            agents=[AgentType.SEARCH],
+            description="Search for code in the repository",
+            metadata={"search_type": "code"},
+        )
+
+    # --- Terminal / shell commands ----------------------------------------
+    if _TERMINAL_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.TERMINAL,
+            agents=[AgentType.TERMINAL],
+            description="Run shell commands in the workspace",
+            metadata={"action": "execute"},
+        )
+
+    # --- Local file editing -----------------------------------------------
+    if _LOCAL_EDIT_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.LOCAL_EDIT,
+            agents=[AgentType.LOCAL_EDITOR],
+            description="Edit files directly in the local workspace",
+        )
+
+    # --- Code review -----------------------------------------------------
+    if _REVIEW_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.CODE_REVIEW,
+            agents=[AgentType.EXPLORER, AgentType.CODE_REVIEWER],
+            description="Analyse code quality and suggest improvements",
+        )
+
+    # --- Learning & guidance ---------------------------------------------
+    if _LEARNING_RE.search(text) or _GITHUB_TOPICS_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.LEARNING,
+            agents=[AgentType.LEARNING],
+            description="Provide guidance on GitHub features or best practices",
+            requires_repo_context=False,
+        )
+
+    # --- Default: existing plan+execute workflow -------------------------
+    return WorkflowPlan(
+        category=RequestCategory.PLAN_EXECUTE,
+        agents=[AgentType.EXPLORER, AgentType.PLANNER, AgentType.CODE_WRITER],
+        description="Explore repository, create plan, and execute changes",
+    )
diff --git a/gitpilot/agent_teams.py b/gitpilot/agent_teams.py
new file mode 100644
index 0000000000000000000000000000000000000000..354e3a0578e8d398299d716efe8442db1dc593e6
--- /dev/null
+++ b/gitpilot/agent_teams.py
@@ -0,0 +1,263 @@
+# gitpilot/agent_teams.py
+"""Parallel multi-agent execution on git worktrees.
+
+Coordinates multiple agents working on independent subtasks simultaneously.
+Each agent operates on its own git worktree to avoid conflicts, and a lead
+agent reviews and merges the results.
+
+Architecture inspired by the MapReduce pattern and the *divide-and-conquer*
+approach from distributed systems research (Dean & Ghemawat, 2004).
+
+Workflow::
+
+ User: "Add authentication to the API"
+ Lead agent splits β 4 subtasks
+ ββββββββββββββ ββββββββββββββ ββββββββββββββ ββββββββββββββ
+ β Agent A: β β Agent B: β β Agent C: β β Agent D: β
+ β User model β β Middleware β β Endpoints β β Tests β
+ β worktree/a β β worktree/b β β worktree/c β β worktree/d β
+ βββββββ¬βββββββ βββββββ¬βββββββ βββββββ¬βββββββ βββββββ¬βββββββ
+ βββββββββββββ΄ββββββββββββ΄ββββββββββββ
+ β
+ Lead reviews & merges
+"""
+from __future__ import annotations
+
+import asyncio
+import logging
+import uuid
+from dataclasses import dataclass, field
+from datetime import datetime, timezone
+from enum import Enum
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+logger = logging.getLogger(__name__)
+
+
+class SubTaskStatus(str, Enum):
+    """Lifecycle states of a :class:`SubTask`."""
+    PENDING = "pending"      # not started yet
+    RUNNING = "running"      # currently executing
+    COMPLETED = "completed"  # finished successfully
+    FAILED = "failed"        # raised; details are in SubTask.error
+
+
+@dataclass
+class SubTask:
+    """A single subtask to be executed by one agent."""
+
+    # Short random id; also used to name the worktree and branch.
+    id: str = field(default_factory=lambda: uuid.uuid4().hex[:8])
+    title: str = ""
+    description: str = ""
+    assigned_agent: str = ""
+    # Files this subtask intends to touch; used for conflict detection.
+    files: List[str] = field(default_factory=list)
+    status: SubTaskStatus = SubTaskStatus.PENDING
+    result: str = ""
+    error: Optional[str] = None
+    # Set by AgentTeam.setup_worktrees when worktrees are used.
+    worktree_path: Optional[Path] = None
+    # ISO-8601 UTC timestamps set by the executor.
+    started_at: Optional[str] = None
+    completed_at: Optional[str] = None
+
+    def to_dict(self) -> Dict[str, Any]:
+        """JSON-safe view of this subtask.
+
+        ``worktree_path`` is deliberately omitted (Path objects are not
+        JSON-serialisable and the path is a local implementation detail).
+        """
+        return {
+            "id": self.id,
+            "title": self.title,
+            "description": self.description,
+            "assigned_agent": self.assigned_agent,
+            "files": self.files,
+            "status": self.status.value,
+            "result": self.result,
+            "error": self.error,
+            "started_at": self.started_at,
+            "completed_at": self.completed_at,
+        }
+
+
+@dataclass
+class TeamResult:
+    """Aggregated result from parallel agent execution."""
+
+    task: str
+    subtasks: List[SubTask] = field(default_factory=list)
+    merge_status: str = "pending"  # pending | merged | conflict | failed
+    # Paths claimed by more than one subtask (set by merge_results).
+    conflicts: List[str] = field(default_factory=list)
+    summary: str = ""
+
+    @property
+    def all_completed(self) -> bool:
+        """True when every subtask finished successfully (True if empty)."""
+        return all(s.status == SubTaskStatus.COMPLETED for s in self.subtasks)
+
+    @property
+    def any_failed(self) -> bool:
+        """True when at least one subtask failed."""
+        return any(s.status == SubTaskStatus.FAILED for s in self.subtasks)
+
+    def to_dict(self) -> Dict[str, Any]:
+        """JSON-safe view including the derived completion/failure flags."""
+        return {
+            "task": self.task,
+            "subtasks": [s.to_dict() for s in self.subtasks],
+            "merge_status": self.merge_status,
+            "conflicts": self.conflicts,
+            "summary": self.summary,
+            "all_completed": self.all_completed,
+            "any_failed": self.any_failed,
+        }
+
+
+class AgentTeam:
+ """Coordinate multiple agents working in parallel.
+
+ Usage::
+
+ team = AgentTeam(workspace_path=Path("/repo"))
+ subtasks = team.plan_and_split("Add auth system", num_agents=4)
+ result = await team.execute_parallel(subtasks, executor_fn=my_agent_fn)
+ merge = await team.merge_results(result)
+ """
+
+ def __init__(self, workspace_path: Optional[Path] = None) -> None:
+ self.workspace_path = workspace_path
+ self._worktrees: List[Path] = []
+
+ def plan_and_split(
+ self,
+ task: str,
+ num_agents: int = 4,
+ subtask_descriptions: Optional[List[Dict[str, str]]] = None,
+ ) -> List[SubTask]:
+ """Split a task into independent subtasks.
+
+ If ``subtask_descriptions`` is provided, use those directly.
+ Otherwise, create generic subtasks from the task description.
+ """
+ subtasks = []
+
+ if subtask_descriptions:
+ for i, desc in enumerate(subtask_descriptions):
+ subtasks.append(SubTask(
+ title=desc.get("title", f"Subtask {i + 1}"),
+ description=desc.get("description", ""),
+ assigned_agent=desc.get("agent", f"agent_{i}"),
+ files=desc.get("files", []),
+ ))
+ else:
+ # Generic split β the LLM would normally do this
+ for i in range(min(num_agents, 8)):
+ subtasks.append(SubTask(
+ title=f"Part {i + 1} of {task}",
+ description=f"Handle part {i + 1} of the task: {task}",
+ assigned_agent=f"agent_{i}",
+ ))
+
+ return subtasks
+
+ async def execute_parallel(
+ self,
+ subtasks: List[SubTask],
+ executor_fn: Optional[Any] = None,
+ ) -> TeamResult:
+ """Execute subtasks in parallel.
+
+ ``executor_fn`` is an async callable(SubTask) -> str that runs the
+ agent logic for each subtask. If not provided, subtasks are marked
+ as completed with a placeholder result.
+ """
+ result = TeamResult(task="parallel_execution", subtasks=subtasks)
+
+ async def _run_subtask(subtask: SubTask) -> None:
+ subtask.status = SubTaskStatus.RUNNING
+ subtask.started_at = datetime.now(timezone.utc).isoformat()
+ try:
+ if executor_fn:
+ subtask.result = await executor_fn(subtask)
+ else:
+ subtask.result = f"Completed: {subtask.title}"
+ subtask.status = SubTaskStatus.COMPLETED
+ except Exception as e:
+ subtask.status = SubTaskStatus.FAILED
+ subtask.error = str(e)
+ logger.error("Subtask %s failed: %s", subtask.id, e)
+ finally:
+ subtask.completed_at = datetime.now(timezone.utc).isoformat()
+
+ # Run all subtasks concurrently
+ await asyncio.gather(*[_run_subtask(st) for st in subtasks])
+
+ return result
+
+ async def merge_results(self, team_result: TeamResult) -> TeamResult:
+ """Merge results from parallel execution.
+
+ In a full implementation, this would:
+ 1. Check for file conflicts between subtask outputs
+ 2. Use git merge-tree for conflict detection
+ 3. Have a lead agent resolve conflicts
+
+ For now, it aggregates results and detects file overlaps.
+ """
+ if team_result.any_failed:
+ team_result.merge_status = "failed"
+ failed = [s for s in team_result.subtasks if s.status == SubTaskStatus.FAILED]
+ team_result.summary = (
+ f"{len(failed)} subtask(s) failed: "
+ + ", ".join(f"{s.title} ({s.error})" for s in failed)
+ )
+ return team_result
+
+ # Detect file conflicts (same file modified by multiple agents)
+ file_owners: Dict[str, List[str]] = {}
+ for st in team_result.subtasks:
+ for f in st.files:
+ file_owners.setdefault(f, []).append(st.assigned_agent)
+
+ conflicts = [f for f, owners in file_owners.items() if len(owners) > 1]
+ team_result.conflicts = conflicts
+
+ if conflicts:
+ team_result.merge_status = "conflict"
+ team_result.summary = (
+ f"File conflicts detected in: {', '.join(conflicts)}. "
+ "Manual review required."
+ )
+ else:
+ team_result.merge_status = "merged"
+ completed = [s for s in team_result.subtasks if s.status == SubTaskStatus.COMPLETED]
+ team_result.summary = (
+ f"All {len(completed)} subtasks completed successfully. "
+ "No file conflicts detected."
+ )
+
+ return team_result
+
+ async def setup_worktrees(self, subtasks: List[SubTask], base_branch: str = "main") -> None:
+ """Create git worktrees for each subtask (requires workspace_path)."""
+ if not self.workspace_path:
+ return
+ for st in subtasks:
+ worktree_name = f"worktree-{st.id}"
+ worktree_path = self.workspace_path / ".worktrees" / worktree_name
+ branch_name = f"team/{st.id}"
+
+ proc = await asyncio.create_subprocess_exec(
+ "git", "worktree", "add", "-b", branch_name,
+ str(worktree_path), base_branch,
+ cwd=str(self.workspace_path),
+ stdout=asyncio.subprocess.PIPE,
+ stderr=asyncio.subprocess.PIPE,
+ )
+ await proc.communicate()
+ st.worktree_path = worktree_path
+ self._worktrees.append(worktree_path)
+
+ async def cleanup_worktrees(self) -> None:
+ """Remove all worktrees created by this team."""
+ if not self.workspace_path:
+ return
+ for wt in self._worktrees:
+ proc = await asyncio.create_subprocess_exec(
+ "git", "worktree", "remove", "--force", str(wt),
+ cwd=str(self.workspace_path),
+ stdout=asyncio.subprocess.PIPE,
+ stderr=asyncio.subprocess.PIPE,
+ )
+ await proc.communicate()
+ self._worktrees.clear()
diff --git a/gitpilot/agent_tools.py b/gitpilot/agent_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..dffef8b222618a3e5b049491f6850c12a68a13e6
--- /dev/null
+++ b/gitpilot/agent_tools.py
@@ -0,0 +1,191 @@
+"""
+Agent Tools for GitPilot Multi-Agent System
+Provides CrewAI-compatible tools for agents to explore and analyze repositories.
+"""
+import asyncio
+import threading
+from typing import Any, Dict, List, Optional, Tuple
+
+from crewai.tools import tool
+
+from .github_api import get_repo_tree, get_file
+
+# Global context for current repository
+# Now includes 'token' to ensure tools can authenticate even in threads
+# AND includes 'branch' to ensure tools operate on the correct ref (not default HEAD/main)
+_current_repo_context: Dict[str, Any] = {}
+_context_lock = threading.RLock()
+
+
+def set_repo_context(
+ owner: str,
+ repo: str,
+ token: Optional[str] = None,
+ branch: Optional[str] = None,
+):
+ """Set the current repository context for tools."""
+ global _current_repo_context
+ with _context_lock:
+ _current_repo_context = {
+ "owner": owner,
+ "repo": repo,
+ "token": token,
+ "branch": branch or "HEAD",
+ }
+
+
+def get_repo_context() -> Tuple[str, str, Optional[str], str]:
+ """Get the current repository context including token and branch."""
+ with _context_lock:
+ owner = _current_repo_context.get("owner", "")
+ repo = _current_repo_context.get("repo", "")
+ token = _current_repo_context.get("token")
+ branch = _current_repo_context.get("branch", "HEAD")
+
+ if not owner or not repo:
+ raise ValueError("Repository context not set. Call set_repo_context first.")
+ return owner, repo, token, branch
+
+
+async def get_repository_context_summary(
+    owner: str,
+    repo: str,
+    token: Optional[str] = None,
+    branch: str = "HEAD",
+) -> Dict[str, Any]:
+    """Programmatically gather repository context.
+
+    Returns a dict with keys: all_files (list of paths), total_files,
+    extensions (suffix -> count), directories (top-level names), key_files
+    (README/manifest-style paths). On failure returns
+    {"error": ..., "total_files": 0} instead of raising.
+
+    NOTE(review): "directories" is a *set*, so the result is not directly
+    JSON-serialisable -- confirm callers convert it before serialising.
+    """
+    try:
+        # Pass token + ref explicitly
+        tree = await get_repo_tree(owner, repo, token=token, ref=branch)
+
+        if not tree:
+            return {
+                "all_files": [],
+                "total_files": 0,
+                "extensions": {},
+                "directories": set(),
+                "key_files": [],
+            }
+
+        all_files = [item["path"] for item in tree]
+        extensions: Dict[str, int] = {}
+        directories: set = set()
+        key_files: List[str] = []
+
+        for item in tree:
+            path = item["path"]
+            # Crude suffix split: dotfiles like ".gitignore" count as their
+            # own "extension".
+            if "." in path:
+                ext = "." + path.rsplit(".", 1)[1]
+                extensions[ext] = extensions.get(ext, 0) + 1
+            # Only the top-level directory name is recorded.
+            if "/" in path:
+                directories.add(path.split("/")[0])
+
+            path_lower = path.lower()
+            if any(
+                k in path_lower
+                for k in ["readme", "package.json", "requirements.txt", "dockerfile", "makefile"]
+            ):
+                key_files.append(path)
+
+        return {
+            "all_files": all_files,
+            "total_files": len(all_files),
+            "extensions": extensions,
+            "directories": directories,
+            "key_files": key_files,
+        }
+
+    except Exception as e:
+        # NOTE(review): uses print rather than the logging module -- consider
+        # switching to a logger for consistency with the rest of the package.
+        print(f"[Error] Failed to get repository context: {str(e)}")
+        return {"error": str(e), "total_files": 0}
+
+
+@tool("List all files in repository")
+def list_repository_files() -> str:
+ """Lists all files in the current repository."""
+ try:
+ owner, repo, token, branch = get_repo_context()
+
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+ try:
+ # Pass token + ref explicitly
+ tree = loop.run_until_complete(get_repo_tree(owner, repo, token=token, ref=branch))
+ finally:
+ loop.close()
+
+ if not tree:
+ return f"Repository is empty - no files found. (Branch: {branch})"
+
+ result = f"Repository: {owner}/{repo} (Branch: {branch})\nFiles:\n"
+ for item in sorted(tree, key=lambda x: x["path"]):
+ result += f" - {item['path']}\n"
+ return result
+ except Exception as e:
+ return f"Error listing files: {str(e)}"
+
+
+@tool("Get directory structure")
+def get_directory_structure() -> str:
+ """Gets the hierarchical directory structure."""
+ try:
+ owner, repo, token, branch = get_repo_context()
+
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+ try:
+ # Pass token + ref explicitly
+ tree = loop.run_until_complete(get_repo_tree(owner, repo, token=token, ref=branch))
+ finally:
+ loop.close()
+
+ if not tree:
+ return f"No files. (Branch: {branch})"
+
+ # Simple structure generation
+ paths = [t["path"] for t in tree]
+ return f"Structure for {owner}/{repo} (Branch: {branch}):\n" + "\n".join(sorted(paths))
+ except Exception as e:
+ return f"Error: {str(e)}"
+
+
+@tool("Read file content")
+def read_file(file_path: str) -> str:
+ """Reads the content of a specific file."""
+ try:
+ owner, repo, token, branch = get_repo_context()
+
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+ try:
+ # Pass token + ref explicitly
+ content = loop.run_until_complete(get_file(owner, repo, file_path, token=token, ref=branch))
+ finally:
+ loop.close()
+
+ return f"Content of {file_path}:\n---\n{content}\n---"
+ except Exception as e:
+ return f"Error reading file {file_path}: {str(e)}"
+
+
+@tool("Get repository summary")
+def get_repository_summary() -> str:
+ """Provides a comprehensive summary of the repository."""
+ try:
+ owner, repo, token, branch = get_repo_context()
+
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+ try:
+ # Pass token + ref explicitly
+ tree = loop.run_until_complete(get_repo_tree(owner, repo, token=token, ref=branch))
+ finally:
+ loop.close()
+
+ return f"Summary for {owner}/{repo} (Branch: {branch}): {len(tree)} files found."
+ except Exception as e:
+ return f"Error: {str(e)}"
+
+
+# Export tools
+REPOSITORY_TOOLS = [list_repository_files, get_directory_structure, read_file, get_repository_summary]
diff --git a/gitpilot/agentic.py b/gitpilot/agentic.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7aaaad6fd9b854ac7adb551e409a05b341a941e
--- /dev/null
+++ b/gitpilot/agentic.py
@@ -0,0 +1,1344 @@
+from __future__ import annotations
+
+import asyncio
+import contextvars
+import logging
+from textwrap import dedent
+from typing import Any, Dict, List, Literal, Optional
+
+from crewai import Agent, Crew, Process, Task
+from pydantic import BaseModel, Field
+
+from .llm_provider import build_llm
+from .agent_tools import REPOSITORY_TOOLS, set_repo_context, get_repository_context_summary
+from .issue_tools import ISSUE_TOOLS
+from .pr_tools import PR_TOOLS
+from .search_tools import SEARCH_TOOLS
+from .local_tools import LOCAL_TOOLS, LOCAL_FILE_TOOLS, LOCAL_GIT_TOOLS, LOCAL_SHELL_TOOLS
+from .agent_router import AgentType, RequestCategory, WorkflowPlan, route as route_request
+from .context_pack import build_context_pack
+from .topology_registry import (
+ get_topology,
+ get_topology_graph,
+ classify_message,
+ get_saved_topology_preference,
+ ExecutionStyle,
+ RoutingStrategy,
+)
+from fastapi import HTTPException
+
+logger = logging.getLogger(__name__)
+
+
class PlanFile(BaseModel):
    """Represents a file operation in a plan step."""
    # Repository-relative path of the file this step touches.
    path: str
    # What the executor should do with the file; MODIFY is the safe default.
    action: Literal["CREATE", "MODIFY", "DELETE", "READ"] = "MODIFY"
+
+
class PlanStep(BaseModel):
    """A single step in the execution plan."""
    # 1-based ordinal; the executor processes steps in this order.
    step_number: int
    # Short human-readable label shown in execution logs.
    title: str
    # What the step does; also fed verbatim into code-generation prompts.
    description: str
    # Important: avoid mutable default list
    files: List[PlanFile] = Field(default_factory=list)
    # Optional free-text risk note; None when the planner sees no risk.
    risks: str | None = None
+
+
class PlanResult(BaseModel):
    """The complete execution plan."""
    # The original user goal the plan addresses.
    goal: str
    # One-paragraph overview of the whole plan.
    summary: str
    # Ordered steps; may be empty for purely informational goals.
    steps: List[PlanStep]
+
+
async def generate_plan(
    goal: str,
    repo_full_name: str,
    token: str | None = None,
    branch_name: str | None = None,
) -> PlanResult:
    """Agentic planning: create a structured plan but DO NOT modify the repo.

    Two-phase approach:
    1) Explore and understand the repository (on the correct branch)
    2) Create a plan based on actual repository state

    Args:
        goal: Free-form user goal describing the desired change.
        repo_full_name: Target repository as ``"owner/repo"``.
        token: Optional GitHub token forwarded to the repository tools.
        branch_name: Branch/tag/SHA to plan against; defaults to ``"HEAD"``.

    Returns:
        A validated ``PlanResult``. If CrewAI fails to populate structured
        output, the raw text is parsed as JSON as a fallback; only when that
        also fails is the raw crew result returned (legacy behaviour).
    """
    llm = build_llm()

    owner, repo = repo_full_name.split("/")

    # CRITICAL: Set context INCLUDING branch so tools never fall back to HEAD/main
    active_ref = branch_name or "HEAD"
    set_repo_context(owner, repo, token=token, branch=active_ref)

    # CONTEXT PACK: Load project context (conventions, active use case, asset chunks)
    # This is additive -- if nothing exists, context_pack is empty and agents behave as before.
    from pathlib import Path as _P
    workspace_path = _P.home() / ".gitpilot" / "workspaces" / owner / repo
    context_pack = build_context_pack(workspace_path, query=goal)
    if context_pack:
        logger.info("[GitPilot] Context pack loaded (%d chars)", len(context_pack))

    # PHASE 1: Explore repository (correct branch)
    logger.info("[GitPilot] Phase 1: Exploring repository %s (ref=%s)...", repo_full_name, active_ref)

    repo_context_data = await get_repository_context_summary(owner, repo, token=token, branch=active_ref)
    logger.info(
        "[GitPilot] Repository context gathered: %s files found (ref=%s)",
        repo_context_data.get("total_files", 0),
        active_ref,
    )

    explorer = Agent(
        role="Repository Explorer",
        goal="Thoroughly explore and document the current state of the repository",
        backstory=(
            "You are a meticulous code archaeologist who explores repositories "
            "to understand their complete structure before any changes are made. "
            "You use all available tools to build a comprehensive picture."
        ),
        llm=llm,
        tools=REPOSITORY_TOOLS,
        verbose=True,
        allow_delegation=False,
    )

    explore_task = Task(
        description=dedent(f"""
            Repository: {repo_full_name}
            Active Ref (branch/tag/SHA): {active_ref}

            Your mission is to THOROUGHLY explore this repository and document its current state.
            You MUST use your tools to gather the following information:

            1. Call "Get repository summary" - to get overall statistics
            2. Call "List all files in repository" - to see EVERY file that exists
            3. Call "Get directory structure" - to understand the organization
            4. If there are key files (README.md, package.json, etc.), read them

            CRITICAL: You must ACTUALLY CALL these tools. Do not make assumptions.

            After exploring, provide a detailed report in this EXACT format:

            REPOSITORY EXPLORATION REPORT
            =============================

            Files Found: [list all file paths you discovered]

            Key Files: [list important files like README.md, .gitignore, etc.]

            Directory Structure: [describe the folder organization]

            File Types: [count files by extension]

            Your report MUST be based on ACTUAL tool calls, not assumptions.
        """),
        expected_output="A detailed exploration report listing ALL files found in the repository",
        agent=explorer,
    )

    explore_crew = Crew(
        agents=[explorer],
        tasks=[explore_task],
        process=Process.sequential,
        verbose=True,
    )

    def _explore():
        return explore_crew.kickoff()

    # Propagate context to thread for CrewAI execution
    ctx = contextvars.copy_context()
    exploration_result = await asyncio.to_thread(ctx.run, _explore)

    exploration_report = exploration_result.raw if hasattr(exploration_result, "raw") else str(exploration_result)
    logger.info("[GitPilot] Exploration complete. Report length: %s chars", len(exploration_report))

    # PHASE 2: Plan creation based on exploration
    logger.info("[GitPilot] Phase 2: Creating plan based on repository exploration (ref=%s)...", active_ref)

    # Build planner backstory with optional context pack injection
    _planner_backstory = (
        "You are an experienced staff engineer who creates plans based on FACTS, not assumptions. "
        "You have received a complete exploration report of the repository. "
        "You ONLY create plans for files that actually exist in the exploration report. "
        "You are extremely careful with DELETE actions - you verify the file exists "
        "and that it's not on the 'keep' list before marking it for deletion. "
        "When users ask to delete files, you delete individual FILES, not directory names. "
        "When users ask to ANALYZE files and GENERATE new content (code, docs, examples), "
        "you create plans that READ existing files and CREATE new files with generated content. "
        "You understand that 'analyze X and create Y' means: use tools to read X, then plan to CREATE Y. "
        "You never make changes yourself, only create detailed plans."
    )
    if context_pack:
        _planner_backstory += "\n\n" + context_pack

    planner = Agent(
        role="Repository Refactor Planner",
        goal=(
            "Design safe, step-by-step refactor plans based on ACTUAL repository state "
            "discovered during exploration"
        ),
        backstory=_planner_backstory,
        llm=llm,
        tools=REPOSITORY_TOOLS,
        verbose=True,
        allow_delegation=False,
    )

    # NOTE: {{goal}} is intentionally double-braced -- it survives this f-string
    # and is later substituted by crew.kickoff(inputs={"goal": goal}).
    plan_task = Task(
        description=dedent(f"""
            User goal: {{goal}}
            Repository: {repo_full_name}
            Active Ref (branch/tag/SHA): {active_ref}

            REPOSITORY EXPLORATION REPORT (CRITICAL CONTEXT):
            ==================================================
            {exploration_report}
            ==================================================

            Based on the ACTUAL files listed in the exploration report above, create a plan.

            CRITICAL RULES FOR ANALYSIS AND GENERATION TASKS:
            - If the goal mentions "analyze" or "generate" or "create examples/demos", you MUST create NEW files
            - When the user asks to "analyze X and create Y":
              * Step 1: Use "Read file content" tool to analyze existing files (if needed)
              * Step 2: Plan CREATE actions for new files (e.g., demo.py, example.py, tutorial.md)
            - NEW files can include: Python scripts, examples, demos, tutorials, documentation
            - Examples of analysis tasks that should CREATE files:
              * "analyze README and generate Python code" β CREATE: demo.py, example.py
              * "create demo based on documentation" β CREATE: demo.py, test_example.py
              * "generate tutorial from existing code" β CREATE: tutorial.md, examples/
            - IMPORTANT: Empty plans (steps: []) are ONLY acceptable if the goal is purely informational
            - If the user wants something generated/created, you MUST include CREATE actions

            CRITICAL RULES FOR DELETION SCENARIOS:
            - If the goal mentions "delete files" or "keep only", you MUST identify which files to DELETE
            - For EACH file in the exploration report:
              * If it should be KEPT (e.g., README.md if goal says "keep README.md"), do NOT include it in the plan
              * If it should be DELETED (e.g., all other files), mark it with action "DELETE"
            - ONLY delete files that actually exist (check the exploration report)
            - NEVER delete files that the user wants to keep
            - Be explicit: if the goal is "delete all files except README.md", then:
              * README.md should NOT appear in your plan (it's being kept)
              * ALL other files from the exploration report should have action "DELETE"

            CRITICAL RULES FOR VERIFICATION:
            - ONLY include files that appear in the exploration report
            - For "CREATE" actions: file must NOT be in the exploration report
            - For "MODIFY" or "DELETE" actions: file MUST be in the exploration report
            - If you're unsure, you can still call your tools to double-check

            Your FINAL ANSWER must be a single JSON object that matches exactly this schema:

            {{
                "goal": "string describing the goal",
                "summary": "string with overall plan summary",
                "steps": [
                    {{
                        "step_number": 1,
                        "title": "Step title",
                        "description": "What this step does",
                        "files": [
                            {{"path": "file/path.py", "action": "CREATE"}},
                            {{"path": "another/file.py", "action": "MODIFY"}},
                            {{"path": "old/file.py", "action": "DELETE"}},
                            {{"path": "README.md", "action": "READ"}}
                        ],
                        "risks": "Optional risk description or null"
                    }}
                ]
            }}

            CRITICAL JSON RULES:
            - Output MUST be valid JSON.
            - STRICTLY NO COMMENTS allowed (no // or #).
            - Double quotes around all keys and string values.
            - No trailing commas.
            - "action" MUST be exactly one of: "CREATE", "MODIFY", "DELETE", "READ"
            - "step_number" MUST be an integer starting from 1
            - "risks" can be either a string or null (the JSON null value, without quotes)
            - Do NOT wrap the JSON in markdown code fences
            - Do NOT add any explanation before or after the JSON
            - The ENTIRE response MUST be ONLY the JSON object, starting with '{{' and ending with '}}'
        """),
        expected_output=dedent("""
            A single valid JSON object matching the PlanResult schema:
            - goal: string
            - summary: string
            - steps: array of objects, each with:
              - step_number: integer
              - title: string
              - description: string
              - files: array of { "path": string, "action": "CREATE" | "MODIFY" | "DELETE" | "READ" }
              - risks: string or null
            The response must contain ONLY pure JSON (no markdown, no prose, no code fences, NO COMMENTS).
        """),
        agent=planner,
        output_pydantic=PlanResult,
    )

    plan_crew = Crew(
        agents=[planner],
        tasks=[plan_task],
        process=Process.sequential,
        verbose=True,
    )

    def _plan():
        return plan_crew.kickoff(inputs={"goal": goal})

    ctx = contextvars.copy_context()
    result = await asyncio.to_thread(ctx.run, _plan)

    if hasattr(result, "pydantic") and result.pydantic:
        plan = result.pydantic
        logger.info("[GitPilot] Plan created with %s steps (ref=%s)", len(plan.steps), active_ref)
        return plan

    # Fallback: CrewAI sometimes returns valid JSON in .raw without populating
    # .pydantic. Parse it ourselves so callers still receive a PlanResult,
    # honouring the declared return type.
    raw_text = getattr(result, "raw", None)
    if isinstance(raw_text, str) and raw_text.strip():
        try:
            plan = PlanResult.model_validate_json(raw_text.strip())
            logger.info(
                "[GitPilot] Plan recovered from raw JSON with %s steps (ref=%s)",
                len(plan.steps),
                active_ref,
            )
            return plan
        except Exception:  # noqa: BLE001 - fall through to legacy behaviour
            logger.warning("[GitPilot] Raw planner output was not valid PlanResult JSON")

    logger.warning("[GitPilot] Unexpected planning result type: %r", type(result))
    return result
+
+
def _strip_code_fences(text: str) -> str:
    """Remove a single wrapping markdown code fence from LLM output.

    LLMs frequently wrap generated file content in ```lang ... ``` fences even
    when told not to. This strips the opening fence line and, when present,
    the closing fence line; the inner content is returned unchanged.
    """
    text = text.strip()
    if text.startswith("```"):
        lines = text.split("\n")
        if lines[-1].strip() == "```":
            text = "\n".join(lines[1:-1])
        else:
            # No closing fence: drop only the opening line.
            text = "\n".join(lines[1:])
    return text


async def execute_plan(
    plan: PlanResult,
    repo_full_name: str,
    token: str | None = None,
    branch_name: str | None = None,
) -> dict:
    """Execute the approved plan by applying changes to the GitHub repository.

    Args:
        plan: The approved ``PlanResult`` to apply.
        repo_full_name: Target repository as ``"owner/repo"``.
        token: Optional GitHub token used for all API calls.
        branch_name: Feature branch to commit to; auto-generated from the
            plan goal plus a timestamp when None.

    Returns:
        A dict with ``status``, ``message``, ``branch``, ``branch_url`` and a
        per-step ``executionLog``. Individual file failures are recorded in
        the log rather than aborting the whole run.
    """
    # delete_file hoisted here (previously re-imported inside the loop on
    # every DELETE action); unused get_repo import dropped.
    from .github_api import get_file, put_file, create_branch, delete_file
    import re
    import time

    owner, repo = repo_full_name.split("/")
    execution_steps: list[dict] = []
    llm = build_llm()

    if branch_name is None:
        # Derive a readable, unique branch name from the goal.
        sanitized = re.sub(r"[^a-z0-9-]+", "-", plan.goal.lower())
        sanitized = sanitized[:40].strip("-")
        timestamp = str(int(time.time()))[-6:]
        branch_name = f"gitpilot-{sanitized}-{timestamp}"

    try:
        logger.info("[GitPilot] Creating feature branch: %s", branch_name)
        await create_branch(owner, repo, branch_name, from_ref="HEAD", token=token)
        logger.info("[GitPilot] Branch created successfully: %s", branch_name)
    except HTTPException as e:
        # Non-fatal: re-running a plan reuses the existing branch.
        logger.warning(
            "[GitPilot] Branch %s already exists or creation failed: %s. Attempting to use existing branch.",
            branch_name,
            e.detail,
        )

    # CRITICAL: ensure tools read from the ACTIVE execution branch
    set_repo_context(owner, repo, token=token, branch=branch_name)

    code_writer = Agent(
        role="Expert Code Writer",
        goal="Generate high-quality, production-ready code and documentation based on requirements.",
        backstory=(
            "You are a senior software engineer with expertise in multiple programming languages. "
            "You write clean, well-documented, and functional code. "
            "You understand context and generate appropriate content for each file type. "
            "For documentation files (README.md, docs, etc.), you write clear, comprehensive content. "
            "For code files, you follow best practices and include proper comments. "
            "IMPORTANT: You ALWAYS use repository exploration tools before creating new content. "
            "When asked to create demos/examples/tutorials, you first READ the existing files to understand "
            "the project, then generate content that is relevant and accurate. "
            "You never create generic examples - you create content specific to THIS repository."
        ),
        llm=llm,
        tools=REPOSITORY_TOOLS,
        verbose=True,
        allow_delegation=False,
    )

    for step in plan.steps:
        step_summary = f"Step {step.step_number}: {step.title}"

        for file in step.files:
            try:
                if file.action == "CREATE":
                    create_task = Task(
                        description=(
                            f"Generate complete content for a new file: {file.path}\n\n"
                            f"Overall Goal: {plan.goal}\n"
                            f"Step Context: {step.description}\n\n"
                            "CRITICAL INSTRUCTIONS:\n"
                            "- You have access to repository exploration tools - USE THEM!\n"
                            "- If the goal mentions 'analyze' or 'based on', first read the relevant files:\n"
                            "  * Use 'Read file content' to read existing files (README.md, source code, etc.)\n"
                            "  * Use 'List all files in repository' to see what files exist\n"
                            "- Generate content that is INFORMED by the actual repository content\n"
                            "- If creating a demo/example, make it relevant to the actual project\n"
                            "- If creating documentation, reference actual files and code in the repository\n\n"
                            "Requirements:\n"
                            f"- Create production-ready content appropriate for {file.path}\n"
                            "- If it's a documentation file (.md, .txt, .rst), write comprehensive, well-structured documentation\n"
                            "- If it's a code file, include proper imports, comments, and follow best practices\n"
                            "- If it's a configuration file, include sensible defaults and comments\n"
                            "- Make the content complete and ready to use\n"
                            "- Do NOT include placeholder comments like 'TODO' or 'IMPLEMENT THIS'\n"
                            "- The content should be fully functional and informative\n\n"
                            "Return ONLY the file content, no explanations or markdown code blocks."
                        ),
                        expected_output=f"Complete, production-ready content for {file.path}",
                        agent=code_writer,
                    )

                    def _create():
                        crew = Crew(
                            agents=[code_writer],
                            tasks=[create_task],
                            process=Process.sequential,
                            verbose=False,
                        )
                        result = crew.kickoff()
                        if hasattr(result, "raw"):
                            return result.raw
                        return str(result)

                    # Run the crew in a worker thread, preserving contextvars
                    # (repo context) for the tools.
                    ctx = contextvars.copy_context()
                    content = await asyncio.to_thread(ctx.run, _create)

                    content = _strip_code_fences(content)

                    await put_file(
                        owner,
                        repo,
                        file.path,
                        content,
                        f"GitPilot: Create {file.path} - {step.title}",
                        token=token,
                        branch=branch_name,
                    )
                    step_summary += f"\n  β Created {file.path}"

                elif file.action == "MODIFY":
                    try:
                        # Read the current content from the execution branch so
                        # the LLM modifies the latest version.
                        existing_content = await get_file(
                            owner, repo, file.path, token=token, ref=branch_name
                        )

                        modify_task = Task(
                            description=(
                                f"Modify the existing file: {file.path}\n\n"
                                f"Overall Goal: {plan.goal}\n"
                                f"Step Context: {step.description}\n\n"
                                f"Current File Content:\n"
                                f"---\n{existing_content}\n---\n\n"
                                "Requirements:\n"
                                "- Make the changes described in the step context\n"
                                "- Preserve the existing structure and format\n"
                                "- For documentation: update or add relevant sections\n"
                                "- For code: add/modify functions, imports, or logic as needed\n"
                                "- Ensure the result is complete and functional\n"
                                "- Do NOT just add comments - make real, substantive changes\n\n"
                                "Return ONLY the complete modified file content, no explanations."
                            ),
                            expected_output=f"Complete, modified content for {file.path}",
                            agent=code_writer,
                        )

                        def _modify():
                            crew = Crew(
                                agents=[code_writer],
                                tasks=[modify_task],
                                process=Process.sequential,
                                verbose=False,
                            )
                            result = crew.kickoff()
                            if hasattr(result, "raw"):
                                return result.raw
                            return str(result)

                        ctx = contextvars.copy_context()
                        modified_content = await asyncio.to_thread(ctx.run, _modify)

                        modified_content = _strip_code_fences(modified_content)

                        await put_file(
                            owner,
                            repo,
                            file.path,
                            modified_content,
                            f"GitPilot: Modify {file.path} - {step.title}",
                            token=token,
                            branch=branch_name,
                        )
                        step_summary += f"\n  β Modified {file.path}"
                    except Exception as e:  # noqa: BLE001
                        logger.exception(
                            "Failed to modify file %s in step %s: %s",
                            file.path,
                            step.step_number,
                            e,
                        )
                        step_summary += f"\n  β Failed to modify {file.path}: {str(e)}"

                elif file.action == "DELETE":
                    try:
                        await delete_file(
                            owner,
                            repo,
                            file.path,
                            f"GitPilot: Delete {file.path} - {step.title}",
                            token=token,
                            branch=branch_name,
                        )
                        step_summary += f"\n  β Deleted {file.path}"
                    except Exception as e:  # noqa: BLE001
                        logger.exception(
                            "Failed to delete file %s in step %s: %s",
                            file.path,
                            step.step_number,
                            e,
                        )
                        step_summary += f"\n  β Failed to delete {file.path}: {str(e)}"

                elif file.action == "READ":
                    # READ steps are informational; nothing is written.
                    step_summary += f"\n  βΉοΈ READ-only: inspected {file.path}"

            except Exception as e:  # noqa: BLE001
                logger.exception(
                    "Error processing file %s in step %s: %s",
                    file.path,
                    step.step_number,
                    e,
                )
                step_summary += f"\n  β Error processing {file.path}: {str(e)}"

        execution_steps.append({"step_number": step.step_number, "summary": step_summary})

    return {
        "status": "completed",
        "message": f"Successfully executed {len(plan.steps)} steps on {repo_full_name} in branch '{branch_name}'",
        "branch": branch_name,
        "branch_url": f"https://github.com/{repo_full_name}/tree/{branch_name}",
        "executionLog": {"steps": execution_steps},
    }
+
+
+# ============================================================================
+# New Agent Builders (v2 upgrade)
+# ============================================================================
+
def _build_issue_agent(llm) -> Agent:
    """Build the specialist agent that creates and manages GitHub issues."""
    story = (
        "You are an expert in GitHub issue management. You can create new issues "
        "with detailed descriptions, modify existing issues and their metadata, "
        "manage labels, milestones, and assignees, and add comments. "
        "You ensure issues are well-organised and provide clear status updates. "
        "When creating issues you always include a concise title and a structured body."
    )
    return Agent(
        role="GitHub Issue Management Specialist",
        goal="Create, modify, and manage GitHub issues with proper metadata and relationships",
        backstory=story,
        tools=ISSUE_TOOLS,
        llm=llm,
        allow_delegation=False,
        verbose=True,
    )
+
+
def _build_pr_agent(llm) -> Agent:
    """Build the specialist agent that drives pull request workflows."""
    spec = {
        "role": "Pull Request Management Specialist",
        "goal": "Create, list, review, and merge pull requests",
        "backstory": (
            "You are skilled in pull request workflows. You can create PRs from "
            "feature branches, list open PRs, inspect changed files, add reviews, "
            "and merge PRs using the appropriate strategy (merge, squash, rebase). "
            "You always verify the source and target branches before acting."
        ),
        "llm": llm,
        "tools": PR_TOOLS,
        "verbose": True,
        "allow_delegation": False,
    }
    return Agent(**spec)
+
+
def _build_search_agent(llm) -> Agent:
    """Build the agent that locates code, repos, issues, and users on GitHub."""
    search_backstory = (
        "You are an expert at finding resources on GitHub. You can search for "
        "code by keywords, symbols, or patterns within a repository or globally. "
        "You can find users and organisations, discover repositories by topic, "
        "and locate issues or PRs matching specific criteria. "
        "You present results in a clear, structured format."
    )
    # Search agents also get repository tools so they can inspect hits in place.
    toolset = SEARCH_TOOLS + REPOSITORY_TOOLS
    return Agent(
        role="Search & Discovery Specialist",
        goal="Find code, repositories, issues, and users across GitHub",
        backstory=search_backstory,
        llm=llm,
        tools=toolset,
        allow_delegation=False,
        verbose=True,
    )
+
+
def _build_code_review_agent(llm) -> Agent:
    """Build the agent that reviews code quality and suggests improvements."""
    spec = dict(
        role="Code Review & Analysis Specialist",
        goal="Review code quality, identify patterns, and suggest improvements",
        backstory=(
            "You are an experienced code reviewer who analyses code for quality, "
            "security issues, and performance problems. You inspect files in the "
            "repository, read their contents, and provide constructive feedback. "
            "For pull requests you examine the changed files and produce a detailed "
            "review with actionable suggestions."
        ),
        llm=llm,
        # Reviewers need read access, PR inspection, and search.
        tools=REPOSITORY_TOOLS + PR_TOOLS + SEARCH_TOOLS,
        verbose=True,
        allow_delegation=False,
    )
    return Agent(**spec)
+
+
def _build_learning_agent(llm) -> Agent:
    """Build the agent that answers GitHub how-to and best-practice questions."""
    guidance_story = (
        "You are a GitHub expert who helps users understand GitHub Actions, "
        "CI/CD workflows, authentication, pull request best practices, "
        "repository maintenance, GitHub Pages, Packages, Discussions, "
        "and security best practices. You provide clear, actionable guidance "
        "with examples. You can also read the repository to give contextualised advice."
    )
    return Agent(
        role="GitHub Learning & Guidance Specialist",
        goal="Provide expert guidance on GitHub features, best practices, and workflows",
        backstory=guidance_story,
        tools=REPOSITORY_TOOLS + SEARCH_TOOLS,
        llm=llm,
        allow_delegation=False,
        verbose=True,
    )
+
+
def _build_local_editor_agent(llm) -> Agent:
    """Phase 1: Agent for direct local file editing with verification."""
    editor_spec = {
        "role": "Local File Editor",
        "goal": "Read, write, and modify files in the local workspace with verification",
        "backstory": (
            "You are an expert code editor that operates directly on the local "
            "filesystem. You read files, make precise edits, write new files, "
            "and verify changes using git diff. You always check file contents "
            "before editing and confirm results after. You follow project "
            "conventions and never introduce breaking changes."
        ),
        "llm": llm,
        # File operations plus git tools for diff-based verification.
        "tools": LOCAL_FILE_TOOLS + LOCAL_GIT_TOOLS,
        "verbose": True,
        "allow_delegation": False,
    }
    return Agent(**editor_spec)
+
+
def _build_terminal_agent(llm) -> Agent:
    """Phase 1: Agent for sandboxed shell command execution."""
    shell_story = (
        "You are a terminal expert that runs shell commands in a sandboxed "
        "environment. You can run tests, linters, build tools, and other "
        "development commands. You always report exit codes and output. "
        "You refuse to run destructive commands like rm -rf / or format disks. "
        "You explain command output clearly to the user."
    )
    return Agent(
        role="Terminal & Shell Executor",
        goal="Execute shell commands safely in the workspace and report results",
        backstory=shell_story,
        tools=LOCAL_SHELL_TOOLS + LOCAL_GIT_TOOLS,
        llm=llm,
        allow_delegation=False,
        verbose=True,
    )
+
+
+# ============================================================================
+# Unified Dispatcher (v2 upgrade)
+# ============================================================================
+
async def dispatch_request(
    user_request: Optional[str] = None,
    repo_full_name: Optional[str] = None,
    token: Optional[str] = None,
    branch_name: Optional[str] = None,
    topology_id: Optional[str] = None,
    # -----------------------------------------------------------------
    # Backwards-compatible keyword arguments.
    # Older callers (notably early WebSocket and A2A adapters) used:
    #     dispatch_request(repo_owner=..., repo_name=..., message=...)
    # Keeping these kwargs prevents crashes when frontend/backend drift.
    # -----------------------------------------------------------------
    repo_owner: Optional[str] = None,
    repo_name: Optional[str] = None,
    message: Optional[str] = None,
    **_ignored_kwargs: Any,
) -> Dict[str, Any]:
    """Route a free-form user request to the appropriate agent(s) and return the result.

    This is the single entry-point for the new conversational mode. For backwards
    compatibility the original ``generate_plan`` / ``execute_plan`` pair is still
    available and untouched.

    If *topology_id* is supplied, topology-aware routing is used:
    - ``classify_and_dispatch`` — falls through to the existing agent_router
    - ``always_main_agent`` — all requests go to the primary agent (T2)
    - ``fixed_sequence`` — a CrewAI sequential crew is built from the
      topology's agent sequence (T3-T7)

    When *topology_id* is ``None``, behaviour is identical to the original v2
    dispatcher.

    Returns:
        For PLAN_EXECUTE requests: a dict containing the generated plan.
        Otherwise: a dict with ``category``, ``agents_used``, ``result`` text
        and ``entity_number`` (or the pipeline-result dict for fixed-sequence
        topologies).
    """
    # ---- Input normalization / compat layer ----
    # Map legacy kwargs onto the canonical parameters before validating.
    if user_request is None and message is not None:
        user_request = message
    if repo_full_name is None and repo_owner and repo_name:
        repo_full_name = f"{repo_owner}/{repo_name}"

    if not user_request:
        raise ValueError("dispatch_request: missing user_request (or legacy 'message')")
    if not repo_full_name:
        raise ValueError("dispatch_request: missing repo_full_name (or legacy repo_owner/repo_name)")
    # ---------- Topology-aware routing (additive) ----------
    # Explicit topology_id wins; otherwise fall back to the saved preference.
    _active_topology = None
    _resolved_tid = topology_id or get_saved_topology_preference()
    if _resolved_tid:
        _active_topology = get_topology(_resolved_tid)

    if _active_topology and _active_topology.routing_policy.strategy == RoutingStrategy.fixed_sequence:
        # Pipeline topologies (T3-T7): build a multi-task sequential crew
        return await _dispatch_pipeline(
            _active_topology, user_request, repo_full_name,
            token=token, branch_name=branch_name,
        )

    # For ``classify_and_dispatch`` (T1/default) and ``always_main_agent`` (T2)
    # we fall through to the existing routing. T2's react_loop execution will
    # be wired in a future phase; for now it uses the same single-task path
    # but the *visualization* already shows the correct graph.

    workflow = route_request(user_request)
    logger.info(
        "[GitPilot] Router: category=%s agents=%s desc=%s",
        workflow.category.value,
        [a.value for a in workflow.agents],
        workflow.description,
    )

    # Phase 2: Smart model routing
    # NOTE(review): the selection is currently only logged, not applied —
    # build_llm() below still picks the default model; presumably wiring the
    # selection into build_llm is a planned follow-up.
    try:
        from .smart_model_router import ModelRouter
        _router = ModelRouter()
        selection = _router.select(user_request, category=workflow.category.value)
        logger.info(
            "[GitPilot] ModelRouter: model=%s tier=%s complexity=%s reason=%s",
            selection.model, selection.tier.value, selection.complexity.value, selection.reason,
        )
    except Exception:
        pass  # Model routing is optional; fall through to default LLM

    # Set repo context if needed
    if workflow.requires_repo_context and repo_full_name:
        owner, repo = repo_full_name.split("/")
        active_ref = branch_name or "HEAD"
        set_repo_context(owner, repo, token=token, branch=active_ref)

    llm = build_llm()

    # If it's the existing plan+execute workflow, delegate there
    if workflow.category == RequestCategory.PLAN_EXECUTE:
        plan = await generate_plan(user_request, repo_full_name, token=token, branch_name=branch_name)
        return {
            "category": workflow.category.value,
            "workflow": "plan_execute",
            "plan": plan.model_dump() if hasattr(plan, "model_dump") else plan,
            "message": "Plan generated. Review and approve to execute.",
        }

    # CONTEXT PACK: Load project context for non-plan agents too (additive)
    _dispatch_ctx_pack = ""
    if repo_full_name:
        try:
            _d_owner, _d_repo = repo_full_name.split("/")
            from pathlib import Path as _P
            _d_ws = _P.home() / ".gitpilot" / "workspaces" / _d_owner / _d_repo
            _dispatch_ctx_pack = build_context_pack(_d_ws, query=user_request)
        except Exception:
            # Context pack is best-effort; missing workspace is not an error.
            pass

    # Build the task description
    task_description = _build_task_description(workflow, user_request, repo_full_name, branch_name)
    if _dispatch_ctx_pack:
        task_description += "\n\n" + _dispatch_ctx_pack

    # Build agent(s) for this workflow
    agents = []
    for agent_type in workflow.agents:
        agents.append(_get_agent(agent_type, llm))

    # Use the first agent as the primary executor
    primary_agent = agents[0]
    task = Task(
        description=task_description,
        expected_output="A clear, structured response addressing the user request",
        agent=primary_agent,
    )

    crew = Crew(
        agents=agents,
        tasks=[task],
        process=Process.sequential,
        verbose=True,
    )

    def _run():
        result = crew.kickoff()
        if hasattr(result, "raw"):
            return result.raw
        return str(result)

    # Run in a worker thread; copy_context propagates the repo contextvars.
    ctx = contextvars.copy_context()
    result_text = await asyncio.to_thread(ctx.run, _run)

    return {
        "category": workflow.category.value,
        "agents_used": [a.value for a in workflow.agents],
        "result": result_text,
        "entity_number": workflow.entity_number,
    }
+
+
+# ============================================================================
+# Topology Pipeline Dispatcher (additive β T3-T7)
+# ============================================================================
+
+# Maps topology agent IDs to AgentType enum + task descriptions.
+# This bridge lets the topology registry reference agents by string ID while
+# reusing the existing _get_agent() builders.
# Each value is a pair: (AgentType to instantiate, base task description
# appended to the user request for that pipeline phase).
_TOPO_AGENT_MAP = {
    "explorer": (AgentType.EXPLORER, "Explore the codebase: map project structure, discover relevant files, "
                 "identify patterns, dependencies, and test conventions. "
                 "Return a structured analysis with file paths and key findings."),
    "planner": (AgentType.PLANNER, "Based on the exploration results, create a detailed implementation plan. "
                "Include: files to modify, files to create, step-by-step order, "
                "and test strategy. Consider trade-offs and alternatives."),
    "developer": (AgentType.CODE_WRITER, "Execute the implementation plan step by step. For each step: "
                  "make the code change, then run tests. If tests fail, fix the issue "
                  "before moving to the next step. Follow project coding standards."),
    "reviewer": (AgentType.CODE_REVIEWER, "Review all code changes. Check for: security vulnerabilities, "
                 "code quality, test coverage, performance issues. "
                 "Organise findings by severity: Critical, Warning, Suggestion."),
    "git_agent": (AgentType.PR_MANAGER, "Create a branch, commit all changes with a descriptive message, "
                  "push the branch, and create a GitHub PR. PR should summarise "
                  "the changes clearly with a test plan."),
}
+
+
async def _dispatch_pipeline(
    topology,
    user_request: str,
    repo_full_name: str,
    token: Optional[str] = None,
    branch_name: Optional[str] = None,
) -> Dict[str, Any]:
    """Run a topology's fixed-sequence pipeline as a multi-task CrewAI crew.

    Each agent in the sequence gets its own Task. Tasks are linked via
    CrewAI's ``context`` parameter so the output of step N feeds step N+1.

    Args:
        topology: A topology object exposing ``routing_policy.sequence``,
            ``id``, ``name``, and ``execution_style``.
        user_request: Free-form request text injected into every task.
        repo_full_name: Target repository as ``"owner/repo"``.
        token: Optional GitHub token for the repository tools.
        branch_name: Optional branch to operate on; defaults to "HEAD".

    Returns:
        A dict with the pipeline result text and topology metadata, or an
        ``{"error": ...}`` dict when the topology defines no usable agents.
    """
    sequence = topology.routing_policy.sequence or []
    if not sequence:
        return {"error": "Topology has no agent sequence defined"}

    # Set repo context
    if repo_full_name:
        owner, repo = repo_full_name.split("/")
        active_ref = branch_name or "HEAD"
        set_repo_context(owner, repo, token=token, branch=active_ref)

    llm = build_llm()

    # Build agents and tasks
    agents = []
    tasks = []
    for i, agent_id in enumerate(sequence):
        mapping = _TOPO_AGENT_MAP.get(agent_id)
        if not mapping:
            # Unknown IDs are skipped rather than failing the whole pipeline.
            logger.warning("[GitPilot] Unknown topology agent ID: %s β skipping", agent_id)
            continue
        agent_type, base_description = mapping
        agent = _get_agent(agent_type, llm)
        agents.append(agent)

        # Build task description: combine base description with user request
        task_desc = (
            f"User request: {user_request}\n"
            f"Repository: {repo_full_name}\n"
        )
        if branch_name:
            task_desc += f"Branch: {branch_name}\n"
        task_desc += f"\nYour role in this pipeline: {base_description}"

        # Context chaining: each task after the first receives prior tasks
        # (a snapshot copy, so later appends don't mutate earlier contexts).
        context = tasks[:] if tasks else []

        task = Task(
            description=task_desc,
            expected_output=f"Structured output from the {agent_id} phase",
            agent=agent,
            context=context if context else None,
        )
        tasks.append(task)

    if not agents:
        return {"error": "No valid agents could be built for this topology"}

    # Load optional context pack
    _ctx_pack = ""
    if repo_full_name:
        try:
            from pathlib import Path as _P
            _owner, _repo = repo_full_name.split("/")
            _ws = _P.home() / ".gitpilot" / "workspaces" / _owner / _repo
            _ctx_pack = build_context_pack(_ws, query=user_request)
        except Exception:
            # Best-effort: a missing workspace simply yields no context pack.
            pass
    if _ctx_pack:
        # Append context pack to the first task's description
        tasks[0].description += "\n\n" + _ctx_pack

    crew = Crew(
        agents=agents,
        tasks=tasks,
        process=Process.sequential,
        verbose=True,
    )

    def _run():
        result = crew.kickoff()
        if hasattr(result, "raw"):
            return result.raw
        return str(result)

    # Run in a worker thread; copy_context propagates repo contextvars.
    ctx = contextvars.copy_context()
    result_text = await asyncio.to_thread(ctx.run, _run)

    return {
        "category": "topology_pipeline",
        "topology_id": topology.id,
        "topology_name": topology.name,
        "execution_style": topology.execution_style.value,
        "agents_used": sequence,
        "result": result_text,
    }
+
+
+def _get_agent(agent_type: AgentType, llm) -> Agent:
+ """Instantiate an agent by type."""
+ builders = {
+ AgentType.EXPLORER: lambda: Agent(
+ role="Repository Explorer",
+ goal="Thoroughly explore and document the current state of the repository",
+ backstory="You are a meticulous code archaeologist who explores repositories.",
+ llm=llm,
+ tools=REPOSITORY_TOOLS,
+ verbose=True,
+ allow_delegation=False,
+ ),
+ AgentType.PLANNER: lambda: Agent(
+ role="Repository Refactor Planner",
+ goal="Design safe, step-by-step refactor plans",
+ backstory="You are an experienced staff engineer who creates plans based on facts.",
+ llm=llm,
+ tools=REPOSITORY_TOOLS,
+ verbose=True,
+ allow_delegation=False,
+ ),
+ AgentType.CODE_WRITER: lambda: Agent(
+ role="Expert Code Writer",
+ goal="Generate high-quality, production-ready code",
+ backstory="You are a senior software engineer with multi-language expertise.",
+ llm=llm,
+ tools=REPOSITORY_TOOLS,
+ verbose=True,
+ allow_delegation=False,
+ ),
+ AgentType.CODE_REVIEWER: lambda: _build_code_review_agent(llm),
+ AgentType.ISSUE_MANAGER: lambda: _build_issue_agent(llm),
+ AgentType.PR_MANAGER: lambda: _build_pr_agent(llm),
+ AgentType.SEARCH: lambda: _build_search_agent(llm),
+ AgentType.LEARNING: lambda: _build_learning_agent(llm),
+ AgentType.LOCAL_EDITOR: lambda: _build_local_editor_agent(llm),
+ AgentType.TERMINAL: lambda: _build_terminal_agent(llm),
+ }
+ builder = builders.get(agent_type)
+ if not builder:
+ raise ValueError(f"Unknown agent type: {agent_type}")
+ return builder()
+
+
+def _build_task_description(
+ workflow: WorkflowPlan,
+ user_request: str,
+ repo_full_name: str,
+ branch_name: Optional[str],
+) -> str:
+ """Build a detailed task description for the agent based on the workflow."""
+ parts = [
+ f"User request: {user_request}",
+ f"Repository: {repo_full_name}",
+ ]
+ if branch_name:
+ parts.append(f"Branch: {branch_name}")
+ if workflow.entity_number:
+ parts.append(f"Entity number: #{workflow.entity_number}")
+
+ # Category-specific instructions
+ if workflow.category == RequestCategory.ISSUE_MANAGEMENT:
+ action = workflow.metadata.get("action", "")
+ parts.append(
+ "\nYou are handling an ISSUE MANAGEMENT request. "
+ f"Action hint: {action}. "
+ "Use your issue tools to fulfill the request. "
+ "If creating an issue, extract title and body from the user request. "
+ "If listing issues, present results in a clear table. "
+ "If updating, identify the issue number and fields to change. "
+ "Always confirm what you did with the issue URL."
+ )
+
+ elif workflow.category == RequestCategory.PR_MANAGEMENT:
+ action = workflow.metadata.get("action", "")
+ parts.append(
+ "\nYou are handling a PULL REQUEST request. "
+ f"Action hint: {action}. "
+ "Use your PR tools to fulfill the request. "
+ "If creating a PR, determine the head and base branches. "
+ "If merging, confirm the PR number and merge method. "
+ "Always confirm with the PR URL."
+ )
+
+ elif workflow.category == RequestCategory.CODE_SEARCH:
+ search_type = workflow.metadata.get("search_type", "code")
+ parts.append(
+ f"\nYou are handling a SEARCH request (type: {search_type}). "
+ "Use your search tools to find what the user is looking for. "
+ "Present results clearly with paths, URLs, and context snippets."
+ )
+
+ elif workflow.category == RequestCategory.CODE_REVIEW:
+ parts.append(
+ "\nYou are handling a CODE REVIEW request. "
+ "First explore the repository to understand the codebase, "
+ "then analyse code quality, identify potential issues "
+ "(security, performance, maintainability), and provide "
+ "constructive suggestions with specific file references."
+ )
+
+ elif workflow.category == RequestCategory.LEARNING:
+ parts.append(
+ "\nYou are handling a LEARNING / GUIDANCE request. "
+ "Provide clear, actionable guidance about GitHub features. "
+ "Include examples and best practices. "
+ "If relevant, reference the current repository for context."
+ )
+
+ elif workflow.category == RequestCategory.LOCAL_EDIT:
+ parts.append(
+ "\nYou are handling a LOCAL FILE EDITING request. "
+ "Use your local file tools to read, write, and modify files. "
+ "Always read the file before editing to understand current content. "
+ "After editing, use git_diff or git_status to verify your changes. "
+ "Report exactly what was changed."
+ )
+
+ elif workflow.category == RequestCategory.TERMINAL:
+ parts.append(
+ "\nYou are handling a TERMINAL / SHELL COMMAND request. "
+ "Use the run_command tool to execute the requested command. "
+ "Report the exit code and output. If tests fail, summarise "
+ "which tests failed and why. Never run destructive commands."
+ )
+
+ elif workflow.category == RequestCategory.CONVERSATIONAL:
+ parts.append(
+ "\nYou are handling a general question about the repository. "
+ "Use repository tools to explore and answer the question. "
+ "Be concise and helpful."
+ )
+
+ return "\n".join(parts)
+
+
+# ============================================================================
+# Auto PR Creation (v2 upgrade)
+# ============================================================================
+
+async def create_pr_after_execution(
+ repo_full_name: str,
+ branch_name: str,
+ goal: str,
+ execution_log: Dict[str, Any],
+ token: Optional[str] = None,
+) -> Optional[Dict[str, Any]]:
+ """Automatically create a PR after plan execution completes.
+
+ Returns the PR data dict or None if creation fails.
+ """
+ from .github_pulls import create_pull_request
+ from .github_api import get_repo
+
+ owner, repo = repo_full_name.split("/")
+
+ try:
+ repo_info = await get_repo(owner, repo, token=token)
+ default_branch = repo_info.get("default_branch", "main")
+ except Exception:
+ default_branch = "main"
+
+ # Build PR body from execution log
+ steps = execution_log.get("steps", [])
+ body_lines = [f"## GitPilot Auto-PR\n\n**Goal:** {goal}\n"]
+ for step in steps:
+ body_lines.append(f"- {step.get('summary', '')}")
+ body_lines.append(f"\n---\n*Created by GitPilot*")
+ body = "\n".join(body_lines)
+
+ # Truncate title to stay within GitHub limits
+ title = f"GitPilot: {goal}"
+ if len(title) > 256:
+ title = title[:253] + "..."
+
+ try:
+ pr = await create_pull_request(
+ owner,
+ repo,
+ title=title,
+ head=branch_name,
+ base=default_branch,
+ body=body,
+ token=token,
+ )
+ logger.info("[GitPilot] Auto-PR created: %s", pr.get("html_url", ""))
+ return pr
+ except Exception as e:
+ logger.warning("[GitPilot] Failed to create auto-PR: %s", e)
+ return None
+
+
+# ============================================================================
+# Flow Definition (v3 -- topology-aware with legacy fallback)
+# ============================================================================
+
+async def get_flow_definition(topology_id: Optional[str] = None) -> dict:
+ """Return the agent workflow as a visual graph.
+
+ When *topology_id* is provided (or a saved preference exists), the graph
+ is served from the topology registry. Otherwise the original hardcoded
+ graph is returned for backward compatibility.
+ """
+ tid = topology_id or get_saved_topology_preference()
+ if tid:
+ return get_topology_graph(tid)
+
+ # Legacy hardcoded graph (unchanged from v2)
+ return {
+ "nodes": [
+ {
+ "id": "router",
+ "label": "Request Router",
+ "type": "router",
+ "description": "Analyses user intent and delegates to the right agent(s)",
+ },
+ {
+ "id": "repo_explorer",
+ "label": "Repository Explorer",
+ "type": "agent",
+ "description": "Explores repository to gather current state",
+ },
+ {
+ "id": "planner",
+ "label": "Refactor Planner",
+ "type": "agent",
+ "description": "Creates safe, step-by-step refactor plans based on exploration",
+ },
+ {
+ "id": "code_writer",
+ "label": "Code Writer",
+ "type": "agent",
+ "description": "Implements approved changes to codebase",
+ },
+ {
+ "id": "reviewer",
+ "label": "Code Reviewer",
+ "type": "agent",
+ "description": "Reviews code quality, security, and performance",
+ },
+ {
+ "id": "issue_manager",
+ "label": "Issue Manager",
+ "type": "agent",
+ "description": "Creates, updates, and manages GitHub issues",
+ },
+ {
+ "id": "pr_manager",
+ "label": "PR Manager",
+ "type": "agent",
+ "description": "Creates, reviews, and merges pull requests",
+ },
+ {
+ "id": "search_agent",
+ "label": "Search & Discovery",
+ "type": "agent",
+ "description": "Searches code, repos, issues, and users",
+ },
+ {
+ "id": "learning_agent",
+ "label": "Learning & Guidance",
+ "type": "agent",
+ "description": "Provides GitHub feature guidance and best practices",
+ },
+ {
+ "id": "local_editor",
+ "label": "Local Editor",
+ "type": "agent",
+ "description": "Reads and writes files directly in the local workspace",
+ },
+ {
+ "id": "terminal_agent",
+ "label": "Terminal",
+ "type": "agent",
+ "description": "Executes shell commands in a sandboxed environment",
+ },
+ {
+ "id": "github_tools",
+ "label": "GitHub API",
+ "type": "tool",
+ "description": "Read/write/delete files, issues, PRs, search",
+ },
+ {
+ "id": "local_tools",
+ "label": "Local Tools",
+ "type": "tool",
+ "description": "File I/O, git operations, shell commands on local workspace",
+ },
+ ],
+ "edges": [
+ {
+ "id": "e0",
+ "source": "router",
+ "target": "repo_explorer",
+ "label": "Plan & Execute workflow",
+ },
+ {
+ "id": "e0b",
+ "source": "router",
+ "target": "issue_manager",
+ "label": "Issue management requests",
+ },
+ {
+ "id": "e0c",
+ "source": "router",
+ "target": "pr_manager",
+ "label": "PR management requests",
+ },
+ {
+ "id": "e0d",
+ "source": "router",
+ "target": "search_agent",
+ "label": "Search requests",
+ },
+ {
+ "id": "e0e",
+ "source": "router",
+ "target": "reviewer",
+ "label": "Code review requests",
+ },
+ {
+ "id": "e0f",
+ "source": "router",
+ "target": "learning_agent",
+ "label": "Learning & guidance requests",
+ },
+ {
+ "id": "e1",
+ "source": "repo_explorer",
+ "target": "planner",
+ "label": "Complete repository state & file listing",
+ },
+ {
+ "id": "e2",
+ "source": "planner",
+ "target": "code_writer",
+ "label": "Approved plan with verified file actions",
+ },
+ {
+ "id": "e3",
+ "source": "code_writer",
+ "target": "pr_manager",
+ "label": "Auto-create PR after execution",
+ },
+ {
+ "id": "e4",
+ "source": "reviewer",
+ "target": "pr_manager",
+ "label": "Review results",
+ },
+ {
+ "id": "e5",
+ "source": "issue_manager",
+ "target": "github_tools",
+ "label": "Issue operations",
+ },
+ {
+ "id": "e6",
+ "source": "pr_manager",
+ "target": "github_tools",
+ "label": "PR operations",
+ },
+ {
+ "id": "e7",
+ "source": "search_agent",
+ "target": "github_tools",
+ "label": "Search queries",
+ },
+ {
+ "id": "e8",
+ "source": "router",
+ "target": "local_editor",
+ "label": "Local file editing requests",
+ },
+ {
+ "id": "e9",
+ "source": "router",
+ "target": "terminal_agent",
+ "label": "Shell command requests",
+ },
+ {
+ "id": "e10",
+ "source": "local_editor",
+ "target": "local_tools",
+ "label": "File and git operations",
+ },
+ {
+ "id": "e11",
+ "source": "terminal_agent",
+ "target": "local_tools",
+ "label": "Command execution",
+ },
+ ],
+ }
diff --git a/gitpilot/api.py b/gitpilot/api.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a6e121ba8b0436be7f06a3c3aa55cbac43ee76f
--- /dev/null
+++ b/gitpilot/api.py
@@ -0,0 +1,2649 @@
+# gitpilot/api.py
+
+from pathlib import Path
+from typing import List, Optional
+
+from fastapi import FastAPI, Query, Path as FPath, Header, HTTPException, UploadFile, File
+from fastapi.responses import FileResponse, JSONResponse
+from fastapi.staticfiles import StaticFiles
+from fastapi.middleware.cors import CORSMiddleware
+from pydantic import BaseModel, Field
+
+from .version import __version__
+from .github_api import (
+ list_user_repos,
+ list_user_repos_paginated, # Pagination support
+ search_user_repos, # Search across all repos
+ get_repo_tree,
+ get_file,
+ put_file,
+ execution_context,
+ github_request,
+)
+from .github_app import check_repo_write_access
+from .settings import AppSettings, get_settings, set_provider, update_settings, LLMProvider
+from .agentic import (
+ generate_plan,
+ execute_plan,
+ PlanResult,
+ get_flow_definition,
+ dispatch_request,
+ create_pr_after_execution,
+)
+from .agent_router import route as route_request
+from . import github_issues
+from . import github_pulls
+from . import github_search
+from .session import SessionManager, Session
+from .hooks import HookManager, HookEvent
+from .permissions import PermissionManager, PermissionMode
+from .memory import MemoryManager
+from .context_vault import ContextVault
+from .use_case import UseCaseManager
+from .mcp_client import MCPClient
+from .plugins import PluginManager
+from .skills import SkillManager
+from .smart_model_router import ModelRouter, ModelRouterConfig
+from .topology_registry import (
+ list_topologies as _list_topologies,
+ get_topology_graph as _get_topology_graph,
+ classify_message as _classify_message,
+ get_saved_topology_preference,
+ save_topology_preference,
+)
+from .agent_teams import AgentTeam
+from .learning import LearningEngine
+from .cross_repo import CrossRepoAnalyzer
+from .predictions import PredictiveEngine
+from .security import SecurityScanner
+from .nl_database import NLQueryEngine, QueryDialect, SafetyLevel, TableSchema
+from .github_oauth import (
+ generate_authorization_url,
+ exchange_code_for_token,
+ validate_token,
+ initiate_device_flow,
+ poll_device_token,
+ AuthSession,
+ GitHubUser,
+)
+import os
+import logging
+from .model_catalog import list_models_for_provider
+
+# Optional A2A adapter (MCP ContextForge)
+from .a2a_adapter import router as a2a_router
+
+logger = logging.getLogger(__name__)
+
+# --- Phase 1 singletons ---
+_session_mgr = SessionManager()
+_hook_mgr = HookManager()
+_perm_mgr = PermissionManager()
+
+# --- Phase 2 singletons ---
+_mcp_client = MCPClient()
+_plugin_mgr = PluginManager()
+_skill_mgr = SkillManager()
+_model_router = ModelRouter()
+
+# --- Phase 3 singletons ---
+_agent_team = AgentTeam()
+_learning_engine = LearningEngine()
+_cross_repo = CrossRepoAnalyzer()
+_predictive_engine = PredictiveEngine()
+_security_scanner = SecurityScanner()
+_nl_engine = NLQueryEngine()
+
+app = FastAPI(
+ title="GitPilot API",
+ version=__version__,
+ description="Agentic AI assistant for GitHub repositories.",
+)
+
+# ==========================================================================
+# Optional A2A Adapter (MCP ContextForge)
+# ==========================================================================
+# This is feature-flagged and does not affect the existing UI/REST API unless
+# explicitly enabled.
+def _env_bool(name: str, default: bool) -> bool:
+ raw = os.getenv(name)
+ if raw is None:
+ return default
+ return raw.strip().lower() in {"1", "true", "yes", "y", "on"}
+
+
+if _env_bool("GITPILOT_ENABLE_A2A", False):
+ logger.info("A2A adapter enabled (mounting /a2a/* endpoints)")
+ app.include_router(a2a_router)
+else:
+ logger.info("A2A adapter disabled (set GITPILOT_ENABLE_A2A=true to enable)")
+
+# ============================================================================
+# CORS Configuration
+# ============================================================================
+# Enable CORS to allow frontend (local dev or Vercel) to connect to backend
+allowed_origins_str = os.getenv("CORS_ORIGINS", "http://localhost:5173")
+allowed_origins = [origin.strip() for origin in allowed_origins_str.split(",")]
+
+logger.info(f"CORS enabled for origins: {allowed_origins}")
+
+app.add_middleware(
+ CORSMiddleware,
+ allow_origins=allowed_origins,
+ allow_credentials=True,
+ allow_methods=["*"],
+ allow_headers=["*"],
+)
+
+
+def get_github_token(authorization: Optional[str] = Header(None)) -> Optional[str]:
+ """
+ Extract GitHub token from Authorization header.
+
+ Supports formats:
+    - Bearer <token>
+    - token <token>
+    - <raw token>
+ """
+ if not authorization:
+ return None
+
+ if authorization.startswith("Bearer "):
+ return authorization[7:]
+ elif authorization.startswith("token "):
+ return authorization[6:]
+ else:
+ return authorization
+
+
+# --- FIXED: Added default_branch to model ---
+class RepoSummary(BaseModel):
+ id: int
+ name: str
+ full_name: str
+ private: bool
+ owner: str
+ default_branch: str = "main" # <--- CRITICAL FIX: Defaults to main, but can be master/dev
+
+
+class PaginatedReposResponse(BaseModel):
+ """Response model for paginated repository listing."""
+ repositories: List[RepoSummary]
+ page: int
+ per_page: int
+ total_count: Optional[int] = None
+ has_more: bool
+ query: Optional[str] = None
+
+
+class FileEntry(BaseModel):
+ path: str
+ type: str
+
+
+class FileTreeResponse(BaseModel):
+ files: List[FileEntry] = Field(default_factory=list)
+
+
+class FileContent(BaseModel):
+ path: str
+ encoding: str = "utf-8"
+ content: str
+
+
+class CommitRequest(BaseModel):
+ path: str
+ content: str
+ message: str
+
+
+class CommitResponse(BaseModel):
+ path: str
+ commit_sha: str
+ commit_url: Optional[str] = None
+
+
+class SettingsResponse(BaseModel):
+ provider: LLMProvider
+ providers: List[LLMProvider]
+ openai: dict
+ claude: dict
+ watsonx: dict
+ ollama: dict
+ langflow_url: str
+ has_langflow_plan_flow: bool
+
+
+class ProviderModelsResponse(BaseModel):
+ provider: LLMProvider
+ models: List[str] = Field(default_factory=list)
+ error: Optional[str] = None
+
+
+class ProviderUpdate(BaseModel):
+ provider: LLMProvider
+
+
+class ChatPlanRequest(BaseModel):
+ repo_owner: str
+ repo_name: str
+ goal: str
+ branch_name: Optional[str] = None
+
+
+class ExecutePlanRequest(BaseModel):
+ repo_owner: str
+ repo_name: str
+ plan: PlanResult
+ branch_name: Optional[str] = None
+
+
+class AuthUrlResponse(BaseModel):
+ authorization_url: str
+ state: str
+
+
+class AuthCallbackRequest(BaseModel):
+ code: str
+ state: str
+
+
+class TokenValidationRequest(BaseModel):
+ access_token: str
+
+
+class UserInfoResponse(BaseModel):
+ user: GitHubUser
+ authenticated: bool
+
+
+class RepoAccessResponse(BaseModel):
+ can_write: bool
+ app_installed: bool
+ auth_type: str
+
+
+# --- v2 Request/Response models ---
+
+class ChatRequest(BaseModel):
+ """Unified chat request for the conversational dispatcher."""
+ repo_owner: str
+ repo_name: str
+ message: str
+ branch_name: Optional[str] = None
+ auto_pr: bool = False
+ topology_id: Optional[str] = None # Override topology for this request
+
+
+class IssueCreateRequest(BaseModel):
+ title: str
+ body: Optional[str] = None
+ labels: Optional[List[str]] = None
+ assignees: Optional[List[str]] = None
+ milestone: Optional[int] = None
+
+
+class IssueUpdateRequest(BaseModel):
+ title: Optional[str] = None
+ body: Optional[str] = None
+ state: Optional[str] = None
+ labels: Optional[List[str]] = None
+ assignees: Optional[List[str]] = None
+ milestone: Optional[int] = None
+
+
+class IssueCommentRequest(BaseModel):
+ body: str
+
+
+class PRCreateRequest(BaseModel):
+ title: str
+ head: str
+ base: str
+ body: Optional[str] = None
+ draft: bool = False
+
+
+class PRMergeRequest(BaseModel):
+ merge_method: str = "merge"
+ commit_title: Optional[str] = None
+ commit_message: Optional[str] = None
+
+
+class SearchRequest(BaseModel):
+ query: str
+ per_page: int = 30
+ page: int = 1
+
+
+# ============================================================================
+# Repository Endpoints - Enterprise Grade with Pagination & Search
+# ============================================================================
+
+@app.get("/api/repos", response_model=PaginatedReposResponse)
+async def api_list_repos(
+ query: Optional[str] = Query(None, description="Search query (searches across ALL repositories)"),
+ page: int = Query(1, ge=1, description="Page number (starts at 1)"),
+ per_page: int = Query(100, ge=1, le=100, description="Results per page (max 100)"),
+ authorization: Optional[str] = Header(None),
+):
+ """
+ List user repositories with enterprise-grade pagination and search.
+ Includes default_branch information for correct frontend routing.
+ """
+ token = get_github_token(authorization)
+
+ try:
+ if query:
+ # SEARCH MODE: Search across ALL repositories
+ result = await search_user_repos(
+ query=query,
+ page=page,
+ per_page=per_page,
+ token=token
+ )
+ else:
+ # PAGINATION MODE: Return repos page by page
+ result = await list_user_repos_paginated(
+ page=page,
+ per_page=per_page,
+ token=token
+ )
+
+ # --- FIXED: Mapping default_branch ---
+ repos = [
+ RepoSummary(
+ id=r["id"],
+ name=r["name"],
+ full_name=r["full_name"],
+ private=r["private"],
+ owner=r["owner"],
+ default_branch=r.get("default_branch", "main"), # <--- CRITICAL FIX
+ )
+ for r in result["repositories"]
+ ]
+
+ return PaginatedReposResponse(
+ repositories=repos,
+ page=result["page"],
+ per_page=result["per_page"],
+ total_count=result.get("total_count"),
+ has_more=result["has_more"],
+ query=query,
+ )
+
+ except Exception as e:
+ logging.exception("Error fetching repositories")
+ return JSONResponse(
+ content={
+ "error": f"Failed to fetch repositories: {str(e)}",
+ "repositories": [],
+ "page": page,
+ "per_page": per_page,
+ "has_more": False,
+ },
+ status_code=500
+ )
+
+
+@app.get("/api/repos/all")
+async def api_list_all_repos(
+ query: Optional[str] = Query(None, description="Search query"),
+ authorization: Optional[str] = Header(None),
+):
+ """
+ Fetch ALL user repositories at once (no pagination).
+ Useful for quick searches, but paginated endpoint is preferred.
+ """
+ token = get_github_token(authorization)
+
+ try:
+ # Fetch all repositories (this will make multiple API calls)
+ all_repos = []
+ page = 1
+ max_pages = 15 # Safety limit: 1500 repos max (15 * 100)
+
+ while page <= max_pages:
+ result = await list_user_repos_paginated(
+ page=page,
+ per_page=100,
+ token=token
+ )
+
+ all_repos.extend(result["repositories"])
+
+ if not result["has_more"]:
+ break
+
+ page += 1
+
+ # Filter by query if provided
+ if query:
+ query_lower = query.lower()
+ all_repos = [
+ r for r in all_repos
+ if query_lower in r["name"].lower() or query_lower in r["full_name"].lower()
+ ]
+
+ # --- FIXED: Mapping default_branch ---
+ repos = [
+ RepoSummary(
+ id=r["id"],
+ name=r["name"],
+ full_name=r["full_name"],
+ private=r["private"],
+ owner=r["owner"],
+ default_branch=r.get("default_branch", "main"), # <--- CRITICAL FIX
+ )
+ for r in all_repos
+ ]
+
+ return {
+ "repositories": repos,
+ "total_count": len(repos),
+ "query": query,
+ }
+
+ except Exception as e:
+ logging.exception("Error fetching all repositories")
+ return JSONResponse(
+ content={"error": f"Failed to fetch repositories: {str(e)}"},
+ status_code=500
+ )
+
+
+@app.get("/api/repos/{owner}/{repo}/tree", response_model=FileTreeResponse)
+async def api_repo_tree(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ ref: Optional[str] = Query(
+ None,
+ description="Git reference (branch, tag, or commit SHA). If omitted, defaults to HEAD.",
+ ),
+ authorization: Optional[str] = Header(None),
+):
+ """
+ Get the file tree for a repository.
+ Handles 'main' vs 'master' discrepancies and empty repositories gracefully.
+ """
+ token = get_github_token(authorization)
+
+ # Keep legacy behavior: missing/empty ref behaves like HEAD.
+ ref_value = (ref or "").strip() or "HEAD"
+
+ try:
+ tree = await get_repo_tree(owner, repo, token=token, ref=ref_value)
+ return FileTreeResponse(files=[FileEntry(**f) for f in tree])
+
+ except HTTPException as e:
+ if e.status_code == 409:
+ return FileTreeResponse(files=[])
+
+ if e.status_code == 404:
+ return JSONResponse(
+ status_code=404,
+ content={
+ "detail": f"Ref '{ref_value}' not found. The repository might be using a different default branch (e.g., 'master')."
+ }
+ )
+
+ raise e
+
+
+@app.get("/api/repos/{owner}/{repo}/file", response_model=FileContent)
+async def api_get_file(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ path: str = Query(...),
+ authorization: Optional[str] = Header(None),
+):
+ token = get_github_token(authorization)
+ content = await get_file(owner, repo, path, token=token)
+ return FileContent(path=path, content=content)
+
+
+@app.post("/api/repos/{owner}/{repo}/file", response_model=CommitResponse)
+async def api_put_file(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ payload: CommitRequest = ...,
+ authorization: Optional[str] = Header(None),
+):
+ token = get_github_token(authorization)
+ result = await put_file(
+ owner, repo, payload.path, payload.content, payload.message, token=token
+ )
+ return CommitResponse(**result)
+
+
+# ============================================================================
+# Settings Endpoints
+# ============================================================================
+
+@app.get("/api/settings", response_model=SettingsResponse)
+async def api_get_settings():
+ s: AppSettings = get_settings()
+ return SettingsResponse(
+ provider=s.provider,
+ providers=[LLMProvider.openai, LLMProvider.claude, LLMProvider.watsonx, LLMProvider.ollama],
+ openai=s.openai.model_dump(),
+ claude=s.claude.model_dump(),
+ watsonx=s.watsonx.model_dump(),
+ ollama=s.ollama.model_dump(),
+ langflow_url=s.langflow_url,
+ has_langflow_plan_flow=bool(s.langflow_plan_flow_id),
+ )
+
+
+@app.get("/api/settings/models", response_model=ProviderModelsResponse)
+async def api_list_models(provider: Optional[LLMProvider] = Query(None)):
+ """
+ Return the list of LLM models available for a provider.
+
+ If 'provider' is not given, use the currently active provider from settings.
+ """
+ s: AppSettings = get_settings()
+ effective_provider = provider or s.provider
+
+ models, error = list_models_for_provider(effective_provider, s)
+
+ return ProviderModelsResponse(
+ provider=effective_provider,
+ models=models,
+ error=error,
+ )
+
+
+@app.post("/api/settings/provider", response_model=SettingsResponse)
+async def api_set_provider(update: ProviderUpdate):
+ s = set_provider(update.provider)
+ return SettingsResponse(
+ provider=s.provider,
+ providers=[LLMProvider.openai, LLMProvider.claude, LLMProvider.watsonx, LLMProvider.ollama],
+ openai=s.openai.model_dump(),
+ claude=s.claude.model_dump(),
+ watsonx=s.watsonx.model_dump(),
+ ollama=s.ollama.model_dump(),
+ langflow_url=s.langflow_url,
+ has_langflow_plan_flow=bool(s.langflow_plan_flow_id),
+ )
+
+
+@app.put("/api/settings/llm", response_model=SettingsResponse)
+async def api_update_llm_settings(updates: dict):
+ """Update full LLM settings including provider-specific configs."""
+ s = update_settings(updates)
+ return SettingsResponse(
+ provider=s.provider,
+ providers=[LLMProvider.openai, LLMProvider.claude, LLMProvider.watsonx, LLMProvider.ollama],
+ openai=s.openai.model_dump(),
+ claude=s.claude.model_dump(),
+ watsonx=s.watsonx.model_dump(),
+ ollama=s.ollama.model_dump(),
+ langflow_url=s.langflow_url,
+ has_langflow_plan_flow=bool(s.langflow_plan_flow_id),
+ )
+
+
+# ============================================================================
+# Chat Endpoints
+# ============================================================================
+
+@app.post("/api/chat/plan", response_model=PlanResult)
+async def api_chat_plan(req: ChatPlanRequest, authorization: Optional[str] = Header(None)):
+ token = get_github_token(authorization)
+
+    # ✅ Added logging for branch_name received
+ logger.info(
+ "PLAN REQUEST: %s/%s | branch_name=%r",
+ req.repo_owner,
+ req.repo_name,
+ req.branch_name,
+ )
+
+    with execution_context(token, ref=req.branch_name):  # ✅ set ref context
+ full_name = f"{req.repo_owner}/{req.repo_name}"
+ plan = await generate_plan(req.goal, full_name, token=token, branch_name=req.branch_name)
+ return plan
+
+
+@app.post("/api/chat/execute")
+async def api_chat_execute(
+ req: ExecutePlanRequest,
+ authorization: Optional[str] = Header(None)
+):
+ token = get_github_token(authorization)
+
+    # ✅ FIX: use execution_context(token, ref=req.branch_name) so tool calls that rely on context
+ # never accidentally run on HEAD/default when branch_name is provided.
+ with execution_context(token, ref=req.branch_name):
+ full_name = f"{req.repo_owner}/{req.repo_name}"
+ result = await execute_plan(
+ req.plan, full_name, token=token, branch_name=req.branch_name
+ )
+ if isinstance(result, dict):
+ result.setdefault(
+ "mode",
+ "sticky" if req.branch_name else "hard-switch",
+ )
+ return result
+
+
+@app.get("/api/flow/current")
+async def api_get_flow(topology: Optional[str] = Query(None)):
+ """Return the agent flow definition as a graph.
+
+ If ``topology`` query param is provided, returns the graph for that
+ topology. Otherwise falls back to the user's saved preference, and
+ finally to the legacy ``get_flow_definition()`` output for full
+ backward compatibility.
+ """
+ tid = topology or get_saved_topology_preference()
+ if tid:
+ return _get_topology_graph(tid)
+    # Legacy path — returns the original hardcoded graph
+ flow = await get_flow_definition()
+ return flow
+
+
+# ============================================================================
+# Topology Registry Endpoints (additive — no existing behaviour changed)
+# ============================================================================
+
+@app.get("/api/flow/topologies")
+async def api_list_topologies():
+ """Return lightweight summaries of all available topology presets."""
+ return _list_topologies()
+
+
+@app.get("/api/flow/topology/{topology_id}")
+async def api_get_topology(topology_id: str):
+ """Return the full flow graph for a specific topology."""
+ return _get_topology_graph(topology_id)
+
+
+class ClassifyRequest(BaseModel):
+ message: str
+
+
+@app.post("/api/flow/classify")
+async def api_classify_message(req: ClassifyRequest):
+ """Auto-detect the best topology for a given user message.
+
+ Returns the recommended topology, confidence score, and up to 4
+ alternatives ranked by relevance.
+ """
+ result = _classify_message(req.message)
+ return result.to_dict()
+
+
+class TopologyPrefRequest(BaseModel):
+ topology: str
+
+
+@app.get("/api/settings/topology")
+async def api_get_topology_pref():
+ """Return the user's saved topology preference (or null)."""
+ pref = get_saved_topology_preference()
+ return {"topology": pref}
+
+
+@app.post("/api/settings/topology")
+async def api_set_topology_pref(req: TopologyPrefRequest):
+ """Save the user's preferred topology."""
+ save_topology_preference(req.topology)
+ return {"status": "ok", "topology": req.topology}
+
+
+# ============================================================================
+# Conversational Chat Endpoint (v2 upgrade)
+# ============================================================================
+
+@app.post("/api/chat/message")
+async def api_chat_message(req: ChatRequest, authorization: Optional[str] = Header(None)):
+ """
+ Unified conversational endpoint. The router analyses the message and
+ dispatches to the appropriate agent (issue, PR, search, review, learning,
+ or the existing plan+execute pipeline).
+ """
+ token = get_github_token(authorization)
+
+ logger.info(
+ "CHAT MESSAGE: %s/%s | message=%r | branch=%r",
+ req.repo_owner,
+ req.repo_name,
+ req.message[:80],
+ req.branch_name,
+ )
+
+ with execution_context(token, ref=req.branch_name):
+ full_name = f"{req.repo_owner}/{req.repo_name}"
+ result = await dispatch_request(
+ req.message, full_name, token=token, branch_name=req.branch_name,
+ topology_id=req.topology_id,
+ )
+
+ # If auto_pr is requested and execution completed, create PR
+ if (
+ req.auto_pr
+ and isinstance(result, dict)
+ and result.get("category") == "plan_execute"
+ and result.get("plan")
+ ):
+ result["auto_pr_hint"] = (
+ "Plan generated. Execute it first, then auto-PR will be created."
+ )
+
+ return result
+
+
+@app.post("/api/chat/execute-with-pr")
+async def api_chat_execute_with_pr(
+ req: ExecutePlanRequest,
+ authorization: Optional[str] = Header(None),
+):
+ """Execute a plan AND automatically create a pull request afterwards."""
+ token = get_github_token(authorization)
+
+ with execution_context(token, ref=req.branch_name):
+ full_name = f"{req.repo_owner}/{req.repo_name}"
+ result = await execute_plan(
+ req.plan, full_name, token=token, branch_name=req.branch_name,
+ )
+
+ if isinstance(result, dict) and result.get("status") == "completed":
+ branch = result.get("branch", req.branch_name)
+ if branch:
+ pr = await create_pr_after_execution(
+ full_name,
+ branch,
+ req.plan.goal,
+ result.get("executionLog", {}),
+ token=token,
+ )
+ if pr:
+ result["pull_request"] = {
+ "number": pr.get("number"),
+ "url": pr.get("html_url"),
+ "title": pr.get("title"),
+ }
+
+ result.setdefault(
+ "mode",
+ "sticky" if req.branch_name else "hard-switch",
+ )
+
+ return result
+
+
+# ============================================================================
+# Issue Endpoints (v2 upgrade)
+# ============================================================================
+
+@app.get("/api/repos/{owner}/{repo}/issues")
+async def api_list_issues(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ state: str = Query("open"),
+ labels: Optional[str] = Query(None),
+ per_page: int = Query(30, ge=1, le=100),
+ page: int = Query(1, ge=1),
+ authorization: Optional[str] = Header(None),
+):
+ """List issues for a repository."""
+ token = get_github_token(authorization)
+ issues = await github_issues.list_issues(
+ owner, repo, state=state, labels=labels,
+ per_page=per_page, page=page, token=token,
+ )
+ return {"issues": issues, "page": page, "per_page": per_page}
+
+
+@app.get("/api/repos/{owner}/{repo}/issues/{issue_number}")
+async def api_get_issue(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ issue_number: int = FPath(...),
+ authorization: Optional[str] = Header(None),
+):
+ """Get a single issue."""
+ token = get_github_token(authorization)
+ return await github_issues.get_issue(owner, repo, issue_number, token=token)
+
+
+@app.post("/api/repos/{owner}/{repo}/issues")
+async def api_create_issue(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ payload: IssueCreateRequest = ...,
+ authorization: Optional[str] = Header(None),
+):
+ """Create a new issue."""
+ token = get_github_token(authorization)
+ return await github_issues.create_issue(
+ owner, repo, payload.title,
+ body=payload.body, labels=payload.labels,
+ assignees=payload.assignees, milestone=payload.milestone,
+ token=token,
+ )
+
+
+@app.patch("/api/repos/{owner}/{repo}/issues/{issue_number}")
+async def api_update_issue(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ issue_number: int = FPath(...),
+ payload: IssueUpdateRequest = ...,
+ authorization: Optional[str] = Header(None),
+):
+ """Update an existing issue."""
+ token = get_github_token(authorization)
+ return await github_issues.update_issue(
+ owner, repo, issue_number,
+ title=payload.title, body=payload.body, state=payload.state,
+ labels=payload.labels, assignees=payload.assignees,
+ milestone=payload.milestone, token=token,
+ )
+
+
+@app.get("/api/repos/{owner}/{repo}/issues/{issue_number}/comments")
+async def api_list_issue_comments(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ issue_number: int = FPath(...),
+ authorization: Optional[str] = Header(None),
+):
+ """List comments on an issue."""
+ token = get_github_token(authorization)
+ return await github_issues.list_issue_comments(owner, repo, issue_number, token=token)
+
+
+@app.post("/api/repos/{owner}/{repo}/issues/{issue_number}/comments")
+async def api_add_issue_comment(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ issue_number: int = FPath(...),
+ payload: IssueCommentRequest = ...,
+ authorization: Optional[str] = Header(None),
+):
+ """Add a comment to an issue."""
+ token = get_github_token(authorization)
+ return await github_issues.add_issue_comment(
+ owner, repo, issue_number, payload.body, token=token,
+ )
+
+
+# ============================================================================
+# Pull Request Endpoints (v2 upgrade)
+# ============================================================================
+
+@app.get("/api/repos/{owner}/{repo}/pulls")
+async def api_list_pulls(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ state: str = Query("open"),
+ per_page: int = Query(30, ge=1, le=100),
+ page: int = Query(1, ge=1),
+ authorization: Optional[str] = Header(None),
+):
+ """List pull requests."""
+ token = get_github_token(authorization)
+ prs = await github_pulls.list_pull_requests(
+ owner, repo, state=state, per_page=per_page, page=page, token=token,
+ )
+ return {"pull_requests": prs, "page": page, "per_page": per_page}
+
+
+@app.get("/api/repos/{owner}/{repo}/pulls/{pull_number}")
+async def api_get_pull(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ pull_number: int = FPath(...),
+ authorization: Optional[str] = Header(None),
+):
+ """Get a single pull request."""
+ token = get_github_token(authorization)
+ return await github_pulls.get_pull_request(owner, repo, pull_number, token=token)
+
+
+@app.post("/api/repos/{owner}/{repo}/pulls")
+async def api_create_pull(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ payload: PRCreateRequest = ...,
+ authorization: Optional[str] = Header(None),
+):
+ """Create a new pull request."""
+ token = get_github_token(authorization)
+ return await github_pulls.create_pull_request(
+ owner, repo, title=payload.title, head=payload.head,
+ base=payload.base, body=payload.body, draft=payload.draft,
+ token=token,
+ )
+
+
+@app.put("/api/repos/{owner}/{repo}/pulls/{pull_number}/merge")
+async def api_merge_pull(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ pull_number: int = FPath(...),
+ payload: PRMergeRequest = ...,
+ authorization: Optional[str] = Header(None),
+):
+ """Merge a pull request."""
+ token = get_github_token(authorization)
+ return await github_pulls.merge_pull_request(
+ owner, repo, pull_number,
+ merge_method=payload.merge_method,
+ commit_title=payload.commit_title,
+ commit_message=payload.commit_message,
+ token=token,
+ )
+
+
+@app.get("/api/repos/{owner}/{repo}/pulls/{pull_number}/files")
+async def api_list_pr_files(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ pull_number: int = FPath(...),
+ authorization: Optional[str] = Header(None),
+):
+ """List files changed in a pull request."""
+ token = get_github_token(authorization)
+ return await github_pulls.list_pr_files(owner, repo, pull_number, token=token)
+
+
+# ============================================================================
+# Search Endpoints (v2 upgrade)
+# ============================================================================
+
+@app.get("/api/search/code")
+async def api_search_code(
+ q: str = Query(..., description="Search query"),
+ owner: Optional[str] = Query(None),
+ repo: Optional[str] = Query(None),
+ language: Optional[str] = Query(None),
+ per_page: int = Query(30, ge=1, le=100),
+ page: int = Query(1, ge=1),
+ authorization: Optional[str] = Header(None),
+):
+ """Search for code across GitHub."""
+ token = get_github_token(authorization)
+ return await github_search.search_code(
+ q, owner=owner, repo=repo, language=language,
+ per_page=per_page, page=page, token=token,
+ )
+
+
+@app.get("/api/search/issues")
+async def api_search_issues(
+ q: str = Query(..., description="Search query"),
+ owner: Optional[str] = Query(None),
+ repo: Optional[str] = Query(None),
+ state: Optional[str] = Query(None),
+ label: Optional[str] = Query(None),
+ per_page: int = Query(30, ge=1, le=100),
+ page: int = Query(1, ge=1),
+ authorization: Optional[str] = Header(None),
+):
+ """Search issues and pull requests."""
+ token = get_github_token(authorization)
+ return await github_search.search_issues(
+ q, owner=owner, repo=repo, state=state, label=label,
+ per_page=per_page, page=page, token=token,
+ )
+
+
+@app.get("/api/search/repositories")
+async def api_search_repositories(
+ q: str = Query(..., description="Search query"),
+ language: Optional[str] = Query(None),
+ sort: Optional[str] = Query(None),
+ per_page: int = Query(30, ge=1, le=100),
+ page: int = Query(1, ge=1),
+ authorization: Optional[str] = Header(None),
+):
+ """Search for repositories."""
+ token = get_github_token(authorization)
+ return await github_search.search_repositories(
+ q, language=language, sort=sort,
+ per_page=per_page, page=page, token=token,
+ )
+
+
+@app.get("/api/search/users")
+async def api_search_users(
+ q: str = Query(..., description="Search query"),
+ type_filter: Optional[str] = Query(None, alias="type"),
+ location: Optional[str] = Query(None),
+ language: Optional[str] = Query(None),
+ per_page: int = Query(30, ge=1, le=100),
+ page: int = Query(1, ge=1),
+ authorization: Optional[str] = Header(None),
+):
+ """Search for GitHub users and organizations."""
+ token = get_github_token(authorization)
+ return await github_search.search_users(
+ q, type_filter=type_filter, location=location, language=language,
+ per_page=per_page, page=page, token=token,
+ )
+
+
+# ============================================================================
+# Route Analysis Endpoint (v2 upgrade)
+# ============================================================================
+
+@app.post("/api/chat/route")
+async def api_chat_route(payload: dict):
+ """Preview how a message would be routed without executing it.
+
+ Useful for the frontend to display which agent(s) will handle the request.
+ """
+ message = payload.get("message", "")
+ if not message:
+ return JSONResponse({"error": "message is required"}, status_code=400)
+
+ workflow = route_request(message)
+ return {
+ "category": workflow.category.value,
+ "agents": [a.value for a in workflow.agents],
+ "description": workflow.description,
+ "requires_repo_context": workflow.requires_repo_context,
+ "entity_number": workflow.entity_number,
+ "metadata": workflow.metadata,
+ }
+
+
+# ============================================================================
+# Authentication Endpoints (Web Flow + Device Flow)
+# ============================================================================
+
+@app.get("/api/auth/url", response_model=AuthUrlResponse)
+async def api_get_auth_url():
+ """
+ Generate GitHub OAuth authorization URL (Web Flow).
+ Requires Client Secret to be configured.
+ """
+ auth_url, state = generate_authorization_url()
+ return AuthUrlResponse(authorization_url=auth_url, state=state)
+
+
+@app.post("/api/auth/callback", response_model=AuthSession)
+async def api_auth_callback(request: AuthCallbackRequest):
+ """
+ Handle GitHub OAuth callback (Web Flow).
+ Exchange the authorization code for an access token.
+ """
+ try:
+ session = await exchange_code_for_token(request.code, request.state)
+ return session
+ except ValueError as e:
+ return JSONResponse(
+ {"error": str(e)},
+ status_code=400,
+ )
+
+
+@app.post("/api/auth/validate", response_model=UserInfoResponse)
+async def api_validate_token(request: TokenValidationRequest):
+ """
+ Validate a GitHub access token and return user information.
+ """
+ user = await validate_token(request.access_token)
+ if user:
+ return UserInfoResponse(user=user, authenticated=True)
+ return UserInfoResponse(
+ user=GitHubUser(login="", id=0, avatar_url=""),
+ authenticated=False,
+ )
+
+
+@app.post("/api/auth/device/code")
+async def api_device_code():
+ """
+ Start the device login flow (Step 1).
+ Does NOT require a client secret.
+ """
+ try:
+ data = await initiate_device_flow()
+ return data
+ except Exception as e:
+ return JSONResponse({"error": str(e)}, status_code=500)
+
+
+@app.post("/api/auth/device/poll")
+async def api_device_poll(payload: dict):
+ """
+ Poll GitHub to check if user authorized the device (Step 2).
+ """
+ device_code = payload.get("device_code")
+ if not device_code:
+ return JSONResponse({"error": "Missing device_code"}, status_code=400)
+
+ try:
+ session = await poll_device_token(device_code)
+ if session:
+ return session
+
+ return JSONResponse({"status": "pending"}, status_code=202)
+ except ValueError as e:
+ return JSONResponse({"error": str(e)}, status_code=400)
+
+
+@app.get("/api/auth/status")
+async def api_auth_status():
+ """
+ Smart check: Do we have a secret (Web Flow) or just ID (Device Flow)?
+ This tells the frontend which UI to render.
+ """
+ has_secret = bool(os.getenv("GITHUB_CLIENT_SECRET"))
+ has_id = bool(os.getenv("GITHUB_CLIENT_ID", "Iv23litmRp80Z6wmlyRn"))
+
+ return {
+ "mode": "web" if has_secret else "device",
+ "configured": has_id,
+ "oauth_configured": has_secret,
+ "pat_configured": bool(os.getenv("GITPILOT_GITHUB_TOKEN") or os.getenv("GITHUB_TOKEN")),
+ }
+
+
+@app.get("/api/auth/app-url")
+async def api_get_app_url():
+ """Get GitHub App installation URL."""
+ app_slug = os.getenv("GITHUB_APP_SLUG", "gitpilota")
+ app_url = f"https://github.com/apps/{app_slug}"
+ return {
+ "app_url": app_url,
+ "app_slug": app_slug,
+ }
+
+
+@app.get("/api/auth/installation-status")
+async def api_check_installation_status():
+ """Check if GitHub App is installed for the current user."""
+ pat_token = os.getenv("GITPILOT_GITHUB_TOKEN") or os.getenv("GITHUB_TOKEN")
+
+ if pat_token:
+ user = await validate_token(pat_token)
+ if user:
+ return {
+ "installed": True,
+ "access_token": pat_token,
+ "user": user,
+ "auth_type": "pat",
+ }
+
+ github_app_id = os.getenv("GITHUB_APP_ID", "2313985")
+ if not github_app_id:
+ return {
+ "installed": False,
+ "message": "GitHub authentication not configured.",
+ "auth_type": "none",
+ }
+
+ return {
+ "installed": False,
+ "message": "GitHub App not installed.",
+ "auth_type": "github_app",
+ }
+
+
+@app.get("/api/auth/repo-access", response_model=RepoAccessResponse)
+async def api_check_repo_access(
+ owner: str = Query(...),
+ repo: str = Query(...),
+ authorization: Optional[str] = Header(None),
+):
+ """
+ Check if we have write access to a repository via User token or GitHub App.
+
+ This endpoint helps the frontend determine if it should show
+ installation prompts or if the user already has sufficient permissions.
+ """
+ token = get_github_token(authorization)
+ access_info = await check_repo_write_access(owner, repo, user_token=token)
+
+ return RepoAccessResponse(
+ can_write=access_info["can_write"],
+ app_installed=access_info["app_installed"],
+ auth_type=access_info["auth_type"],
+ )
+
+
+# ============================================================================
+# Session Endpoints (Phase 1)
+# ============================================================================
+
+@app.get("/api/sessions")
+async def api_list_sessions():
+ """List all saved sessions."""
+ return {"sessions": _session_mgr.list_sessions()}
+
+
+@app.post("/api/sessions")
+async def api_create_session(payload: dict):
+ """Create a new session.
+
+ Accepts either legacy single-repo or multi-repo format:
+ Legacy: {"repo_full_name": "owner/repo", "branch": "main"}
+ Multi: {"repos": [{full_name, branch, mode}], "active_repo": "owner/repo"}
+ """
+ repo = payload.get("repo_full_name", "")
+ branch = payload.get("branch")
+ name = payload.get("name") # optional β derived from first user prompt
+ session = _session_mgr.create(repo_full_name=repo, branch=branch, name=name)
+
+ # Multi-repo context support
+ if payload.get("repos"):
+ session.repos = payload["repos"]
+ session.active_repo = payload.get("active_repo", repo)
+ elif repo:
+ session.repos = [{"full_name": repo, "branch": branch or "main", "mode": "write"}]
+ session.active_repo = repo
+
+ _session_mgr.save(session)
+ return {"session_id": session.id, "status": session.status}
+
+
+@app.get("/api/sessions/{session_id}")
+async def api_get_session(session_id: str):
+ """Get session details."""
+ session = _session_mgr.load(session_id)
+ if not session:
+ raise HTTPException(status_code=404, detail="Session not found")
+ return {
+ "id": session.id,
+ "status": session.status,
+ "repo_full_name": session.repo_full_name,
+ "branch": session.branch,
+ "created_at": session.created_at,
+ "message_count": len(session.messages),
+ "checkpoint_count": len(session.checkpoints),
+ "repos": session.repos,
+ "active_repo": session.active_repo,
+ }
+
+
+@app.delete("/api/sessions/{session_id}")
+async def api_delete_session(session_id: str):
+ """Delete a session."""
+ deleted = _session_mgr.delete(session_id)
+ if not deleted:
+ raise HTTPException(status_code=404, detail="Session not found")
+ return {"deleted": True}
+
+
+@app.patch("/api/sessions/{session_id}/context")
+async def api_update_session_context(session_id: str, payload: dict):
+ """Add, remove, or activate repos in a session's multi-repo context.
+
+ Actions:
+ {"action": "add", "repo_full_name": "owner/repo", "branch": "main"}
+ {"action": "remove", "repo_full_name": "owner/repo"}
+ {"action": "set_active", "repo_full_name": "owner/repo"}
+ """
+ session = _session_mgr.load(session_id)
+ if not session:
+ raise HTTPException(status_code=404, detail="Session not found")
+
+ action = payload.get("action")
+ repo_name = payload.get("repo_full_name")
+ if not action or not repo_name:
+ raise HTTPException(status_code=400, detail="action and repo_full_name required")
+
+ if action == "add":
+ branch = payload.get("branch", "main")
+ if not any(r.get("full_name") == repo_name for r in session.repos):
+ session.repos.append({
+ "full_name": repo_name,
+ "branch": branch,
+ "mode": "read",
+ })
+ if not session.active_repo:
+ session.active_repo = repo_name
+ elif action == "remove":
+ session.repos = [r for r in session.repos if r.get("full_name") != repo_name]
+ if session.active_repo == repo_name:
+ session.active_repo = session.repos[0]["full_name"] if session.repos else None
+ elif action == "set_active":
+ if any(r.get("full_name") == repo_name for r in session.repos):
+ # Update mode flags
+ for r in session.repos:
+ r["mode"] = "write" if r.get("full_name") == repo_name else "read"
+ session.active_repo = repo_name
+ else:
+ raise HTTPException(status_code=400, detail="Repo not in session context")
+ else:
+ raise HTTPException(status_code=400, detail=f"Unknown action: {action}")
+
+ _session_mgr.save(session)
+ return {
+ "repos": session.repos,
+ "active_repo": session.active_repo,
+ }
+
+
+@app.post("/api/sessions/{session_id}/checkpoint")
+async def api_create_checkpoint(session_id: str, payload: dict):
+ """Create a checkpoint for a session."""
+ session = _session_mgr.load(session_id)
+ if not session:
+ raise HTTPException(status_code=404, detail="Session not found")
+ label = payload.get("label", "checkpoint")
+ cp = _session_mgr.create_checkpoint(session, label=label)
+ return {"checkpoint_id": cp.id, "label": cp.label, "created_at": cp.created_at}
+
+
+# ============================================================================
+# Hooks Endpoints (Phase 1)
+# ============================================================================
+
+@app.get("/api/hooks")
+async def api_list_hooks():
+ """List registered hooks."""
+ return {"hooks": _hook_mgr.list_hooks()}
+
+
+@app.post("/api/hooks")
+async def api_register_hook(payload: dict):
+ """Register a new hook."""
+ from .hooks import HookDefinition
+ try:
+ hook = HookDefinition(
+ event=HookEvent(payload["event"]),
+ name=payload["name"],
+ command=payload.get("command"),
+ blocking=payload.get("blocking", False),
+ timeout=payload.get("timeout", 30),
+ )
+ _hook_mgr.register(hook)
+ return {"registered": True, "name": hook.name, "event": hook.event.value}
+ except (KeyError, ValueError) as e:
+ raise HTTPException(status_code=400, detail=str(e))
+
+
+@app.delete("/api/hooks/{event}/{name}")
+async def api_unregister_hook(event: str, name: str):
+ """Unregister a hook by event and name."""
+ try:
+ _hook_mgr.unregister(HookEvent(event), name)
+ return {"unregistered": True}
+ except ValueError as e:
+ raise HTTPException(status_code=400, detail=str(e))
+
+
+# ============================================================================
+# Permissions Endpoints (Phase 1)
+# ============================================================================
+
+@app.get("/api/permissions")
+async def api_get_permissions():
+ """Get current permission policy."""
+ return _perm_mgr.to_dict()
+
+
+@app.put("/api/permissions/mode")
+async def api_set_permission_mode(payload: dict):
+ """Set the permission mode (normal, plan, auto)."""
+ mode_str = payload.get("mode", "normal")
+ try:
+ _perm_mgr.policy.mode = PermissionMode(mode_str)
+ return {"mode": _perm_mgr.policy.mode.value}
+ except ValueError:
+ raise HTTPException(status_code=400, detail=f"Invalid mode: {mode_str}")
+
+
+# ============================================================================
+# Project Context / Memory Endpoints (Phase 1)
+# ============================================================================
+
+@app.get("/api/repos/{owner}/{repo}/context")
+async def api_get_project_context(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+):
+ """Get project conventions and memory for a repository workspace."""
+ from pathlib import Path as StdPath
+ workspace_path = StdPath.home() / ".gitpilot" / "workspaces" / owner / repo
+ if not workspace_path.exists():
+ return {"conventions": "", "rules": [], "auto_memory": {}, "system_prompt": ""}
+ mgr = MemoryManager(workspace_path)
+ ctx = mgr.load_context()
+ return {
+ "conventions": ctx.conventions,
+ "rules": ctx.rules,
+ "auto_memory": ctx.auto_memory,
+ "system_prompt": ctx.to_system_prompt(),
+ }
+
+
+@app.post("/api/repos/{owner}/{repo}/context/init")
+async def api_init_project_context(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+):
+ """Initialize .gitpilot/ directory with template GITPILOT.md."""
+ from pathlib import Path as StdPath
+ workspace_path = StdPath.home() / ".gitpilot" / "workspaces" / owner / repo
+ workspace_path.mkdir(parents=True, exist_ok=True)
+ mgr = MemoryManager(workspace_path)
+ md_path = mgr.init_project()
+ return {"initialized": True, "path": str(md_path)}
+
+
+@app.post("/api/repos/{owner}/{repo}/context/pattern")
+async def api_add_learned_pattern(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ payload: dict = ...,
+):
+ """Add a learned pattern to auto-memory."""
+ from pathlib import Path as StdPath
+ pattern = payload.get("pattern", "")
+ if not pattern:
+ raise HTTPException(status_code=400, detail="pattern is required")
+ workspace_path = StdPath.home() / ".gitpilot" / "workspaces" / owner / repo
+ workspace_path.mkdir(parents=True, exist_ok=True)
+ mgr = MemoryManager(workspace_path)
+ mgr.add_learned_pattern(pattern)
+ return {"added": True, "pattern": pattern}
+
+
+# ============================================================================
+# Context Vault Endpoints (additive β Context + Use Case system)
+# ============================================================================
+
+def _workspace_path(owner: str, repo: str) -> Path:
+ """Resolve the local workspace path for a repo."""
+ return Path.home() / ".gitpilot" / "workspaces" / owner / repo
+
+
+@app.get("/api/repos/{owner}/{repo}/context/assets")
+async def api_list_context_assets(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+):
+ """List all uploaded context assets for a repository."""
+ vault = ContextVault(_workspace_path(owner, repo))
+ assets = vault.list_assets()
+ return {"assets": [a.to_dict() for a in assets]}
+
+
+@app.post("/api/repos/{owner}/{repo}/context/assets/upload")
+async def api_upload_context_asset(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ file: UploadFile = File(...),
+):
+ """Upload a file to the project context vault."""
+ vault = ContextVault(_workspace_path(owner, repo))
+ content = await file.read()
+ mime = file.content_type or ""
+ filename = file.filename or "upload"
+
+ try:
+ meta = vault.upload_asset(filename, content, mime=mime)
+ return {"asset": meta.to_dict()}
+ except ValueError as e:
+ raise HTTPException(status_code=400, detail=str(e))
+
+
+@app.delete("/api/repos/{owner}/{repo}/context/assets/{asset_id}")
+async def api_delete_context_asset(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ asset_id: str = FPath(...),
+):
+ """Delete a context asset."""
+ vault = ContextVault(_workspace_path(owner, repo))
+ vault.delete_asset(asset_id)
+ return {"deleted": True, "asset_id": asset_id}
+
+
+@app.get("/api/repos/{owner}/{repo}/context/assets/{asset_id}/download")
+async def api_download_context_asset(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ asset_id: str = FPath(...),
+):
+ """Download a raw context asset file."""
+ vault = ContextVault(_workspace_path(owner, repo))
+ asset_path = vault.get_asset_path(asset_id)
+ if not asset_path:
+ raise HTTPException(status_code=404, detail="Asset not found")
+ filename = vault.get_asset_filename(asset_id)
+ return FileResponse(asset_path, filename=filename)
+
+
+# ============================================================================
+# Use Case Endpoints (additive β guided requirement clarification)
+# ============================================================================
+
+@app.get("/api/repos/{owner}/{repo}/use-cases")
+async def api_list_use_cases(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+):
+ """List all use cases for a repository."""
+ mgr = UseCaseManager(_workspace_path(owner, repo))
+ return {"use_cases": mgr.list_use_cases()}
+
+
+@app.post("/api/repos/{owner}/{repo}/use-cases")
+async def api_create_use_case(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ payload: dict = ...,
+):
+ """Create a new use case."""
+ title = payload.get("title", "New Use Case")
+ initial_notes = payload.get("initial_notes", "")
+ mgr = UseCaseManager(_workspace_path(owner, repo))
+ uc = mgr.create_use_case(title=title, initial_notes=initial_notes)
+ return {"use_case": uc.to_dict()}
+
+
+@app.get("/api/repos/{owner}/{repo}/use-cases/{use_case_id}")
+async def api_get_use_case(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ use_case_id: str = FPath(...),
+):
+ """Get a single use case with messages and spec."""
+ mgr = UseCaseManager(_workspace_path(owner, repo))
+ uc = mgr.get_use_case(use_case_id)
+ if not uc:
+ raise HTTPException(status_code=404, detail="Use case not found")
+ return {"use_case": uc.to_dict()}
+
+
+@app.post("/api/repos/{owner}/{repo}/use-cases/{use_case_id}/chat")
+async def api_use_case_chat(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ use_case_id: str = FPath(...),
+ payload: dict = ...,
+):
+ """Send a guided chat message and get assistant response + updated spec."""
+ message = payload.get("message", "")
+ if not message:
+ raise HTTPException(status_code=400, detail="message is required")
+ mgr = UseCaseManager(_workspace_path(owner, repo))
+ uc = mgr.chat(use_case_id, message)
+ if not uc:
+ raise HTTPException(status_code=404, detail="Use case not found")
+ return {"use_case": uc.to_dict()}
+
+
+@app.post("/api/repos/{owner}/{repo}/use-cases/{use_case_id}/finalize")
+async def api_finalize_use_case(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ use_case_id: str = FPath(...),
+):
+ """Finalize a use case: mark active, export markdown spec."""
+ mgr = UseCaseManager(_workspace_path(owner, repo))
+ uc = mgr.finalize(use_case_id)
+ if not uc:
+ raise HTTPException(status_code=404, detail="Use case not found")
+ return {"use_case": uc.to_dict()}
+
+
+# ============================================================================
+# MCP Endpoints (Phase 2)
+# ============================================================================
+
+@app.get("/api/mcp/servers")
+async def api_mcp_list_servers():
+ """List configured MCP servers and their connection status."""
+ return _mcp_client.to_dict()
+
+
+@app.post("/api/mcp/connect/{server_name}")
+async def api_mcp_connect(server_name: str):
+ """Connect to a named MCP server."""
+ try:
+ conn = await _mcp_client.connect(server_name)
+ return {
+ "connected": True,
+ "server": server_name,
+ "tools": [{"name": t.name, "description": t.description} for t in conn.tools],
+ }
+ except Exception as e:
+ raise HTTPException(status_code=400, detail=str(e))
+
+
+@app.post("/api/mcp/disconnect/{server_name}")
+async def api_mcp_disconnect(server_name: str):
+ """Disconnect from a named MCP server."""
+ await _mcp_client.disconnect(server_name)
+ return {"disconnected": True, "server": server_name}
+
+
+@app.post("/api/mcp/call")
+async def api_mcp_call_tool(payload: dict):
+ """Call a tool on a connected MCP server."""
+ server = payload.get("server", "")
+ tool_name = payload.get("tool", "")
+ params = payload.get("params", {})
+ if not server or not tool_name:
+ raise HTTPException(status_code=400, detail="server and tool are required")
+ conn = _mcp_client._connections.get(server)
+ if not conn:
+ raise HTTPException(status_code=404, detail=f"Not connected to server: {server}")
+ try:
+ result = await _mcp_client.call_tool(conn, tool_name, params)
+ return {"result": result}
+ except Exception as e:
+ raise HTTPException(status_code=500, detail=str(e))
+
+
+# ============================================================================
+# Plugin Endpoints (Phase 2)
+# ============================================================================
+
+@app.get("/api/plugins")
+async def api_list_plugins():
+ """List installed plugins."""
+ plugins = _plugin_mgr.list_installed()
+ return {"plugins": [p.to_dict() for p in plugins]}
+
+
+@app.post("/api/plugins/install")
+async def api_install_plugin(payload: dict):
+ """Install a plugin from a git URL or local path."""
+ source = payload.get("source", "")
+ if not source:
+ raise HTTPException(status_code=400, detail="source is required")
+ try:
+ info = _plugin_mgr.install(source)
+ return {"installed": True, "plugin": info.to_dict()}
+ except Exception as e:
+ raise HTTPException(status_code=400, detail=str(e))
+
+
+@app.delete("/api/plugins/{name}")
+async def api_uninstall_plugin(name: str):
+ """Uninstall a plugin by name."""
+ removed = _plugin_mgr.uninstall(name)
+ if not removed:
+ raise HTTPException(status_code=404, detail=f"Plugin not found: {name}")
+ return {"uninstalled": True, "name": name}
+
+
+# ============================================================================
+# Skills Endpoints (Phase 2)
+# ============================================================================
+
+@app.get("/api/skills")
+async def api_list_skills():
+ """List all available skills."""
+ return {"skills": _skill_mgr.list_skills()}
+
+
+@app.post("/api/skills/invoke")
+async def api_invoke_skill(payload: dict):
+ """Invoke a skill by name."""
+ name = payload.get("name", "")
+ context = payload.get("context", {})
+ if not name:
+ raise HTTPException(status_code=400, detail="name is required")
+ prompt = _skill_mgr.invoke(name, context)
+ if prompt is None:
+ raise HTTPException(status_code=404, detail=f"Skill not found: {name}")
+ return {"skill": name, "rendered_prompt": prompt}
+
+
+@app.post("/api/skills/reload")
+async def api_reload_skills():
+ """Reload skills from all sources."""
+ count = _skill_mgr.load_all()
+ return {"reloaded": True, "count": count}
+
+
+# ============================================================================
+# Vision Endpoints (Phase 2)
+# ============================================================================
+
+@app.post("/api/vision/analyze")
+async def api_vision_analyze(payload: dict):
+ """Analyze an image with a text prompt."""
+ from .vision import VisionAnalyzer
+ image_path = payload.get("image_path", "")
+ prompt = payload.get("prompt", "Describe this image.")
+ provider = payload.get("provider", "openai")
+ if not image_path:
+ raise HTTPException(status_code=400, detail="image_path is required")
+ try:
+ analyzer = VisionAnalyzer(provider=provider)
+ result = await analyzer.analyze_image(Path(image_path), prompt)
+ return result.to_dict()
+ except Exception as e:
+ raise HTTPException(status_code=400, detail=str(e))
+
+
+# ============================================================================
+# Model Router Endpoints (Phase 2)
+# ============================================================================
+
+@app.post("/api/model-router/select")
+async def api_model_select(payload: dict):
+ """Preview which model would be selected for a request."""
+ request = payload.get("request", "")
+ category = payload.get("category")
+ if not request:
+ raise HTTPException(status_code=400, detail="request is required")
+ selection = _model_router.select(request, category)
+ return {
+ "model": selection.model,
+ "tier": selection.tier.value,
+ "complexity": selection.complexity.value,
+ "provider": selection.provider,
+ "reason": selection.reason,
+ }
+
+
+@app.get("/api/model-router/usage")
+async def api_model_usage():
+ """Get model usage summary and budget status."""
+ return _model_router.get_usage_summary()
+
+
+# ============================================================================
+# Agent Teams Endpoints (Phase 3)
+# ============================================================================
+
+@app.post("/api/agent-teams/plan")
+async def api_team_plan(payload: dict):
+ """Split a complex task into parallel subtasks."""
+ task = payload.get("task", "")
+ if not task:
+ raise HTTPException(status_code=400, detail="task is required")
+ subtasks = _agent_team.plan_and_split(task)
+ return {"subtasks": [{"id": s.id, "title": s.title, "description": s.description} for s in subtasks]}
+
+
+@app.post("/api/agent-teams/execute")
+async def api_team_execute(payload: dict):
+ """Execute subtasks in parallel and merge results."""
+ task = payload.get("task", "")
+ if not task:
+ raise HTTPException(status_code=400, detail="task is required")
+ subtasks = _agent_team.plan_and_split(task)
+ result = await _agent_team.execute_parallel(subtasks)
+ return result.to_dict()
+
+
+# ============================================================================
+# Learning Engine Endpoints (Phase 3)
+# ============================================================================
+
+@app.post("/api/learning/evaluate")
+async def api_learning_evaluate(payload: dict):
+ """Evaluate an action outcome for learning."""
+ action = payload.get("action", "")
+ outcome = payload.get("outcome", {})
+ repo = payload.get("repo", "")
+ if not action:
+ raise HTTPException(status_code=400, detail="action is required")
+ evaluation = _learning_engine.evaluate_outcome(action, outcome, repo=repo)
+ return {
+ "action": evaluation.action,
+ "success": evaluation.success,
+ "score": evaluation.score,
+ "feedback": evaluation.feedback,
+ }
+
+
+@app.get("/api/learning/insights/{owner}/{repo}")
+async def api_learning_insights(owner: str = FPath(...), repo: str = FPath(...)):
+ """Get learned insights for a repository."""
+ repo_name = f"{owner}/{repo}"
+ insights = _learning_engine.get_repo_insights(repo_name)
+ return {
+ "repo": repo_name,
+ "patterns": insights.patterns,
+ "preferred_style": insights.preferred_style,
+ "success_rate": insights.success_rate,
+ "total_evaluations": insights.total_evaluations,
+ }
+
+
+@app.post("/api/learning/style")
+async def api_learning_set_style(payload: dict):
+ """Set preferred coding style for a repository."""
+ repo = payload.get("repo", "")
+ style = payload.get("style", {})
+ if not repo:
+ raise HTTPException(status_code=400, detail="repo is required")
+ _learning_engine.set_preferred_style(repo, style)
+ return {"repo": repo, "style": style}
+
+
+# ============================================================================
+# Cross-Repo Intelligence Endpoints (Phase 3)
+# ============================================================================
+
+@app.post("/api/cross-repo/dependencies")
+async def api_cross_repo_dependencies(payload: dict):
+ """Analyze dependencies from provided file contents."""
+ files = payload.get("files", {})
+ if not files:
+ raise HTTPException(status_code=400, detail="files dict is required (filename -> content)")
+ graph = _cross_repo.analyze_dependencies_from_files(files)
+ return graph.to_dict()
+
+
+@app.post("/api/cross-repo/impact")
+async def api_cross_repo_impact(payload: dict):
+ """Analyze impact of updating a package."""
+ files = payload.get("files", {})
+ package_name = payload.get("package", "")
+ new_version = payload.get("new_version")
+ if not package_name:
+ raise HTTPException(status_code=400, detail="package is required")
+ graph = _cross_repo.analyze_dependencies_from_files(files)
+ report = _cross_repo.impact_analysis(graph, package_name, new_version)
+ return report.to_dict()
+
+
+# ============================================================================
+# Predictions Endpoints (Phase 3)
+# ============================================================================
+
+@app.post("/api/predictions/suggest")
+async def api_predictions_suggest(payload: dict):
+ """Get proactive suggestions based on context."""
+ context = payload.get("context", "")
+ if not context:
+ raise HTTPException(status_code=400, detail="context is required")
+ suggestions = _predictive_engine.predict(context)
+ return {"suggestions": [s.to_dict() for s in suggestions]}
+
+
+@app.get("/api/predictions/rules")
+async def api_predictions_rules():
+ """List all prediction rules."""
+ return {"rules": _predictive_engine.list_rules()}
+
+
+# ============================================================================
+# Security Scanner Endpoints (Phase 3)
+# ============================================================================
+
+@app.post("/api/security/scan-file")
+async def api_security_scan_file(payload: dict):
+ """Scan a single file for security issues."""
+ file_path = payload.get("file_path", "")
+ if not file_path:
+ raise HTTPException(status_code=400, detail="file_path is required")
+ findings = _security_scanner.scan_file(file_path)
+ return {"findings": [f.to_dict() for f in findings], "count": len(findings)}
+
+
+@app.post("/api/security/scan-directory")
+async def api_security_scan_directory(payload: dict):
+ """Recursively scan a directory for security issues."""
+ directory = payload.get("directory", "")
+ if not directory:
+ raise HTTPException(status_code=400, detail="directory is required")
+ result = _security_scanner.scan_directory(directory)
+ return result.to_dict()
+
+
+@app.post("/api/security/scan-diff")
+async def api_security_scan_diff(payload: dict):
+ """Scan a git diff for security issues in added lines."""
+ diff_text = payload.get("diff", "")
+ if not diff_text:
+ raise HTTPException(status_code=400, detail="diff is required")
+ findings = _security_scanner.scan_diff(diff_text)
+ return {"findings": [f.to_dict() for f in findings], "count": len(findings)}
+
+
+# ============================================================================
+# Natural Language Database Endpoints (Phase 3)
+# ============================================================================
+
+@app.post("/api/nl-database/translate")
+async def api_nl_translate(payload: dict):
+ """Translate natural language to SQL."""
+ question = payload.get("question", "")
+ dialect = payload.get("dialect", "postgresql")
+ tables = payload.get("tables", [])
+ if not question:
+ raise HTTPException(status_code=400, detail="question is required")
+ engine = NLQueryEngine(dialect=QueryDialect(dialect))
+ for t in tables:
+ engine.add_table(TableSchema(
+ name=t["name"],
+ columns=t.get("columns", []),
+ primary_key=t.get("primary_key"),
+ ))
+ sql = engine.translate(question)
+ error = engine.validate_query(sql)
+ return {"question": question, "sql": sql, "valid": error is None, "error": error}
+
+
+@app.post("/api/nl-database/explain")
+async def api_nl_explain(payload: dict):
+ """Explain what a SQL query does in plain English."""
+ sql = payload.get("sql", "")
+ if not sql:
+ raise HTTPException(status_code=400, detail="sql is required")
+ explanation = _nl_engine.explain(sql)
+ return {"sql": sql, "explanation": explanation}
+
+
+# ============================================================================
+# Branch Listing Endpoint (Claude-Code-on-Web Parity)
+# ============================================================================
+
+class BranchInfo(BaseModel):
+ name: str
+ is_default: bool = False
+ protected: bool = False
+ commit_sha: Optional[str] = None
+
+
+class BranchListResponse(BaseModel):
+ repository: str
+ default_branch: str
+ page: int
+ per_page: int
+ has_more: bool
+ branches: List[BranchInfo]
+
+
+@app.get("/api/repos/{owner}/{repo}/branches", response_model=BranchListResponse)
+async def api_list_branches(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ page: int = Query(1, ge=1),
+ per_page: int = Query(100, ge=1, le=100),
+ query: Optional[str] = Query(None, description="Substring filter"),
+ authorization: Optional[str] = Header(None),
+):
+ """List branches for a repository with optional search filtering."""
+ import httpx as _httpx
+
+ token = get_github_token(authorization)
+ if not token:
+ raise HTTPException(status_code=401, detail="GitHub token required")
+
+ headers = {
+ "Authorization": f"Bearer {token}",
+ "Accept": "application/vnd.github+json",
+ "User-Agent": "gitpilot",
+ }
+ timeout = _httpx.Timeout(connect=10.0, read=30.0, write=30.0, pool=10.0)
+
+ async with _httpx.AsyncClient(
+ base_url="https://api.github.com", headers=headers, timeout=timeout
+ ) as client:
+ # Fetch repo info for default_branch
+ repo_resp = await client.get(f"/repos/{owner}/{repo}")
+ if repo_resp.status_code >= 400:
+ logging.warning(
+ "branches: repo lookup failed %s/%s β %s %s",
+ owner, repo, repo_resp.status_code, repo_resp.text[:200],
+ )
+ raise HTTPException(
+ status_code=repo_resp.status_code,
+ detail=f"Cannot access repository: {repo_resp.status_code}",
+ )
+
+ repo_data = repo_resp.json()
+ default_branch_name = repo_data.get("default_branch", "main")
+
+ # Fetch ALL branch pages (GitHub caps at 100 per page)
+ all_raw = []
+ current_page = page
+ while True:
+ branch_resp = await client.get(
+ f"/repos/{owner}/{repo}/branches",
+ params={"page": current_page, "per_page": per_page},
+ )
+ if branch_resp.status_code >= 400:
+ logging.warning(
+ "branches: list failed %s/%s page=%s β %s %s",
+ owner, repo, current_page, branch_resp.status_code, branch_resp.text[:200],
+ )
+ raise HTTPException(
+ status_code=branch_resp.status_code,
+ detail=f"Failed to list branches: {branch_resp.status_code}",
+ )
+
+ page_data = branch_resp.json() if isinstance(branch_resp.json(), list) else []
+ all_raw.extend(page_data)
+
+ # Check if there are more pages
+ link_header = branch_resp.headers.get("Link", "") or ""
+ if 'rel="next"' not in link_header or len(page_data) < per_page:
+ break
+ current_page += 1
+ # Safety: cap at 10 pages (1000 branches)
+ if current_page - page >= 10:
+ break
+
+ q = (query or "").strip().lower()
+
+ branches = []
+ for b in all_raw:
+ name = (b.get("name") or "").strip()
+ if not name:
+ continue
+ if q and q not in name.lower():
+ continue
+ branches.append(BranchInfo(
+ name=name,
+ is_default=(name == default_branch_name),
+ protected=bool(b.get("protected", False)),
+ commit_sha=(b.get("commit") or {}).get("sha"),
+ ))
+
+ # Sort: default branch first, then alphabetical
+ branches.sort(key=lambda x: (0 if x.is_default else 1, x.name.lower()))
+
+ return BranchListResponse(
+ repository=f"{owner}/{repo}",
+ default_branch=default_branch_name,
+ page=page,
+ per_page=per_page,
+ has_more=False,
+ branches=branches,
+ )
+
+
+# ============================================================================
+# Environment Configuration Endpoints (Claude-Code-on-Web Parity)
+# ============================================================================
+
+import json as _json
+_ENV_ROOT = Path.home() / ".gitpilot" / "environments"
+
+
+class EnvironmentConfig(BaseModel):
+ id: Optional[str] = None
+ name: str = "Default"
+ network_access: str = Field("limited", description="limited | full | none")
+ env_vars: dict = Field(default_factory=dict)
+
+
+class EnvironmentListResponse(BaseModel):
+ environments: List[EnvironmentConfig]
+
+
+@app.get("/api/environments", response_model=EnvironmentListResponse)
+async def api_list_environments():
+ """List all environment configurations."""
+ _ENV_ROOT.mkdir(parents=True, exist_ok=True)
+ envs = []
+ for path in sorted(_ENV_ROOT.glob("*.json")):
+ try:
+ data = _json.loads(path.read_text())
+ envs.append(EnvironmentConfig(**data))
+ except Exception:
+ continue
+ if not envs:
+ envs.append(EnvironmentConfig(id="default", name="Default", network_access="limited"))
+ return EnvironmentListResponse(environments=envs)
+
+
+@app.post("/api/environments")
+async def api_create_environment(config: EnvironmentConfig):
+ """Create a new environment configuration."""
+ import uuid
+ _ENV_ROOT.mkdir(parents=True, exist_ok=True)
+ config.id = config.id or uuid.uuid4().hex[:12]
+ path = _ENV_ROOT / f"{config.id}.json"
+ path.write_text(_json.dumps(config.model_dump(), indent=2))
+ return config.model_dump()
+
+
+@app.put("/api/environments/{env_id}")
+async def api_update_environment(env_id: str, config: EnvironmentConfig):
+ """Update an environment configuration."""
+ _ENV_ROOT.mkdir(parents=True, exist_ok=True)
+ path = _ENV_ROOT / f"{env_id}.json"
+ config.id = env_id
+ path.write_text(_json.dumps(config.model_dump(), indent=2))
+ return config.model_dump()
+
+
+@app.delete("/api/environments/{env_id}")
+async def api_delete_environment(env_id: str):
+ """Delete an environment configuration."""
+ path = _ENV_ROOT / f"{env_id}.json"
+ if path.exists():
+ path.unlink()
+ return {"deleted": True}
+ raise HTTPException(status_code=404, detail="Environment not found")
+
+
+# ============================================================================
+# Session Messages + Diff Endpoints (Claude-Code-on-Web Parity)
+# ============================================================================
+
+@app.post("/api/sessions/{session_id}/message")
+async def api_add_session_message(session_id: str, payload: dict):
+ """Add a message to a session's conversation history."""
+ try:
+ session = _session_mgr.load(session_id)
+ except FileNotFoundError:
+ raise HTTPException(status_code=404, detail="Session not found")
+ role = payload.get("role", "user")
+ content = payload.get("content", "")
+ session.add_message(role, content, **payload.get("metadata", {}))
+ _session_mgr.save(session)
+ return {"message_count": len(session.messages)}
+
+
+@app.get("/api/sessions/{session_id}/messages")
+async def api_get_session_messages(session_id: str):
+ """Get all messages for a session."""
+ try:
+ session = _session_mgr.load(session_id)
+ except FileNotFoundError:
+ raise HTTPException(status_code=404, detail="Session not found")
+ return {
+ "session_id": session.id,
+ "messages": [
+ {
+ "role": m.role,
+ "content": m.content,
+ "timestamp": m.timestamp,
+ "metadata": m.metadata,
+ }
+ for m in session.messages
+ ],
+ }
+
+
+@app.get("/api/sessions/{session_id}/diff")
+async def api_get_session_diff(session_id: str):
+ """Get diff stats for a session (placeholder for sandbox integration)."""
+ try:
+ session = _session_mgr.load(session_id)
+ except FileNotFoundError:
+ raise HTTPException(status_code=404, detail="Session not found")
+ diff = session.metadata.get("diff", {
+ "files_changed": 0,
+ "additions": 0,
+ "deletions": 0,
+ "files": [],
+ })
+ return {"session_id": session.id, "diff": diff}
+
+
+@app.post("/api/sessions/{session_id}/status")
+async def api_update_session_status(session_id: str, payload: dict):
+ """Update session status (active, completed, failed, waiting)."""
+ try:
+ session = _session_mgr.load(session_id)
+ except FileNotFoundError:
+ raise HTTPException(status_code=404, detail="Session not found")
+ new_status = payload.get("status", "active")
+ if new_status not in ("active", "paused", "completed", "failed", "waiting"):
+ raise HTTPException(status_code=400, detail="Invalid status")
+ session.status = new_status
+ _session_mgr.save(session)
+ return {"session_id": session.id, "status": session.status}
+
+
+# ============================================================================
+# WebSocket Streaming Endpoint (Claude-Code-on-Web Parity)
+# ============================================================================
+
+from fastapi import WebSocket, WebSocketDisconnect
+
+
+@app.websocket("/ws/sessions/{session_id}")
+async def session_websocket(websocket: WebSocket, session_id: str):
+ """
+ Real-time bidirectional communication for a coding session.
+
+ Server events:
+ { type: "agent_message", content: "..." }
+ { type: "tool_use", tool: "bash", input: "npm test" }
+ { type: "tool_result", tool: "bash", output: "All tests passed" }
+ { type: "diff_update", stats: { additions: N, deletions: N, files: N } }
+ { type: "status_change", status: "completed" }
+ { type: "error", message: "..." }
+
+ Client events:
+ { type: "user_message", content: "..." }
+ { type: "cancel" }
+ """
+ await websocket.accept()
+
+ # Verify session exists
+ try:
+ session = _session_mgr.load(session_id)
+ except FileNotFoundError:
+ await websocket.send_json({"type": "error", "message": "Session not found"})
+ await websocket.close()
+ return
+
+ # Send session history on connect
+ await websocket.send_json({
+ "type": "session_restored",
+ "session_id": session.id,
+ "status": session.status,
+ "message_count": len(session.messages),
+ })
+
+ try:
+ while True:
+ data = await websocket.receive_json()
+ event_type = data.get("type", "")
+
+ if event_type == "user_message":
+ content = data.get("content", "")
+ session.add_message("user", content)
+ _session_mgr.save(session)
+
+ # Acknowledge receipt
+ await websocket.send_json({
+ "type": "message_received",
+ "message_index": len(session.messages) - 1,
+ })
+
+ # Stream agent response (integration point for agentic.py)
+ await websocket.send_json({
+ "type": "status_change",
+ "status": "active",
+ })
+
+ # Agent processing hook β when the agent orchestrator is wired,
+ # replace this with actual streaming from agentic.py
+ try:
+ repo_full = session.repo_full_name or ""
+ parts = repo_full.split("/", 1)
+ if len(parts) == 2 and content.strip():
+ # Use canonical dispatcher signature
+ result = await dispatch_request(
+ user_request=content,
+ repo_full_name=f"{parts[0]}/{parts[1]}",
+ branch_name=session.branch,
+ )
+ answer = ""
+ if isinstance(result, dict):
+ answer = (
+ result.get("result")
+ or result.get("answer")
+ or result.get("message")
+ or result.get("summary")
+ or (result.get("plan", {}) or {}).get("summary")
+ or str(result)
+ )
+ else:
+ answer = str(result)
+
+ # Stream the response
+ await websocket.send_json({
+ "type": "agent_message",
+ "content": answer,
+ })
+
+ session.add_message("assistant", answer)
+ _session_mgr.save(session)
+ else:
+ await websocket.send_json({
+ "type": "agent_message",
+ "content": "Session is not connected to a repository.",
+ })
+ except Exception as agent_err:
+ logger.error(f"Agent error in WS session {session_id}: {agent_err}")
+ await websocket.send_json({
+ "type": "error",
+ "message": str(agent_err),
+ })
+
+ await websocket.send_json({
+ "type": "status_change",
+ "status": "waiting",
+ })
+
+ elif event_type == "cancel":
+ await websocket.send_json({
+ "type": "status_change",
+ "status": "waiting",
+ })
+
+ elif event_type == "ping":
+ await websocket.send_json({"type": "pong"})
+
+ except WebSocketDisconnect:
+ logger.info(f"WebSocket disconnected for session {session_id}")
+ except Exception as e:
+ logger.error(f"WebSocket error for session {session_id}: {e}")
+ try:
+ await websocket.send_json({"type": "error", "message": str(e)})
+ except Exception:
+ pass
+
+
+# βββ Redesigned API Endpoints (Phase 1β4) ββββββββββββββββββββββββββββββββ
+
+from gitpilot.models import (
+ ProviderTestRequest as _ProviderTestRequest,
+ StartSessionRequest as _StartSessionRequest,
+ ChatMessageRequest as _ChatMessageRequest,
+)
+
+
+@app.get("/api/status")
+async def api_status():
+ """Normalized status endpoint for the redesigned extension/UI."""
+ from gitpilot.models import (
+ StatusResponse, ProviderStatusResponse, ProviderName,
+ WorkspaceCapabilitySummary, GithubStatusSummary, ProviderHealth,
+ )
+ from gitpilot.settings import get_settings
+ from gitpilot.github_api import get_github_status_summary
+
+ s = get_settings()
+ provider_summary = s.get_provider_summary()
+
+ # Build provider status
+ provider = ProviderStatusResponse(
+ configured=provider_summary.configured,
+ name=ProviderName(provider_summary.name.value if hasattr(provider_summary.name, 'value') else str(provider_summary.name)),
+ source=provider_summary.source,
+ model=provider_summary.model,
+ base_url=provider_summary.base_url,
+ connection_type=provider_summary.connection_type,
+ has_api_key=provider_summary.has_api_key,
+ health=provider_summary.health,
+ models_available=provider_summary.models_available,
+ warning=provider_summary.warning,
+ )
+
+ # Workspace capabilities
+ workspace = WorkspaceCapabilitySummary(
+ folder_mode_available=True,
+ local_git_available=True,
+ github_mode_available=False,
+ )
+
+ # GitHub status
+ try:
+ github = await get_github_status_summary()
+ workspace.github_mode_available = github.connected
+ except Exception:
+ github = GithubStatusSummary()
+
+ return StatusResponse(
+ server_ready=True,
+ provider=provider,
+ workspace=workspace,
+ github=github,
+ )
+
+
+@app.get("/api/providers/status")
+async def api_providers_status():
+ """Get detailed status for the active provider."""
+ from gitpilot.settings import get_settings
+ from gitpilot.llm_provider import test_provider_connection
+
+ s = get_settings()
+ summary = await test_provider_connection(s)
+ return summary
+
+
+@app.post("/api/providers/test")
+async def api_providers_test(req: _ProviderTestRequest):
+ """Test a specific provider configuration."""
+ from gitpilot.models import (
+ ProviderTestRequest, ProviderTestResponse, ProviderName,
+ ProviderHealth,
+ )
+ from gitpilot.settings import get_settings, AppSettings
+ from gitpilot.llm_provider import test_provider_connection
+ import copy
+
+ s = get_settings()
+ # Apply test overrides temporarily
+ test_settings = copy.deepcopy(s)
+
+ provider = req.provider
+ if provider == ProviderName.openai and req.openai:
+ if req.openai.api_key:
+ test_settings.openai.api_key = req.openai.api_key
+ if req.openai.base_url:
+ test_settings.openai.base_url = req.openai.base_url
+ if req.openai.model:
+ test_settings.openai.model = req.openai.model
+ test_settings.provider = test_settings.provider.__class__("openai")
+ elif provider == ProviderName.claude and req.claude:
+ if req.claude.api_key:
+ test_settings.claude.api_key = req.claude.api_key
+ if req.claude.base_url:
+ test_settings.claude.base_url = req.claude.base_url
+ if req.claude.model:
+ test_settings.claude.model = req.claude.model
+ test_settings.provider = test_settings.provider.__class__("claude")
+ elif provider == ProviderName.watsonx and req.watsonx:
+ if req.watsonx.api_key:
+ test_settings.watsonx.api_key = req.watsonx.api_key
+ if req.watsonx.project_id:
+ test_settings.watsonx.project_id = req.watsonx.project_id
+ if req.watsonx.base_url:
+ test_settings.watsonx.base_url = req.watsonx.base_url
+ if req.watsonx.model_id:
+ test_settings.watsonx.model_id = req.watsonx.model_id
+ test_settings.provider = test_settings.provider.__class__("watsonx")
+ elif provider == ProviderName.ollama and req.ollama:
+ if req.ollama.base_url:
+ test_settings.ollama.base_url = req.ollama.base_url
+ if req.ollama.model:
+ test_settings.ollama.model = req.ollama.model
+ test_settings.provider = test_settings.provider.__class__("ollama")
+ elif provider == ProviderName.ollabridge and req.ollabridge:
+ if req.ollabridge.base_url:
+ test_settings.ollabridge.base_url = req.ollabridge.base_url
+ if req.ollabridge.model:
+ test_settings.ollabridge.model = req.ollabridge.model
+ if req.ollabridge.api_key:
+ test_settings.ollabridge.api_key = req.ollabridge.api_key
+ test_settings.provider = test_settings.provider.__class__("ollabridge")
+
+ summary = await test_provider_connection(test_settings)
+ return ProviderTestResponse(
+ configured=summary.configured,
+ name=summary.name,
+ source=summary.source,
+ model=summary.model,
+ base_url=summary.base_url,
+ connection_type=summary.connection_type,
+ has_api_key=summary.has_api_key,
+ health=summary.health,
+ models_available=summary.models_available,
+ warning=summary.warning,
+ details=f"Provider {provider.value} test completed",
+ )
+
+
+@app.post("/api/session/start")
+async def api_session_start(req: _StartSessionRequest):
+ """Start a new session by mode (folder, local_git, github)."""
+ from gitpilot.models import (
+ StartSessionRequest, StartSessionResponse, WorkspaceMode,
+ )
+ from gitpilot.session import SessionManager
+
+ mgr = SessionManager()
+
+ if req.mode == WorkspaceMode.folder:
+ if not req.folder_path:
+ raise HTTPException(status_code=422, detail="folder_path is required for folder mode")
+ session = mgr.create_folder_session(req.folder_path)
+ elif req.mode == WorkspaceMode.local_git:
+ repo_root = req.repo_root or req.folder_path
+ if not repo_root:
+ raise HTTPException(status_code=422, detail="repo_root is required for local_git mode")
+ session = mgr.create_local_git_session(repo_root, req.branch)
+ elif req.mode == WorkspaceMode.github:
+ if not req.repo_full_name:
+ raise HTTPException(status_code=422, detail="repo_full_name is required for github mode")
+ session = mgr.create_github_session(req.repo_full_name, req.branch)
+ else:
+ raise HTTPException(status_code=400, detail=f"Unknown mode: {req.mode}")
+
+ return StartSessionResponse(
+ session_id=session.id,
+ mode=req.mode,
+ title=session.name,
+ folder_path=session.folder_path,
+ repo_root=session.repo_root,
+ repo_full_name=session.repo_full_name,
+ branch=session.branch,
+ )
+
+
+@app.post("/api/chat/send")
+async def api_chat_message_v2(req: _ChatMessageRequest):
+ """Normalized chat message endpoint for the redesigned extension."""
+ from gitpilot.models import ChatMessageRequest, ChatMessageResponse
+ from gitpilot.session import SessionManager
+ import uuid
+
+ mgr = SessionManager()
+
+ # Load session
+ try:
+ session = mgr.load(req.session_id)
+ except Exception:
+ raise HTTPException(status_code=404, detail=f"Session {req.session_id} not found")
+
+ # Use the canonical dispatcher for chat
+ answer = ""
+ plan = None
+ references = []
+
+ repo_full = session.repo_full_name or ""
+ try:
+ if repo_full:
+ result = await dispatch_request(
+ user_request=req.message,
+ repo_full_name=repo_full,
+ branch_name=session.branch,
+ )
+ if isinstance(result, dict):
+ answer = (
+ result.get("result")
+ or result.get("answer")
+ or result.get("message")
+ or result.get("summary")
+ or str(result)
+ )
+ plan = result.get("plan")
+ references = result.get("references", [])
+ else:
+ answer = str(result)
+ else:
+ # Folder-mode: use LLM directly for simple chat
+ from gitpilot.llm_provider import build_llm
+ llm = build_llm()
+ answer = llm.call(
+ [{"role": "user", "content": req.message}]
+ )
+ except Exception as e:
+ answer = f"Error processing message: {str(e)}"
+
+ # Store message in session
+ from gitpilot.session import Message
+ session.messages.append(Message(role="user", content=req.message))
+ session.messages.append(Message(role="assistant", content=answer))
+ mgr.save(session)
+
+ return ChatMessageResponse(
+ session_id=req.session_id,
+ answer=answer,
+ message_id=str(uuid.uuid4()),
+ plan=plan,
+ references=references,
+ )
+
+
+@app.get("/api/workspace/summary")
+async def api_workspace_summary(folder_path: str = Query(default=".")):
+ """Get workspace summary for UI display."""
+ from gitpilot.workspace import summarize_workspace
+ return await summarize_workspace(folder_path)
+
+
+@app.get("/api/security/scan-workspace")
+async def api_security_scan_workspace(path: str = Query(default=".")):
+ """Quick action security scan for workspace."""
+ from gitpilot.security import scan_current_workspace
+ return scan_current_workspace(path)
+
+
+# ============================================================================
+# Static Files & Frontend Serving (SPA Support)
+# ============================================================================
+
+STATIC_DIR = Path(__file__).resolve().parent / "web"
+ASSETS_DIR = STATIC_DIR / "assets"
+
+if ASSETS_DIR.exists():
+ app.mount("/assets", StaticFiles(directory=ASSETS_DIR), name="assets")
+
+if STATIC_DIR.exists():
+ app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
+
+
+@app.get("/api/health")
+async def health_check():
+ """Health check endpoint for monitoring and diagnostics."""
+ return {"status": "healthy", "service": "gitpilot-backend"}
+
+
+@app.get("/healthz")
+async def healthz():
+ """Health check endpoint (Render/Kubernetes standard)."""
+ return {"status": "healthy", "service": "gitpilot-backend"}
+
+
+@app.get("/", include_in_schema=False)
+async def index():
+ """Serve the React App entry point."""
+ index_file = STATIC_DIR / "index.html"
+ if index_file.exists():
+ return FileResponse(index_file)
+ return JSONResponse(
+ {"message": "GitPilot UI not built. The static files directory is missing."},
+ status_code=500,
+ )
+
+
+@app.get("/{full_path:path}", include_in_schema=False)
+async def catch_all_spa_routes(full_path: str):
+ """
+ Catch-all route to serve index.html for frontend routing.
+ Excludes '/api' paths to ensure genuine API 404s are returned as JSON.
+ """
+ if full_path.startswith("api/"):
+ return JSONResponse({"detail": "Not Found"}, status_code=404)
+
+ index_file = STATIC_DIR / "index.html"
+ if index_file.exists():
+ return FileResponse(index_file)
+
+ return JSONResponse(
+ {"message": "GitPilot UI not built. The static files directory is missing."},
+ status_code=500,
+ )
+
+# ---------------------------------------------------------------------------
+# OllaBridge Cloud Extension (additive, non-destructive)
+# ---------------------------------------------------------------------------
+try:
+ from .api_ollabridge_ext import apply_ollabridge_extension as _apply_ob
+ _apply_ob(app)
+ del _apply_ob
+except ImportError:
+ pass # Extension not available, skip gracefully
diff --git a/gitpilot/api_ollabridge_ext.py b/gitpilot/api_ollabridge_ext.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0803a1fa5dd7007e3248796aadd6a12e2f82ee3
--- /dev/null
+++ b/gitpilot/api_ollabridge_ext.py
@@ -0,0 +1,106 @@
+"""OllaBridge Cloud integration extension for GitPilot API.
+
+This module patches the FastAPI app at import time to add:
+- OllaBridge as a first-class LLM provider in settings
+- /api/ollabridge/* proxy endpoints (pairing, models, health)
+
+Completely additive - does not modify api.py.
+Imported automatically via __init__.py or cli.py startup.
+"""
+from __future__ import annotations
+
+import logging
+
+from pydantic import BaseModel
+
+from .settings import (
+ AppSettings,
+ LLMProvider,
+ get_settings,
+ set_provider,
+ update_settings,
+)
+
+logger = logging.getLogger(__name__)
+
+
+# Extended SettingsResponse that includes ollabridge
+class SettingsResponseExt(BaseModel):
+    """Settings payload extended with OllaBridge fields.
+
+    Mirrors the original settings response and adds ``ollabridge`` plus
+    ``ollabridge_connection_type`` ("local" or "api_key").
+    """
+    provider: LLMProvider
+    providers: list[LLMProvider]
+    openai: dict
+    claude: dict
+    watsonx: dict
+    ollama: dict
+    # Pydantic deep-copies mutable field defaults, so ``{}`` is safe here.
+    ollabridge: dict = {}
+    ollabridge_connection_type: str | None = None
+    langflow_url: str
+    has_langflow_plan_flow: bool
+
+
+# Full provider list surfaced to the UI; ollabridge is listed first so it
+# appears as the leading choice.
+ALL_PROVIDERS = [
+    LLMProvider.ollabridge,
+    LLMProvider.openai,
+    LLMProvider.claude,
+    LLMProvider.watsonx,
+    LLMProvider.ollama,
+]
+
+
+def _build_settings_response(s: AppSettings) -> SettingsResponseExt:
+ ollabridge_connection_type = "local"
+ if s.ollabridge.api_key:
+ ollabridge_connection_type = "api_key"
+
+ # Warn if user included /v1 in base_url
+ ob_base = s.ollabridge.base_url or ""
+ if ob_base.rstrip("/").endswith("/v1"):
+ # The response should carry a warning; we'll handle this in the settings response
+ pass
+
+ return SettingsResponseExt(
+ provider=s.provider,
+ providers=ALL_PROVIDERS,
+ openai=s.openai.model_dump(),
+ claude=s.claude.model_dump(),
+ watsonx=s.watsonx.model_dump(),
+ ollama=s.ollama.model_dump(),
+ ollabridge=s.ollabridge.model_dump(),
+ ollabridge_connection_type=ollabridge_connection_type,
+ langflow_url=s.langflow_url,
+ has_langflow_plan_flow=bool(s.langflow_plan_flow_id),
+ )
+
+
+def apply_ollabridge_extension(app):
+ """Apply OllaBridge integration to the FastAPI app.
+
+ Call this after the app is created but before it starts serving.
+ Adds/overrides the settings endpoints to include ollabridge,
+ and mounts the ollabridge proxy router.
+ """
+ from .ollabridge_proxy import router as ollabridge_router
+
+ # Mount proxy routes
+ app.include_router(ollabridge_router)
+ logger.info("OllaBridge proxy mounted at /api/ollabridge/*")
+
+ # Override settings endpoints to include ollabridge
+ @app.get("/api/settings", response_model=SettingsResponseExt)
+ async def api_get_settings_ext():
+ return _build_settings_response(get_settings())
+
+ class ProviderUpdate(BaseModel):
+ provider: LLMProvider
+
+ @app.post("/api/settings/provider", response_model=SettingsResponseExt)
+ async def api_set_provider_ext(update: ProviderUpdate):
+ s = set_provider(update.provider)
+ return _build_settings_response(s)
+
+ @app.put("/api/settings/llm", response_model=SettingsResponseExt)
+ async def api_update_llm_settings_ext(updates: dict):
+ s = update_settings(updates)
+ return _build_settings_response(s)
+
+ logger.info("OllaBridge settings endpoints registered (overrides original)")
diff --git a/gitpilot/cli.py b/gitpilot/cli.py
new file mode 100644
index 0000000000000000000000000000000000000000..097bba253a60bfcc808c8e47ea8c43cda8789935
--- /dev/null
+++ b/gitpilot/cli.py
@@ -0,0 +1,540 @@
+from __future__ import annotations
+
+import os
+import sys
+import threading
+import time
+import webbrowser
+from pathlib import Path
+
+import typer
+import uvicorn
+from rich.console import Console
+from rich.panel import Panel
+from rich.table import Table
+
+from .version import __version__
+from .settings import get_settings, LLMProvider
+from .model_catalog import list_models_for_provider
+
+
+# Top-level Typer application and the shared Rich console used by all commands.
+cli = typer.Typer(add_completion=False, help="GitPilot - Agentic AI assistant for GitHub")
+console = Console()
+
+
+def _check_configuration():
+    """Check and display configuration status.
+
+    Returns a 5-tuple:
+        (has_env, has_github_token, provider_configured, issues, warnings)
+    where ``issues`` are headline problems and ``warnings`` are indented
+    follow-up hints printed beneath them.
+    """
+    issues = []
+    warnings = []
+
+    # Check for .env file
+    env_file = Path.cwd() / ".env"
+    has_env = env_file.exists()
+
+    # Check GitHub token
+    github_token = os.getenv("GITPILOT_GITHUB_TOKEN") or os.getenv("GITHUB_TOKEN")
+    if not github_token:
+        issues.append("β GitHub token not found")
+        warnings.append("   Set GITPILOT_GITHUB_TOKEN or GITHUB_TOKEN in .env")
+        warnings.append("   Get token at: https://github.com/settings/tokens")
+
+    # Check LLM provider configuration
+    settings = get_settings()
+    provider = settings.provider
+
+    # NOTE(review): the ollabridge provider is not handled by this chain, so
+    # it falls through to "not configured" even though it may not need an API
+    # key (see the extension module) — confirm the intended behavior.
+    provider_configured = False
+    if provider == LLMProvider.openai:
+        api_key = settings.openai.api_key or os.getenv("OPENAI_API_KEY")
+        provider_configured = bool(api_key)
+    elif provider == LLMProvider.claude:
+        api_key = settings.claude.api_key or os.getenv("ANTHROPIC_API_KEY")
+        provider_configured = bool(api_key)
+    elif provider == LLMProvider.watsonx:
+        api_key = settings.watsonx.api_key or os.getenv("WATSONX_API_KEY")
+        provider_configured = bool(api_key)
+    elif provider == LLMProvider.ollama:
+        # Ollama doesn't require API key, just needs to be running
+        provider_configured = True
+
+    if not provider_configured:
+        issues.append(f"β {provider.value.upper()} API key not configured")
+        # NOTE(review): f-string below has no placeholders (lint F541).
+        warnings.append(f"   Configure in Admin UI or set environment variable")
+
+    return has_env, github_token is not None, provider_configured, issues, warnings
+
+
+def _display_startup_banner(host: str, port: int):
+ """Display a professional startup banner with configuration status."""
+ console.print()
+
+ # Header
+ console.print(Panel.fit(
+ f"[bold cyan]GitPilot[/bold cyan] [dim]v{__version__}[/dim]\n"
+ "[white]Agentic AI Assistant for GitHub Repositories[/white]",
+ border_style="cyan"
+ ))
+
+ # Check configuration
+ has_env, has_github, has_llm, issues, warnings = _check_configuration()
+ settings = get_settings()
+
+ # Configuration table
+ table = Table(show_header=False, box=None, padding=(0, 2))
+ table.add_column("Key", style="cyan")
+ table.add_column("Value", style="white")
+
+ # Environment file status
+ env_status = "β
Found" if has_env else "β οΈ Not found (using defaults)"
+ table.add_row("Environment File", env_status)
+
+ # GitHub token status
+ github_status = "β
Configured" if has_github else "β Not configured"
+ table.add_row("GitHub Token", github_status)
+
+ # LLM Provider status
+ provider_name = settings.provider.value.upper()
+ llm_status = f"β
{provider_name}" if has_llm else f"β οΈ {provider_name} (not configured)"
+ table.add_row("LLM Provider", llm_status)
+
+ # Server info
+ table.add_row("Server", f"http://{host}:{port}")
+
+ console.print(table)
+ console.print()
+
+ # Display issues and warnings
+ if issues:
+ console.print("[bold yellow]β οΈ Configuration Issues:[/bold yellow]")
+ for issue in issues:
+ console.print(f" {issue}")
+ for warning in warnings:
+ console.print(f" [dim]{warning}[/dim]")
+ console.print()
+
+ # Setup instructions if needed
+ if not has_env and (not has_github or not has_llm):
+ console.print(Panel(
+ "[bold]Quick Setup:[/bold]\n\n"
+ "1. Copy .env.template to .env:\n"
+ " [cyan]cp .env.template .env[/cyan]\n\n"
+ "2. Edit .env and add your credentials\n\n"
+ "3. Or configure via Admin UI in your browser\n\n"
+ "[dim]See README.md for detailed setup instructions[/dim]",
+ title="[yellow]Setup Required[/yellow]",
+ border_style="yellow"
+ ))
+ else:
+ console.print("[bold green]β[/bold green] GitPilot is ready!")
+ console.print()
+ console.print("[bold]Next Steps:[/bold]")
+ console.print(" β’ Open the Admin UI to configure LLM providers")
+ console.print(" β’ Select a repository in the Workspace tab")
+ console.print(" β’ Start chatting with your AI coding assistant")
+
+ console.print()
+ console.print("[dim]Press Ctrl+C to stop the server[/dim]")
+ console.print()
+
+
+def _run_server(host: str, port: int, reload: bool = False):
+    """Run the FastAPI server.
+
+    Blocks until the server stops. The app is passed as an import string
+    ("gitpilot.api:app"), which is required by uvicorn for --reload mode.
+    """
+    uvicorn.run(
+        "gitpilot.api:app",
+        host=host,
+        port=port,
+        reload=reload,
+        log_level="info",
+    )
+
+
+@cli.command()
+def serve(
+ host: str = typer.Option("127.0.0.1", "--host", "-h", help="Host to bind"),
+ port: int = typer.Option(8000, "--port", "-p", help="Port to bind"),
+ reload: bool = typer.Option(False, "--reload", help="Enable auto-reload"),
+ open_browser: bool = typer.Option(True, "--open/--no-open", help="Open browser"),
+):
+ """Start the GitPilot server with web UI."""
+ # Display startup banner
+ _display_startup_banner(host, port)
+
+ # Start server in background thread
+ thread = threading.Thread(
+ target=_run_server,
+ kwargs={"host": host, "port": port, "reload": reload},
+ daemon=False,
+ )
+ thread.start()
+
+ # Open browser after a short delay
+ if open_browser:
+ time.sleep(1.5)
+ try:
+ webbrowser.open(f"http://{host}:{port}")
+ console.print(f"[green]β[/green] Browser opened at http://{host}:{port}")
+ except Exception:
+ console.print(f"[yellow]![/yellow] Please open http://{host}:{port} in your browser")
+
+ # Wait for server thread
+ try:
+ thread.join()
+ except KeyboardInterrupt:
+ console.print("\n[yellow]Shutting down GitPilot...[/yellow]")
+ sys.exit(0)
+
+
+@cli.command()
+def config():
+    """Show current configuration.
+
+    Renders a table of the active provider, GitHub token status, and the
+    provider-specific credentials/model, annotating where each value came
+    from (environment variable vs. settings file).
+    """
+    console.print()
+    console.print(Panel.fit(
+        "[bold cyan]GitPilot Configuration[/bold cyan]",
+        border_style="cyan"
+    ))
+
+    settings = get_settings()
+
+    # Configuration details
+    table = Table(title="Settings", show_header=True, header_style="bold cyan")
+    table.add_column("Setting", style="cyan")
+    table.add_column("Value", style="white")
+    table.add_column("Source", style="dim")
+
+    # Provider
+    env_provider = os.getenv("GITPILOT_PROVIDER")
+    provider_source = "Environment" if env_provider else "Settings file"
+    table.add_row("Active Provider", settings.provider.value, provider_source)
+
+    # GitHub token
+    github_token = os.getenv("GITPILOT_GITHUB_TOKEN") or os.getenv("GITHUB_TOKEN")
+    github_status = "Configured" if github_token else "Not set"
+    github_source = "Environment" if github_token else "N/A"
+    table.add_row("GitHub Token", github_status, github_source)
+
+    # Provider-specific config
+    # NOTE(review): providers outside this chain (e.g. ollabridge) get no
+    # detail rows — confirm whether that is intentional.
+    if settings.provider == LLMProvider.openai:
+        api_key = settings.openai.api_key or os.getenv("OPENAI_API_KEY")
+        key_status = "Configured" if api_key else "Not set"
+        key_source = "Environment" if os.getenv("OPENAI_API_KEY") else ("Settings" if settings.openai.api_key else "N/A")
+        table.add_row("OpenAI API Key", key_status, key_source)
+        table.add_row("OpenAI Model", settings.openai.model or "gpt-4o-mini", "Settings")
+
+    elif settings.provider == LLMProvider.claude:
+        api_key = settings.claude.api_key or os.getenv("ANTHROPIC_API_KEY")
+        key_status = "Configured" if api_key else "Not set"
+        key_source = "Environment" if os.getenv("ANTHROPIC_API_KEY") else ("Settings" if settings.claude.api_key else "N/A")
+        table.add_row("Claude API Key", key_status, key_source)
+        table.add_row("Claude Model", settings.claude.model, "Settings")
+
+    elif settings.provider == LLMProvider.watsonx:
+        api_key = settings.watsonx.api_key or os.getenv("WATSONX_API_KEY")
+        key_status = "Configured" if api_key else "Not set"
+        key_source = "Environment" if os.getenv("WATSONX_API_KEY") else ("Settings" if settings.watsonx.api_key else "N/A")
+        table.add_row("Watsonx API Key", key_status, key_source)
+        table.add_row("Watsonx Model", settings.watsonx.model_id, "Settings")
+
+    elif settings.provider == LLMProvider.ollama:
+        table.add_row("Ollama URL", settings.ollama.base_url, "Settings")
+        table.add_row("Ollama Model", settings.ollama.model, "Settings")
+
+    console.print(table)
+    console.print()
+    console.print(f"[dim]Settings file: ~/.gitpilot/settings.json[/dim]")
+    console.print()
+
+
+@cli.command()
+def version():
+    """Show GitPilot version (from gitpilot.version.__version__)."""
+    console.print(f"GitPilot [cyan]v{__version__}[/cyan]")
+
+
+def main():
+    """Main entry point - run server by default.
+
+    With no CLI arguments, starts the server on 127.0.0.1:8000 directly in
+    the foreground (no browser auto-open); otherwise dispatches to Typer.
+    """
+    if len(sys.argv) == 1:
+        # No arguments, run server with defaults
+        _display_startup_banner("127.0.0.1", 8000)
+        try:
+            _run_server("127.0.0.1", 8000, reload=False)
+        except KeyboardInterrupt:
+            console.print("\n[yellow]Shutting down GitPilot...[/yellow]")
+            sys.exit(0)
+    else:
+        # Run CLI commands
+        cli()
+
+
+def serve_only():
+    """Entry point for gitpilot-api command.
+
+    Runs the API server on 127.0.0.1:8000 in the foreground, without the
+    banner or browser auto-open used by ``serve``.
+    """
+    console.print("[cyan]GitPilot API Server[/cyan]")
+    console.print("[dim]Starting on http://127.0.0.1:8000[/dim]\n")
+    try:
+        _run_server("127.0.0.1", 8000, reload=False)
+    except KeyboardInterrupt:
+        console.print("\n[yellow]Shutting down...[/yellow]")
+        sys.exit(0)
+@cli.command()
+def run(
+ repo: str = typer.Option(..., "--repo", "-r", help="Repository as owner/repo"),
+ message: str = typer.Option("", "--message", "-m", help="User request message"),
+ branch: str = typer.Option(None, "--branch", "-b", help="Target branch"),
+ auto_pr: bool = typer.Option(False, "--auto-pr", help="Create PR after execution"),
+ from_pr: int = typer.Option(None, "--from-pr", help="Fetch context from PR number"),
+ headless: bool = typer.Option(False, "--headless", help="Non-interactive JSON output"),
+):
+ """Run GitPilot non-interactively (headless mode for CI/CD)."""
+ import asyncio
+ import sys
+
+ if not message and not sys.stdin.isatty():
+ message = sys.stdin.read().strip()
+
+ if not message:
+ console.print("[red]Error:[/red] --message is required (or pipe via stdin)")
+ raise typer.Exit(code=1)
+
+ token = os.getenv("GITPILOT_GITHUB_TOKEN") or os.getenv("GITHUB_TOKEN")
+ if not token:
+ console.print("[red]Error:[/red] GITPILOT_GITHUB_TOKEN or GITHUB_TOKEN must be set")
+ raise typer.Exit(code=1)
+
+ from .headless import run_headless
+
+ result = asyncio.run(run_headless(
+ repo_full_name=repo,
+ message=message,
+ token=token,
+ branch=branch,
+ auto_pr=auto_pr,
+ from_pr=from_pr,
+ ))
+
+ if headless:
+ # Pure JSON for CI/CD consumption
+ console.print(result.to_json())
+ else:
+ if result.success:
+ console.print(f"[green]Success:[/green] {result.output[:500]}")
+ else:
+ console.print(f"[red]Failed:[/red] {result.error}")
+ if result.pr_url:
+ console.print(f"[cyan]PR:[/cyan] {result.pr_url}")
+
+ raise typer.Exit(code=0 if result.success else 1)
+
+
+@cli.command("init")
+def init_project(
+    path: str = typer.Argument(".", help="Project directory to initialise"),
+):
+    """Initialize .gitpilot/ directory with template GITPILOT.md."""
+    # NOTE(review): the StdPath alias duplicates the module-level Path import.
+    from pathlib import Path as StdPath
+    from .memory import MemoryManager
+
+    workspace = StdPath(path).resolve()
+    mgr = MemoryManager(workspace)
+    md_path = mgr.init_project()
+    console.print(f"[green]Initialized:[/green] {md_path}")
+    console.print("[dim]Edit .gitpilot/GITPILOT.md to add your project conventions.[/dim]")
+
+
+@cli.command("plugin")
+def plugin_cmd(
+    action: str = typer.Argument(..., help="install | uninstall | list"),
+    source: str = typer.Argument(None, help="Git URL, local path, or plugin name"),
+):
+    """Manage GitPilot plugins.
+
+    Actions: ``list`` (show installed plugins), ``install <source>``,
+    ``uninstall <name>``. Exits with code 1 on errors or unknown actions.
+    """
+    from .plugins import PluginManager
+
+    mgr = PluginManager()
+
+    if action == "list":
+        plugins = mgr.list_installed()
+        if not plugins:
+            console.print("[dim]No plugins installed.[/dim]")
+            return
+        table = Table(show_header=True, header_style="bold cyan")
+        table.add_column("Name")
+        table.add_column("Version")
+        table.add_column("Description")
+        for p in plugins:
+            table.add_row(p.name, p.version, p.description)
+        console.print(table)
+
+    elif action == "install":
+        if not source:
+            console.print("[red]Error:[/red] source is required for install")
+            raise typer.Exit(code=1)
+        try:
+            info = mgr.install(source)
+            console.print(f"[green]Installed:[/green] {info.name} v{info.version}")
+        except Exception as e:
+            # Broad catch is deliberate: surface any install failure as a CLI error.
+            console.print(f"[red]Error:[/red] {e}")
+            raise typer.Exit(code=1)
+
+    elif action == "uninstall":
+        if not source:
+            console.print("[red]Error:[/red] plugin name is required")
+            raise typer.Exit(code=1)
+        if mgr.uninstall(source):
+            console.print(f"[green]Uninstalled:[/green] {source}")
+        else:
+            console.print(f"[yellow]Not found:[/yellow] {source}")
+
+    else:
+        console.print(f"[red]Unknown action:[/red] {action}. Use: install, uninstall, list")
+        raise typer.Exit(code=1)
+
+
+@cli.command("skill")
+def skill_cmd(
+    name: str = typer.Argument(None, help="Skill name to invoke (or 'list')"),
+):
+    """List or invoke skills.
+
+    With no name (or "list"), shows available skills; otherwise prints the
+    prompt produced by invoking the named skill. Exits 1 if not found.
+    """
+    from .skills import SkillManager
+
+    mgr = SkillManager(workspace_path=Path.cwd())
+    mgr.load_all()
+
+    if not name or name == "list":
+        skills = mgr.list_skills()
+        if not skills:
+            console.print("[dim]No skills found.[/dim]")
+            console.print("[dim]Create .gitpilot/skills/*.md to add skills.[/dim]")
+            return
+        table = Table(show_header=True, header_style="bold cyan")
+        table.add_column("Name")
+        table.add_column("Description")
+        table.add_column("Auto")
+        for s in skills:
+            table.add_row(s["name"], s["description"], str(s.get("auto_trigger", False)))
+        console.print(table)
+    else:
+        prompt = mgr.invoke(name)
+        if prompt is None:
+            console.print(f"[red]Skill not found:[/red] {name}")
+            raise typer.Exit(code=1)
+        console.print(f"[cyan]/{name}[/cyan]")
+        console.print(prompt)
+
+
+@cli.command("scan")
+def scan_cmd(
+    path: str = typer.Argument(".", help="Directory or file to scan"),
+    min_confidence: float = typer.Option(0.5, "--min-confidence", help="Minimum confidence threshold"),
+):
+    """Run AI-powered security scan on a directory or file.
+
+    Files are scanned individually; directories are scanned recursively with
+    a per-severity summary. At most 50 findings are listed for directories.
+    """
+    from .security import SecurityScanner
+
+    scanner = SecurityScanner(min_confidence=min_confidence)
+    target = Path(path).resolve()
+
+    if target.is_file():
+        findings = scanner.scan_file(str(target))
+        if not findings:
+            console.print("[green]No security issues found.[/green]")
+            return
+        table = Table(show_header=True, header_style="bold red")
+        table.add_column("Severity")
+        table.add_column("Rule")
+        table.add_column("Title")
+        table.add_column("Line")
+        table.add_column("File")
+        for f in findings:
+            table.add_row(f.severity.value, f.rule_id, f.title, str(f.line_number), f.file_path)
+        console.print(table)
+    else:
+        result = scanner.scan_directory(str(target))
+        console.print(f"[cyan]Scanned:[/cyan] {result.files_scanned} files in {result.scan_duration_ms:.0f}ms")
+        if not result.findings:
+            console.print("[green]No security issues found.[/green]")
+            return
+        console.print(f"[yellow]Found {len(result.findings)} issues:[/yellow]")
+        # Severity rollup, colored by how serious the bucket is.
+        for sev, count in sorted(result.summary.items()):
+            color = "red" if sev in ("critical", "high") else "yellow" if sev == "medium" else "dim"
+            console.print(f"  [{color}]{sev}: {count}[/{color}]")
+        console.print()
+        table = Table(show_header=True, header_style="bold red")
+        table.add_column("Severity")
+        table.add_column("Rule")
+        table.add_column("Title")
+        table.add_column("Line")
+        table.add_column("File")
+        # Cap the table at 50 rows to keep terminal output manageable.
+        for f in result.findings[:50]:
+            table.add_row(f.severity.value, f.rule_id, f.title, str(f.line_number), f.file_path)
+        console.print(table)
+        if len(result.findings) > 50:
+            console.print(f"[dim]... and {len(result.findings) - 50} more[/dim]")
+
+
+@cli.command("predict")
+def predict_cmd(
+    context: str = typer.Argument(..., help="Context string to get predictions for"),
+):
+    """Get proactive suggestions based on context.
+
+    Each suggestion is printed with its relevance score (colored by
+    threshold: >=0.8 green, >=0.6 yellow, else dim), title, description,
+    and a ready-to-use prompt.
+    """
+    from .predictions import PredictiveEngine
+
+    engine = PredictiveEngine()
+    suggestions = engine.predict(context)
+
+    if not suggestions:
+        console.print("[dim]No suggestions for this context.[/dim]")
+        return
+
+    for s in suggestions:
+        score_color = "green" if s.relevance_score >= 0.8 else "yellow" if s.relevance_score >= 0.6 else "dim"
+        console.print(f"  [{score_color}][{s.relevance_score:.0%}][/{score_color}] [bold]{s.title}[/bold]")
+        console.print(f"    {s.description}")
+        console.print(f"    [cyan]Prompt:[/cyan] {s.prompt}")
+        console.print()
+
+
+@cli.command("list-models")
+def list_models_cmd(
+    provider: str = typer.Option(
+        None,
+        "--provider",
+        "-p",
+        help="LLM provider (openai, claude, watsonx, ollama). Defaults to active provider.",
+    )
+):
+    """List LLM models available for the configured provider.
+
+    A non-fatal error from the catalog lookup is shown as a warning and the
+    (possibly empty) model list is still rendered.
+    """
+    settings = get_settings()
+
+    if provider is None:
+        target = settings.provider
+    else:
+        # Normalize to enum
+        try:
+            target = LLMProvider(provider)
+        except ValueError:
+            console.print(f"[red]Unknown provider:[/red] {provider}")
+            raise typer.Exit(code=1)
+
+    models, error = list_models_for_provider(target, settings)
+
+    console.print()
+    console.print(
+        Panel.fit(
+            f"[bold cyan]Models for provider[/bold cyan] [white]{target.value}[/white]",
+            border_style="cyan",
+        )
+    )
+
+    if error:
+        console.print(f"[yellow]Warning:[/yellow] {error}")
+
+    if not models:
+        console.print("No models found.")
+        return
+
+    table = Table(show_header=True, header_style="bold cyan")
+    table.add_column("#", style="dim", justify="right")
+    table.add_column("Model ID", style="white")
+
+    for i, m in enumerate(models, start=1):
+        table.add_row(str(i), m)
+
+    console.print(table)
+    console.print()
+
diff --git a/gitpilot/context_pack.py b/gitpilot/context_pack.py
new file mode 100644
index 0000000000000000000000000000000000000000..86dc9cc361f12ec704e50acb1765cad146e02633
--- /dev/null
+++ b/gitpilot/context_pack.py
@@ -0,0 +1,151 @@
+# gitpilot/context_pack.py
+"""Context Pack β compose a bounded, token-safe context injection for agents.
+
+Non-destructive, additive feature. If no context assets or use cases exist
+the pack is empty and agents behave exactly as before.
+
+Usage in agentic.py / agent builders:
+ from .context_pack import build_context_pack
+ pack = build_context_pack(workspace_path, query=goal)
+ # Prepend ``pack`` to agent backstory or system prompt.
+"""
+from __future__ import annotations
+
+import logging
+from pathlib import Path
+from typing import Optional
+
+logger = logging.getLogger(__name__)
+
+# ---------------------------------------------------------------------------
+# Limits (keep total injection well under 8 K chars to avoid token blowups)
+# ---------------------------------------------------------------------------
+MAX_CONVENTIONS_CHARS = 2_000  # cap for the conventions/rules section
+MAX_USE_CASE_CHARS = 2_000     # cap for the active use-case section
+MAX_CHUNKS_CHARS = 3_000       # cap for retrieved asset chunks combined
+MAX_CHUNKS = 8                 # max number of asset chunks retrieved
+
+
+def build_context_pack(
+ workspace_path: Path,
+ query: str = "",
+ *,
+ include_conventions: bool = True,
+ include_use_case: bool = True,
+ include_assets: bool = True,
+ max_total_chars: int = 7_000,
+) -> str:
+ """Build a markdown context pack for agent prompt injection.
+
+ Returns an empty string when nothing is available (zero overhead).
+ """
+ parts: list[str] = []
+ total = 0
+
+ # 1) Conventions / Rules (existing MemoryManager)
+ if include_conventions:
+ section = _conventions_section(workspace_path)
+ if section and total + len(section) <= max_total_chars:
+ parts.append(section)
+ total += len(section)
+
+ # 2) Active Use Case
+ if include_use_case:
+ section = _use_case_section(workspace_path)
+ if section and total + len(section) <= max_total_chars:
+ parts.append(section)
+ total += len(section)
+
+ # 3) Relevant context chunks from uploaded assets
+ if include_assets and query:
+ remaining = max_total_chars - total
+ section = _assets_section(workspace_path, query, max_chars=min(remaining, MAX_CHUNKS_CHARS))
+ if section:
+ parts.append(section)
+
+ if not parts:
+ return ""
+
+ return "## Project Context Pack (auto)\n\n" + "\n\n".join(parts)
+
+
+# ---------------------------------------------------------------------------
+# Section builders
+# ---------------------------------------------------------------------------
+def _conventions_section(workspace_path: Path) -> str:
+    """Render the project conventions section, or "" if unavailable.
+
+    Any failure (missing module, unreadable memory) is swallowed by design
+    so the context pack degrades to empty rather than breaking agents.
+    """
+    try:
+        from .memory import MemoryManager
+
+        mgr = MemoryManager(workspace_path)
+        ctx = mgr.load_context()
+        prompt = ctx.to_system_prompt()
+        if not prompt:
+            return ""
+        # Truncate to keep the pack within its character budget.
+        return "### Conventions\n\n" + prompt[:MAX_CONVENTIONS_CHARS]
+    except Exception:
+        logger.debug("Could not load conventions for context pack", exc_info=True)
+        return ""
+
+
+def _use_case_section(workspace_path: Path) -> str:
+    """Render the active use case as a markdown bullet list, or "".
+
+    List fields are capped (10 requirements, 10 acceptance criteria,
+    5 constraints) and the whole section is truncated to
+    MAX_USE_CASE_CHARS. Failures degrade to an empty string by design.
+    """
+    try:
+        from .use_case import UseCaseManager
+
+        mgr = UseCaseManager(workspace_path)
+        uc = mgr.get_active_use_case()
+        if not uc:
+            return ""
+
+        spec = uc.spec
+        lines = ["### Active Use Case"]
+        if spec.title:
+            lines.append(f"- **Title:** {spec.title}")
+        if spec.summary:
+            lines.append(f"- **Summary:** {spec.summary}")
+        if spec.problem:
+            lines.append(f"- **Problem:** {spec.problem}")
+        if spec.users:
+            lines.append(f"- **Users:** {spec.users}")
+        if spec.requirements:
+            lines.append("- **Requirements:**")
+            for r in spec.requirements[:10]:
+                lines.append(f"  - {r}")
+        if spec.acceptance_criteria:
+            lines.append("- **Acceptance Criteria:**")
+            for ac in spec.acceptance_criteria[:10]:
+                lines.append(f"  - {ac}")
+        if spec.constraints:
+            lines.append("- **Constraints:**")
+            for c in spec.constraints[:5]:
+                lines.append(f"  - {c}")
+
+        result = "\n".join(lines)
+        return result[:MAX_USE_CASE_CHARS]
+    except Exception:
+        logger.debug("Could not load active use case for context pack", exc_info=True)
+        return ""
+
+
+def _assets_section(
+    workspace_path: Path,
+    query: str,
+    max_chars: int = MAX_CHUNKS_CHARS,
+) -> str:
+    """Render relevant vault chunks for ``query``, or "" if none.
+
+    Chunk count and total size are bounded by MAX_CHUNKS / ``max_chars``;
+    the joined output is truncated again to ``max_chars`` as a final guard.
+    Failures degrade to an empty string by design.
+    """
+    try:
+        from .context_vault import ContextVault
+
+        vault = ContextVault(workspace_path)
+        chunks = vault.search_chunks(query, max_chunks=MAX_CHUNKS, max_chars=max_chars)
+        if not chunks:
+            return ""
+
+        lines = [f"### Relevant References (Top {len(chunks)})"]
+        for chunk in chunks:
+            lines.append(
+                f"[Asset: {chunk.filename} | chunk {chunk.chunk_index}]\n{chunk.text}"
+            )
+
+        return "\n\n".join(lines)[:max_chars]
+    except Exception:
+        logger.debug("Could not search context vault for context pack", exc_info=True)
+        return ""
diff --git a/gitpilot/context_vault.py b/gitpilot/context_vault.py
new file mode 100644
index 0000000000000000000000000000000000000000..1cc78bb954ea62126f14442bf9b4ef735cafb213
--- /dev/null
+++ b/gitpilot/context_vault.py
@@ -0,0 +1,525 @@
+# gitpilot/context_vault.py
+"""Context Vault β upload, extract, index, and retrieve project context assets.
+
+Non-destructive, additive feature. Stores everything under:
+ ~/.gitpilot/workspaces/{owner}/{repo}/.gitpilot/context/
+
+Directory layout:
+ context/
+ assets/ raw uploaded files
+ extracted/ extracted text + metadata JSON
+ index/ SQLite metadata + chunk index
+ use_cases/ structured use-case JSON + markdown exports
+
+This module handles asset lifecycle (upload, extract, index, delete)
+and chunk retrieval for context-pack injection into agent prompts.
+"""
+from __future__ import annotations
+
+import json
+import logging
+import os
+import re
+import shutil
+import sqlite3
+import time
+import uuid
+from dataclasses import asdict, dataclass, field
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+logger = logging.getLogger(__name__)
+
# ---------------------------------------------------------------------------
# Limits
# ---------------------------------------------------------------------------
MAX_UPLOAD_BYTES = 200 * 1024 * 1024  # 200 MB default
MAX_EXTRACT_CHARS = 500_000  # hard cap on extracted text stored per asset
CHUNK_SIZE = 800  # chars per chunk (approx)
CHUNK_OVERLAP = 100  # chars shared between consecutive chunks
MAX_RETRIEVAL_CHUNKS = 8  # default chunk count returned by search_chunks
MAX_RETRIEVAL_CHARS = 6_000  # default total char budget returned by search_chunks
+
+
+# ---------------------------------------------------------------------------
+# Data classes
+# ---------------------------------------------------------------------------
@dataclass
class AssetMeta:
    """Summary metadata for one uploaded asset, as returned by ContextVault.list_assets()."""
    asset_id: str  # short unique id (12 hex chars from uuid4)
    filename: str  # original upload filename (unsanitized)
    mime: str  # MIME type, guessed from extension if not provided
    size_bytes: int  # raw upload size
    created_at: str  # UTC timestamp, "%Y-%m-%dT%H:%M:%SZ"
    extracted_chars: int = 0  # length of extracted text; 0 for binary/media
    indexed_chunks: int = 0  # number of chunks stored in the SQLite index
    notes: str = ""

    def to_dict(self) -> dict:
        """Return a JSON-serializable dict of all fields."""
        return asdict(self)
+
+
@dataclass
class ExtractedAsset:
    """Extracted text plus provenance for one asset."""
    asset_id: str
    filename: str
    mime: str
    extracted_text: str  # best-effort plain text; may be "" for binary assets
    pages: Optional[int] = None  # page count when known (e.g. PDFs); else None
    created_at: str = ""
    notes: str = ""
+
+
@dataclass
class ChunkResult:
    """One retrieved chunk from the index, with its keyword-match score."""
    asset_id: str
    filename: str
    chunk_index: int  # position of this chunk within its asset
    text: str
    score: float = 0.0  # TF-style keyword score; higher is more relevant
+
+
+# ---------------------------------------------------------------------------
+# Vault manager
+# ---------------------------------------------------------------------------
class ContextVault:
    """Manages the per-repo context vault under ``.gitpilot/context/``.

    Handles the asset lifecycle (upload -> extract -> chunk -> index ->
    delete) and keyword-based chunk retrieval for context-pack injection.
    All state lives on disk: raw files in ``assets/``, extracted text and
    metadata JSON in ``extracted/``, and a SQLite chunk index in ``index/``.
    """

    def __init__(self, workspace_path: Path):
        self.workspace_path = workspace_path
        self.vault_dir = workspace_path / ".gitpilot" / "context"
        self.assets_dir = self.vault_dir / "assets"
        self.extracted_dir = self.vault_dir / "extracted"
        self.index_dir = self.vault_dir / "index"
        self.use_cases_dir = self.vault_dir / "use_cases"

    # ------------------------------------------------------------------
    # Init & safety
    # ------------------------------------------------------------------
    def _ensure_dirs(self):
        """Create every vault subdirectory (idempotent)."""
        for d in (self.assets_dir, self.extracted_dir, self.index_dir, self.use_cases_dir):
            d.mkdir(parents=True, exist_ok=True)

    def _safe_resolve(self, base: Path, name: str) -> Path:
        """Resolve ``name`` under ``base``, blocking path-traversal escapes.

        Containment is verified with ``Path.relative_to`` on resolved paths.
        A plain ``str.startswith`` prefix check is NOT sufficient: it would
        accept an escape into a sibling directory whose name merely starts
        with the base name (e.g. ``.../assets_evil`` passes a prefix check
        against ``.../assets``).

        Raises:
            PermissionError: if the resolved path lies outside ``base``.
        """
        resolved_base = base.resolve()
        full = (resolved_base / name).resolve()
        try:
            full.relative_to(resolved_base)
        except ValueError:
            raise PermissionError(f"Path traversal blocked: {name}") from None
        return full

    # ------------------------------------------------------------------
    # Asset CRUD
    # ------------------------------------------------------------------
    def list_assets(self) -> List[AssetMeta]:
        """Return metadata for all uploaded assets (corrupt entries are skipped)."""
        self._ensure_dirs()
        results: List[AssetMeta] = []
        for ext_file in sorted(self.extracted_dir.glob("*.json")):
            try:
                data = json.loads(ext_file.read_text(encoding="utf-8"))
                results.append(AssetMeta(
                    asset_id=data.get("asset_id", ext_file.stem),
                    filename=data.get("filename", ""),
                    mime=data.get("mime", ""),
                    size_bytes=data.get("size_bytes", 0),
                    created_at=data.get("created_at", ""),
                    extracted_chars=len(data.get("extracted_text", "")),
                    indexed_chunks=data.get("indexed_chunks", 0),
                    notes=data.get("notes", ""),
                ))
            except Exception:
                # Unreadable/invalid JSON must not break the whole listing.
                logger.warning("Skipping corrupt metadata: %s", ext_file)
        return results

    def upload_asset(self, filename: str, content: bytes, mime: str = "") -> AssetMeta:
        """Store a raw asset, extract its text, and index the chunks.

        Args:
            filename: Original filename (sanitized before storage).
            content: Raw file bytes.
            mime: Optional MIME type; guessed from the extension when empty.

        Returns:
            AssetMeta describing the stored asset.

        Raises:
            ValueError: if ``content`` exceeds MAX_UPLOAD_BYTES.
        """
        self._ensure_dirs()

        if len(content) > MAX_UPLOAD_BYTES:
            raise ValueError(
                f"File too large ({len(content)} bytes). Max is {MAX_UPLOAD_BYTES}."
            )

        asset_id = uuid.uuid4().hex[:12]
        # Sanitize the filename, then prefix with the id so stored names are unique.
        safe_name = re.sub(r"[^\w.\-]", "_", filename)
        stored_name = f"{asset_id}_{safe_name}"

        asset_path = self._safe_resolve(self.assets_dir, stored_name)
        asset_path.write_bytes(content)

        if not mime:
            mime = _guess_mime(filename)

        now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())

        # Extract text (best-effort; "" for binary/media).
        extracted_text = _extract_text(asset_path, mime)

        # Chunk + index for retrieval.
        chunks = _chunk_text(extracted_text, CHUNK_SIZE, CHUNK_OVERLAP)
        indexed_count = self._index_chunks(asset_id, filename, chunks)

        # Persist extracted text + metadata next to the raw asset.
        meta_data = {
            "asset_id": asset_id,
            "filename": filename,
            "stored_name": stored_name,
            "mime": mime,
            "size_bytes": len(content),
            "extracted_text": extracted_text[:MAX_EXTRACT_CHARS],
            "pages": None,
            "created_at": now,
            "indexed_chunks": indexed_count,
            "notes": "",
        }
        meta_path = self._safe_resolve(self.extracted_dir, f"{asset_id}.json")
        meta_path.write_text(json.dumps(meta_data, indent=2), encoding="utf-8")

        return AssetMeta(
            asset_id=asset_id,
            filename=filename,
            mime=mime,
            size_bytes=len(content),
            created_at=now,
            extracted_chars=len(extracted_text),
            indexed_chunks=indexed_count,
        )

    def delete_asset(self, asset_id: str) -> bool:
        """Remove the asset's raw file, extracted metadata, and index entries.

        Idempotent: returns True even if parts were already missing.
        """
        self._ensure_dirs()

        # Remove extracted metadata (remember stored_name for the raw file).
        meta_path = self.extracted_dir / f"{asset_id}.json"
        stored_name = None
        if meta_path.exists():
            try:
                data = json.loads(meta_path.read_text(encoding="utf-8"))
                stored_name = data.get("stored_name")
            except Exception:
                # Corrupt metadata: fall back to prefix matching below.
                pass
            meta_path.unlink()

        # Remove raw asset.
        if stored_name:
            asset_path = self.assets_dir / stored_name
            if asset_path.exists():
                asset_path.unlink()
        else:
            # fallback: stored names start with "{asset_id}_"
            for f in self.assets_dir.iterdir():
                if f.name.startswith(asset_id):
                    f.unlink()
                    break

        # Remove from index.
        self._remove_from_index(asset_id)

        return True

    def get_asset_path(self, asset_id: str) -> Optional[Path]:
        """Return the raw asset path for download, or None when not found."""
        self._ensure_dirs()
        meta_path = self.extracted_dir / f"{asset_id}.json"
        if meta_path.exists():
            try:
                data = json.loads(meta_path.read_text(encoding="utf-8"))
                stored_name = data.get("stored_name", "")
                if stored_name:
                    p = self.assets_dir / stored_name
                    if p.exists():
                        return p
            except Exception:
                # Corrupt metadata: fall back to prefix matching below.
                pass

        # fallback: stored names start with "{asset_id}_"
        for f in self.assets_dir.iterdir():
            if f.name.startswith(asset_id):
                return f
        return None

    def get_asset_filename(self, asset_id: str) -> str:
        """Return the original filename for an asset, or "unknown"."""
        meta_path = self.extracted_dir / f"{asset_id}.json"
        if meta_path.exists():
            try:
                data = json.loads(meta_path.read_text(encoding="utf-8"))
                return data.get("filename", "unknown")
            except Exception:
                pass
        return "unknown"

    # ------------------------------------------------------------------
    # Indexing (SQLite-backed)
    # ------------------------------------------------------------------
    def _get_db(self) -> sqlite3.Connection:
        """Open (and lazily initialize) the chunk-index database.

        Callers own the returned connection and must close it.
        """
        self._ensure_dirs()
        db_path = self.index_dir / "context.sqlite"
        conn = sqlite3.connect(str(db_path))
        conn.execute("""
            CREATE TABLE IF NOT EXISTS chunks (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                asset_id TEXT NOT NULL,
                filename TEXT NOT NULL,
                chunk_index INTEGER NOT NULL,
                text TEXT NOT NULL
            )
        """)
        conn.execute("""
            CREATE INDEX IF NOT EXISTS idx_chunks_asset ON chunks(asset_id)
        """)
        conn.commit()
        return conn

    def _index_chunks(self, asset_id: str, filename: str, chunks: List[str]) -> int:
        """Replace the indexed chunks for ``asset_id`` and return the new count."""
        conn = self._get_db()
        try:
            # Remove old entries for this asset (re-index)
            conn.execute("DELETE FROM chunks WHERE asset_id = ?", (asset_id,))
            for i, chunk_text in enumerate(chunks):
                conn.execute(
                    "INSERT INTO chunks (asset_id, filename, chunk_index, text) VALUES (?, ?, ?, ?)",
                    (asset_id, filename, i, chunk_text),
                )
            conn.commit()
            return len(chunks)
        finally:
            conn.close()

    def _remove_from_index(self, asset_id: str):
        """Best-effort removal of all indexed chunks for ``asset_id``.

        Failures are logged (not raised): a missing or corrupt index must not
        prevent asset deletion. The connection is always closed.
        """
        try:
            conn = self._get_db()
            try:
                conn.execute("DELETE FROM chunks WHERE asset_id = ?", (asset_id,))
                conn.commit()
            finally:
                conn.close()
        except Exception:
            logger.debug("Failed to remove asset %s from index", asset_id, exc_info=True)

    # ------------------------------------------------------------------
    # Retrieval
    # ------------------------------------------------------------------
    def search_chunks(
        self,
        query: str,
        max_chunks: int = MAX_RETRIEVAL_CHUNKS,
        max_chars: int = MAX_RETRIEVAL_CHARS,
    ) -> List[ChunkResult]:
        """Simple keyword-based retrieval (BM25-like scoring).

        Phase 1: naive keyword matching. Phase 2 can add embeddings.

        Args:
            query: Free-text query; stop words and 1-char tokens are dropped.
            max_chunks: Maximum number of chunks to return.
            max_chars: Total character budget across returned chunks.

        Returns:
            ChunkResults sorted by descending score, trimmed to both limits.
        """
        if not query.strip():
            return []

        keywords = _extract_keywords(query)
        if not keywords:
            return []

        try:
            conn = self._get_db()
        except Exception:
            # No usable index (e.g. unwritable dir) -> behave as "no results".
            return []

        try:
            rows = conn.execute(
                "SELECT asset_id, filename, chunk_index, text FROM chunks"
            ).fetchall()
        finally:
            conn.close()

        scored: List[ChunkResult] = []
        for asset_id, filename, chunk_index, text in rows:
            text_lower = text.lower()
            score = 0.0
            for kw in keywords:
                count = text_lower.count(kw.lower())
                if count > 0:
                    # simple TF score; longer keywords weigh slightly more
                    score += count * (1.0 + len(kw) * 0.1)
            if score > 0:
                scored.append(ChunkResult(
                    asset_id=asset_id,
                    filename=filename,
                    chunk_index=chunk_index,
                    text=text,
                    score=score,
                ))

        scored.sort(key=lambda c: c.score, reverse=True)

        # Enforce limits
        results: List[ChunkResult] = []
        total_chars = 0
        for chunk in scored[:max_chunks * 2]:  # over-fetch then trim
            if len(results) >= max_chunks:
                break
            if total_chars + len(chunk.text) > max_chars:
                break
            results.append(chunk)
            total_chars += len(chunk.text)

        return results
+
+
+# ---------------------------------------------------------------------------
+# Text extraction helpers
+# ---------------------------------------------------------------------------
+def _guess_mime(filename: str) -> str:
+ ext = Path(filename).suffix.lower()
+ mime_map = {
+ ".txt": "text/plain",
+ ".md": "text/markdown",
+ ".csv": "text/csv",
+ ".json": "application/json",
+ ".pdf": "application/pdf",
+ ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+ ".doc": "application/msword",
+ ".png": "image/png",
+ ".jpg": "image/jpeg",
+ ".jpeg": "image/jpeg",
+ ".gif": "image/gif",
+ ".svg": "image/svg+xml",
+ ".mp4": "video/mp4",
+ ".mov": "video/quicktime",
+ ".mp3": "audio/mpeg",
+ ".wav": "audio/wav",
+ ".vtt": "text/vtt",
+ ".srt": "text/srt",
+ ".py": "text/x-python",
+ ".js": "text/javascript",
+ ".ts": "text/typescript",
+ ".jsx": "text/jsx",
+ ".tsx": "text/tsx",
+ ".html": "text/html",
+ ".css": "text/css",
+ ".yaml": "text/yaml",
+ ".yml": "text/yaml",
+ ".toml": "text/toml",
+ ".xml": "text/xml",
+ ".sh": "text/x-shellscript",
+ ".go": "text/x-go",
+ ".rs": "text/x-rust",
+ ".java": "text/x-java",
+ ".rb": "text/x-ruby",
+ }
+ return mime_map.get(ext, "application/octet-stream")
+
+
def _extract_text(path: Path, mime: str) -> str:
    """Best-effort text extraction from a file.

    Text-like files are read directly (capped at MAX_EXTRACT_CHARS);
    PDFs and Word documents go through dedicated extractors; other
    binary/media types yield "" (stored without text).
    """
    textual_mimes = {
        "application/json",
        "text/markdown",
        "text/csv",
        "text/vtt",
        "text/srt",
        "text/toml",
        "text/yaml",
    }
    if mime.startswith("text/") or mime in textual_mimes:
        try:
            raw = path.read_text(encoding="utf-8", errors="replace")
        except Exception:
            return ""
        return raw[:MAX_EXTRACT_CHARS]

    # PDF
    if mime == "application/pdf":
        return _extract_pdf(path)

    # DOCX / legacy Word
    if mime == "application/msword" or "wordprocessingml" in mime:
        return _extract_docx(path)

    # Binary/media: store only, no extraction.
    return ""
+
+
def _extract_pdf(path: Path) -> str:
    """Extract text from a PDF, capped at MAX_EXTRACT_CHARS.

    Tries the modern ``pypdf`` package first, then the legacy ``PyPDF2``
    name. Returns "" when no backend is installed OR when parsing fails
    (corrupt/encrypted PDFs), so callers can still store the raw asset —
    previously only ImportError was caught and a bad PDF would abort the
    whole upload, unlike _extract_docx which degrades gracefully.
    """
    reader_cls = None
    try:
        from pypdf import PdfReader as reader_cls  # modern package name
    except ImportError:
        try:
            from PyPDF2 import PdfReader as reader_cls  # legacy fallback
        except ImportError:
            reader_cls = None

    if reader_cls is None:
        logger.info("PDF extraction unavailable (install pypdf or PyPDF2). Storing PDF without text.")
        return ""

    try:
        reader = reader_cls(str(path))
        pages = []
        for page in reader.pages:
            text = page.extract_text()
            if text:
                pages.append(text)
        return "\n\n".join(pages)[:MAX_EXTRACT_CHARS]
    except Exception as e:
        # Corrupt or encrypted PDFs must not abort the upload.
        logger.warning("PDF extraction failed: %s", e)
        return ""
+
+
def _extract_docx(path: Path) -> str:
    """Extract paragraph text from a DOCX file, capped at MAX_EXTRACT_CHARS.

    Returns "" when python-docx is not installed or parsing fails.
    """
    try:
        import docx
    except ImportError:
        logger.info("DOCX extraction unavailable (install python-docx). Storing without text.")
        return ""

    try:
        document = docx.Document(str(path))
        non_empty = (p.text for p in document.paragraphs if p.text.strip())
        return "\n\n".join(non_empty)[:MAX_EXTRACT_CHARS]
    except Exception as e:
        logger.warning("DOCX extraction failed: %s", e)
        return ""
+
+
+# ---------------------------------------------------------------------------
+# Chunking
+# ---------------------------------------------------------------------------
def _chunk_text(text: str, chunk_size: int = CHUNK_SIZE, overlap: int = CHUNK_OVERLAP) -> List[str]:
    """Split ``text`` into overlapping chunks of roughly ``chunk_size`` chars.

    Consecutive chunks share ``overlap`` characters; each chunk is stripped
    and blank chunks are dropped.

    The per-iteration advance is clamped to at least 1, so pathological
    arguments (``overlap >= chunk_size`` or a non-positive ``chunk_size``)
    can no longer make ``start`` stop advancing and loop forever.
    """
    if not text:
        return []

    # Guard: the original `start = end - overlap` never advances when
    # overlap >= chunk_size, producing an infinite loop.
    step = max(1, chunk_size - overlap)

    chunks: List[str] = []
    start = 0
    while start < len(text):
        piece = text[start:start + chunk_size].strip()
        if piece:
            chunks.append(piece)
        start += step

    return chunks
+
+
+def _extract_keywords(query: str) -> List[str]:
+ """Extract meaningful keywords from a query string."""
+ # Remove common stop words
+ stop_words = {
+ "the", "a", "an", "is", "are", "was", "were", "be", "been", "being",
+ "have", "has", "had", "do", "does", "did", "will", "would", "could",
+ "should", "may", "might", "shall", "can", "need", "dare", "ought",
+ "used", "to", "of", "in", "for", "on", "with", "at", "by", "from",
+ "as", "into", "through", "during", "before", "after", "above",
+ "below", "between", "out", "off", "over", "under", "again",
+ "further", "then", "once", "here", "there", "when", "where", "why",
+ "how", "all", "both", "each", "few", "more", "most", "other",
+ "some", "such", "no", "nor", "not", "only", "own", "same", "so",
+ "than", "too", "very", "just", "because", "but", "and", "or", "if",
+ "while", "what", "which", "who", "whom", "this", "that", "these",
+ "those", "i", "me", "my", "we", "our", "you", "your", "he", "him",
+ "she", "her", "it", "its", "they", "them", "their",
+ }
+
+ words = re.findall(r"\w+", query.lower())
+ keywords = [w for w in words if w not in stop_words and len(w) > 1]
+ return keywords
diff --git a/gitpilot/cross_repo.py b/gitpilot/cross_repo.py
new file mode 100644
index 0000000000000000000000000000000000000000..45ccb790864d0f87e1dcc39ea9ce6894e0147988
--- /dev/null
+++ b/gitpilot/cross_repo.py
@@ -0,0 +1,351 @@
+# gitpilot/cross_repo.py
+"""Cross-repository intelligence β dependency graphs and impact analysis.
+
+Analyses patterns across multiple repositories to provide:
+- Dependency graphs (repo A depends on repo B)
+- Impact analysis (change in lib affects services)
+- Shared convention detection
+- Migration planning across repos
+
+Draws on the concept of *software ecosystems analysis* from research
+on large-scale dependency management (Decan et al., 2019).
+"""
+from __future__ import annotations
+
+import json
+import logging
+import re
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any, Dict, List, Optional, Set
+
+logger = logging.getLogger(__name__)
+
# Common dependency file patterns.
# Maps well-known manifest filenames to the package ecosystem they belong to;
# CrossRepoAnalyzer uses this to pick the right parser for each file.
_DEP_FILES = {
    "package.json": "npm",
    "requirements.txt": "pip",
    "Pipfile": "pipenv",
    "pyproject.toml": "pyproject",
    "Cargo.toml": "cargo",
    "go.mod": "go",
    "Gemfile": "bundler",
    "pom.xml": "maven",
    "build.gradle": "gradle",
    "composer.json": "composer",
}
+
+
@dataclass
class Dependency:
    """A single dependency edge between two entities.

    ``source`` is typically a repo ("owner/repo-a"); ``target`` is another
    repo or an external package name.
    """

    source: str  # e.g., "owner/repo-a"
    target: str  # e.g., "owner/repo-b" or "package-name"
    dep_type: str = "runtime"  # runtime | dev | peer | optional
    version: str = ""
    ecosystem: str = ""  # npm, pip, cargo, etc.

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain, JSON-friendly dict."""
        field_names = ("source", "target", "dep_type", "version", "ecosystem")
        return {name: getattr(self, name) for name in field_names}
+
+
@dataclass
class DependencyGraph:
    """A multi-repo dependency graph: analyzed repos plus edges between entities."""

    repos: List[str] = field(default_factory=list)
    dependencies: List[Dependency] = field(default_factory=list)
    ecosystems: List[str] = field(default_factory=list)

    @property
    def node_count(self) -> int:
        """Number of distinct entities appearing as a source or a target."""
        nodes = {d.source for d in self.dependencies}
        nodes.update(d.target for d in self.dependencies)
        return len(nodes)

    @property
    def edge_count(self) -> int:
        """Number of dependency edges."""
        return len(self.dependencies)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the graph plus derived counts to a plain dict."""
        return {
            "repos": self.repos,
            "dependencies": [d.to_dict() for d in self.dependencies],
            "ecosystems": self.ecosystems,
            "node_count": self.node_count,
            "edge_count": self.edge_count,
        }
+
+
@dataclass
class ImpactReport:
    """Result of impact analysis for a change originating in one repository."""

    source_repo: str
    change_description: str
    affected_repos: List[str] = field(default_factory=list)
    risk_level: str = "low"  # low | medium | high | critical
    details: List[str] = field(default_factory=list)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain, JSON-friendly dict."""
        keys = ("source_repo", "change_description", "affected_repos", "risk_level", "details")
        return {k: getattr(self, k) for k in keys}
+
+
@dataclass
class MigrationPlan:
    """Ordered plan for rolling a pattern out across repositories."""

    target_pattern: str
    repos: List[str] = field(default_factory=list)
    steps: List[Dict[str, str]] = field(default_factory=list)
    estimated_effort: str = "unknown"  # low | medium | high

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain, JSON-friendly dict."""
        return dict(
            target_pattern=self.target_pattern,
            repos=self.repos,
            steps=self.steps,
            estimated_effort=self.estimated_effort,
        )
+
+
class CrossRepoAnalyzer:
    """Analyze patterns and dependencies across multiple repositories.

    Usage::

        analyzer = CrossRepoAnalyzer()
        graph = analyzer.analyze_dependencies_from_files({
            "owner/repo-a": {"package.json": '{"dependencies": {"lodash": "^4"}}'},
            "owner/repo-b": {"requirements.txt": "requests>=2.28\\nflask>=3.0"},
        })
        impact = analyzer.impact_analysis(graph, "owner/repo-a", "Breaking change in API v2")
    """

    def analyze_dependencies_from_files(
        self,
        repo_files: Dict[str, Dict[str, str]],
    ) -> DependencyGraph:
        """Build a dependency graph from dependency files.

        Args:
            repo_files: Mapping of repo name -> {filename: content}.

        Returns:
            DependencyGraph with one edge per parsed dependency; files not
            listed in _DEP_FILES are ignored.
        """
        graph = DependencyGraph(repos=list(repo_files.keys()))
        ecosystems: Set[str] = set()

        for repo, files in repo_files.items():
            for filename, content in files.items():
                ecosystem = _DEP_FILES.get(filename)
                if not ecosystem:
                    continue
                ecosystems.add(ecosystem)
                deps = self._parse_dependencies(filename, content, ecosystem)
                for dep in deps:
                    # Parsers leave source blank; attribute the edge to this repo.
                    dep.source = repo
                    graph.dependencies.append(dep)

        graph.ecosystems = sorted(ecosystems)
        return graph

    def impact_analysis(
        self,
        graph: DependencyGraph,
        source_repo: str,
        change_description: str,
    ) -> ImpactReport:
        """Analyze the impact of a change in one repo on others.

        Walks the dependency graph to find repos that depend (directly
        or transitively) on the source repo, and grades overall risk by
        the number of affected repos.
        """
        # Build reverse adjacency: target -> [sources]
        dependents: Dict[str, List[str]] = {}
        for dep in graph.dependencies:
            dependents.setdefault(dep.target, []).append(dep.source)

        # BFS from source_repo over the reversed edges.
        affected: Set[str] = set()
        queue = [source_repo]
        visited: Set[str] = set()

        while queue:
            current = queue.pop(0)
            if current in visited:
                continue
            visited.add(current)
            for dependent in dependents.get(current, []):
                if dependent != source_repo:
                    affected.add(dependent)
                    queue.append(dependent)

        # Risk assessment: scale with the blast radius.
        if len(affected) == 0:
            risk = "low"
        elif len(affected) <= 3:
            risk = "medium"
        elif len(affected) <= 10:
            risk = "high"
        else:
            risk = "critical"

        # Detail lines only for DIRECT dependents of the source repo.
        details = []
        for repo in sorted(affected):
            deps_on_source = [
                d for d in graph.dependencies
                if d.source == repo and d.target == source_repo
            ]
            for d in deps_on_source:
                details.append(f"{repo} depends on {source_repo} ({d.dep_type}, {d.version})")

        return ImpactReport(
            source_repo=source_repo,
            change_description=change_description,
            affected_repos=sorted(affected),
            risk_level=risk,
            details=details,
        )

    def detect_shared_conventions(
        self,
        repo_files: Dict[str, Dict[str, str]],
    ) -> Dict[str, List[str]]:
        """Detect shared conventions across repos.

        Looks for common config files, linters, formatters, CI configs, etc.
        Matching is by substring, so e.g. ".eslintrc" also matches
        ".eslintrc.js" paths.

        Returns:
            Mapping of convention file -> list of repos containing it.
        """
        conventions: Dict[str, List[str]] = {}

        convention_files = [
            ".eslintrc", ".eslintrc.json", ".prettierrc",
            "ruff.toml", "pyproject.toml", ".flake8",
            ".github/workflows", "Makefile", "Dockerfile",
            "tsconfig.json", "jest.config",
        ]

        for repo, files in repo_files.items():
            for cf in convention_files:
                for filename in files:
                    if cf in filename:
                        conventions.setdefault(cf, []).append(repo)

        return conventions

    def suggest_migration(
        self,
        repos: List[str],
        target_pattern: str,
    ) -> MigrationPlan:
        """Suggest a migration plan for applying a pattern across repos.

        Produces one pending step per repo (in the given order) and a rough
        effort estimate based only on repo count.
        """
        steps = []
        for i, repo in enumerate(repos):
            steps.append({
                "order": str(i + 1),
                "repo": repo,
                "action": f"Apply {target_pattern} to {repo}",
                "status": "pending",
            })

        effort = "low" if len(repos) <= 3 else ("medium" if len(repos) <= 10 else "high")

        return MigrationPlan(
            target_pattern=target_pattern,
            repos=repos,
            steps=steps,
            estimated_effort=effort,
        )

    # ------------------------------------------------------------------
    # Dependency parsers
    # ------------------------------------------------------------------

    def _parse_dependencies(
        self, filename: str, content: str, ecosystem: str,
    ) -> List[Dependency]:
        """Dispatch to the parser for ``ecosystem``; unsupported ones yield []."""
        if ecosystem == "npm":
            return self._parse_npm(content)
        if ecosystem in ("pip", "pipenv"):
            return self._parse_pip(content)
        if ecosystem == "pyproject":
            return self._parse_pyproject(content)
        if ecosystem == "go":
            return self._parse_gomod(content)
        return []

    def _parse_npm(self, content: str) -> List[Dependency]:
        """Parse package.json dependency sections; invalid JSON yields []."""
        deps = []
        try:
            data = json.loads(content)
            for section, dep_type in [
                ("dependencies", "runtime"),
                ("devDependencies", "dev"),
                ("peerDependencies", "peer"),
            ]:
                for name, version in data.get(section, {}).items():
                    deps.append(Dependency(
                        source="", target=name,
                        dep_type=dep_type, version=version, ecosystem="npm",
                    ))
        except json.JSONDecodeError:
            pass
        return deps

    def _parse_pip(self, content: str) -> List[Dependency]:
        """Parse a requirements.txt-style file (comments and flag lines skipped)."""
        deps = []
        for line in content.strip().split("\n"):
            line = line.strip()
            if not line or line.startswith("#") or line.startswith("-"):
                continue
            # Name charset includes "." so dotted distributions such as
            # "zope.interface" are captured whole (previously truncated at the dot).
            m = re.match(r"([A-Za-z0-9._-]+)\s*([><=!~]+.+)?", line)
            if m:
                deps.append(Dependency(
                    source="", target=m.group(1),
                    dep_type="runtime", version=m.group(2) or "", ecosystem="pip",
                ))
        return deps

    def _parse_pyproject(self, content: str) -> List[Dependency]:
        """Parse the PEP 621 ``dependencies = [...]`` array of a pyproject.toml."""
        deps = []
        in_deps = False
        for line in content.split("\n"):
            stripped = line.strip()
            if stripped.startswith("dependencies"):
                in_deps = True
                continue
            if in_deps:
                if stripped.startswith("]"):
                    in_deps = False
                    continue
                # Include "." in the name charset for dotted distributions.
                m = re.match(r'"([A-Za-z0-9._-]+)', stripped)
                if m:
                    deps.append(Dependency(
                        source="", target=m.group(1),
                        dep_type="runtime", ecosystem="pyproject",
                    ))
        return deps

    def _parse_gomod(self, content: str) -> List[Dependency]:
        """Parse indented module lines inside a go.mod require block."""
        deps = []
        for line in content.split("\n"):
            m = re.match(r"\s+(\S+)\s+(\S+)", line)
            if m and not line.strip().startswith("//"):
                deps.append(Dependency(
                    source="", target=m.group(1),
                    dep_type="runtime", version=m.group(2), ecosystem="go",
                ))
        return deps
diff --git a/gitpilot/github_api.py b/gitpilot/github_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..51b2975c001bab2e94261579c79fa7e27e6d1ebb
--- /dev/null
+++ b/gitpilot/github_api.py
@@ -0,0 +1,585 @@
+# gitpilot/github_api.py
+from __future__ import annotations
+
+import contextvars
+import logging
+import os
+import re
+from base64 import b64decode, b64encode
+from contextlib import contextmanager
+from typing import Any
+
+import httpx
+from fastapi import HTTPException
+
+from gitpilot.models import GithubStatusSummary
+
GITHUB_API_BASE = "https://api.github.com"

# Context variable to store the GitHub token for the current request/execution scope
_request_token: contextvars.ContextVar[str | None] = contextvars.ContextVar(
    "request_token", default=None
)

# Git SHA (40-hex) validator
_SHA_RE = re.compile(r"^[0-9a-fA-F]{40}$")

# Context variable holding the default git ref for the current request/execution
# scope; set alongside _request_token by execution_context().
_request_ref: contextvars.ContextVar[str | None] = contextvars.ContextVar(
    "request_ref", default=None
)
+
+
@contextmanager
def execution_context(token: str | None, ref: str | None = None):
    """Bind a GitHub token (and optional git ref) to the current execution scope.

    Both context variables are restored on exit, so nested or concurrent
    scopes remain isolated.
    """
    token_snapshot = _request_token.set(token)
    ref_snapshot = _request_ref.set(ref)
    try:
        yield
    finally:
        _request_ref.reset(ref_snapshot)
        _request_token.reset(token_snapshot)
+
+
def _github_ref(provided_ref: str | None = None) -> str | None:
    """Return an explicit ref when given, else the ref bound to the current scope."""
    return provided_ref if provided_ref else _request_ref.get()
+
+
def _github_token(provided_token: str | None = None) -> str:
    """
    Get GitHub token from:
      1. Explicit argument
      2. Request Context (set via execution_context)
      3. Environment variables (Fallback)

    Raises:
        HTTPException(401): when no token can be found anywhere.
    """
    candidates = (
        provided_token,
        _request_token.get(),
        os.getenv("GITPILOT_GITHUB_TOKEN"),
        os.getenv("GITHUB_TOKEN"),
    )
    for candidate in candidates:
        if candidate:
            return candidate

    raise HTTPException(
        status_code=401,
        detail=(
            "GitHub authentication required. "
            "Please log in via the UI or set GITPILOT_GITHUB_TOKEN in your environment."
        ),
    )
+
+
async def github_request(
    path: str,
    *,
    method: str = "GET",
    json: dict[str, Any] | None = None,
    params: dict[str, Any] | None = None,
    token: str | None = None,
) -> Any:
    """
    Core GitHub request helper.

    Sends one authenticated request to the GitHub REST API and returns the
    decoded JSON body, or None for 204 / empty responses.

    Raises:
        HTTPException: with GitHub's error message on any 4xx/5xx response
            (401s get a friendlier "token expired" message).
    """
    headers = {
        "Authorization": f"Bearer {_github_token(token)}",
        "Accept": "application/vnd.github+json",
        "User-Agent": "gitpilot",
    }
    timeout = httpx.Timeout(connect=10.0, read=30.0, write=30.0, pool=10.0)

    async with httpx.AsyncClient(
        base_url=GITHUB_API_BASE, headers=headers, timeout=timeout
    ) as client:
        resp = await client.request(method, path, json=json, params=params)

    if resp.status_code >= 400:
        # Prefer GitHub's structured "message"; fall back to the raw body.
        try:
            payload = resp.json()
            detail = payload.get("message") or resp.text
        except Exception:
            detail = resp.text

        if resp.status_code == 401:
            detail = "GitHub Token Expired or Invalid. Please refresh your login."

        raise HTTPException(status_code=resp.status_code, detail=detail)

    # 204 No Content, plus endpoints that return 200 with an empty body.
    if resp.status_code == 204 or not resp.content:
        return None

    return resp.json()
+
+
+# -----------------------------------------------------------------------------
+# Repos listing (legacy + pagination/search)
+# -----------------------------------------------------------------------------
+
async def list_user_repos(
    query: str | None = None, token: str | None = None
) -> list[dict[str, Any]]:
    """
    Legacy function - fetches first 100 repos.
    (Retro-compatible with older GitPilot versions.)

    Optionally filters by a case-insensitive substring of full_name.
    """
    raw = await github_request(
        "/user/repos",
        params={
            "per_page": 100,
            "affiliation": "owner,collaborator,organization_member",
            "sort": "updated",
            "direction": "desc",
        },
        token=token,
    )

    # FIXED: Added default_branch mapping
    repos = []
    for r in raw:
        repos.append({
            "id": r["id"],
            "name": r["name"],
            "full_name": r["full_name"],
            "private": r["private"],
            "owner": r["owner"]["login"],
            "default_branch": r.get("default_branch", "main"),  # Critical Fix
        })

    if not query:
        return repos
    needle = query.lower()
    return [r for r in repos if needle in r["full_name"].lower()]
+
+
async def list_user_repos_paginated(
    page: int = 1,
    per_page: int = 100,
    token: str | None = None,
) -> dict[str, Any]:
    """
    Fetch user repositories with pagination support.

    Returns:
        {
            "repositories": [...],
            "page": int,
            "per_page": int,
            "has_more": bool,
        }

    Raises:
        HTTPException: on GitHub errors, with GitHub's "message" field
            surfaced (previously the raw response body was used, and 401s
            lacked the friendly "token expired" hint — now consistent with
            github_request).
    """
    per_page = min(per_page, 100)  # GitHub caps per_page at 100
    params = {
        "page": page,
        "per_page": per_page,
        "affiliation": "owner,collaborator,organization_member",
        "sort": "updated",
        "direction": "desc",
    }

    github_token = _github_token(token)
    headers = {
        "Authorization": f"Bearer {github_token}",
        "Accept": "application/vnd.github+json",
        "User-Agent": "gitpilot",
    }

    timeout = httpx.Timeout(connect=10.0, read=30.0, write=30.0, pool=10.0)

    async with httpx.AsyncClient(
        base_url=GITHUB_API_BASE, headers=headers, timeout=timeout
    ) as client:
        resp = await client.get("/user/repos", params=params)

    if resp.status_code >= 400:
        # Surface GitHub's structured error message (consistent with github_request).
        try:
            msg = resp.json().get("message") or resp.text
        except Exception:
            msg = resp.text
        if resp.status_code == 401:
            msg = "GitHub Token Expired or Invalid. Please refresh your login."
        raise HTTPException(status_code=resp.status_code, detail=msg)

    data = resp.json()

    # FIXED: Added default_branch mapping
    repos = [
        {
            "id": r["id"],
            "name": r["name"],
            "full_name": r["full_name"],
            "private": r["private"],
            "owner": r["owner"]["login"],
            "default_branch": r.get("default_branch", "main"),  # Critical Fix
        }
        for r in data
    ]

    # GitHub signals further pages via the Link header's rel="next" entry.
    link_header = resp.headers.get("Link", "") or ""
    has_more = 'rel="next"' in link_header

    return {
        "repositories": repos,
        "page": page,
        "per_page": per_page,
        "has_more": has_more,
    }
+
+
async def search_user_repos(
    query: str,
    page: int = 1,
    per_page: int = 100,
    token: str | None = None,
) -> dict[str, Any]:
    """
    Search across ALL user repositories, then return paginated results.

    Fetches every page (up to a safety cap), filters locally by a
    case-insensitive substring of name/full_name, then slices out the
    requested page.

    Returns:
        {
            "repositories": [...],
            "page": int,
            "per_page": int,
            "total_count": int,
            "has_more": bool,
        }
    """
    max_fetch_pages = 15  # safety (1500 repos)
    collected: list[dict[str, Any]] = []

    for fetch_page in range(1, max_fetch_pages + 1):
        batch = await list_user_repos_paginated(
            page=fetch_page,
            per_page=100,
            token=token,
        )
        collected.extend(batch["repositories"])
        if not batch["has_more"]:
            break

    needle = query.lower()
    matches = [
        r
        for r in collected
        if needle in r["name"].lower() or needle in r["full_name"].lower()
    ]

    start = (page - 1) * per_page
    end = start + per_page

    return {
        "repositories": matches[start:end],
        "page": page,
        "per_page": per_page,
        "total_count": len(matches),
        "has_more": end < len(matches),
    }
+
+
+# -----------------------------------------------------------------------------
+# Repo + Ref resolution helpers (fixes "No commit found for SHA: main")
+# -----------------------------------------------------------------------------
+
async def get_repo(owner: str, repo: str, token: str | None = None) -> dict[str, Any]:
    """Fetch repository metadata (includes ``default_branch``)."""
    path = f"/repos/{owner}/{repo}"
    return await github_request(path, token=token)
+
+
async def _resolve_head_ref(owner: str, repo: str, token: str | None) -> str:
    """Return the repo's default branch name ("main" if GitHub omits it)."""
    info = await get_repo(owner, repo, token=token)
    return info.get("default_branch", "main")
+
+
async def _resolve_ref_to_commit_sha(
    owner: str,
    repo: str,
    ref: str | None,
    token: str | None,
) -> str:
    """
    Resolve a ref (branch/tag/commit SHA/"HEAD"/None) to a commit SHA.

    Resolution order: default branch (for None/"HEAD") -> literal 40-hex
    SHA -> branch ref -> tag ref (dereferencing annotated tags) -> commits
    endpoint fallback.

    Raises:
        HTTPException(404): when the ref cannot be resolved.
        HTTPException: non-404 errors (401/403/429, ...) from GitHub are
            propagated as-is — previously every HTTPException was swallowed
            and misreported as "Ref not found".
    """
    if not ref or ref == "HEAD":
        ref = await _resolve_head_ref(owner, repo, token)

    if _SHA_RE.match(ref):
        return ref.lower()

    # Branch ref
    try:
        data = await github_request(
            f"/repos/{owner}/{repo}/git/ref/heads/{ref}",
            token=token,
        )
        return data["object"]["sha"]
    except HTTPException as e:
        if e.status_code != 404:
            raise

    # Tag ref (lightweight or annotated)
    try:
        data = await github_request(
            f"/repos/{owner}/{repo}/git/ref/tags/{ref}",
            token=token,
        )
        obj = data.get("object") or {}
        sha = obj.get("sha")

        if not sha:
            raise HTTPException(status_code=404, detail=f"Tag ref '{ref}' not found.")

        if obj.get("type") == "tag":
            # Annotated tag -> dereference to the commit it points at.
            tag_obj = await github_request(
                f"/repos/{owner}/{repo}/git/tags/{sha}",
                token=token,
            )
            target_sha = (tag_obj.get("object") or {}).get("sha")
            if not target_sha:
                raise HTTPException(
                    status_code=404, detail=f"Annotated tag '{ref}' has no target sha."
                )
            return target_sha

        # Lightweight tag points directly at a commit SHA.
        return sha
    except HTTPException as e:
        if e.status_code != 404:
            raise

    # Fallback: commits endpoint resolves branch/tag names to a commit
    try:
        commit = await github_request(
            f"/repos/{owner}/{repo}/commits/{ref}",
            token=token,
        )
        sha = commit.get("sha")
        if not sha:
            raise HTTPException(status_code=404, detail=f"Ref not found: {ref}")
        return sha
    except HTTPException as e:
        if e.status_code != 404:
            raise
        raise HTTPException(status_code=404, detail=f"Ref not found: {ref}") from e
+
+
+async def _commit_sha_to_tree_sha(
+ owner: str,
+ repo: str,
+ commit_sha: str,
+ token: str | None,
+) -> str:
+ """
+ Convert commit SHA -> tree SHA using /git/commits/{sha}.
+ """
+ commit = await github_request(
+ f"/repos/{owner}/{repo}/git/commits/{commit_sha}",
+ token=token,
+ )
+ tree = commit.get("tree") or {}
+ tree_sha = tree.get("sha")
+ if not tree_sha:
+ raise HTTPException(status_code=500, detail="Failed to resolve tree SHA from commit.")
+ return tree_sha
+
+
+# -----------------------------------------------------------------------------
+# Branch creation
+# -----------------------------------------------------------------------------
+
async def create_branch(
    owner: str,
    repo: str,
    new_branch: str,
    from_ref: str = "HEAD",
    token: str | None = None,
) -> str:
    """
    Create ``new_branch`` pointing at the commit ``from_ref`` resolves to
    (default: HEAD, i.e. the repository's default branch).

    Returns:
        The fully qualified ref name (``refs/heads/<new_branch>``).
    """
    base_sha = await _resolve_ref_to_commit_sha(owner, repo, from_ref, token)
    created = await github_request(
        f"/repos/{owner}/{repo}/git/refs",
        method="POST",
        json={"ref": f"refs/heads/{new_branch}", "sha": base_sha},
        token=token,
    )
    return created["ref"]
+
+
+# -----------------------------------------------------------------------------
+# Tree + File APIs (branch-aware)
+# -----------------------------------------------------------------------------
+
async def get_repo_tree(
    owner: str,
    repo: str,
    token: str | None = None,
    ref: str = "HEAD",
):
    """
    List every blob (file) path in the repository tree at ``ref``.

    The per-request context ref (``_github_ref``) is honoured only when the
    caller did NOT pin a specific ref (i.e. ``ref`` is empty or "HEAD").

    Returns:
        list of ``{"path": str, "type": "blob"}`` dicts.
    """
    context_ref = _github_ref(None)
    caller_left_default = not ref or ref == "HEAD"
    if caller_left_default and context_ref:
        ref = context_ref

    commit_sha = await _resolve_ref_to_commit_sha(owner, repo, ref, token)
    tree_sha = await _commit_sha_to_tree_sha(owner, repo, commit_sha, token)

    listing = await github_request(
        f"/repos/{owner}/{repo}/git/trees/{tree_sha}",
        params={"recursive": 1},
        token=token,
    )

    entries = []
    for node in listing.get("tree", []):
        if node.get("type") == "blob":
            entries.append({"path": node["path"], "type": node["type"]})
    return entries
+
+
async def get_file(
    owner: str,
    repo: str,
    path: str,
    token: str | None = None,
    ref: str | None = None,
) -> str:
    """
    Fetch a file's text content via the contents API.

    The per-request context ref is used only when the caller did not pin a
    specific ref (``ref`` empty/None or "HEAD"). Content is base64-decoded
    and returned as UTF-8 text (undecodable bytes are replaced).
    """
    context_ref = _github_ref(None)
    if (not ref or ref == "HEAD") and context_ref:
        ref = context_ref

    response = await github_request(
        f"/repos/{owner}/{repo}/contents/{path}",
        params={"ref": ref} if ref else None,
        token=token,
    )
    encoded = response.get("content") or ""
    return b64decode(encoded.encode("utf-8")).decode("utf-8", errors="replace")
+
+
async def put_file(
    owner: str,
    repo: str,
    path: str,
    content: str,
    message: str,
    token: str | None = None,
    branch: str | None = None,
) -> dict[str, Any]:
    """
    Create or update ``path`` on ``branch`` with a single commit.

    The existing file's blob SHA (required by GitHub for updates) is looked
    up first; a failed lookup simply means the file is being created.
    (Retro-compatible signature with older GitPilot versions.)
    """
    # Look up the current blob SHA, if the file already exists.
    existing_sha: str | None
    try:
        current = await github_request(
            f"/repos/{owner}/{repo}/contents/{path}",
            params={"ref": branch} if branch else None,
            token=token,
        )
        existing_sha = current.get("sha")
    except HTTPException:
        existing_sha = None

    payload: dict[str, Any] = {
        "message": message,
        "content": b64encode(content.encode("utf-8")).decode("utf-8"),
    }
    if existing_sha:
        payload["sha"] = existing_sha
    if branch:
        payload["branch"] = branch

    response = await github_request(
        f"/repos/{owner}/{repo}/contents/{path}",
        method="PUT",
        json=payload,
        token=token,
    )
    commit_info = (response or {}).get("commit", {}) if isinstance(response, dict) else {}
    return {
        "path": path,
        "commit_sha": commit_info.get("sha", ""),
        "commit_url": commit_info.get("html_url"),
    }
+
+
async def delete_file(
    owner: str,
    repo: str,
    path: str,
    message: str,
    token: str | None = None,
    branch: str | None = None,
) -> dict[str, Any]:
    """
    Delete ``path`` from the repository on ``branch`` with a single commit.

    Raises:
        HTTPException: 404 if the file exists but carries no blob SHA
            (the GET itself may also raise for a missing file).
    (Retro-compatible signature with older GitPilot versions.)
    """
    current = await github_request(
        f"/repos/{owner}/{repo}/contents/{path}",
        params={"ref": branch} if branch else None,
        token=token,
    )
    blob_sha = current.get("sha")
    if not blob_sha:
        raise HTTPException(status_code=404, detail=f"File {path} not found or has no SHA")

    payload: dict[str, Any] = {"message": message, "sha": blob_sha}
    if branch:
        payload["branch"] = branch

    response = await github_request(
        f"/repos/{owner}/{repo}/contents/{path}",
        method="DELETE",
        json=payload,
        token=token,
    )
    commit_info = (response or {}).get("commit", {}) if isinstance(response, dict) else {}
    return {
        "path": path,
        "commit_sha": commit_info.get("sha", ""),
        "commit_url": commit_info.get("html_url"),
    }
+
+
async def get_github_status_summary() -> GithubStatusSummary:
    """Report GitHub connection status (token presence + authenticated user)."""
    token = (
        os.environ.get("GITPILOT_GITHUB_TOKEN")
        or os.environ.get("GITHUB_TOKEN")
        or None
    )

    summary = GithubStatusSummary(connected=False, token_configured=bool(token))
    if not token:
        return summary

    # A successful /user call proves both token validity and connectivity.
    try:
        profile = await github_request("/user", token=token)
        if profile and "login" in profile:
            summary.connected = True
            summary.username = profile["login"]
    except Exception:
        logging.debug("GitHub connection check failed", exc_info=True)

    return summary
diff --git a/gitpilot/github_app.py b/gitpilot/github_app.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b134a33785683420ba97ae31d0d1404bc25ee7c
--- /dev/null
+++ b/gitpilot/github_app.py
@@ -0,0 +1,232 @@
+"""
+GitHub App Installation Management - PROPER FIX
+
+This checks which repositories ACTUALLY have the GitHub App installed
+by querying the user's app installations.
+"""
+from __future__ import annotations
+
+import logging
+import os
+from typing import Optional, Dict, Any, Set
+
+import httpx
+
+logger = logging.getLogger("gitpilot.github_app")
+
# Module-level cache of the repo full-names the GitHub App is installed on,
# keyed by cache key; entries expire after CACHE_TTL_SECONDS via the
# timestamps recorded alongside them in _cache_timestamp.
_installed_repos_cache: Dict[str, Set[str]] = {}
_cache_timestamp: Dict[str, float] = {}  # cache key -> epoch seconds of last refresh
CACHE_TTL_SECONDS = 300  # 5 minutes
+
+
class GitHubAppConfig:
    """GitHub App configuration sourced from environment variables.

    Falls back to the hosted GitPilot app's public identifiers when the
    corresponding variables are unset.
    """

    def __init__(self):
        # Public App identifiers; overridable via the environment.
        self.app_id = os.getenv("GITHUB_APP_ID", "2313985")
        self.client_id = os.getenv("GITHUB_CLIENT_ID", "Iv23litmRp80Z6wmlyRn")
        self.app_slug = os.getenv("GITHUB_APP_SLUG", "gitpilota")

    @property
    def is_configured(self) -> bool:
        """True when both an app id and a client id are available."""
        return bool(self.app_id) and bool(self.client_id)
+
+
def get_app_config() -> GitHubAppConfig:
    """Build a fresh GitHubAppConfig from the current environment."""
    return GitHubAppConfig()
+
+
async def get_installed_repositories(user_token: str) -> Set[str]:
    """
    Get the set of repository full names (e.g. "owner/repo") on which the
    GitHub App is installed for the user identified by ``user_token``.

    Queries ``/user/installations``, then each installation's repository
    list. Results are cached for CACHE_TTL_SECONDS.

    Fix: the cache is now keyed per token (SHA-256 of the token) instead of
    one shared constant key, so one user's installation list can no longer
    be served to a different user while the cache is warm.
    """
    import hashlib
    import time

    cache_key = hashlib.sha256(user_token.encode("utf-8")).hexdigest()

    # Serve from cache while fresh.
    if cache_key in _installed_repos_cache:
        if time.time() - _cache_timestamp.get(cache_key, 0) < CACHE_TTL_SECONDS:
            logger.debug(
                f"Using cached installed repositories ({len(_installed_repos_cache[cache_key])} repos)"
            )
            return _installed_repos_cache[cache_key]

    installed_repos: Set[str] = set()

    try:
        headers = {
            "Authorization": f"Bearer {user_token}",
            "Accept": "application/vnd.github+json",
            "User-Agent": "gitpilot",
        }

        async with httpx.AsyncClient(timeout=10.0) as client:
            # All app installations visible to this user token.
            installations_response = await client.get(
                "https://api.github.com/user/installations",
                headers=headers,
            )
            if installations_response.status_code != 200:
                logger.warning(
                    f"Failed to get installations: {installations_response.status_code}"
                )
                return installed_repos

            installations = installations_response.json().get("installations", [])
            logger.info(f"Found {len(installations)} app installations")

            # Collect the repositories covered by each installation.
            for installation in installations:
                installation_id = installation.get("id")
                repos_response = await client.get(
                    f"https://api.github.com/user/installations/{installation_id}/repositories",
                    headers=headers,
                )
                if repos_response.status_code != 200:
                    continue
                for repo in repos_response.json().get("repositories", []):
                    full_name = repo.get("full_name")  # e.g., "owner/repo"
                    if full_name:
                        installed_repos.add(full_name)
                        logger.debug(f"  App installed on: {full_name}")

        logger.info(f"GitHub App is installed on {len(installed_repos)} repositories")

        # Cache the results under this token's key.
        _installed_repos_cache[cache_key] = installed_repos
        _cache_timestamp[cache_key] = time.time()

        return installed_repos

    except Exception as e:
        # Best-effort: return whatever was collected before the failure.
        logger.error(f"Error getting installed repositories: {e}")
        return installed_repos
+
+
async def check_repo_write_access(
    owner: str,
    repo: str,
    user_token: Optional[str] = None
) -> Dict[str, Any]:
    """
    Check if the user has write access to a repository.

    Checks BOTH:
      1. The user has push permission on the repo.
      2. The GitHub App is ACTUALLY installed on this specific repository
         (agent operations fail with 403 otherwise).

    Args:
        owner: Repository owner.
        repo: Repository name.
        user_token: User's OAuth token.

    Returns:
        Dict with 'can_write', 'app_installed', 'auth_type', 'reason'.

    Fix: log messages previously contained mis-encoded emoji characters, one
    of which embedded a line separator that corrupted the source line; they
    are replaced with plain ASCII tags.
    """
    result = {
        "can_write": False,
        "app_installed": False,
        "auth_type": "none",
        "reason": "No token provided",
    }

    if not user_token:
        return result

    full_repo_name = f"{owner}/{repo}"

    try:
        # Step 1: the user's own permissions on the repository.
        async with httpx.AsyncClient(timeout=5.0) as client:
            response = await client.get(
                f"https://api.github.com/repos/{owner}/{repo}",
                headers={
                    "Authorization": f"Bearer {user_token}",
                    "Accept": "application/vnd.github+json",
                    "User-Agent": "gitpilot",
                },
            )

            if response.status_code != 200:
                result["reason"] = f"Cannot access repository (status: {response.status_code})"
                logger.warning(f"[denied] {full_repo_name}: {result['reason']}")
                return result

            permissions = response.json().get("permissions", {})
            has_push = permissions.get("push", False)

            # Step 2: is the GitHub App installed on this repo?
            installed_repos = await get_installed_repositories(user_token)
            app_installed = full_repo_name in installed_repos

            # Step 3: combine into a verdict.
            if app_installed:
                # App IS installed - the agent can write.
                result["can_write"] = True
                result["app_installed"] = True
                result["auth_type"] = "github_app"
                result["reason"] = "GitHub App installed with write access"
                logger.info(f"[ok] {full_repo_name}: App installed (agent can write)")
            elif has_push:
                # User can push but the App is missing - agent ops will 403.
                result["auth_type"] = "user_only"
                result["reason"] = "User has push access but GitHub App NOT installed (install app for agent operations)"
                logger.warning(
                    f"[warn] {full_repo_name}: User can push but app NOT installed - agent will get 403 errors"
                )
            else:
                # Neither push access nor App installation.
                result["auth_type"] = "read_only"
                result["reason"] = "No push access and GitHub App not installed"
                logger.info(f"[info] {full_repo_name}: Read-only access")

    except Exception as e:
        result["reason"] = f"Error checking access: {str(e)}"
        logger.error(f"[error] Error checking {full_repo_name}: {e}")

    return result
+
+
def clear_cache():
    """Drop every cached installation lookup (data and timestamps alike)."""
    for store in (_installed_repos_cache, _cache_timestamp):
        store.clear()
    logger.info("Cleared installation cache")
+
+
async def check_installation_for_repo(
    owner: str,
    repo: str,
    user_token: str
) -> Optional[Dict[str, Any]]:
    """
    Legacy shim kept for compatibility: return a small installation record
    when the GitHub App is installed on ``owner/repo``, else None.
    """
    access = await check_repo_write_access(owner, repo, user_token)
    if not access["app_installed"]:
        return None
    return {"installed": True, "owner": owner, "repo": repo}
\ No newline at end of file
diff --git a/gitpilot/github_issues.py b/gitpilot/github_issues.py
new file mode 100644
index 0000000000000000000000000000000000000000..c78b758cef584c1b468a31f800ab17ed5417a025
--- /dev/null
+++ b/gitpilot/github_issues.py
@@ -0,0 +1,224 @@
+# gitpilot/github_issues.py
+"""GitHub Issues API wrapper.
+
+Provides async functions for creating, reading, updating, and managing
+GitHub issues including labels, assignees, milestones, and comments.
+"""
+from __future__ import annotations
+
+from typing import Any, Dict, List, Optional
+
+from .github_api import github_request
+
+
+# ---------------------------------------------------------------------------
+# Issue CRUD
+# ---------------------------------------------------------------------------
+
async def list_issues(
    owner: str,
    repo: str,
    *,
    state: str = "open",
    labels: Optional[str] = None,
    assignee: Optional[str] = None,
    milestone: Optional[str] = None,
    sort: str = "created",
    direction: str = "desc",
    per_page: int = 30,
    page: int = 1,
    token: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """List a repository's issues (PRs excluded) with optional filters."""
    params: Dict[str, Any] = {
        "state": state,
        "sort": sort,
        "direction": direction,
        "per_page": min(per_page, 100),
        "page": page,
    }
    optional = {"labels": labels, "assignee": assignee, "milestone": milestone}
    params.update({key: value for key, value in optional.items() if value})

    raw = await github_request(
        f"/repos/{owner}/{repo}/issues",
        params=params,
        token=token,
    )
    # The issues endpoint also returns pull requests; drop them.
    return [item for item in (raw or []) if "pull_request" not in item]
+
+
async def get_issue(
    owner: str,
    repo: str,
    issue_number: int,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Fetch one issue by its number."""
    endpoint = f"/repos/{owner}/{repo}/issues/{issue_number}"
    return await github_request(endpoint, token=token)
+
+
async def create_issue(
    owner: str,
    repo: str,
    title: str,
    *,
    body: Optional[str] = None,
    labels: Optional[List[str]] = None,
    assignees: Optional[List[str]] = None,
    milestone: Optional[int] = None,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Open a new issue; optional fields are sent only when provided."""
    payload: Dict[str, Any] = {"title": title}
    # Each (key, value, include?) triple mirrors the API's optional fields.
    for key, value, include in (
        ("body", body, body is not None),
        ("labels", labels, bool(labels)),
        ("assignees", assignees, bool(assignees)),
        ("milestone", milestone, milestone is not None),
    ):
        if include:
            payload[key] = value

    return await github_request(
        f"/repos/{owner}/{repo}/issues",
        method="POST",
        json=payload,
        token=token,
    )
+
+
async def update_issue(
    owner: str,
    repo: str,
    issue_number: int,
    *,
    title: Optional[str] = None,
    body: Optional[str] = None,
    state: Optional[str] = None,
    labels: Optional[List[str]] = None,
    assignees: Optional[List[str]] = None,
    milestone: Optional[int] = None,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Patch an issue; with no fields given, just return the current issue."""
    candidates = {
        "title": title,
        "body": body,
        "state": state,
        "labels": labels,
        "assignees": assignees,
        "milestone": milestone,
    }
    payload: Dict[str, Any] = {k: v for k, v in candidates.items() if v is not None}

    if not payload:
        # Nothing to change: behave as a read.
        return await get_issue(owner, repo, issue_number, token=token)

    return await github_request(
        f"/repos/{owner}/{repo}/issues/{issue_number}",
        method="PATCH",
        json=payload,
        token=token,
    )
+
+
+# ---------------------------------------------------------------------------
+# Comments
+# ---------------------------------------------------------------------------
+
async def list_issue_comments(
    owner: str,
    repo: str,
    issue_number: int,
    *,
    per_page: int = 30,
    page: int = 1,
    token: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """Fetch one page of comments on an issue (empty list when none)."""
    response = await github_request(
        f"/repos/{owner}/{repo}/issues/{issue_number}/comments",
        params={"per_page": min(per_page, 100), "page": page},
        token=token,
    )
    return response or []
+
+
async def add_issue_comment(
    owner: str,
    repo: str,
    issue_number: int,
    body: str,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Post a new comment on an issue and return the created comment."""
    endpoint = f"/repos/{owner}/{repo}/issues/{issue_number}/comments"
    return await github_request(endpoint, method="POST", json={"body": body}, token=token)
+
+
+# ---------------------------------------------------------------------------
+# Labels
+# ---------------------------------------------------------------------------
+
async def add_labels(
    owner: str,
    repo: str,
    issue_number: int,
    labels: List[str],
    token: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """Attach labels to an issue; returns the issue's resulting label list."""
    response = await github_request(
        f"/repos/{owner}/{repo}/issues/{issue_number}/labels",
        method="POST",
        json={"labels": labels},
        token=token,
    )
    return response or []
+
+
async def remove_label(
    owner: str,
    repo: str,
    issue_number: int,
    label: str,
    token: Optional[str] = None,
) -> None:
    """Remove a single label from an issue.

    Fix: the label name is now URL-encoded before being placed in the path.
    GitHub label names may contain spaces and other reserved characters,
    which previously produced a malformed request URL.
    """
    from urllib.parse import quote

    await github_request(
        f"/repos/{owner}/{repo}/issues/{issue_number}/labels/{quote(label, safe='')}",
        method="DELETE",
        token=token,
    )
+
+
+# ---------------------------------------------------------------------------
+# Assignees
+# ---------------------------------------------------------------------------
+
async def set_assignees(
    owner: str,
    repo: str,
    issue_number: int,
    assignees: List[str],
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Replace the full assignee list on an issue (delegates to update_issue)."""
    return await update_issue(owner, repo, issue_number, assignees=assignees, token=token)
diff --git a/gitpilot/github_oauth.py b/gitpilot/github_oauth.py
new file mode 100644
index 0000000000000000000000000000000000000000..a87718115fc04704250004ac850f27bf7944d009
--- /dev/null
+++ b/gitpilot/github_oauth.py
@@ -0,0 +1,285 @@
+# gitpilot/github_oauth.py
+
+"""GitHub OAuth 2.0 authentication flow implementation (Web + Device Flow)."""
+from __future__ import annotations
+
+import logging
+import os
+import secrets
+import time
+from typing import Optional, Dict, Any
+from urllib.parse import urlencode
+
+import httpx
+from pydantic import BaseModel
+
+# Configure logging
+logger = logging.getLogger("gitpilot.auth")
+
class OAuthConfig(BaseModel):
    """GitHub OAuth App configuration (built by get_oauth_config())."""
    client_id: str
    # Optional so Device Flow (which needs no secret) still works.
    client_secret: Optional[str] = None
+
class OAuthState(BaseModel):
    """Per-login CSRF state for the Web Flow, kept in _oauth_states."""
    state: str
    # PKCE-style verifier; generated here but not sent in the auth URL.
    code_verifier: str
    # Creation time (epoch seconds); entries expire after 10 minutes.
    timestamp: float
+
class GitHubUser(BaseModel):
    """Subset of the GitHub /user profile kept for a session."""
    login: str
    id: int
    avatar_url: str
    name: Optional[str] = None
    email: Optional[str] = None
    bio: Optional[str] = None
    html_url: Optional[str] = None
+
class AuthSession(BaseModel):
    """Authenticated user session: access token plus resolved profile."""
    access_token: str
    token_type: str = "bearer"
    # Space-separated OAuth scopes granted by GitHub.
    scope: str = ""
    user: GitHubUser
+
# In-memory OAuth state storage for the Web Flow (state -> OAuthState).
# NOTE(review): process-local; states do not survive restarts and are not
# shared across multiple workers — confirm deployment runs a single process.
_oauth_states: dict[str, OAuthState] = {}
+
+
def get_oauth_config() -> OAuthConfig:
    """
    Build OAuth configuration from environment variables.

    GITHUB_CLIENT_ID falls back to the hosted app's public client id; an
    empty GITHUB_CLIENT_SECRET is normalised to None (Device Flow needs no
    secret — ensure it is enabled in the GitHub App settings).
    """
    client_id = os.getenv("GITHUB_CLIENT_ID", "Iv23litmRp80Z6wmlyRn")
    secret = os.getenv("GITHUB_CLIENT_SECRET", "") or None
    return OAuthConfig(client_id=client_id, client_secret=secret)
+
+# ============================================================================
+# WEB FLOW (Standard OAuth2 - Requires Client Secret)
+# ============================================================================
+
def generate_authorization_url() -> tuple[str, str]:
    """
    Build the GitHub OAuth authorization URL (Web Flow).

    Registers a fresh CSRF ``state`` (with a code verifier) in the in-memory
    store before returning, and prunes expired states.

    Returns:
        (authorization_url, state)
    """
    config = get_oauth_config()

    # CSRF token + verifier for this login attempt.
    csrf_state = secrets.token_urlsafe(32)
    verifier = secrets.token_urlsafe(32)

    _oauth_states[csrf_state] = OAuthState(
        state=csrf_state,
        code_verifier=verifier,
        timestamp=time.time(),
    )
    _cleanup_old_states()

    query = urlencode(
        {
            "client_id": config.client_id,
            "scope": "repo user:email",
            "state": csrf_state,
            "allow_signup": "true",
        }
    )
    return f"https://github.com/login/oauth/authorize?{query}", csrf_state
+
+
async def exchange_code_for_token(code: str, state: str) -> AuthSession:
    """
    Exchange an authorization code for an access token (Web Flow).

    Requires GITHUB_CLIENT_SECRET to be configured.

    Args:
        code: Authorization code from GitHub's redirect callback.
        state: CSRF state previously issued by generate_authorization_url().

    Returns:
        AuthSession with the access token and the authenticated user.

    Raises:
        ValueError: missing secret, unknown/expired state, network failure,
            or a GitHub-side error response.
    """
    config = get_oauth_config()

    if not config.client_secret:
        raise ValueError("Web Flow requires GITHUB_CLIENT_SECRET. Please use Device Flow or configure the secret.")

    # 1. Validate state (CSRF protection): must be one we issued.
    if state not in _oauth_states:
        logger.error(f"State mismatch or expiration. Received: {state}")
        raise ValueError("Invalid OAuth state. The session may have expired. Please try again.")

    # pop() makes each state single-use; reject states older than 10 minutes.
    oauth_state = _oauth_states.pop(state)
    if time.time() - oauth_state.timestamp > 600:
        raise ValueError("OAuth interaction timed out.")

    # 2. Exchange the code for an access token.
    async with httpx.AsyncClient(timeout=10.0) as client:
        try:
            token_response = await client.post(
                "https://github.com/login/oauth/access_token",
                data={
                    "client_id": config.client_id,
                    "client_secret": config.client_secret,
                    "code": code,
                },
                headers={"Accept": "application/json"},
            )
            token_response.raise_for_status()
            token_data = token_response.json()
        except httpx.HTTPError as e:
            logger.error(f"HTTP Error contacting GitHub: {e}")
            raise ValueError("Failed to contact GitHub authentication server.")

        # GitHub reports OAuth errors inside a 200 JSON body.
        if "error" in token_data:
            raise ValueError(f"GitHub refused the connection: {token_data.get('error_description')}")

        access_token = token_data.get("access_token")
        if not access_token:
            raise ValueError("No access_token returned from GitHub.")

        # 3. Fetch the user profile while the client is still open.
        user = await _fetch_user_profile(client, access_token)

        return AuthSession(
            access_token=access_token,
            token_type=token_data.get("token_type", "bearer"),
            scope=token_data.get("scope", ""),
            user=user,
        )
+
+# ============================================================================
+# DEVICE FLOW (No Secret Required)
+# ============================================================================
+
async def initiate_device_flow() -> Dict[str, Any]:
    """
    Device Flow step 1: ask GitHub for a device/user code pair.

    Returns:
        The raw JSON payload from GitHub's device-code endpoint.
    """
    config = get_oauth_config()
    form = {"client_id": config.client_id, "scope": "repo user:email"}

    async with httpx.AsyncClient(timeout=10.0) as client:
        resp = await client.post(
            "https://github.com/login/device/code",
            data=form,
            headers={"Accept": "application/json"},
        )
        resp.raise_for_status()
        return resp.json()
+
+
async def poll_device_token(device_code: str) -> Optional[AuthSession]:
    """
    Device Flow step 2: exchange the device code for a token (one poll).

    Args:
        device_code: Code returned by initiate_device_flow().

    Returns:
        AuthSession: if the user has completed authorization.
        None: while still pending ('authorization_pending' / 'slow_down'),
            or if GitHub returned no token yet.

    Raises:
        ValueError: if the code expired, access was denied, or GitHub
            returned any other error.
    """
    config = get_oauth_config()

    async with httpx.AsyncClient(timeout=10.0) as client:
        response = await client.post(
            "https://github.com/login/oauth/access_token",
            data={
                "client_id": config.client_id,
                "device_code": device_code,
                "grant_type": "urn:ietf:params:oauth:grant-type:device_code",
            },
            headers={"Accept": "application/json"}
        )
        data = response.json()

        # GitHub signals device-flow progress via an "error" field in the body.
        if "error" in data:
            error_code = data["error"]
            # Expected while the user has not finished authorizing yet.
            if error_code in ["authorization_pending", "slow_down"]:
                return None

            # Everything else is a terminal failure.
            desc = data.get("error_description", error_code)
            if error_code == "expired_token":
                raise ValueError("The device code has expired. Please try again.")
            if error_code == "access_denied":
                raise ValueError("Access denied by user.")

            raise ValueError(f"GitHub Auth Error: {desc}")

        access_token = data.get("access_token")
        if not access_token:
            return None

        # Success: fetch the user profile with the same HTTP client.
        user = await _fetch_user_profile(client, access_token)

        return AuthSession(
            access_token=access_token,
            token_type=data.get("token_type", "bearer"),
            scope=data.get("scope", ""),
            user=user
        )
+
+# ============================================================================
+# SHARED HELPERS
+# ============================================================================
+
+async def _fetch_user_profile(client: httpx.AsyncClient, token: str) -> GitHubUser:
+ """Internal helper to fetch user profile with an existing client."""
+ response = await client.get(
+ "https://api.github.com/user",
+ headers={
+ "Authorization": f"Bearer {token}",
+ "Accept": "application/json",
+ },
+ )
+ response.raise_for_status()
+ u = response.json()
+
+ return GitHubUser(
+ login=u["login"],
+ id=u["id"],
+ avatar_url=u["avatar_url"],
+ name=u.get("name"),
+ email=u.get("email"),
+ bio=u.get("bio"),
+ html_url=u.get("html_url")
+ )
+
+
async def validate_token(access_token: str) -> Optional[GitHubUser]:
    """
    Check whether a stored GitHub token is still usable.

    Returns the user profile on success, or None for an empty token or any
    failure (network errors and HTTP errors are treated as "invalid").
    """
    if not access_token:
        return None

    try:
        async with httpx.AsyncClient(timeout=5.0) as client:
            return await _fetch_user_profile(client, access_token)
    except Exception as e:
        logger.debug(f"Token validation failed: {e}")
        return None
+
+
+def _cleanup_old_states():
+ """Remove OAuth states older than 10 minutes to prevent memory leaks."""
+ current_time = time.time()
+ expired_states = [
+ state for state, data in _oauth_states.items()
+ if current_time - data.timestamp > 600
+ ]
+ for state in expired_states:
+ _oauth_states.pop(state, None)
\ No newline at end of file
diff --git a/gitpilot/github_pulls.py b/gitpilot/github_pulls.py
new file mode 100644
index 0000000000000000000000000000000000000000..e47aea832776f9f13fb028711af948672db39302
--- /dev/null
+++ b/gitpilot/github_pulls.py
@@ -0,0 +1,230 @@
+# gitpilot/github_pulls.py
+"""GitHub Pull Requests API wrapper.
+
+Provides async functions for creating, listing, reviewing, and merging
+pull requests.
+"""
+from __future__ import annotations
+
+from typing import Any, Dict, List, Optional
+
+from .github_api import github_request
+
+
+# ---------------------------------------------------------------------------
+# PR CRUD
+# ---------------------------------------------------------------------------
+
async def list_pull_requests(
    owner: str,
    repo: str,
    *,
    state: str = "open",
    sort: str = "created",
    direction: str = "desc",
    head: Optional[str] = None,
    base: Optional[str] = None,
    per_page: int = 30,
    page: int = 1,
    token: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """Fetch one page of pull requests, optionally filtered by head/base."""
    params: Dict[str, Any] = {
        "state": state,
        "sort": sort,
        "direction": direction,
        "per_page": min(per_page, 100),
        "page": page,
    }
    params.update({key: value for key, value in (("head", head), ("base", base)) if value})

    response = await github_request(
        f"/repos/{owner}/{repo}/pulls",
        params=params,
        token=token,
    )
    return response or []
+
+
async def get_pull_request(
    owner: str,
    repo: str,
    pull_number: int,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Fetch a single pull request by number."""
    endpoint = f"/repos/{owner}/{repo}/pulls/{pull_number}"
    return await github_request(endpoint, token=token)
+
+
async def create_pull_request(
    owner: str,
    repo: str,
    *,
    title: str,
    head: str,
    base: str,
    body: Optional[str] = None,
    draft: bool = False,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Open a pull request merging ``head`` into ``base``."""
    payload: Dict[str, Any] = {
        "title": title,
        "head": head,
        "base": base,
        "draft": draft,
    }
    if body is not None:
        payload["body"] = body

    return await github_request(
        f"/repos/{owner}/{repo}/pulls",
        method="POST",
        json=payload,
        token=token,
    )
+
+
async def update_pull_request(
    owner: str,
    repo: str,
    pull_number: int,
    *,
    title: Optional[str] = None,
    body: Optional[str] = None,
    state: Optional[str] = None,
    base: Optional[str] = None,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Patch a pull request; with no fields given, just return the current PR."""
    fields = {"title": title, "body": body, "state": state, "base": base}
    payload: Dict[str, Any] = {k: v for k, v in fields.items() if v is not None}

    if not payload:
        # Nothing to change: behave as a read.
        return await get_pull_request(owner, repo, pull_number, token=token)

    return await github_request(
        f"/repos/{owner}/{repo}/pulls/{pull_number}",
        method="PATCH",
        json=payload,
        token=token,
    )
+
+
+# ---------------------------------------------------------------------------
+# Merge
+# ---------------------------------------------------------------------------
+
async def merge_pull_request(
    owner: str,
    repo: str,
    pull_number: int,
    *,
    commit_title: Optional[str] = None,
    commit_message: Optional[str] = None,
    merge_method: str = "merge",
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Merge a pull request.

    merge_method: one of 'merge', 'squash', 'rebase'.
    """
    extras = {"commit_title": commit_title, "commit_message": commit_message}
    payload: Dict[str, Any] = {"merge_method": merge_method}
    payload.update({key: value for key, value in extras.items() if value})

    return await github_request(
        f"/repos/{owner}/{repo}/pulls/{pull_number}/merge",
        method="PUT",
        json=payload,
        token=token,
    )
+
+
+# ---------------------------------------------------------------------------
+# PR Files & Diff
+# ---------------------------------------------------------------------------
+
async def list_pr_files(
    owner: str,
    repo: str,
    pull_number: int,
    *,
    per_page: int = 100,
    page: int = 1,
    token: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """Fetch one page of the files changed by a pull request."""
    response = await github_request(
        f"/repos/{owner}/{repo}/pulls/{pull_number}/files",
        params={"per_page": min(per_page, 100), "page": page},
        token=token,
    )
    return response or []
+
+
+# ---------------------------------------------------------------------------
+# PR Reviews & Comments
+# ---------------------------------------------------------------------------
+
async def list_pr_reviews(
    owner: str,
    repo: str,
    pull_number: int,
    token: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """Fetch the reviews submitted on a pull request (empty list when none)."""
    response = await github_request(
        f"/repos/{owner}/{repo}/pulls/{pull_number}/reviews",
        token=token,
    )
    return response or []
+
+
async def create_pr_review(
    owner: str,
    repo: str,
    pull_number: int,
    *,
    body: str,
    event: str = "COMMENT",
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Submit a PR review.

    event: one of 'APPROVE', 'REQUEST_CHANGES', 'COMMENT'.
    """
    endpoint = f"/repos/{owner}/{repo}/pulls/{pull_number}/reviews"
    return await github_request(
        endpoint,
        method="POST",
        json={"body": body, "event": event},
        token=token,
    )
+
+
async def add_pr_comment(
    owner: str,
    repo: str,
    pull_number: int,
    body: str,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Leave a general (non-review) comment on a PR via the issues API."""
    endpoint = f"/repos/{owner}/{repo}/issues/{pull_number}/comments"
    return await github_request(endpoint, method="POST", json={"body": body}, token=token)
diff --git a/gitpilot/github_search.py b/gitpilot/github_search.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e3f9de16320b4127bc9d13f22f7d2987b03bbb4
--- /dev/null
+++ b/gitpilot/github_search.py
@@ -0,0 +1,157 @@
+# gitpilot/github_search.py
+"""GitHub Search API wrapper.
+
+Provides async functions for searching code, repositories, issues, and users
+via GitHub's Search API.
+"""
+from __future__ import annotations
+
+from typing import Any, Dict, List, Optional
+
+from .github_api import github_request
+
+
async def search_code(
    query: str,
    *,
    owner: Optional[str] = None,
    repo: Optional[str] = None,
    language: Optional[str] = None,
    path: Optional[str] = None,
    per_page: int = 30,
    page: int = 1,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Search for code across GitHub repositories.

    The keyword arguments are folded into GitHub search qualifiers
    (repo:/user:/language:/path:) appended to ``query``.
    Returns: {total_count, incomplete_results, items[...]}.
    """
    qualifiers = [query]
    if owner and repo:
        qualifiers.append(f"repo:{owner}/{repo}")
    elif owner:
        qualifiers.append(f"user:{owner}")
    for prefix, value in (("language", language), ("path", path)):
        if value:
            qualifiers.append(f"{prefix}:{value}")

    params = {
        "q": " ".join(qualifiers),
        "per_page": min(per_page, 100),
        "page": page,
    }
    raw = await github_request("/search/code", params=params, token=token)
    return _normalise_search_result(raw)
+
+
async def search_issues(
    query: str,
    *,
    owner: Optional[str] = None,
    repo: Optional[str] = None,
    state: Optional[str] = None,
    label: Optional[str] = None,
    is_pr: Optional[bool] = None,
    per_page: int = 30,
    page: int = 1,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Search issues and pull requests.

    ``is_pr`` narrows results to pull requests (True) or plain issues
    (False); None searches both.
    """
    qualifiers = [query]
    if owner and repo:
        qualifiers.append(f"repo:{owner}/{repo}")
    elif owner:
        qualifiers.append(f"user:{owner}")
    if state:
        qualifiers.append(f"state:{state}")
    if label:
        qualifiers.append(f"label:{label}")
    if is_pr is not None:
        qualifiers.append("type:pr" if is_pr else "type:issue")

    params = {
        "q": " ".join(qualifiers),
        "per_page": min(per_page, 100),
        "page": page,
    }
    raw = await github_request("/search/issues", params=params, token=token)
    return _normalise_search_result(raw)
+
+
async def search_repositories(
    query: str,
    *,
    language: Optional[str] = None,
    sort: Optional[str] = None,
    order: str = "desc",
    per_page: int = 30,
    page: int = 1,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Search for repositories, optionally narrowed by language and sorted."""
    q = query if not language else f"{query} language:{language}"

    params: Dict[str, Any] = {
        "q": q,
        "per_page": min(per_page, 100),
        "page": page,
        "order": order,
    }
    # GitHub defaults to best-match ordering when "sort" is absent.
    if sort:
        params["sort"] = sort

    raw = await github_request("/search/repositories", params=params, token=token)
    return _normalise_search_result(raw)
+
+
async def search_users(
    query: str,
    *,
    type_filter: Optional[str] = None,
    location: Optional[str] = None,
    language: Optional[str] = None,
    per_page: int = 30,
    page: int = 1,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Search for users and organizations.

    type_filter: 'user' or 'org' to narrow results.
    """
    qualifiers = [query]
    for prefix, value in (
        ("type", type_filter),
        ("location", location),
        ("language", language),
    ):
        if value:
            qualifiers.append(f"{prefix}:{value}")

    params = {
        "q": " ".join(qualifiers),
        "per_page": min(per_page, 100),
        "page": page,
    }
    raw = await github_request("/search/users", params=params, token=token)
    return _normalise_search_result(raw)
+
+
+def _normalise_search_result(result: Any) -> Dict[str, Any]:
+ """Ensure consistent shape even if GitHub returns None."""
+ if not isinstance(result, dict):
+ return {"total_count": 0, "incomplete_results": False, "items": []}
+ return {
+ "total_count": result.get("total_count", 0),
+ "incomplete_results": result.get("incomplete_results", False),
+ "items": result.get("items", []),
+ }
diff --git a/gitpilot/headless.py b/gitpilot/headless.py
new file mode 100644
index 0000000000000000000000000000000000000000..72f4be6e531c4a8cb6f370f29976725d21588454
--- /dev/null
+++ b/gitpilot/headless.py
@@ -0,0 +1,96 @@
+# gitpilot/headless.py
+"""Headless execution mode for CI/CD pipelines.
+
+Runs GitPilot non-interactively from the command line, GitHub Actions,
+or GitLab CI, returning structured JSON output.
+
+Usage examples::
+
+ gitpilot run --headless -r owner/repo -m "fix the login bug"
+ gitpilot run --headless -r owner/repo --from-pr 42
+ echo "add tests for auth module" | gitpilot run --headless -r owner/repo
+"""
+from __future__ import annotations
+
+import json
+import logging
+from dataclasses import dataclass, field
+from typing import Any, Dict, Optional
+
+from .agent_tools import set_repo_context
+from .agentic import dispatch_request
+
+logger = logging.getLogger(__name__)
+
+
@dataclass
class HeadlessResult:
    """Result of a headless execution, serialised to JSON for CI/CD consumers."""

    success: bool                           # True when dispatch completed without raising
    output: str                             # final agent output; "" on failure
    session_id: Optional[str] = None
    pr_url: Optional[str] = None
    plan: Optional[Dict[str, Any]] = None   # structured execution plan, if any
    error: Optional[str] = None             # error message when success is False

    def to_json(self) -> str:
        """Render the result as pretty-printed JSON.

        Includes every field; the previous version silently dropped
        ``plan``, leaving the field unreachable in the JSON output.
        """
        return json.dumps(
            {
                "success": self.success,
                "output": self.output,
                "session_id": self.session_id,
                "pr_url": self.pr_url,
                "plan": self.plan,
                "error": self.error,
            },
            indent=2,
        )
+
+
async def run_headless(
    repo_full_name: str,
    message: str,
    token: str,
    branch: Optional[str] = None,
    auto_pr: bool = False,
    from_pr: Optional[int] = None,
) -> HeadlessResult:
    """Execute a request non-interactively.

    Args:
        repo_full_name: "owner/repo" slug; split on the first "/".
        message: User request forwarded to the agent dispatcher.
        token: GitHub token used for all API access.
        branch: Target branch; the repo context defaults to "main".
        auto_pr: NOTE(review): accepted but never read in this function —
            confirm whether PR auto-creation is handled downstream.
        from_pr: Optional PR number whose title/body are prepended to the
            request as extra context (best-effort).

    Returns:
        HeadlessResult with the agent output on success, or the error
        message on failure. Never raises.
    """
    owner, repo = repo_full_name.split("/", 1)
    set_repo_context(owner, repo, token=token, branch=branch or "main")

    # If from_pr, fetch PR context
    if from_pr:
        try:
            from .github_pulls import get_pull_request

            pr = await get_pull_request(owner, repo, from_pr, token=token)
            # Prepend the PR title/body so the agent sees the discussion context.
            message = (
                f"PR #{from_pr}: {pr.get('title', '')}\n"
                f"{pr.get('body', '')}\n\n"
                f"User request: {message}"
            )
        except Exception as e:
            # Best-effort: a missing/unreadable PR must not abort the run.
            logger.warning("Could not fetch PR #%s: %s", from_pr, e)

    try:
        result = await dispatch_request(
            user_request=message,
            repo_full_name=repo_full_name,
            token=token,
            branch_name=branch,
        )

        # dispatch_request may return a dict payload or a bare value.
        output = result.get("result", "") if isinstance(result, dict) else str(result)

        return HeadlessResult(
            success=True,
            output=output,
        )
    except Exception as e:
        logger.exception("Headless execution failed")
        return HeadlessResult(
            success=False,
            output="",
            error=str(e),
        )
diff --git a/gitpilot/hf_space_tools.py b/gitpilot/hf_space_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9d1dc2f39e70d5f5d63a439558a1e4f649cd471
--- /dev/null
+++ b/gitpilot/hf_space_tools.py
@@ -0,0 +1,407 @@
+"""HuggingFace Space management tools for GitPilot.
+
+Provides CrewAI-compatible tools for:
+- Cloning HF Spaces
+- Analyzing Space health (SDK, deps, dead patterns)
+- Generating fixes via OllaBridge LLM
+- Pushing fixes to HF repos
+- Managing ZeroGPU hardware allocation
+
+Designed to work with GitPilot's multi-agent architecture
+and OllaBridge Cloud as the LLM backend.
+"""
+from __future__ import annotations
+
+import json
+import logging
+import os
+import re
+import subprocess
+import tempfile
+from pathlib import Path
+from typing import Any
+
+logger = logging.getLogger(__name__)
+
# Dead/deprecated patterns to scan for.
# Each entry is (regex, human-readable description); analyze_hf_space treats
# any match inside a .py file as a critical issue that requires a rebuild.
DEAD_PATTERNS: list[tuple[str, str]] = [
    (r'st\.secrets\[.*BACKEND_SERVER.*\]', 'Dead backend server dependency'),
    (r'api-inference\.huggingface\.co', 'Deprecated HF Inference API endpoint'),
    (r'from\s+dalle_mini', 'Deprecated dalle-mini imports'),
    (r'from\s+min_dalle', 'Deprecated min-dalle imports'),
    (r'from\s+transformers\.file_utils', 'Removed transformers.file_utils'),
    (r'jax\.experimental\.PartitionSpec', 'Moved JAX PartitionSpec API'),
    (r'gr\.inputs\.', 'Deprecated Gradio inputs API'),
    (r'gr\.outputs\.', 'Deprecated Gradio outputs API'),
]
+
+
def clone_hf_space(space_id: str, token: str | None = None) -> dict[str, Any]:
    """Clone a HuggingFace Space repository to a temp directory.

    Args:
        space_id: Full Space ID (e.g. 'user/space-name').
        token: Optional HF token for private repos.

    Returns:
        Dict with 'path' (str), 'success' (bool), 'error' (str|None).
        Any token embedded in the clone URL is redacted from error text
        (git echoes the full remote URL, credentials included, on failure).
    """
    tmpdir = tempfile.mkdtemp(prefix="gitpilot_hf_")
    name = space_id.split("/")[-1]
    repo_dir = os.path.join(tmpdir, name)

    clone_url = f"https://huggingface.co/spaces/{space_id}"
    if token:
        # Embed credentials for private repos; scrubbed from errors below.
        clone_url = f"https://user:{token}@huggingface.co/spaces/{space_id}"

    def _redact(text: str) -> str:
        # Never surface the raw token to callers or logs.
        return text.replace(token, "***") if token else text

    try:
        result = subprocess.run(
            ["git", "clone", "--depth=1", clone_url, repo_dir],
            capture_output=True, text=True, timeout=120,
        )
        if result.returncode != 0:
            return {"path": "", "success": False, "error": _redact(result.stderr.strip())}
        return {"path": repo_dir, "success": True, "error": None}
    except subprocess.TimeoutExpired:
        return {"path": "", "success": False, "error": "Clone timed out (120s)"}
    except Exception as exc:
        return {"path": "", "success": False, "error": _redact(str(exc))}
+
+
def analyze_hf_space(repo_dir: str) -> dict[str, Any]:
    """Analyze a cloned HuggingFace Space for issues.

    Checks README front matter (sdk/app_file), presence of the app file and
    requirements.txt, scans Python sources for DEAD_PATTERNS and for GPU
    dependency indicators, then derives recommendations.

    Returns a diagnosis dict with:
        sdk, app_file, issues, dead_patterns, needs_gpu,
        needs_rebuild, severity, recommendations, files.
    """
    path = Path(repo_dir)
    diag: dict[str, Any] = {
        "sdk": "unknown",
        "app_file": "app.py",
        "issues": [],
        "dead_patterns": [],
        "needs_gpu": False,
        "needs_rebuild": False,
        "severity": "info",
        "recommendations": [],
        "files": [],
    }

    # -- Parse README front matter -------------------------------------------
    readme = path / "README.md"
    if readme.exists():
        text = readme.read_text(errors="replace")
        sdk_match = re.search(r'^sdk:\s*(\S+)', text, re.MULTILINE)
        app_match = re.search(r'^app_file:\s*(\S+)', text, re.MULTILINE)
        if sdk_match:
            diag["sdk"] = sdk_match.group(1)
        if app_match:
            diag["app_file"] = app_match.group(1)
    else:
        diag["issues"].append("Missing README.md")

    # -- Check app_file exists -------------------------------------------------
    app_path = path / diag["app_file"]
    if not app_path.exists():
        diag["issues"].append(f"app_file '{diag['app_file']}' does not exist")
        diag["severity"] = "critical"
        diag["needs_rebuild"] = True

    # -- Check requirements.txt ------------------------------------------------
    req = path / "requirements.txt"
    if not req.exists():
        diag["issues"].append("Missing requirements.txt")
    elif not req.read_text(errors="replace").strip():
        diag["issues"].append("Empty requirements.txt")

    # -- Single pass over Python sources: dead patterns + GPU indicators ------
    # (Previously two separate rglob passes read every file twice and also
    # descended into .git; each file is now read once and .git is skipped.)
    gpu_indicators = [
        "torch", "diffusers", "transformers", "accelerate",
        "spaces.GPU", "@spaces.GPU", "cuda", ".to(\"cuda\")",
    ]
    for py_file in path.rglob("*.py"):
        if ".git" in py_file.parts:
            continue
        try:
            content = py_file.read_text(errors="replace")
        except OSError:
            continue
        for pattern, desc in DEAD_PATTERNS:
            if re.search(pattern, content):
                rel = str(py_file.relative_to(path))
                diag["dead_patterns"].append(f"{rel}: {desc}")
                diag["issues"].append(f"Dead pattern in {rel}: {desc}")
                diag["severity"] = "critical"
                diag["needs_rebuild"] = True
        if not diag["needs_gpu"] and any(ind in content for ind in gpu_indicators):
            diag["needs_gpu"] = True

    # -- File listing (excluding git metadata) ---------------------------------
    for p in sorted(path.rglob("*")):
        if p.is_file() and ".git" not in p.parts:
            diag["files"].append(str(p.relative_to(path)))

    # -- Build recommendations -------------------------------------------------
    if diag["needs_rebuild"]:
        diag["recommendations"].append("Rebuild app.py with modern dependencies")
    if diag["sdk"] == "streamlit":
        diag["recommendations"].append("Consider migrating to Gradio SDK")
    if diag["dead_patterns"]:
        diag["recommendations"].append("Remove deprecated API calls")
    if diag["needs_gpu"]:
        diag["recommendations"].append("Request ZeroGPU (zero-a10g) hardware")

    return diag
+
+
def generate_space_fix(
    space_id: str,
    diagnosis: dict[str, Any],
    app_content: str = "",
    ollabridge_url: str | None = None,
    ollabridge_model: str = "qwen2.5:1.5b",
    ollabridge_key: str | None = None,
) -> dict[str, Any]:
    """Generate a fix for a broken HF Space.

    If ollabridge_url is provided, uses LLM for intelligent fix.
    Otherwise falls back to template-based fix.

    Args:
        space_id: Full Space ID ('user/space-name').
        diagnosis: Output of analyze_hf_space().
        app_content: Current app source (truncated inside the prompt builder).
        ollabridge_url: Base URL of an OpenAI-compatible chat endpoint.
        ollabridge_model: Model name sent in the chat payload.
        ollabridge_key: Optional bearer token for the endpoint.

    Returns dict with 'files' (dict of filename->content), 'explanation' (str).
    """
    # Try LLM-powered fix via OllaBridge
    if ollabridge_url:
        try:
            import httpx  # local import keeps httpx optional for template-only use
            prompt = _build_repair_prompt(space_id, diagnosis, app_content)
            payload = {
                "model": ollabridge_model,
                "messages": [
                    {"role": "system", "content": "You are an expert HuggingFace Spaces developer. Output valid JSON."},
                    {"role": "user", "content": prompt},
                ],
                # Low temperature: deterministic, conservative repairs.
                "temperature": 0.3,
                "max_tokens": 4096,
            }
            headers = {"Content-Type": "application/json"}
            if ollabridge_key:
                headers["Authorization"] = f"Bearer {ollabridge_key}"

            resp = httpx.post(
                f"{ollabridge_url.rstrip('/')}/v1/chat/completions",
                json=payload, headers=headers, timeout=120.0,
            )
            if resp.status_code == 200:
                content = resp.json()["choices"][0]["message"]["content"]
                fix = _parse_llm_fix(content)
                if fix:
                    return fix
            # Non-200 responses and unparseable replies fall through below.
        except Exception as exc:
            # Best-effort: any network/parse failure degrades to the template.
            logger.warning("OllaBridge fix generation failed: %s", exc)

    # Template fallback
    return _generate_template_fix(space_id, diagnosis)
+
+
def push_space_fix(
    repo_dir: str,
    fix: dict[str, Any],
    commit_message: str = "fix: auto-repair by GitPilot + RepoGuardian",
) -> dict[str, Any]:
    """Apply fix files and push to the Space repo.

    Args:
        repo_dir: Path to cloned Space repo.
        fix: Fix dict with 'files' key mapping filename -> content.
        commit_message: Git commit message.

    Returns:
        Dict with 'success' (bool), 'changed_files' (list), 'error' (str|None).

    Filenames come from an LLM and are untrusted: any path that resolves
    outside repo_dir (e.g. '../evil') is rejected instead of written.
    """
    root = Path(repo_dir).resolve()
    changed: list[str] = []

    # Write fix files, refusing anything that escapes the repo directory.
    for filename, content in fix.get("files", {}).items():
        target = (root / filename).resolve()
        if target != root and root not in target.parents:
            return {
                "success": False,
                "changed_files": changed,
                "error": f"Refusing to write outside repo: {filename}",
            }
        target.parent.mkdir(parents=True, exist_ok=True)
        target.write_text(content, encoding="utf-8")
        changed.append(filename)

    if not changed:
        return {"success": False, "changed_files": [], "error": "No files to write"}

    # Git add, commit, push
    cmds = [
        ["git", "add", "-A"],
        ["git", "commit", "-m", commit_message],
        ["git", "push", "origin", "main"],
    ]
    for cmd in cmds:
        result = subprocess.run(cmd, cwd=repo_dir, capture_output=True, text=True, timeout=60)
        if result.returncode != 0:
            return {
                "success": False,
                "changed_files": changed,
                "error": f"Command '{' '.join(cmd)}' failed: {result.stderr.strip()}",
            }

    return {"success": True, "changed_files": changed, "error": None}
+
+
def manage_space_hardware(
    space_id: str,
    token: str,
    hardware: str = "zero-a10g",
    auto_free: bool = True,
) -> dict[str, Any]:
    """Request hardware for a HuggingFace Space.

    If ZeroGPU slots are full and auto_free is True,
    automatically downgrades a paused Space to free a slot.

    Args:
        space_id: Full Space ID ('user/space-name').
        token: HF token with write access to the namespace.
        hardware: Hardware flavour to request (default ZeroGPU 'zero-a10g').
        auto_free: Downgrade an idle ZeroGPU Space when slots are full.

    Returns dict with 'success', 'hardware', 'freed_slot', 'error'.
    """
    try:
        from huggingface_hub import HfApi
        api = HfApi(token=token)

        # Try direct request
        try:
            api.request_space_hardware(space_id, hardware)
            return {"success": True, "hardware": hardware, "freed_slot": None, "error": None}
        except Exception as exc:
            # NOTE(review): "limited to" is a substring match on the Hub's
            # quota error message — fragile; confirm against current API text.
            if "limited to" not in str(exc).lower():
                return {"success": False, "hardware": None, "freed_slot": None, "error": str(exc)}

        if not auto_free:
            return {"success": False, "hardware": None, "freed_slot": None, "error": "Slots full, auto_free disabled"}

        # Find and downgrade a paused Space
        namespace = space_id.split("/")[0]
        spaces = list(api.list_spaces(author=namespace))
        for s in spaces:
            try:
                info = api.space_info(s.id)
                if not info.runtime:
                    continue
                raw_hw = info.runtime.raw.get("hardware", {})
                req_hw = raw_hw.get("requested", "")
                stage = info.runtime.stage
                # Only steal a slot from an idle ZeroGPU Space, never our own.
                if "zero" in str(req_hw).lower() and stage in ("PAUSED", "SLEEPING") and s.id != space_id:
                    api.request_space_hardware(s.id, "cpu-basic")
                    # Retry the original request
                    api.request_space_hardware(space_id, hardware)
                    return {
                        "success": True,
                        "hardware": hardware,
                        "freed_slot": s.id,
                        "error": None,
                    }
            except Exception:
                # Best-effort: skip Spaces we cannot inspect or downgrade.
                continue

        return {"success": False, "hardware": None, "freed_slot": None, "error": "No paused Spaces to free"}

    except ImportError:
        return {"success": False, "hardware": None, "freed_slot": None, "error": "huggingface_hub not installed"}
+
+
+def get_space_runtime_info(space_id: str, token: str | None = None) -> dict[str, Any]:
+ """Fetch runtime info for a HuggingFace Space.
+
+ Returns dict with sdk, stage, hardware, domain, etc.
+ """
+ try:
+ from huggingface_hub import HfApi
+ api = HfApi(token=token)
+ info = api.space_info(space_id)
+ result: dict[str, Any] = {
+ "space_id": space_id,
+ "sdk": info.sdk,
+ "success": True,
+ }
+ if info.runtime:
+ result["stage"] = info.runtime.stage
+ hw = info.runtime.raw.get("hardware", {})
+ result["current_hardware"] = hw.get("current")
+ result["requested_hardware"] = hw.get("requested")
+ domains = info.runtime.raw.get("domains", [])
+ if domains:
+ result["domain"] = domains[0].get("domain")
+ return result
+ except Exception as exc:
+ return {"space_id": space_id, "success": False, "error": str(exc)}
+
+
+# ---- Internal helpers ----
+
def _build_repair_prompt(space_id: str, diagnosis: dict[str, Any], app_content: str) -> str:
    """Build the LLM prompt asking for a JSON-formatted Space repair.

    app_content is truncated to 5000 characters to bound prompt size.
    chr(10) supplies newlines inside f-string expressions because
    backslashes are not allowed there before Python 3.12.
    """
    return f"""A HuggingFace Space is broken and needs repair.

## Space: {space_id}
- SDK: {diagnosis.get('sdk', 'unknown')}
- app_file: {diagnosis.get('app_file', 'app.py')}

## Issues
{chr(10).join('- ' + i for i in diagnosis.get('issues', []))}

## Dead Patterns
{chr(10).join('- ' + p for p in diagnosis.get('dead_patterns', []))}

## Current app.py (first 150 lines)
{app_content[:5000]}

Generate a complete fix as JSON:
{{
  "files": {{
    "app.py": "",
    "requirements.txt": "",
    "README.md": ""
  }},
  "explanation": ""
}}"""
+
+
+def _parse_llm_fix(response: str) -> dict[str, Any] | None:
+ try:
+ return json.loads(response)
+ except json.JSONDecodeError:
+ pass
+ match = re.search(r'```(?:json)?\s*\n(.+?)\n```', response, re.DOTALL)
+ if match:
+ try:
+ return json.loads(match.group(1))
+ except json.JSONDecodeError:
+ pass
+ return None
+
+
def _generate_template_fix(space_id: str, diagnosis: dict[str, Any]) -> dict[str, Any]:
    """Build a deterministic fallback fix: a minimal working Gradio app.

    Chooses a GPU-aware template (guarded `import spaces`) when the
    diagnosis flagged GPU dependencies, otherwise a plain CPU template.
    The README front matter drives the Space configuration (sdk, app_file).
    """
    name = space_id.split("/")[-1]
    title = name.replace("-", " ").replace("_", " ").title()
    needs_gpu = diagnosis.get("needs_gpu", False)

    if needs_gpu:
        # GPU template: wraps the handler with spaces.GPU when available.
        app = f'''"""\n{title} - Auto-repaired by GitPilot + RepoGuardian\n"""\nimport gradio as gr\nimport numpy as np\n\ntry:\n    import spaces\n    GPU = True\nexcept ImportError:\n    GPU = False\n\ndef process(prompt: str, progress=gr.Progress(track_tqdm=True)):\n    if not prompt.strip():\n        raise gr.Error("Please enter a prompt.")\n    return f"Output for: {{prompt}}"\n\nif GPU:\n    process = spaces.GPU(process)\n\nwith gr.Blocks(theme=gr.themes.Soft(), title="{title}") as demo:\n    gr.Markdown("# {title}")\n    with gr.Row():\n        inp = gr.Textbox(label="Prompt", lines=3)\n        out = gr.Textbox(label="Output", lines=5)\n    gr.Button("Generate", variant="primary").click(process, [inp], [out])\n\nif __name__ == "__main__":\n    demo.launch()\n'''
        reqs = "gradio>=4.0.0\ntorch>=2.0.0\nnumpy>=1.24.0\n"
    else:
        # CPU template: plain Gradio Blocks app with a text round-trip.
        app = f'''"""\n{title} - Auto-repaired by GitPilot + RepoGuardian\n"""\nimport gradio as gr\n\ndef process(text: str):\n    if not text.strip():\n        raise gr.Error("Please enter text.")\n    return f"Processed: {{text}}"\n\nwith gr.Blocks(theme=gr.themes.Soft(), title="{title}") as demo:\n    gr.Markdown("# {title}")\n    with gr.Row():\n        inp = gr.Textbox(label="Input", lines=3)\n        out = gr.Textbox(label="Output", lines=3)\n    gr.Button("Process", variant="primary").click(process, [inp], [out])\n\nif __name__ == "__main__":\n    demo.launch()\n'''
        reqs = "gradio>=4.0.0\n"

    readme = f"""---\ntitle: {title}\nemoji: \U0001f680\ncolorFrom: blue\ncolorTo: purple\nsdk: gradio\nsdk_version: 5.23.0\napp_file: app.py\npinned: false\nlicense: apache-2.0\n---\n\n# {title}\n\nAuto-repaired by [GitPilot](https://github.com/ruslanmv/gitpilot) + [RepoGuardian](https://github.com/ruslanmv/RepoGuardian).\n"""

    return {
        "files": {"app.py": app, "requirements.txt": reqs, "README.md": readme},
        "explanation": "Template fix: replaced broken app with working Gradio placeholder",
    }
diff --git a/gitpilot/hooks.py b/gitpilot/hooks.py
new file mode 100644
index 0000000000000000000000000000000000000000..18027fbeea9e0dc54a3d1794b6071de4d6f225fc
--- /dev/null
+++ b/gitpilot/hooks.py
@@ -0,0 +1,195 @@
+# gitpilot/hooks.py
+"""Event hook system for workflow automation.
+
+Allows users to register shell commands or Python callables that fire
+on specific lifecycle events. Hooks are defined in .gitpilot/hooks.json
+or programmatically via the API.
+
+Events
+------
+- session_start Session begins
+- session_end Session ends
+- pre_tool_use Before a tool runs (blocking hooks can cancel)
+- post_tool_use After a tool completes
+- pre_edit Before file edit (blocking hooks can cancel)
+- post_edit After file edit
+- pre_commit Before git commit (blocking hooks can cancel)
+- post_commit After git commit
+- pre_push Before git push (blocking hooks can cancel)
+- user_message When the user sends a message
+"""
+from __future__ import annotations
+
+import asyncio
+import json
+import logging
+import os
+from dataclasses import dataclass, field
+from enum import Enum
+from pathlib import Path
+from typing import Any, Callable, Dict, List, Optional
+
logger = logging.getLogger(__name__)


class HookEvent(str, Enum):
    """Lifecycle events a hook can subscribe to."""

    SESSION_START = "session_start"
    SESSION_END = "session_end"
    PRE_TOOL_USE = "pre_tool_use"
    POST_TOOL_USE = "post_tool_use"
    PRE_EDIT = "pre_edit"
    POST_EDIT = "post_edit"
    PRE_COMMIT = "pre_commit"
    POST_COMMIT = "post_commit"
    PRE_PUSH = "pre_push"
    USER_MESSAGE = "user_message"


@dataclass
class HookDefinition:
    """A registered hook: either a shell `command` or a Python `handler`."""

    event: HookEvent
    name: str
    command: Optional[str] = None       # shell command, run via the event loop
    handler: Optional[Callable] = None  # sync callable taking the context dict
    blocking: bool = False              # a failing blocking hook cancels the action
    timeout: int = 30                   # seconds before a command hook is killed


@dataclass
class HookResult:
    """Outcome of a single hook invocation."""

    hook_name: str
    event: HookEvent
    success: bool
    output: str = ""
    blocked: bool = False  # True when a failing blocking hook stopped the chain


class HookManager:
    """Register and fire lifecycle hooks."""

    def __init__(self):
        # One (possibly empty) list per event, preserving registration order.
        self._hooks: Dict[HookEvent, List[HookDefinition]] = {
            e: [] for e in HookEvent
        }

    def register(self, hook: HookDefinition):
        """Append a hook to its event's list."""
        self._hooks[hook.event].append(hook)
        logger.info("Registered hook '%s' for event '%s'", hook.name, hook.event)

    def unregister(self, event: HookEvent, name: str):
        """Remove every hook named `name` registered for `event`."""
        self._hooks[event] = [h for h in self._hooks[event] if h.name != name]

    def list_hooks(self) -> List[Dict[str, Any]]:
        """Return a JSON-friendly summary of all registered hooks."""
        result = []
        for event, hooks in self._hooks.items():
            for h in hooks:
                result.append({
                    "event": event.value,
                    "name": h.name,
                    "command": h.command,
                    "blocking": h.blocking,
                    "timeout": h.timeout,
                })
        return result

    def load_from_file(self, path: Path):
        """Load hooks from a JSON config file.

        Format::

            [
                {"event": "post_edit", "name": "lint", "command": "ruff check ."},
                {"event": "pre_commit", "name": "test", "command": "pytest", "blocking": true}
            ]
        """
        if not path.exists():
            return
        try:
            hooks = json.loads(path.read_text())
            for h in hooks:
                self.register(HookDefinition(
                    event=HookEvent(h["event"]),
                    name=h["name"],
                    command=h.get("command"),
                    blocking=h.get("blocking", False),
                    timeout=h.get("timeout", 30),
                ))
        except Exception as e:
            # A malformed config must never crash the session.
            logger.warning("Failed to load hooks from %s: %s", path, e)

    async def fire(
        self,
        event: HookEvent,
        context: Optional[Dict[str, Any]] = None,
        cwd: Optional[Path] = None,
    ) -> List[HookResult]:
        """Run every hook registered for `event` in order.

        Stops early when a *blocking* hook fails; that result is marked
        `blocked=True` so callers can cancel the pending action.
        """
        results = []
        for hook in self._hooks.get(event, []):
            result = await self._run_hook(hook, context, cwd)
            results.append(result)
            if hook.blocking and not result.success:
                result.blocked = True
                break
        return results

    def is_blocked(self, results: List[HookResult]) -> bool:
        """True when any hook in `results` blocked the action."""
        return any(r.blocked for r in results)

    async def _run_hook(
        self,
        hook: HookDefinition,
        context: Optional[Dict[str, Any]],
        cwd: Optional[Path],
    ) -> HookResult:
        """Dispatch to the command or handler runner; never raises."""
        try:
            if hook.command:
                return await self._run_command_hook(hook, context, cwd)
            if hook.handler:
                output = hook.handler(context or {})
                return HookResult(
                    hook_name=hook.name, event=hook.event,
                    success=True, output=str(output),
                )
            return HookResult(
                hook_name=hook.name, event=hook.event,
                success=True, output="No action",
            )
        except Exception as e:
            return HookResult(
                hook_name=hook.name, event=hook.event,
                success=False, output=str(e),
            )

    async def _run_command_hook(
        self,
        hook: HookDefinition,
        context: Optional[Dict[str, Any]],
        cwd: Optional[Path],
    ) -> HookResult:
        """Run a shell command hook, exposing context as GITPILOT_HOOK_* env vars."""
        env = {**os.environ}
        if context:
            for k, v in context.items():
                env[f"GITPILOT_HOOK_{k.upper()}"] = str(v)

        proc = await asyncio.create_subprocess_shell(
            hook.command,
            cwd=str(cwd) if cwd else None,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.STDOUT,
            env=env,
        )
        try:
            stdout, _ = await asyncio.wait_for(
                proc.communicate(), timeout=hook.timeout,
            )
            return HookResult(
                hook_name=hook.name, event=hook.event,
                success=proc.returncode == 0,
                output=stdout.decode("utf-8", errors="replace"),
            )
        except asyncio.TimeoutError:
            proc.kill()
            # Reap the killed process so it does not linger as a zombie and
            # the transport is cleaned up (the previous code skipped this).
            await proc.wait()
            return HookResult(
                hook_name=hook.name, event=hook.event,
                success=False, output="Hook timed out",
            )
diff --git a/gitpilot/issue_tools.py b/gitpilot/issue_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..38506746709e8d2b787a33c5ddc044dead0f36b7
--- /dev/null
+++ b/gitpilot/issue_tools.py
@@ -0,0 +1,161 @@
+"""CrewAI tools for GitHub Issue management.
+
+These tools allow agents to create, list, update, and comment on GitHub issues.
+They reuse the repo context mechanism from agent_tools.
+"""
+import asyncio
+import json
+from typing import Optional
+
+from crewai.tools import tool
+
+from .agent_tools import get_repo_context
+from . import github_issues as gi
+
+
+def _run_async(coro):
+ """Run an async coroutine from a sync CrewAI tool."""
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+ try:
+ return loop.run_until_complete(coro)
+ finally:
+ loop.close()
+
+
+def _fmt_issue(issue: dict) -> str:
+ labels = ", ".join(l.get("name", "") for l in issue.get("labels", []))
+ assignees = ", ".join(a.get("login", "") for a in issue.get("assignees", []))
+ return (
+ f"#{issue.get('number')} [{issue.get('state', 'open')}] "
+ f"{issue.get('title', '')}\n"
+ f" Labels: {labels or 'none'} | Assignees: {assignees or 'none'}\n"
+ f" URL: {issue.get('html_url', '')}"
+ )
+
+
+@tool("List repository issues")
+def list_issues(
+ state: str = "open",
+ labels: Optional[str] = None,
+ per_page: int = 20,
+) -> str:
+ """Lists issues in the current repository. Optional filters: state (open/closed/all), labels (comma-separated), per_page."""
+ try:
+ owner, repo, token, _branch = get_repo_context()
+ issues = _run_async(
+ gi.list_issues(owner, repo, state=state, labels=labels, per_page=per_page, token=token)
+ )
+ if not issues:
+ return f"No {state} issues found in {owner}/{repo}."
+ header = f"Issues in {owner}/{repo} (state={state}):\n"
+ return header + "\n".join(_fmt_issue(i) for i in issues)
+ except Exception as e:
+ return f"Error listing issues: {e}"
+
+
+@tool("Get issue details")
+def get_issue(issue_number: int) -> str:
+ """Gets full details of a specific issue by number."""
+ try:
+ owner, repo, token, _branch = get_repo_context()
+ issue = _run_async(gi.get_issue(owner, repo, issue_number, token=token))
+ body = (issue.get("body") or "")[:500]
+ return (
+ f"Issue #{issue.get('number')}: {issue.get('title')}\n"
+ f"State: {issue.get('state')} | Created: {issue.get('created_at')}\n"
+ f"Author: {issue.get('user', {}).get('login', 'unknown')}\n"
+ f"Body:\n{body}\n"
+ f"URL: {issue.get('html_url', '')}"
+ )
+ except Exception as e:
+ return f"Error getting issue: {e}"
+
+
+@tool("Create a new issue")
+def create_issue(
+ title: str,
+ body: str = "",
+ labels: str = "",
+ assignees: str = "",
+) -> str:
+ """Creates a new GitHub issue. labels and assignees are comma-separated strings."""
+ try:
+ owner, repo, token, _branch = get_repo_context()
+ label_list = [l.strip() for l in labels.split(",") if l.strip()] if labels else None
+ assignee_list = [a.strip() for a in assignees.split(",") if a.strip()] if assignees else None
+ issue = _run_async(
+ gi.create_issue(owner, repo, title, body=body or None, labels=label_list, assignees=assignee_list, token=token)
+ )
+ return f"Created issue #{issue.get('number')}: {issue.get('title')}\nURL: {issue.get('html_url', '')}"
+ except Exception as e:
+ return f"Error creating issue: {e}"
+
+
+@tool("Update an issue")
+def update_issue(
+ issue_number: int,
+ title: str = "",
+ body: str = "",
+ state: str = "",
+ labels: str = "",
+ assignees: str = "",
+) -> str:
+ """Updates an existing issue. Only non-empty fields are changed. labels/assignees are comma-separated."""
+ try:
+ owner, repo, token, _branch = get_repo_context()
+ kwargs: dict = {}
+ if title:
+ kwargs["title"] = title
+ if body:
+ kwargs["body"] = body
+ if state:
+ kwargs["state"] = state
+ if labels:
+ kwargs["labels"] = [l.strip() for l in labels.split(",") if l.strip()]
+ if assignees:
+ kwargs["assignees"] = [a.strip() for a in assignees.split(",") if a.strip()]
+ issue = _run_async(gi.update_issue(owner, repo, issue_number, token=token, **kwargs))
+ return f"Updated issue #{issue.get('number')}: {issue.get('title')}\nState: {issue.get('state')}"
+ except Exception as e:
+ return f"Error updating issue: {e}"
+
+
+@tool("Add a comment to an issue")
+def add_issue_comment(issue_number: int, body: str) -> str:
+ """Adds a comment to an existing issue."""
+ try:
+ owner, repo, token, _branch = get_repo_context()
+ comment = _run_async(gi.add_issue_comment(owner, repo, issue_number, body, token=token))
+ return f"Comment added to issue #{issue_number}\nURL: {comment.get('html_url', '')}"
+ except Exception as e:
+ return f"Error adding comment: {e}"
+
+
+@tool("List issue comments")
+def list_issue_comments(issue_number: int) -> str:
+ """Lists all comments on an issue."""
+ try:
+ owner, repo, token, _branch = get_repo_context()
+ comments = _run_async(gi.list_issue_comments(owner, repo, issue_number, token=token))
+ if not comments:
+ return f"No comments on issue #{issue_number}."
+ lines = [f"Comments on issue #{issue_number}:"]
+ for c in comments:
+ author = c.get("user", {}).get("login", "unknown")
+ body_preview = (c.get("body") or "")[:200]
+ lines.append(f" [{author}] {body_preview}")
+ return "\n".join(lines)
+ except Exception as e:
+ return f"Error listing comments: {e}"
+
+
# Export all issue tools: the list consumed when wiring CrewAI agents.
ISSUE_TOOLS = [
    list_issues,
    get_issue,
    create_issue,
    update_issue,
    add_issue_comment,
    list_issue_comments,
]
diff --git a/gitpilot/langflow_client.py b/gitpilot/langflow_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..e271a9966dfcb03acdb74272f5489e48718487a5
--- /dev/null
+++ b/gitpilot/langflow_client.py
@@ -0,0 +1,50 @@
+from __future__ import annotations
+
+from typing import Any, Dict, Optional
+
+import httpx
+from fastapi import HTTPException
+
+from .settings import get_settings
+
+
async def run_langflow_flow(
    flow_id: str,
    input_value: str,
    *,
    session_id: str = "gitpilot-session",
    tweaks: Optional[Dict[str, Any]] = None,
) -> str:
    """Run a LangFlow flow and return the first chat-like output as text.

    Args:
        flow_id: LangFlow flow identifier, appended to /api/v1/run/.
        input_value: Chat input forwarded to the flow.
        session_id: Conversation/session identifier for the flow run.
        tweaks: Optional per-component parameter overrides.

    Raises:
        HTTPException: with LangFlow's status code and body on any
            4xx/5xx response.

    Returns:
        The first 'message'/'text'/'output_text' value found in the
        response, or the stringified raw payload when the expected shape
        is absent.
    """
    settings = get_settings()
    url = f"{settings.langflow_url.rstrip('/')}/api/v1/run/{flow_id}"
    headers = {"Content-Type": "application/json"}
    if settings.langflow_api_key:
        headers["x-api-key"] = settings.langflow_api_key

    payload: Dict[str, Any] = {
        "input_value": input_value,
        "session_id": session_id,
        "input_type": "chat",
        "output_type": "chat",
        "output_component": "",
        "tweaks": tweaks or {},
    }

    async with httpx.AsyncClient() as client:
        resp = await client.post(url, headers=headers, json=payload)

    if resp.status_code >= 400:
        raise HTTPException(resp.status_code, resp.text)

    data = resp.json()
    try:
        # NOTE(review): assumes LangFlow's run-response shape
        # outputs[0].outputs[0].results — confirm against the deployed version.
        outputs = data["outputs"][0]["outputs"][0]["results"]
        if isinstance(outputs, dict):
            for key in ("message", "text", "output_text"):
                if key in outputs:
                    return str(outputs[key])
    except Exception:
        # Best-effort extraction; fall through to returning the raw payload.
        pass

    return str(data)
diff --git a/gitpilot/learning.py b/gitpilot/learning.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5fc5a2b8e9e2df2882a1a5801edb3c0f90b1f74
--- /dev/null
+++ b/gitpilot/learning.py
@@ -0,0 +1,251 @@
+# gitpilot/learning.py
+"""Self-improving agent learning engine.
+
+After each task execution, evaluates outcomes, extracts patterns,
+and stores them in the project's auto-memory. Over time, GitPilot
+becomes specialised to each project's patterns and conventions.
+
+Inspired by reinforcement learning from human feedback (RLHF) principles
+and the experience-replay mechanism from DeepMind's DQN (Mnih et al., 2015),
+adapted for a software engineering context.
+
+Learning loop::
+
+ Execute task β Evaluate outcome β Extract patterns β Store in memory
+ β
+ Future tasks β Agent reads patterns from memory β Memory updated
+"""
+from __future__ import annotations
+
+import json
+import logging
+from dataclasses import dataclass, field
+from datetime import datetime, timezone
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+logger = logging.getLogger(__name__)
+
+LEARNING_DIR = "learning"
+MAX_INSIGHTS_PER_REPO = 200
+INSIGHT_CATEGORIES = [
+ "code_style",
+ "testing",
+ "architecture",
+ "workflow",
+ "error_pattern",
+ "performance",
+ "security",
+]
+
+
+@dataclass
+class Evaluation:
+ """Result of evaluating a task outcome."""
+
+ task_description: str
+ success: bool
+ outcome_type: str = "" # tests_passed, pr_approved, error_fixed, etc.
+ details: str = ""
+ confidence: float = 0.8 # 0.0 - 1.0
+ timestamp: str = field(
+ default_factory=lambda: datetime.now(timezone.utc).isoformat()
+ )
+
+ def to_dict(self) -> Dict[str, Any]:
+ return {
+ "task_description": self.task_description,
+ "success": self.success,
+ "outcome_type": self.outcome_type,
+ "details": self.details,
+ "confidence": self.confidence,
+ "timestamp": self.timestamp,
+ }
+
+
+@dataclass
+class RepoInsights:
+ """Accumulated insights for a repository."""
+
+ repo: str
+ patterns: List[str] = field(default_factory=list)
+ preferred_style: Dict[str, str] = field(default_factory=dict)
+ common_errors: List[str] = field(default_factory=list)
+ success_rate: float = 0.0
+ total_tasks: int = 0
+ successful_tasks: int = 0
+
+ def to_dict(self) -> Dict[str, Any]:
+ return {
+ "repo": self.repo,
+ "patterns": self.patterns,
+ "preferred_style": self.preferred_style,
+ "common_errors": self.common_errors,
+ "success_rate": self.success_rate,
+ "total_tasks": self.total_tasks,
+ "successful_tasks": self.successful_tasks,
+ }
+
+
+class LearningEngine:
+ """Learn from task execution outcomes and improve over time.
+
+ Usage::
+
+ engine = LearningEngine(storage_dir=Path("~/.gitpilot"))
+ evaluation = engine.evaluate_outcome(
+ task="Fix login bug",
+ result={"tests_passed": True, "pr_approved": True},
+ )
+ patterns = engine.extract_patterns(evaluation)
+ engine.update_strategies("owner/repo", patterns)
+ insights = engine.get_repo_insights("owner/repo")
+ """
+
+ def __init__(self, storage_dir: Optional[Path] = None) -> None:
+ self.storage_dir = storage_dir or (Path.home() / ".gitpilot")
+ self._learning_dir = self.storage_dir / LEARNING_DIR
+ self._learning_dir.mkdir(parents=True, exist_ok=True)
+
+ def evaluate_outcome(
+ self,
+ task: str,
+ result: Optional[Dict[str, Any]] = None,
+ ) -> Evaluation:
+ """Evaluate a task outcome based on result signals.
+
+ Checks for success signals like:
+ - tests_passed: True
+ - pr_approved: True
+ - error_fixed: True
+ - build_success: True
+ """
+ result = result or {}
+ success_signals = [
+ result.get("tests_passed", False),
+ result.get("pr_approved", False),
+ result.get("error_fixed", False),
+ result.get("build_success", False),
+ ]
+ explicit_success = result.get("success")
+
+ if explicit_success is not None:
+ success = bool(explicit_success)
+ else:
+ success = any(success_signals)
+
+ # Determine outcome type
+ if result.get("tests_passed"):
+ outcome_type = "tests_passed"
+ elif result.get("pr_approved"):
+ outcome_type = "pr_approved"
+ elif result.get("error_fixed"):
+ outcome_type = "error_fixed"
+ elif result.get("error"):
+ outcome_type = "error"
+ success = False
+ else:
+ outcome_type = "completed" if success else "unknown"
+
+ confidence = 0.9 if success else 0.6
+
+ return Evaluation(
+ task_description=task,
+ success=success,
+ outcome_type=outcome_type,
+ details=result.get("details", ""),
+ confidence=confidence,
+ )
+
+ def extract_patterns(self, evaluation: Evaluation) -> List[str]:
+ """Extract learnable patterns from an evaluation.
+
+ Generates natural-language patterns that can be injected
+ into future agent system prompts.
+ """
+ patterns = []
+
+ if evaluation.success:
+ patterns.append(
+ f"Task '{evaluation.task_description}' succeeded "
+ f"(outcome: {evaluation.outcome_type})"
+ )
+ if evaluation.outcome_type == "tests_passed":
+ patterns.append("Tests are available and should be run after changes")
+ if evaluation.outcome_type == "pr_approved":
+ patterns.append("PR workflow is active; create PRs for review")
+ else:
+ patterns.append(
+ f"Task '{evaluation.task_description}' failed "
+ f"(outcome: {evaluation.outcome_type})"
+ )
+ if evaluation.details:
+ patterns.append(f"Error context: {evaluation.details[:200]}")
+
+ return patterns
+
+ def update_strategies(self, repo: str, patterns: List[str]) -> None:
+ """Store learned patterns for a repository."""
+ repo_file = self._repo_path(repo)
+ data = self._load_repo_data(repo)
+
+ existing = set(data.get("patterns", []))
+ for p in patterns:
+ existing.add(p)
+
+ data["patterns"] = list(existing)[-MAX_INSIGHTS_PER_REPO:]
+ data["updated_at"] = datetime.now(timezone.utc).isoformat()
+ data.setdefault("total_tasks", 0)
+ data["total_tasks"] += 1
+
+ # Update success rate
+ if any("succeeded" in p for p in patterns):
+ data.setdefault("successful_tasks", 0)
+ data["successful_tasks"] += 1
+
+ total = data.get("total_tasks", 1)
+ successful = data.get("successful_tasks", 0)
+ data["success_rate"] = round(successful / total, 3) if total > 0 else 0.0
+
+ repo_file.write_text(json.dumps(data, indent=2))
+
+ def get_repo_insights(self, repo: str) -> RepoInsights:
+ """Get accumulated insights for a repository."""
+ data = self._load_repo_data(repo)
+ return RepoInsights(
+ repo=repo,
+ patterns=data.get("patterns", []),
+ preferred_style=data.get("preferred_style", {}),
+ common_errors=data.get("common_errors", []),
+ success_rate=data.get("success_rate", 0.0),
+ total_tasks=data.get("total_tasks", 0),
+ successful_tasks=data.get("successful_tasks", 0),
+ )
+
+ def record_error(self, repo: str, error: str) -> None:
+ """Record a common error pattern for a repo."""
+ data = self._load_repo_data(repo)
+ errors = data.setdefault("common_errors", [])
+ if error not in errors:
+ errors.append(error)
+ data["common_errors"] = errors[-50:] # Keep last 50
+ self._repo_path(repo).write_text(json.dumps(data, indent=2))
+
+ def set_preferred_style(self, repo: str, key: str, value: str) -> None:
+ """Set a preferred code style for a repo (e.g., indent: 4spaces)."""
+ data = self._load_repo_data(repo)
+ data.setdefault("preferred_style", {})[key] = value
+ self._repo_path(repo).write_text(json.dumps(data, indent=2))
+
+ def _repo_path(self, repo: str) -> Path:
+ safe_name = repo.replace("/", "__")
+ return self._learning_dir / f"{safe_name}.json"
+
+ def _load_repo_data(self, repo: str) -> Dict[str, Any]:
+ path = self._repo_path(repo)
+ if path.exists():
+ try:
+ return json.loads(path.read_text())
+ except Exception:
+ return {}
+ return {}
diff --git a/gitpilot/llm_provider.py b/gitpilot/llm_provider.py
new file mode 100644
index 0000000000000000000000000000000000000000..18d93ceb93d12ca4b4a1268476600b93d3f87f1d
--- /dev/null
+++ b/gitpilot/llm_provider.py
@@ -0,0 +1,287 @@
+from __future__ import annotations
+
+import os
+
+import httpx
+from crewai import LLM
+
+from gitpilot.models import ProviderHealth, ProviderSummary
+
+from .settings import LLMProvider, get_settings
+
+
+def build_llm() -> LLM:
+    """Return an initialized CrewAI LLM using the active provider.
+
+    Reads the active provider from ``get_settings()``, resolves credentials
+    from settings with environment-variable fallbacks, validates them, and
+    returns a configured CrewAI ``LLM``.
+
+    Raises:
+        ValueError: If a required credential is missing or the provider
+            is not recognised.
+
+    NOTE: the claude, watsonx, and ollabridge branches export process-wide
+    environment variables that the underlying SDK integrations read.
+    """
+    settings = get_settings()
+    provider = settings.provider
+
+    if provider == LLMProvider.openai:
+        # Use settings config if available, otherwise fall back to env vars
+        api_key = settings.openai.api_key or os.getenv("OPENAI_API_KEY", "")
+        model = settings.openai.model or os.getenv("GITPILOT_OPENAI_MODEL", "gpt-4o-mini")
+        base_url = settings.openai.base_url or os.getenv("OPENAI_BASE_URL", "")
+
+        # Validate required credentials
+        if not api_key:
+            raise ValueError(
+                "OpenAI API key is required. "
+                "Configure it in Admin / LLM Settings or set OPENAI_API_KEY environment variable."
+            )
+
+        # Ensure model has provider prefix for CrewAI
+        if not model.startswith("openai/"):
+            model = f"openai/{model}"
+
+        return LLM(
+            model=model,
+            api_key=api_key,
+            base_url=base_url if base_url else None,
+        )
+
+    if provider == LLMProvider.claude:
+        # Use settings config if available, otherwise fall back to env vars
+        api_key = settings.claude.api_key or os.getenv("ANTHROPIC_API_KEY", "")
+        model = settings.claude.model or os.getenv("GITPILOT_CLAUDE_MODEL", "claude-sonnet-4-5")
+        base_url = settings.claude.base_url or os.getenv("ANTHROPIC_BASE_URL", "")
+
+        # Validate required credentials
+        if not api_key:
+            raise ValueError(
+                "Claude API key is required. "
+                "Configure it in Admin / LLM Settings or set "
+                "ANTHROPIC_API_KEY environment variable."
+            )
+
+        # CRITICAL: Set API key as environment variable
+        # (required by CrewAI's native Anthropic provider)
+        # CrewAI's Anthropic integration checks for this env var internally
+        os.environ["ANTHROPIC_API_KEY"] = api_key
+
+        # Optional: Set base URL as environment variable if provided
+        if base_url:
+            os.environ["ANTHROPIC_BASE_URL"] = base_url
+
+        # Ensure model has provider prefix for CrewAI
+        if not model.startswith("anthropic/"):
+            model = f"anthropic/{model}"
+
+        return LLM(
+            model=model,
+            api_key=api_key,
+            base_url=base_url if base_url else None,
+        )
+
+    if provider == LLMProvider.watsonx:
+        # Use settings config with proper watsonx.ai integration
+        api_key = settings.watsonx.api_key or os.getenv("WATSONX_API_KEY", "")
+        project_id = settings.watsonx.project_id or os.getenv("WATSONX_PROJECT_ID", "")
+        model = settings.watsonx.model_id or os.getenv(
+            "GITPILOT_WATSONX_MODEL",
+            "ibm/granite-3-8b-instruct",  # Default model (without prefix)
+        )
+        base_url = settings.watsonx.base_url or os.getenv(
+            "WATSONX_BASE_URL",
+            "https://us-south.ml.cloud.ibm.com",  # Default to US South
+        )
+
+        # Validate required credentials
+        if not api_key:
+            raise ValueError(
+                "Watsonx API key is required. "
+                "Configure it in Admin / LLM Settings or set WATSONX_API_KEY environment variable."
+            )
+        if not project_id:
+            raise ValueError(
+                "Watsonx project ID is required. "
+                "Configure it in Admin / LLM Settings or set "
+                "WATSONX_PROJECT_ID environment variable."
+            )
+
+        # CRITICAL: Set project ID as environment variable (required by watsonx.ai SDK)
+        os.environ["WATSONX_PROJECT_ID"] = project_id
+
+        # CRITICAL: Also set the base URL as WATSONX_URL (some integrations use this)
+        os.environ["WATSONX_URL"] = base_url
+
+        # Ensure model has provider prefix for CrewAI (watsonx/provider/model)
+        # Format: watsonx/ibm/granite-3-8b-instruct
+        if not model.startswith("watsonx/"):
+            model = f"watsonx/{model}"
+
+        # Create LLM with the project_id parameter — watsonx.ai rejects
+        # generation requests that carry no project id.
+        return LLM(
+            model=model,
+            api_key=api_key,
+            base_url=base_url,
+            project_id=project_id,  # CRITICAL: required by watsonx.ai
+            temperature=0.3,  # Default temperature
+            max_tokens=1024,  # Default max tokens
+        )
+
+    if provider == LLMProvider.ollama:
+        # Use settings config if available, otherwise fall back to env vars
+        model = settings.ollama.model or os.getenv("GITPILOT_OLLAMA_MODEL", "llama3")
+        base_url = settings.ollama.base_url or os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")
+
+        # Validate required configuration
+        if not base_url:
+            raise ValueError(
+                "Ollama base URL is required. "
+                "Configure it in Admin / LLM Settings or set OLLAMA_BASE_URL environment variable."
+            )
+
+        # Ensure model has provider prefix for CrewAI
+        if not model.startswith("ollama/"):
+            model = f"ollama/{model}"
+
+        return LLM(model=model, base_url=base_url)
+
+    if provider == LLMProvider.ollabridge:
+        # OllaBridge / OllaBridge Cloud - OpenAI-compatible API
+        model = settings.ollabridge.model or os.getenv("GITPILOT_OLLABRIDGE_MODEL", "qwen2.5:1.5b")
+        base_url = settings.ollabridge.base_url or os.getenv("OLLABRIDGE_BASE_URL", "http://localhost:8000")
+        api_key = settings.ollabridge.api_key or os.getenv("OLLABRIDGE_API_KEY", "")
+
+        # Validate required configuration
+        if not base_url:
+            raise ValueError(
+                "OllaBridge base URL is required. "
+                "Configure it in Admin / LLM Settings or set "
+                "OLLABRIDGE_BASE_URL environment variable."
+            )
+
+        # OllaBridge exposes an OpenAI-compatible API at /v1/
+        # Use the openai/ prefix so CrewAI routes through the OpenAI adapter
+        if not model.startswith("openai/"):
+            model = f"openai/{model}"
+
+        ollabridge_api_base = f"{base_url.rstrip('/')}/v1"
+        # Any non-empty key satisfies the OpenAI client; "ollabridge" is a
+        # placeholder when the server does not enforce auth.
+        ollabridge_key = api_key or "ollabridge"
+
+        # CRITICAL: Set environment variables so litellm/OpenAI client uses
+        # the remote OllaBridge URL instead of falling back to localhost.
+        # Without this, the openai/ prefix causes litellm to check OPENAI_API_BASE
+        # and default to localhost when it's not set.
+        os.environ["OPENAI_API_KEY"] = ollabridge_key
+        os.environ["OPENAI_API_BASE"] = ollabridge_api_base
+
+        return LLM(
+            model=model,
+            api_key=ollabridge_key,
+            base_url=ollabridge_api_base,
+        )
+
+    raise ValueError(f"Unsupported provider: {provider}")
+
+
+def validate_provider_config(settings) -> tuple[bool, list[str]]:
+ """Validate provider configuration and return (is_valid, errors)."""
+ errors = []
+ provider = settings.provider
+
+ if provider == LLMProvider.openai:
+ if not settings.openai.api_key:
+ errors.append("OpenAI API key is required")
+ elif provider == LLMProvider.claude:
+ if not settings.claude.api_key:
+ errors.append("Anthropic API key is required")
+ elif provider == LLMProvider.watsonx:
+ if not settings.watsonx.api_key:
+ errors.append("Watsonx API key is required")
+ if not settings.watsonx.project_id:
+ errors.append("Watsonx project ID is required")
+ elif provider == LLMProvider.ollama:
+ pass # Local, always valid
+ elif provider == LLMProvider.ollabridge:
+ pass # Local default, always valid
+
+ return (len(errors) == 0, errors)
+
+
+def get_effective_model(settings) -> str | None:
+ """Get the active model name for the current provider."""
+ provider = settings.provider
+ if provider == LLMProvider.openai:
+ return settings.openai.model
+ if provider == LLMProvider.claude:
+ return settings.claude.model
+ if provider == LLMProvider.watsonx:
+ return settings.watsonx.model_id
+ if provider == LLMProvider.ollama:
+ return settings.ollama.model
+ if provider == LLMProvider.ollabridge:
+ return settings.ollabridge.model
+ return None
+
+
+def _apply_health(summary: ProviderSummary, status_code: int) -> None:
+ """Set health and models_available from HTTP status code."""
+ ok = status_code == 200
+ summary.health = ProviderHealth.ok if ok else ProviderHealth.error
+ summary.models_available = ok
+
+
+async def test_provider_connection(settings) -> ProviderSummary:
+    """Test the current provider connection and return status.
+
+    Performs a lightweight authenticated GET against the provider's
+    model-listing (or equivalent) endpoint and records the result on the
+    returned ``ProviderSummary``. Network failures are reported through
+    ``summary.health`` / ``summary.warning`` rather than raised.
+    """
+    summary = settings.get_provider_summary()
+    provider = settings.provider
+
+    try:
+        async with httpx.AsyncClient(timeout=10.0) as client:
+            if provider == LLMProvider.openai:
+                url = settings.openai.base_url or "https://api.openai.com"
+                resp = await client.get(
+                    f"{url}/v1/models",
+                    headers={"Authorization": f"Bearer {settings.openai.api_key}"},
+                )
+                _apply_health(summary, resp.status_code)
+
+            elif provider == LLMProvider.claude:
+                url = settings.claude.base_url or "https://api.anthropic.com"
+                headers = {
+                    "x-api-key": settings.claude.api_key,
+                    "anthropic-version": "2023-06-01",
+                }
+                resp = await client.get(f"{url}/v1/models", headers=headers)
+                _apply_health(summary, resp.status_code)
+
+            elif provider == LLMProvider.watsonx:
+                base = settings.watsonx.base_url or "https://us-south.ml.cloud.ibm.com"
+                # limit=1 keeps the probe cheap; only the status code matters.
+                resp = await client.get(
+                    f"{base}/ml/v1/foundation_model_specs",
+                    params={"version": "2024-03-14", "limit": "1"},
+                    headers={"Authorization": f"Bearer {settings.watsonx.api_key}"},
+                )
+                _apply_health(summary, resp.status_code)
+
+            elif provider == LLMProvider.ollama:
+                base = settings.ollama.base_url or "http://127.0.0.1:11434"
+                resp = await client.get(f"{base}/api/tags")
+                _apply_health(summary, resp.status_code)
+
+            elif provider == LLMProvider.ollabridge:
+                base = settings.ollabridge.base_url or "http://127.0.0.1:8000"
+                base = base.rstrip("/")
+                # Users sometimes paste the /v1 suffix into the base URL;
+                # strip it and warn, since GitPilot appends /v1 itself.
+                if base.endswith("/v1"):
+                    base = base[:-3]
+                    summary.warning = (
+                        "Do not include /v1; GitPilot adds it automatically."
+                    )
+                api_key = settings.ollabridge.api_key or "ollabridge"
+                resp = await client.get(
+                    f"{base}/v1/models",
+                    headers={"Authorization": f"Bearer {api_key}"},
+                )
+                _apply_health(summary, resp.status_code)
+
+    except httpx.ConnectError:
+        summary.health = ProviderHealth.error
+        summary.warning = f"Cannot connect to {provider.value} server"
+    except httpx.TimeoutException:
+        # Timed out is softer than unreachable: report a warning, not an error.
+        summary.health = ProviderHealth.warning
+        summary.warning = f"Connection to {provider.value} timed out"
+    except Exception as e:
+        summary.health = ProviderHealth.error
+        summary.warning = str(e)
+
+    return summary
diff --git a/gitpilot/local_tools.py b/gitpilot/local_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..05b18d216e76141222c79d655699ab0c1a09c3ef
--- /dev/null
+++ b/gitpilot/local_tools.py
@@ -0,0 +1,212 @@
+# gitpilot/local_tools.py
+"""CrewAI tools for local workspace file and shell operations.
+
+These tools give agents the ability to read, write, search, and navigate
+files on the local filesystem (within the sandboxed workspace directory),
+and to run shell commands like test suites, linters, and build scripts.
+"""
+import asyncio
+import concurrent.futures
+import json
+from typing import Optional
+
+from crewai.tools import tool
+
+from .workspace import WorkspaceManager, WorkspaceInfo
+from .terminal import TerminalExecutor, TerminalSession
+
+# Module-level singletons shared by every tool invocation.
+_ws_manager = WorkspaceManager()
+_executor = TerminalExecutor()
+# Workspace all tools operate on; selected via set_active_workspace().
+_current_workspace: Optional[WorkspaceInfo] = None
+
+
+def set_active_workspace(ws: WorkspaceInfo):
+    """Select the workspace that subsequent tool calls operate on."""
+    global _current_workspace
+    _current_workspace = ws
+
+
+def get_active_workspace() -> Optional[WorkspaceInfo]:
+    """Return the currently active workspace, or None if none is set."""
+    return _current_workspace
+
+
+def _require_workspace() -> WorkspaceInfo:
+    """Return the active workspace, raising RuntimeError if none is selected."""
+    if _current_workspace is None:
+        raise RuntimeError("No active workspace. Call set_active_workspace() first.")
+    return _current_workspace
+
+
+def _run_async(coro):
+    """Bridge sync CrewAI tools to async workspace/terminal calls."""
+    try:
+        # Raises RuntimeError when no event loop is running in this thread.
+        asyncio.get_running_loop()
+    except RuntimeError:
+        return asyncio.run(coro)
+    # If a loop is already running (CrewAI thread), use a thread pool
+    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
+        return pool.submit(asyncio.run, coro).result()
+
+
+# -----------------------------------------------------------------------
+# File operations
+# -----------------------------------------------------------------------
+
+@tool("Read local file")
+def read_local_file(file_path: str) -> str:
+ """Read a file from the local workspace. Returns the file content."""
+ ws = _require_workspace()
+ try:
+ content = _run_async(_ws_manager.read_file(ws, file_path))
+ return f"Content of {file_path}:\n---\n{content}\n---"
+ except Exception as e:
+ return f"Error reading {file_path}: {e}"
+
+
+@tool("Write local file")
+def write_local_file(file_path: str, content: str) -> str:
+ """Write content to a file in the local workspace. Creates parent directories."""
+ ws = _require_workspace()
+ try:
+ result = _run_async(_ws_manager.write_file(ws, file_path, content))
+ return f"Written {result['size']} bytes to {result['path']}"
+ except Exception as e:
+ return f"Error writing {file_path}: {e}"
+
+
+@tool("Delete local file")
+def delete_local_file(file_path: str) -> str:
+ """Delete a file from the local workspace."""
+ ws = _require_workspace()
+ try:
+ deleted = _run_async(_ws_manager.delete_file(ws, file_path))
+ return f"Deleted: {deleted}"
+ except Exception as e:
+ return f"Error deleting {file_path}: {e}"
+
+
+@tool("List local files")
+def list_local_files(directory: str = ".") -> str:
+ """List all tracked and untracked files in a directory."""
+ ws = _require_workspace()
+ try:
+ files = _run_async(_ws_manager.list_files(ws, directory))
+ return "\n".join(files) if files else "No files found."
+ except Exception as e:
+ return f"Error listing files: {e}"
+
+
+@tool("Search in files")
+def search_in_files(pattern: str, path: str = ".") -> str:
+ """Search for a text pattern across all files using git grep.
+ Returns matching lines with file paths and line numbers."""
+ ws = _require_workspace()
+ try:
+ matches = _run_async(_ws_manager.search_files(ws, pattern, path))
+ if not matches:
+ return "No matches found."
+ lines = [f"{m['file']}:{m['line']}: {m['content']}" for m in matches[:50]]
+ return "\n".join(lines)
+ except Exception as e:
+ return f"Error searching: {e}"
+
+
+# -----------------------------------------------------------------------
+# Git operations
+# -----------------------------------------------------------------------
+
+@tool("Git diff")
+def git_diff(staged: str = "false") -> str:
+ """Show the current git diff (unstaged changes by default)."""
+ ws = _require_workspace()
+ try:
+ return _run_async(_ws_manager.diff(ws, staged=staged.lower() == "true")) or "No changes."
+ except Exception as e:
+ return f"Error: {e}"
+
+
+@tool("Git status")
+def git_status() -> str:
+ """Show the current git status."""
+ ws = _require_workspace()
+ try:
+ status = _run_async(_ws_manager.status(ws))
+ return json.dumps(status, indent=2)
+ except Exception as e:
+ return f"Error: {e}"
+
+
+@tool("Git commit")
+def git_commit(message: str, files: str = "") -> str:
+ """Commit changes. Optionally specify files (comma-separated)."""
+ ws = _require_workspace()
+ try:
+ file_list = [f.strip() for f in files.split(",") if f.strip()] or None
+ result = _run_async(_ws_manager.commit(ws, message, file_list))
+ return json.dumps(result)
+ except Exception as e:
+ return f"Error: {e}"
+
+
+@tool("Git log")
+def git_log(count: str = "10") -> str:
+ """Show recent commit history."""
+ ws = _require_workspace()
+ try:
+ commits = _run_async(_ws_manager.log(ws, int(count)))
+ return json.dumps(commits, indent=2)
+ except Exception as e:
+ return f"Error: {e}"
+
+
+# -----------------------------------------------------------------------
+# Shell command execution
+# -----------------------------------------------------------------------
+
+@tool("Run shell command")
+def run_command(command: str, timeout: str = "120") -> str:
+ """Run a shell command in the workspace directory.
+ Returns stdout, stderr, and exit code.
+ Examples: 'npm test', 'python -m pytest', 'make build', 'ls -la'."""
+ ws = _require_workspace()
+ try:
+ session = TerminalSession(workspace_path=ws.path)
+ result = _run_async(_executor.execute(session, command, int(timeout)))
+ output = f"Exit code: {result.exit_code}\n"
+ if result.stdout:
+ output += f"--- stdout ---\n{result.stdout}\n"
+ if result.stderr:
+ output += f"--- stderr ---\n{result.stderr}\n"
+ if result.timed_out:
+ output += "WARNING: Command timed out\n"
+ if result.truncated:
+ output += "WARNING: Output was truncated\n"
+ return output
+ except PermissionError as e:
+ return f"Permission denied: {e}"
+ except Exception as e:
+ return f"Error: {e}"
+
+
+# -----------------------------------------------------------------------
+# Exports
+# -----------------------------------------------------------------------
+
+# Read/write/search tools for workspace files.
+LOCAL_FILE_TOOLS = [
+    read_local_file,
+    write_local_file,
+    delete_local_file,
+    list_local_files,
+    search_in_files,
+]
+
+# Local git inspection and commit tools.
+LOCAL_GIT_TOOLS = [
+    git_diff,
+    git_status,
+    git_commit,
+    git_log,
+]
+
+# Sandboxed shell execution.
+LOCAL_SHELL_TOOLS = [
+    run_command,
+]
+
+# Convenience aggregate containing every local tool.
+LOCAL_TOOLS = LOCAL_FILE_TOOLS + LOCAL_GIT_TOOLS + LOCAL_SHELL_TOOLS
diff --git a/gitpilot/mcp_client.py b/gitpilot/mcp_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c248d9fb58c0e818e270bc35e30b63fecb6cc06
--- /dev/null
+++ b/gitpilot/mcp_client.py
@@ -0,0 +1,341 @@
+# gitpilot/mcp_client.py
+"""Model Context Protocol (MCP) client for GitPilot.
+
+Connects to MCP servers (databases, Slack, Figma, Sentry, etc.) and
+exposes their tools to GitPilot agents. Supports three transport types:
+
+- **stdio** β launch a local subprocess and communicate via stdin/stdout
+- **http** β send JSON-RPC requests over HTTP
+- **sse** β Server-Sent Events streaming connection
+
+Configuration lives in ``.gitpilot/mcp.json``.
+"""
+from __future__ import annotations
+
+import asyncio
+import json
+import logging
+import os
+from dataclasses import dataclass, field
+from enum import Enum
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+import httpx
+
+logger = logging.getLogger(__name__)
+
+MCP_CONFIG_FILE = "mcp.json"
+MCP_JSONRPC_VERSION = "2.0"
+
+
+class TransportType(str, Enum):
+ STDIO = "stdio"
+ HTTP = "http"
+ SSE = "sse"
+
+
+@dataclass
+class MCPServerConfig:
+ """Configuration for a single MCP server."""
+
+ name: str
+ transport: TransportType
+ # stdio
+ command: Optional[str] = None
+ args: List[str] = field(default_factory=list)
+ env: Dict[str, str] = field(default_factory=dict)
+ # http / sse
+ url: Optional[str] = None
+ headers: Dict[str, str] = field(default_factory=dict)
+ # auth
+ auth_token: Optional[str] = None
+
+ @classmethod
+ def from_dict(cls, data: dict) -> "MCPServerConfig":
+ transport = TransportType(data.get("type", data.get("transport", "stdio")))
+ env = {}
+ for k, v in data.get("env", {}).items():
+ # Expand $ENV_VAR references
+ env[k] = os.path.expandvars(v) if isinstance(v, str) else v
+ return cls(
+ name=data["name"],
+ transport=transport,
+ command=data.get("command"),
+ args=[os.path.expandvars(a) for a in data.get("args", [])],
+ env=env,
+ url=data.get("url"),
+ headers=data.get("headers", {}),
+ auth_token=os.path.expandvars(data["auth_token"]) if data.get("auth_token") else None,
+ )
+
+
+@dataclass
+class MCPTool:
+    """A tool discovered from an MCP server."""
+
+    name: str
+    description: str
+    # JSON Schema of the tool's arguments (MCP "inputSchema" field).
+    input_schema: Dict[str, Any] = field(default_factory=dict)
+    # Name of the server the tool was discovered on.
+    server_name: str = ""
+
+
+@dataclass
+class MCPConnection:
+ """An active connection to an MCP server."""
+
+ config: MCPServerConfig
+ tools: List[MCPTool] = field(default_factory=list)
+ _process: Optional[asyncio.subprocess.Process] = field(default=None, repr=False)
+ _request_id: int = field(default=0, repr=False)
+
+ @property
+ def is_alive(self) -> bool:
+ if self.config.transport == TransportType.STDIO:
+ return self._process is not None and self._process.returncode is None
+ return True # HTTP/SSE are stateless per-request
+
+ def next_id(self) -> int:
+ self._request_id += 1
+ return self._request_id
+
+
+class MCPClient:
+ """Connect to MCP servers and call their tools.
+
+ Usage::
+
+ client = MCPClient()
+ client.load_config(workspace / ".gitpilot")
+ conn = await client.connect("postgres")
+ tools = await client.list_tools(conn)
+ result = await client.call_tool(conn, "query", {"sql": "SELECT 1"})
+ """
+
+ def __init__(self) -> None:
+ self._configs: Dict[str, MCPServerConfig] = {}
+ self._connections: Dict[str, MCPConnection] = {}
+
+ # ------------------------------------------------------------------
+ # Configuration
+ # ------------------------------------------------------------------
+
+ def load_config(self, gitpilot_dir: Path) -> int:
+ """Load MCP server configs from ``.gitpilot/mcp.json``. Returns count."""
+ config_path = gitpilot_dir / MCP_CONFIG_FILE
+ if not config_path.exists():
+ return 0
+ try:
+ data = json.loads(config_path.read_text())
+ servers = data if isinstance(data, list) else data.get("servers", [])
+ for entry in servers:
+ cfg = MCPServerConfig.from_dict(entry)
+ self._configs[cfg.name] = cfg
+ logger.info("Loaded %d MCP server configs", len(servers))
+ return len(servers)
+ except Exception as e:
+ logger.warning("Failed to load MCP config: %s", e)
+ return 0
+
+ def add_server(self, config: MCPServerConfig) -> None:
+ self._configs[config.name] = config
+
+ def list_servers(self) -> List[str]:
+ return list(self._configs.keys())
+
+ # ------------------------------------------------------------------
+ # Connection management
+ # ------------------------------------------------------------------
+
+ async def connect(self, server_name: str) -> MCPConnection:
+ """Connect to a named MCP server and discover its tools."""
+ if server_name in self._connections and self._connections[server_name].is_alive:
+ return self._connections[server_name]
+
+ config = self._configs.get(server_name)
+ if not config:
+ raise ValueError(f"Unknown MCP server: {server_name}")
+
+ conn = MCPConnection(config=config)
+
+ if config.transport == TransportType.STDIO:
+ await self._connect_stdio(conn)
+
+ # Discover tools via initialize + tools/list
+ await self._initialize(conn)
+ conn.tools = await self.list_tools(conn)
+
+ self._connections[server_name] = conn
+ logger.info("Connected to MCP server '%s' β %d tools", server_name, len(conn.tools))
+ return conn
+
+ async def disconnect(self, server_name: str) -> None:
+ conn = self._connections.pop(server_name, None)
+ if conn and conn._process:
+ conn._process.terminate()
+ await conn._process.wait()
+
+ async def disconnect_all(self) -> None:
+ for name in list(self._connections):
+ await self.disconnect(name)
+
+ # ------------------------------------------------------------------
+ # Tool operations
+ # ------------------------------------------------------------------
+
+ async def list_tools(self, conn: MCPConnection) -> List[MCPTool]:
+ """List tools available on the connected server."""
+ result = await self._send_request(conn, "tools/list", {})
+ tools = []
+ for t in result.get("tools", []):
+ tools.append(MCPTool(
+ name=t["name"],
+ description=t.get("description", ""),
+ input_schema=t.get("inputSchema", {}),
+ server_name=conn.config.name,
+ ))
+ return tools
+
+ async def call_tool(
+ self,
+ conn: MCPConnection,
+ tool_name: str,
+ params: Optional[Dict[str, Any]] = None,
+ ) -> Any:
+ """Call a tool on the connected server."""
+ result = await self._send_request(conn, "tools/call", {
+ "name": tool_name,
+ "arguments": params or {},
+ })
+ # MCP returns content array; flatten text content
+ content = result.get("content", [])
+ texts = [c.get("text", "") for c in content if c.get("type") == "text"]
+ return "\n".join(texts) if texts else result
+
+ def to_crewai_tools(self, conn: MCPConnection) -> list:
+ """Wrap MCP tools as CrewAI-compatible tool functions.
+
+ Returns a list of callables decorated with ``@tool``.
+ """
+ from crewai.tools import tool as crewai_tool
+
+ wrapped = []
+ for mcp_tool in conn.tools:
+ # Capture in closure
+ _conn = conn
+ _name = mcp_tool.name
+ _desc = mcp_tool.description or f"MCP tool: {_name}"
+
+ @crewai_tool(_name)
+ def _wrapper(params: str = "{}") -> str:
+ __doc__ = _desc # noqa: F841
+ import asyncio as _aio
+ loop = _aio.new_event_loop()
+ try:
+ parsed = json.loads(params) if isinstance(params, str) else params
+ return str(loop.run_until_complete(
+ MCPClient.call_tool(self, _conn, _name, parsed)
+ ))
+ finally:
+ loop.close()
+
+ _wrapper.__doc__ = _desc
+ wrapped.append(_wrapper)
+ return wrapped
+
+ # ------------------------------------------------------------------
+ # Transport internals
+ # ------------------------------------------------------------------
+
+ async def _connect_stdio(self, conn: MCPConnection) -> None:
+ config = conn.config
+ if not config.command:
+ raise ValueError(f"stdio server '{config.name}' requires a command")
+ env = {**os.environ, **config.env}
+ conn._process = await asyncio.create_subprocess_exec(
+ config.command, *config.args,
+ stdin=asyncio.subprocess.PIPE,
+ stdout=asyncio.subprocess.PIPE,
+ stderr=asyncio.subprocess.PIPE,
+ env=env,
+ )
+
+ async def _initialize(self, conn: MCPConnection) -> Dict[str, Any]:
+ return await self._send_request(conn, "initialize", {
+ "protocolVersion": "2024-11-05",
+ "capabilities": {},
+ "clientInfo": {"name": "gitpilot", "version": "1.0"},
+ })
+
+ async def _send_request(
+ self,
+ conn: MCPConnection,
+ method: str,
+ params: Dict[str, Any],
+ ) -> Dict[str, Any]:
+ """Send a JSON-RPC request via the appropriate transport."""
+ msg = {
+ "jsonrpc": MCP_JSONRPC_VERSION,
+ "id": conn.next_id(),
+ "method": method,
+ "params": params,
+ }
+
+ if conn.config.transport == TransportType.STDIO:
+ return await self._send_stdio(conn, msg)
+ else:
+ return await self._send_http(conn, msg)
+
+ async def _send_stdio(
+ self, conn: MCPConnection, msg: dict,
+ ) -> Dict[str, Any]:
+ proc = conn._process
+ if not proc or not proc.stdin or not proc.stdout:
+ raise RuntimeError(f"stdio process not running for '{conn.config.name}'")
+ payload = json.dumps(msg) + "\n"
+ proc.stdin.write(payload.encode())
+ await proc.stdin.drain()
+ line = await proc.stdout.readline()
+ if not line:
+ raise RuntimeError(f"No response from MCP server '{conn.config.name}'")
+ resp = json.loads(line)
+ if "error" in resp:
+ raise RuntimeError(f"MCP error: {resp['error']}")
+ return resp.get("result", {})
+
+ async def _send_http(
+ self, conn: MCPConnection, msg: dict,
+ ) -> Dict[str, Any]:
+ url = conn.config.url
+ if not url:
+ raise ValueError(f"HTTP server '{conn.config.name}' requires a url")
+ headers = {**conn.config.headers, "Content-Type": "application/json"}
+ if conn.config.auth_token:
+ headers["Authorization"] = f"Bearer {conn.config.auth_token}"
+ async with httpx.AsyncClient(timeout=30) as client:
+ resp = await client.post(url, json=msg, headers=headers)
+ resp.raise_for_status()
+ data = resp.json()
+ if "error" in data:
+ raise RuntimeError(f"MCP error: {data['error']}")
+ return data.get("result", {})
+
+ # ------------------------------------------------------------------
+ # Serialisation
+ # ------------------------------------------------------------------
+
+ def to_dict(self) -> Dict[str, Any]:
+ return {
+ "servers": [
+ {
+ "name": c.name,
+ "transport": c.transport.value,
+ "command": c.command,
+ "url": c.url,
+ "tools_count": len(self._connections[c.name].tools)
+ if c.name in self._connections else 0,
+ "connected": c.name in self._connections,
+ }
+ for c in self._configs.values()
+ ]
+ }
diff --git a/gitpilot/memory.py b/gitpilot/memory.py
new file mode 100644
index 0000000000000000000000000000000000000000..552e01e9d46096749c4cec4307616ff087f28589
--- /dev/null
+++ b/gitpilot/memory.py
@@ -0,0 +1,137 @@
+# gitpilot/memory.py
+"""Project context memory — the GITPILOT.md system.
+
+Loads project-specific conventions, rules, and context from:
+
+1. ``.gitpilot/GITPILOT.md`` β project root (committed to repo)
+2. ``.gitpilot/rules/*.md`` β modular rule files
+3. ``.gitpilot/memory.json`` β auto-learned patterns (local only)
+
+The combined context is injected into agent system prompts so they
+follow project conventions automatically.
+"""
+from __future__ import annotations
+
+import json
+import logging
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any, Dict, List
+
+logger = logging.getLogger(__name__)
+
+MEMORY_FILE = "GITPILOT.md"
+RULES_DIR = "rules"
+AUTO_MEMORY_FILE = "memory.json"
+MAX_CONVENTIONS_CHARS = 10_000
+MAX_RULE_CHARS = 5_000
+MAX_PATTERNS = 100
+
+
+@dataclass
+class ProjectContext:
+ """Combined project context for agent injection."""
+
+ conventions: str = ""
+ rules: List[str] = field(default_factory=list)
+ auto_memory: Dict[str, Any] = field(default_factory=dict)
+
+ def to_system_prompt(self) -> str:
+ """Format as a system-prompt section to prepend to agent backstory."""
+ parts: List[str] = []
+ if self.conventions:
+ parts.append(f"## Project Conventions\n\n{self.conventions}")
+ if self.rules:
+ parts.append("## Project Rules\n\n" + "\n\n---\n\n".join(self.rules))
+ patterns = self.auto_memory.get("patterns", [])
+ if patterns:
+ parts.append(
+ "## Learned Patterns\n\n"
+ + "\n".join(f"- {p}" for p in patterns)
+ )
+ return "\n\n".join(parts)
+
+ @property
+ def is_empty(self) -> bool:
+ return not self.conventions and not self.rules and not self.auto_memory
+
+
+class MemoryManager:
+ """Load and manage project-level context and conventions."""
+
+ def __init__(self, workspace_path: Path):
+ self.workspace_path = workspace_path
+ self.gitpilot_dir = workspace_path / ".gitpilot"
+
+ def load_context(self) -> ProjectContext:
+ ctx = ProjectContext()
+
+ # 1. GITPILOT.md
+ md_path = self.gitpilot_dir / MEMORY_FILE
+ if md_path.exists():
+ ctx.conventions = md_path.read_text(encoding="utf-8")[
+ :MAX_CONVENTIONS_CHARS
+ ]
+
+ # 2. rules/*.md
+ rules_dir = self.gitpilot_dir / RULES_DIR
+ if rules_dir.is_dir():
+ for rule_file in sorted(rules_dir.glob("*.md")):
+ content = rule_file.read_text(encoding="utf-8")[:MAX_RULE_CHARS]
+ ctx.rules.append(f"### {rule_file.stem}\n\n{content}")
+
+ # 3. auto-learned memory
+ auto_path = self.gitpilot_dir / AUTO_MEMORY_FILE
+ if auto_path.exists():
+ try:
+ ctx.auto_memory = json.loads(auto_path.read_text())
+ except Exception:
+ pass
+
+ return ctx
+
+ def save_auto_memory(self, memory: Dict[str, Any]):
+ self.gitpilot_dir.mkdir(parents=True, exist_ok=True)
+ auto_path = self.gitpilot_dir / AUTO_MEMORY_FILE
+ auto_path.write_text(json.dumps(memory, indent=2))
+
+ def add_learned_pattern(self, pattern: str):
+ auto_path = self.gitpilot_dir / AUTO_MEMORY_FILE
+ memory: Dict[str, Any] = {}
+ if auto_path.exists():
+ try:
+ memory = json.loads(auto_path.read_text())
+ except Exception:
+ pass
+ patterns = memory.setdefault("patterns", [])
+ if pattern not in patterns:
+ patterns.append(pattern)
+ memory["patterns"] = patterns[-MAX_PATTERNS:]
+ self.save_auto_memory(memory)
+
+ def init_project(self) -> Path:
+ """Create .gitpilot/ with template GITPILOT.md. Returns path."""
+ self.gitpilot_dir.mkdir(parents=True, exist_ok=True)
+ md_path = self.gitpilot_dir / MEMORY_FILE
+ if not md_path.exists():
+ md_path.write_text(
+ "# GitPilot Project Conventions\n\n"
+ "\n"
+ "\n\n"
+ "## Code Style\n\n\n"
+ "## Testing\n\n\n"
+ "## Commit Messages\n\n\n"
+ )
+ (self.gitpilot_dir / RULES_DIR).mkdir(exist_ok=True)
+ return md_path
+
+ def get_conventions_text(self) -> str:
+ md_path = self.gitpilot_dir / MEMORY_FILE
+ if md_path.exists():
+ return md_path.read_text(encoding="utf-8")
+ return ""
+
+ def set_conventions_text(self, text: str):
+ self.gitpilot_dir.mkdir(parents=True, exist_ok=True)
+ md_path = self.gitpilot_dir / MEMORY_FILE
+ md_path.write_text(text, encoding="utf-8")
diff --git a/gitpilot/model_catalog.py b/gitpilot/model_catalog.py
new file mode 100644
index 0000000000000000000000000000000000000000..0656298644852fc50180f366fd0ae4e182b8f9f7
--- /dev/null
+++ b/gitpilot/model_catalog.py
@@ -0,0 +1,197 @@
+# gitpilot/model_catalog.py
+from __future__ import annotations
+
+import os
+from datetime import datetime
+from typing import List, Tuple, Optional, Dict, Any
+
+import requests
+
+from .settings import AppSettings, LLMProvider, get_settings
+
+# --- Watsonx.ai config (public endpoint, no key needed for IBM-managed models) ---
+
+WATSONX_BASE_URLS = [
+ "https://us-south.ml.cloud.ibm.com",
+ "https://eu-de.ml.cloud.ibm.com",
+ "https://jp-tok.ml.cloud.ibm.com",
+ "https://au-syd.ml.cloud.ibm.com",
+]
+
+WATSONX_ENDPOINT = "/ml/v1/foundation_model_specs"
+WATSONX_PARAMS = {
+ "version": "2024-09-16",
+ "filters": "!function_embedding,!lifecycle_withdrawn",
+}
+TODAY = datetime.today().strftime("%Y-%m-%d")
+
+
+def _is_deprecated_or_withdrawn(lifecycle: List[Dict[str, Any]]) -> bool:
+ """Return True if a model lifecycle includes a deprecated/withdrawn item active today."""
+ for entry in lifecycle:
+ if entry.get("id") in {"deprecated", "withdrawn"} and entry.get("start_date", "") <= TODAY:
+ return True
+ return False
+
+
+# --- Provider-specific listing functions --------------------------------------
+
+
+def _list_openai_models(settings: AppSettings) -> Tuple[List[str], Optional[str]]:
+ """
+ Use OpenAI /v1/models endpoint to list models available to the configured key.
+ Requires OPENAI_API_KEY or settings.openai.api_key.
+ """
+ api_key = settings.openai.api_key or os.getenv("OPENAI_API_KEY")
+ if not api_key:
+ return [], "OpenAI API key not configured"
+
+ base_url = os.getenv("OPENAI_BASE_URL", "https://api.openai.com")
+ url = f"{base_url.rstrip('/')}/v1/models"
+
+ try:
+ resp = requests.get(
+ url,
+ headers={"Authorization": f"Bearer {api_key}"},
+ timeout=10,
+ )
+ resp.raise_for_status()
+ data = resp.json().get("data", [])
+ models = sorted({m.get("id", "") for m in data if m.get("id")})
+ return models, None
+ except Exception as e:
+ return [], f"Error listing OpenAI models: {e}"
+
+
+def _list_claude_models(settings: AppSettings) -> Tuple[List[str], Optional[str]]:
+ """
+ Use Anthropic /v1/models endpoint to list Claude models available to the key.
+ Requires ANTHROPIC_API_KEY or settings.claude.api_key.
+ """
+ api_key = settings.claude.api_key or os.getenv("ANTHROPIC_API_KEY")
+ if not api_key:
+ return [], "Claude (Anthropic) API key not configured"
+
+ base_url = os.getenv("ANTHROPIC_BASE_URL", "https://api.anthropic.com")
+ url = f"{base_url.rstrip('/')}/v1/models"
+ anthropic_version = os.getenv("ANTHROPIC_VERSION", "2023-06-01")
+
+ try:
+ resp = requests.get(
+ url,
+ headers={
+ "x-api-key": api_key,
+ "anthropic-version": anthropic_version,
+ },
+ timeout=10,
+ )
+ resp.raise_for_status()
+ data = resp.json().get("data", [])
+ models = sorted({m.get("id", "") for m in data if m.get("id")})
+ return models, None
+ except Exception as e:
+ return [], f"Error listing Claude models: {e}"
+
+
+def _list_watsonx_models(settings: AppSettings) -> Tuple[List[str], Optional[str]]:
+ """
+ List foundation models from Watsonx public specs endpoint.
+ No API key required for IBM-managed models.
+ Returns a unique sorted list of model_id's across major regions.
+ """
+ all_models = set()
+
+ for base in WATSONX_BASE_URLS:
+ url = f"{base}{WATSONX_ENDPOINT}"
+ try:
+ resp = requests.get(url, params=WATSONX_PARAMS, timeout=10)
+ resp.raise_for_status()
+ resources = resp.json().get("resources", [])
+ for m in resources:
+ if _is_deprecated_or_withdrawn(m.get("lifecycle", [])):
+ continue
+ model_id = m.get("model_id")
+ if model_id:
+ all_models.add(model_id)
+ except Exception:
+ # Just skip this region on error
+ continue
+
+ if not all_models:
+ return [], "No Watsonx models found (public specs call failed for all regions?)"
+
+ return sorted(all_models), None
+
+
+def _list_ollama_models(settings: AppSettings) -> Tuple[List[str], Optional[str]]:
+ """
+ List models from a local/remote Ollama server via /api/tags.
+ """
+ base_url = getattr(settings.ollama, "base_url", None) or os.getenv(
+ "OLLAMA_BASE_URL", "http://localhost:11434"
+ )
+ url = f"{base_url.rstrip('/')}/api/tags"
+
+ try:
+ resp = requests.get(url, timeout=5)
+ resp.raise_for_status()
+ data = resp.json().get("models", [])
+ models = sorted({m.get("name", "") for m in data if m.get("name")})
+ return models, None
+ except Exception as e:
+ return [], f"Error listing Ollama models from {url}: {e}"
+
+
+def _list_ollabridge_models(settings: AppSettings) -> Tuple[List[str], Optional[str]]:
+ """
+ List models from an OllaBridge / OllaBridge Cloud instance via /v1/models.
+ Uses the OpenAI-compatible endpoint.
+ """
+ base_url = getattr(settings.ollabridge, "base_url", None) or os.getenv(
+ "OLLABRIDGE_BASE_URL", "http://localhost:8000"
+ )
+ api_key = getattr(settings.ollabridge, "api_key", None) or os.getenv("OLLABRIDGE_API_KEY", "")
+ url = f"{base_url.rstrip('/')}/v1/models"
+
+ headers: Dict[str, str] = {}
+ if api_key:
+ headers["Authorization"] = f"Bearer {api_key}"
+
+ try:
+ resp = requests.get(url, headers=headers, timeout=10)
+ resp.raise_for_status()
+ data = resp.json().get("data", [])
+ models = sorted({m.get("id", "") for m in data if m.get("id")})
+ return models, None
+ except Exception as e:
+ return [], f"Error listing OllaBridge models from {url}: {e}"
+
+
+# --- Public helper ------------------------------------------------------------
+
+
+def list_models_for_provider(
+ provider: LLMProvider,
+ settings: Optional[AppSettings] = None,
+) -> Tuple[List[str], Optional[str]]:
+ """
+ Return (models, error) for a given provider.
+
+ models: list of strings (model IDs / names)
+ error: human-readable error if something went wrong, otherwise None
+ """
+ if settings is None:
+ settings = get_settings()
+
+ if provider == LLMProvider.openai:
+ return _list_openai_models(settings)
+ if provider == LLMProvider.claude:
+ return _list_claude_models(settings)
+ if provider == LLMProvider.watsonx:
+ return _list_watsonx_models(settings)
+ if provider == LLMProvider.ollama:
+ return _list_ollama_models(settings)
+ if provider == LLMProvider.ollabridge:
+ return _list_ollabridge_models(settings)
+
+ return [], f"Unsupported provider: {provider}"
diff --git a/gitpilot/models.py b/gitpilot/models.py
new file mode 100644
index 0000000000000000000000000000000000000000..078aaea076e6e3bcfe33c341f1820be783f187aa
--- /dev/null
+++ b/gitpilot/models.py
@@ -0,0 +1,223 @@
+"""
+GitPilot Redesign — Shared Models & Schemas
+Centralized Pydantic models for the redesigned API contract.
+"""
+
+from enum import StrEnum
+from typing import Literal
+
+from pydantic import BaseModel, Field
+
+# βββ Enums ββββββββββββββββββββββββββββββββββββββββββββββββ
+
+class WorkspaceMode(StrEnum):
+    """How a session is anchored: plain folder, local git repo, or GitHub."""
+
+    folder = "folder"
+    local_git = "local_git"
+    github = "github"
+
+
+class ProviderName(StrEnum):
+    """LLM backends the API recognises."""
+
+    openai = "openai"
+    claude = "claude"
+    watsonx = "watsonx"
+    ollama = "ollama"
+    ollabridge = "ollabridge"
+
+
+class ProviderHealth(StrEnum):
+    """Coarse health states reported for a provider."""
+
+    ok = "ok"
+    warning = "warning"
+    error = "error"
+    unknown = "unknown"
+
+
+class ProviderConnectionType(StrEnum):
+    """How GitPilot reaches the provider (local server, API key, pairing, ...)."""
+
+    local = "local"
+    api_key = "api_key"
+    pairing = "pairing"
+    cloud = "cloud"
+    managed = "managed"
+
+
+class SessionMode(StrEnum):
+    """Session anchoring mode.
+
+    NOTE(review): values mirror WorkspaceMode exactly — presumably kept as a
+    separate enum for API-contract stability; confirm before unifying.
+    """
+
+    folder = "folder"
+    local_git = "local_git"
+    github = "github"
+
+
+# βββ Provider Models βββββββββββββββββββββββββββββββββββββ
+
+class ProviderSummary(BaseModel):
+    """Snapshot of the active provider for embedding in larger responses.
+
+    NOTE(review): field-for-field identical to ProviderStatusResponse —
+    presumably duplicated for contract stability; confirm before merging.
+    """
+
+    configured: bool = False
+    name: ProviderName = ProviderName.ollama
+    # Where the configuration came from (.env file, saved settings, or unknown).
+    source: Literal[".env", "settings", "unknown"] = "unknown"
+    model: str | None = None
+    base_url: str | None = None
+    connection_type: ProviderConnectionType | None = None
+    has_api_key: bool = False
+    health: ProviderHealth | None = ProviderHealth.unknown
+    models_available: bool | None = None
+    warning: str | None = None
+
+
+class ProviderStatusResponse(BaseModel):
+    """Standalone provider status payload (see also ProviderSummary)."""
+
+    configured: bool
+    name: ProviderName
+    # Where the configuration came from (.env file, saved settings, or unknown).
+    source: Literal[".env", "settings", "unknown"] = "unknown"
+    model: str | None = None
+    base_url: str | None = None
+    connection_type: ProviderConnectionType | None = None
+    has_api_key: bool = False
+    health: ProviderHealth | None = ProviderHealth.unknown
+    models_available: bool | None = None
+    warning: str | None = None
+
+
+# βββ Workspace Models ββββββββββββββββββββββββββββββββββββ
+
+class WorkspaceCapabilitySummary(BaseModel):
+    """Which workspace modes the server can currently offer."""
+
+    folder_mode_available: bool = False
+    local_git_available: bool = False
+    github_mode_available: bool = False
+
+
+class WorkspaceSummary(BaseModel):
+    """Details of the currently open workspace (folder plus optional git repo)."""
+
+    folder_open: bool = False
+    folder_path: str | None = None
+    folder_name: str | None = None
+    git_detected: bool = False
+    repo_root: str | None = None
+    repo_name: str | None = None
+    branch: str | None = None
+    remotes: list[str] = Field(default_factory=list)
+
+
+# βββ GitHub Models ββββββββββββββββββββββββββββββββββββββββ
+
+class GithubStatusSummary(BaseModel):
+    """GitHub connectivity state for the status endpoint."""
+
+    connected: bool = False
+    token_configured: bool = False
+    username: str | None = None
+
+
+# βββ Status Response βββββββββββββββββββββββββββββββββββββ
+
+class StatusResponse(BaseModel):
+    """Top-level status payload: server readiness plus sub-system summaries."""
+
+    server_ready: bool = True
+    provider: ProviderStatusResponse
+    workspace: WorkspaceCapabilitySummary
+    github: GithubStatusSummary
+
+
+# βββ Session Models ββββββββββββββββββββββββββββββββββββββ
+
+class StartSessionRequest(BaseModel):
+    """Request to open a session; only the fields relevant to ``mode`` are set."""
+
+    mode: WorkspaceMode
+    folder_path: str | None = None
+    repo_root: str | None = None
+    repo_full_name: str | None = None
+    branch: str | None = None
+
+
+class StartSessionResponse(BaseModel):
+    """Echo of the created session; ``status`` is always "active" on creation."""
+
+    session_id: str
+    mode: WorkspaceMode
+    title: str
+    status: Literal["active"] = "active"
+    folder_path: str | None = None
+    repo_root: str | None = None
+    repo_full_name: str | None = None
+    branch: str | None = None
+
+
+# βββ Chat Models βββββββββββββββββββββββββββββββββββββββββ
+
+class PlanStepSummary(BaseModel):
+    """One step of a proposed change plan."""
+
+    step: int
+    title: str
+    action: str
+    file: str | None = None
+    description: str
+    status: Literal["pending", "ready", "applied", "failed"] | None = "pending"
+
+
+class PlanSummary(BaseModel):
+    """A multi-step plan produced in response to a chat message."""
+
+    goal: str
+    summary: str
+    steps: list[PlanStepSummary]
+
+
+class FileReference(BaseModel):
+    """Pointer to a file (and optionally a line) cited in an answer."""
+
+    path: str
+    line: int | None = None
+
+
+class ChatMessageRequest(BaseModel):
+    """Incoming chat message scoped to the workspace, a selection, or one file."""
+
+    session_id: str
+    message: str
+    scope: Literal["workspace", "selection", "file"] = "workspace"
+
+
+class ChatMessageResponse(BaseModel):
+    """Assistant reply, optionally carrying a plan and file references."""
+
+    session_id: str
+    answer: str
+    message_id: str | None = None
+    plan: PlanSummary | None = None
+    references: list[FileReference] = Field(default_factory=list)
+
+
+# βββ Provider Test Models ββββββββββββββββββββββββββββββββ
+
+class OpenAIProviderInput(BaseModel):
+    """Override fields for testing an OpenAI provider configuration."""
+
+    api_key: str | None = None
+    base_url: str | None = None
+    model: str | None = None
+
+
+class ClaudeProviderInput(BaseModel):
+    """Override fields for testing a Claude (Anthropic) provider configuration."""
+
+    api_key: str | None = None
+    base_url: str | None = None
+    model: str | None = None
+
+
+class WatsonxProviderInput(BaseModel):
+    """Override fields for testing a Watsonx provider configuration."""
+
+    api_key: str | None = None
+    project_id: str | None = None
+    base_url: str | None = None
+    model_id: str | None = None
+
+
+class OllamaProviderInput(BaseModel):
+    """Override fields for testing an Ollama provider configuration."""
+
+    base_url: str | None = None
+    model: str | None = None
+
+
+class OllaBridgeProviderInput(BaseModel):
+    """Override fields for testing an OllaBridge provider configuration."""
+
+    base_url: str | None = None
+    model: str | None = None
+    api_key: str | None = None
+    connection_type: ProviderConnectionType | None = None
+
+
+class ProviderTestRequest(BaseModel):
+    """Provider-test payload; ``provider`` selects which input block applies."""
+
+    provider: ProviderName
+    openai: OpenAIProviderInput | None = None
+    claude: ClaudeProviderInput | None = None
+    watsonx: WatsonxProviderInput | None = None
+    ollama: OllamaProviderInput | None = None
+    ollabridge: OllaBridgeProviderInput | None = None
+
+
+class ProviderTestResponse(ProviderStatusResponse):
+    """Provider status plus an optional free-form diagnostic message."""
+
+    details: str | None = None
+
+
+# βββ OllaBridge Health ββββββββββββββββββββββββββββββββββββ
+
+class OllaBridgeHealthResponse(BaseModel):
+    """Normalized OllaBridge health result returned to the UI."""
+
+    status: Literal["ok", "error"]
+    base_url: str
+    # Base URL with /v1 appended — what GitPilot actually calls.
+    effective_api_base: str
+    models_available: bool = False
+    auth_mode: str = "unknown"
+    warning: str | None = None
diff --git a/gitpilot/nl_database.py b/gitpilot/nl_database.py
new file mode 100644
index 0000000000000000000000000000000000000000..6684c52b8ae905a0a614a0f64cf043035ea63249
--- /dev/null
+++ b/gitpilot/nl_database.py
@@ -0,0 +1,381 @@
+# gitpilot/nl_database.py
+"""Natural language database queries via MCP.
+
+Translates plain-English questions into SQL (or other query languages),
+executes them through an MCP database server connection, and returns
+human-readable results.
+
+Architecture::
+
+ User question
+ β
+ βΌ
+ NLQueryEngine.ask()
+ β
+ βββΊ schema_context() β fetch table/collection metadata
+ βββΊ translate() β NL β SQL via LLM or rule-based
+ βββΊ validate_query() β safety checks (no DROP, DELETE without WHERE, etc.)
+ βββΊ execute() β run via MCP call_tool
+ βββΊ format_response() β tabular or narrative answer
+
+Inspired by:
+- C3 SQL (2023): zero-shot text-to-SQL with calibrated confidence
+- DIN-SQL (2023): decomposed in-context learning for text-to-SQL
+- BIRD benchmark (2023): bridging text-to-SQL with real-world databases
+
+Security: Queries are validated before execution. Destructive statements
+(DROP, TRUNCATE, ALTER, DELETE without WHERE) are blocked by default.
+"""
+from __future__ import annotations
+
+import re
+from dataclasses import dataclass, field
+from enum import Enum
+from typing import Any, Dict, List, Optional
+
+
+# ---------------------------------------------------------------------------
+# Enums & data models
+# ---------------------------------------------------------------------------
+
+class QueryDialect(str, Enum):
+    """SQL dialect tag attached to generated queries and results."""
+
+    POSTGRESQL = "postgresql"
+    MYSQL = "mysql"
+    SQLITE = "sqlite"
+    GENERIC_SQL = "sql"
+
+
+class SafetyLevel(str, Enum):
+    """How destructive a generated query is allowed to be."""
+
+    READ_ONLY = "read_only"  # SELECT only
+    READ_WRITE = "read_write"  # SELECT, INSERT, UPDATE (with WHERE)
+    UNRESTRICTED = "unrestricted"  # All statements (use with caution)
+
+
+@dataclass
+class TableSchema:
+ """Metadata about a database table."""
+
+ name: str
+ columns: List[Dict[str, str]] = field(default_factory=list) # [{"name": ..., "type": ...}]
+ primary_key: Optional[str] = None
+ row_count: Optional[int] = None
+ description: str = ""
+
+ def to_prompt_text(self) -> str:
+ """Format as context for LLM prompt."""
+ cols = ", ".join(
+ f"{c['name']} {c.get('type', 'TEXT')}" for c in self.columns
+ )
+ pk = f" PK={self.primary_key}" if self.primary_key else ""
+ return f"TABLE {self.name} ({cols}){pk}"
+
+
+@dataclass
+class QueryResult:
+    """Result of a natural language database query."""
+
+    original_question: str
+    generated_sql: str
+    dialect: QueryDialect
+    rows: List[Dict[str, Any]] = field(default_factory=list)
+    columns: List[str] = field(default_factory=list)
+    row_count: int = 0
+    explanation: str = ""
+    error: Optional[str] = None
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Serialise for API responses; rows are capped at 100 entries."""
+        return {
+            "original_question": self.original_question,
+            "generated_sql": self.generated_sql,
+            "dialect": self.dialect.value,
+            "rows": self.rows[:100],  # Limit for API responses
+            "columns": self.columns,
+            "row_count": self.row_count,
+            "explanation": self.explanation,
+            "error": self.error,
+        }
+
+    def to_table_string(self, max_rows: int = 20) -> str:
+        """Format as a plain-text table for CLI output.
+
+        Cell values are truncated to 40 characters, at most *max_rows* rows
+        are rendered, and a trailing "... N more rows" note is appended when
+        the result was truncated.
+        """
+        if self.error:
+            return f"Error: {self.error}"
+        if not self.rows:
+            return "No results."
+
+        # Column order: explicit columns if set, else keys of the first row.
+        headers = self.columns or list(self.rows[0].keys())
+        col_widths = [len(h) for h in headers]
+        display_rows = self.rows[:max_rows]
+
+        # Widen each column to its longest visible value (capped at 40 chars).
+        for row in display_rows:
+            for i, h in enumerate(headers):
+                val = str(row.get(h, ""))
+                col_widths[i] = max(col_widths[i], min(len(val), 40))
+
+        sep = "+-" + "-+-".join("-" * w for w in col_widths) + "-+"
+        header_line = "| " + " | ".join(
+            h.ljust(col_widths[i]) for i, h in enumerate(headers)
+        ) + " |"
+
+        lines = [sep, header_line, sep]
+        for row in display_rows:
+            vals = []
+            for i, h in enumerate(headers):
+                v = str(row.get(h, ""))[:40]
+                vals.append(v.ljust(col_widths[i]))
+            lines.append("| " + " | ".join(vals) + " |")
+        lines.append(sep)
+
+        if self.row_count > max_rows:
+            lines.append(f"... {self.row_count - max_rows} more rows")
+
+        return "\n".join(lines)
+
+
+# ---------------------------------------------------------------------------
+# SQL translation helpers
+# ---------------------------------------------------------------------------
+
+# Statement types blocked in READ_ONLY mode (anything that mutates or grants).
+_BLOCKED_PATTERNS_READ_ONLY = [
+    r"\b(?:INSERT|UPDATE|DELETE|DROP|ALTER|TRUNCATE|CREATE|GRANT|REVOKE)\b",
+]
+
+# Statement types blocked in READ_WRITE mode (DDL/grants plus unscoped DELETE).
+_BLOCKED_PATTERNS_READ_WRITE = [
+    r"\b(?:DROP|ALTER|TRUNCATE|CREATE|GRANT|REVOKE)\b",
+    r"\bDELETE\b(?!.*\bWHERE\b)",  # DELETE without WHERE
+]
+
+_NL_TO_SQL_MAPPINGS = [
+    # Pattern -> SQL fragment mappings for simple rule-based fallback.
+    # Placeholders {table}/{column}/{limit} are filled in by translate().
+    (r"(?i)\bhow many\b.*\b(\w+)\b", "SELECT COUNT(*) FROM {table}"),
+    (r"(?i)\blist\s+(?:all\s+)?(\w+)\b", "SELECT * FROM {table} LIMIT 100"),
+    (r"(?i)\bshow\s+(?:all\s+)?(\w+)\b", "SELECT * FROM {table} LIMIT 100"),
+    (r"(?i)\baverage\b.*\b(\w+)\b.*\bof\b\s+(\w+)", "SELECT AVG({column}) FROM {table}"),
+    (r"(?i)\btop\s+(\d+)\b.*\b(\w+)\b", "SELECT * FROM {table} LIMIT {limit}"),
+]
+
+
+class NLQueryEngine:
+ """Translate natural language to SQL and execute via MCP.
+
+ Usage::
+
+ engine = NLQueryEngine(dialect=QueryDialect.POSTGRESQL)
+ engine.set_schema([table1, table2])
+
+ # Translate only
+ sql = engine.translate("How many users signed up this month?")
+
+ # Full pipeline
+ result = await engine.ask("Show top 10 customers by revenue")
+ print(result.to_table_string())
+ """
+
+ def __init__(
+ self,
+ dialect: QueryDialect = QueryDialect.POSTGRESQL,
+ safety_level: SafetyLevel = SafetyLevel.READ_ONLY,
+ mcp_client: Optional[Any] = None,
+ mcp_tool_name: str = "query",
+ ) -> None:
+ self.dialect = dialect
+ self.safety_level = safety_level
+ self.mcp_client = mcp_client
+ self.mcp_tool_name = mcp_tool_name
+ self._schema: List[TableSchema] = []
+
+ # --- Schema management ------------------------------------------------
+
+ def set_schema(self, tables: List[TableSchema]) -> None:
+ """Set the database schema for context-aware translation."""
+ self._schema = tables
+
+ def add_table(self, table: TableSchema) -> None:
+ """Add a single table to the schema."""
+ self._schema.append(table)
+
+ def get_schema_context(self) -> str:
+ """Build a prompt-friendly schema description."""
+ if not self._schema:
+ return "No schema information available."
+ return "\n".join(t.to_prompt_text() for t in self._schema)
+
+ def get_table_names(self) -> List[str]:
+ """Return list of known table names."""
+ return [t.name for t in self._schema]
+
+ # --- Translation ------------------------------------------------------
+
+ def translate(self, question: str) -> str:
+ """Translate natural language to SQL.
+
+ Uses rule-based matching as a deterministic fallback.
+ In production, this would call an LLM with the schema context.
+ """
+ table_names = self.get_table_names()
+ question_lower = question.lower()
+
+ # Try to find a matching table name in the question
+ matched_table = ""
+ for name in table_names:
+ if name.lower() in question_lower:
+ matched_table = name
+ break
+
+ # If no exact match, try singular/plural heuristics
+ if not matched_table:
+ for name in table_names:
+ singular = name.rstrip("s").lower()
+ if singular in question_lower:
+ matched_table = name
+ break
+
+ if not matched_table and table_names:
+ matched_table = table_names[0]
+
+ # Rule-based translation
+ for pattern, sql_template in _NL_TO_SQL_MAPPINGS:
+ match = re.search(pattern, question)
+ if match:
+ sql = sql_template.replace("{table}", matched_table)
+ # Handle captures
+ groups = match.groups()
+ if "{limit}" in sql and groups:
+ sql = sql.replace("{limit}", groups[0])
+ if "{column}" in sql and len(groups) > 1:
+ sql = sql.replace("{column}", groups[0])
+ sql = sql.replace("{table}", groups[1] if len(groups) > 1 else matched_table)
+ return sql
+
+ # Default: SELECT * with LIMIT
+ return f"SELECT * FROM {matched_table} LIMIT 100"
+
+ # --- Validation -------------------------------------------------------
+
+ def validate_query(self, sql: str) -> Optional[str]:
+ """Validate a SQL query against safety rules.
+
+ Returns None if valid, or an error message string if blocked.
+ """
+ sql_upper = sql.upper().strip()
+
+ if self.safety_level == SafetyLevel.READ_ONLY:
+ patterns = _BLOCKED_PATTERNS_READ_ONLY
+ elif self.safety_level == SafetyLevel.READ_WRITE:
+ patterns = _BLOCKED_PATTERNS_READ_WRITE
+ else:
+ return None # Unrestricted
+
+ for pattern in patterns:
+ if re.search(pattern, sql_upper):
+ return f"Query blocked by safety policy ({self.safety_level.value}): matches '{pattern}'"
+
+ # Check for multiple statements (SQL injection prevention)
+ statements = [s.strip() for s in sql.split(";") if s.strip()]
+ if len(statements) > 1:
+ return "Multiple SQL statements are not allowed."
+
+ return None
+
+ # --- Execution --------------------------------------------------------
+
+ async def execute(self, sql: str) -> QueryResult:
+ """Execute a SQL query via MCP and return structured results."""
+ if self.mcp_client is None:
+ return QueryResult(
+ original_question="",
+ generated_sql=sql,
+ dialect=self.dialect,
+ error="No MCP client configured. Cannot execute queries.",
+ )
+
+ try:
+ raw = await self.mcp_client.call_tool(
+ self.mcp_tool_name,
+ {"query": sql},
+ )
+ except Exception as exc:
+ return QueryResult(
+ original_question="",
+ generated_sql=sql,
+ dialect=self.dialect,
+ error=f"Execution error: {exc}",
+ )
+
+ rows = raw if isinstance(raw, list) else raw.get("rows", []) if isinstance(raw, dict) else []
+ columns = list(rows[0].keys()) if rows else []
+
+ return QueryResult(
+ original_question="",
+ generated_sql=sql,
+ dialect=self.dialect,
+ rows=rows,
+ columns=columns,
+ row_count=len(rows),
+ )
+
+ # --- Full pipeline ----------------------------------------------------
+
+ async def ask(self, question: str) -> QueryResult:
+ """Full NL-to-SQL pipeline: translate β validate β execute.
+
+ This is the main entry point for natural language queries.
+ """
+ sql = self.translate(question)
+
+ # Validate before execution
+ error = self.validate_query(sql)
+ if error:
+ return QueryResult(
+ original_question=question,
+ generated_sql=sql,
+ dialect=self.dialect,
+ error=error,
+ )
+
+ result = await self.execute(sql)
+ result.original_question = question
+ return result
+
+ def explain(self, sql: str) -> str:
+ """Return a human-readable explanation of what a SQL query does."""
+ upper = sql.upper().strip()
+ parts = []
+
+ if upper.startswith("SELECT"):
+ # Extract main components
+ table_match = re.search(r"\bFROM\s+(\w+)", sql, re.IGNORECASE)
+ where_match = re.search(r"\bWHERE\s+(.+?)(?:\bORDER|\bLIMIT|\bGROUP|$)", sql, re.IGNORECASE)
+ limit_match = re.search(r"\bLIMIT\s+(\d+)", sql, re.IGNORECASE)
+
+ table = table_match.group(1) if table_match else "unknown"
+
+ if "COUNT(*)" in upper:
+ parts.append(f"Count all rows in '{table}'")
+ elif "AVG(" in upper:
+ parts.append(f"Calculate averages from '{table}'")
+ elif "SUM(" in upper:
+ parts.append(f"Sum values from '{table}'")
+ else:
+ parts.append(f"Retrieve data from '{table}'")
+
+ if where_match:
+ parts.append(f"filtered by: {where_match.group(1).strip()}")
+ if limit_match:
+ parts.append(f"limited to {limit_match.group(1)} rows")
+
+ elif upper.startswith("INSERT"):
+ table_match = re.search(r"\bINTO\s+(\w+)", sql, re.IGNORECASE)
+ table = table_match.group(1) if table_match else "unknown"
+ parts.append(f"Insert new data into '{table}'")
+
+ elif upper.startswith("UPDATE"):
+ table_match = re.search(r"\bUPDATE\s+(\w+)", sql, re.IGNORECASE)
+ table = table_match.group(1) if table_match else "unknown"
+ parts.append(f"Update records in '{table}'")
+
+ elif upper.startswith("DELETE"):
+ table_match = re.search(r"\bFROM\s+(\w+)", sql, re.IGNORECASE)
+ table = table_match.group(1) if table_match else "unknown"
+ parts.append(f"Delete records from '{table}'")
+
+ return ". ".join(parts) if parts else "Unknown query type."
diff --git a/gitpilot/ollabridge_proxy.py b/gitpilot/ollabridge_proxy.py
new file mode 100644
index 0000000000000000000000000000000000000000..bcdc33b85288d792a5d52c6e44220dda6c5fedae
--- /dev/null
+++ b/gitpilot/ollabridge_proxy.py
@@ -0,0 +1,193 @@
+# gitpilot/ollabridge_proxy.py
+"""OllaBridge Cloud proxy endpoints for GitPilot.
+
+Provides server-side proxy for OllaBridge Cloud device pairing
+and model discovery, avoiding CORS issues when the frontend
+calls remote OllaBridge instances.
+"""
+from __future__ import annotations
+
+import logging
+
+import httpx
+from fastapi import APIRouter, Query
+from pydantic import BaseModel
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter(prefix="/api/ollabridge", tags=["ollabridge"])
+
+
+class PairRequest(BaseModel):
+    """Pairing request: target OllaBridge base URL plus the short device code."""
+
+    base_url: str
+    code: str
+
+
+class PairResponse(BaseModel):
+    """Pairing outcome; ``token`` is set only on success."""
+
+    success: bool
+    token: str | None = None
+    error: str | None = None
+
+
+@router.post("/pair", response_model=PairResponse)
+async def proxy_pair(req: PairRequest):
+    """Proxy device pairing request to OllaBridge Cloud.
+
+    Forwards the pairing code to the OllaBridge /device/pair-simple endpoint
+    and returns the device token on success. Failures (non-200 responses,
+    non-JSON error pages, connection errors, timeouts) are mapped to
+    human-readable error strings rather than raised, so the frontend always
+    receives a PairResponse.
+    """
+    base = req.base_url.rstrip("/")
+    try:
+        async with httpx.AsyncClient(timeout=15.0) as client:
+            resp = await client.post(
+                f"{base}/device/pair-simple",
+                json={"code": req.code},
+                headers={"Content-Type": "application/json"},
+            )
+            if resp.status_code == 200:
+                data = resp.json()
+                if data.get("status") == "ok":
+                    return PairResponse(
+                        success=True,
+                        token=data.get("device_token"),
+                    )
+                # Endpoint returned an error in the response body
+                return PairResponse(
+                    success=False,
+                    error=data.get("error") or "Pairing failed",
+                )
+            # Try to extract error message from non-200 responses
+            try:
+                err_data = resp.json()
+                detail = err_data.get("detail") or err_data.get("error")
+                if isinstance(detail, list):
+                    # FastAPI-style validation errors: list of {"msg": ...} dicts.
+                    err_msg = "; ".join(
+                        e.get("msg", str(e)) if isinstance(e, dict) else str(e)
+                        for e in detail
+                    )
+                elif detail:
+                    err_msg = str(detail)
+                else:
+                    err_msg = f"OllaBridge returned HTTP {resp.status_code}"
+            except Exception:
+                # Response is not JSON (e.g. HTML error page from HF Spaces)
+                body_preview = resp.text[:200] if resp.text else ""
+                logger.warning(
+                    "OllaBridge pair: HTTP %d, non-JSON body: %s",
+                    resp.status_code,
+                    body_preview,
+                )
+                # Map common gateway statuses to friendlier guidance.
+                if resp.status_code == 503:
+                    err_msg = "OllaBridge is starting up. Please try again in a moment."
+                elif resp.status_code >= 500:
+                    err_msg = (
+                        f"OllaBridge server error (HTTP {resp.status_code}). "
+                        "The service may be restarting β try again shortly."
+                    )
+                elif resp.status_code == 422:
+                    err_msg = "Invalid pairing request format."
+                else:
+                    err_msg = f"OllaBridge returned HTTP {resp.status_code}"
+            return PairResponse(success=False, error=err_msg)
+    except httpx.ConnectError:
+        return PairResponse(success=False, error=f"Cannot reach {base}")
+    except httpx.TimeoutException:
+        return PairResponse(success=False, error="Connection timed out")
+    except Exception as exc:
+        logger.warning("OllaBridge pair proxy error: %s", exc)
+        return PairResponse(success=False, error=str(exc))
+
+
+class ModelsResponse(BaseModel):
+    """Model listing result; ``error`` is set when the upstream call failed."""
+
+    models: list[str]
+    error: str | None = None
+
+
+class OllaBridgeNormalizedHealth(BaseModel):
+    """Machine-friendly health summary for the redesigned UI."""
+
+    status: str  # "ok" or "error"
+    base_url: str
+    # Base URL with /v1 appended — what GitPilot actually calls.
+    effective_api_base: str
+    models_available: bool = False
+    auth_mode: str = "unknown"
+    warning: str | None = None
+
+
+@router.get("/models", response_model=ModelsResponse)
+async def proxy_models(base_url: str = "https://ruslanmv-ollabridge.hf.space", api_key: str = ""):
+ """Proxy model listing request to an OllaBridge instance."""
+ base = base_url.rstrip("/")
+ try:
+ headers: dict[str, str] = {"Accept": "application/json"}
+ if api_key:
+ headers["Authorization"] = f"Bearer {api_key}"
+ async with httpx.AsyncClient(timeout=10.0) as client:
+ resp = await client.get(f"{base}/v1/models", headers=headers)
+ if resp.status_code == 200:
+ data = resp.json()
+ if isinstance(data, dict) and "data" in data:
+ models = sorted({m.get("id", "") for m in data["data"] if m.get("id")})
+ return ModelsResponse(models=models)
+ if isinstance(data, dict) and "models" in data:
+ models = sorted({
+ m.get("name", m.get("model", ""))
+ for m in data["models"]
+ if m.get("name") or m.get("model")
+ })
+ return ModelsResponse(models=models)
+ return ModelsResponse(models=[], error=f"HTTP {resp.status_code}")
+ except Exception as exc:
+ return ModelsResponse(models=[], error=str(exc))
+
+
+@router.get("/health")
+async def proxy_health(base_url: str = "https://ruslanmv-ollabridge.hf.space"):
+ """Check OllaBridge instance health."""
+ base = base_url.rstrip("/")
+ try:
+ async with httpx.AsyncClient(timeout=5.0) as client:
+ resp = await client.get(f"{base}/health")
+ if resp.status_code == 200:
+ return {"status": "ok", "url": base, "data": resp.json()}
+ return {"status": "error", "url": base, "http_status": resp.status_code}
+ except Exception as exc:
+ return {"status": "error", "url": base, "error": str(exc)}
+
+
+@router.get("/normalized-health")
+async def ollabridge_normalized_health(
+    base_url: str = Query(default="http://127.0.0.1:8000"),
+    api_key: str | None = Query(default=None),
+):
+    """Normalized health check with machine-friendly fields for the redesigned UI.
+
+    Strips a trailing ``/v1`` from *base_url* (emitting a warning for the
+    caller), probes ``<base>/v1/models`` with optional bearer auth, and
+    reports whether the model-list endpoint answered 200.
+    """
+    effective_base = base_url.rstrip("/")
+    warning = None
+    if effective_base.endswith("/v1"):
+        # Users often paste the API base including /v1; normalise it away.
+        effective_base = effective_base[:-3]
+        warning = "Do not include /v1; GitPilot adds it automatically."
+
+    effective_api_base = f"{effective_base}/v1"
+
+    # auth_mode is informational only; it does not change the probe itself.
+    auth_mode = "local"
+    if api_key:
+        auth_mode = "api_key"
+
+    try:
+        headers = {}
+        if api_key:
+            headers["Authorization"] = f"Bearer {api_key}"
+        async with httpx.AsyncClient(timeout=5.0) as client:
+            resp = await client.get(f"{effective_api_base}/models", headers=headers)
+            models_available = resp.status_code == 200
+            status = "ok" if resp.status_code == 200 else "error"
+    except Exception:
+        # Unreachable host, TLS failure, timeout, etc. all report as error.
+        status = "error"
+        models_available = False
+
+    return OllaBridgeNormalizedHealth(
+        status=status,
+        base_url=base_url,
+        effective_api_base=effective_api_base,
+        models_available=models_available,
+        auth_mode=auth_mode,
+        warning=warning,
+    )
diff --git a/gitpilot/permissions.py b/gitpilot/permissions.py
new file mode 100644
index 0000000000000000000000000000000000000000..62c07e07d7d1d0b45bf621a7eac8fda30f8abc20
--- /dev/null
+++ b/gitpilot/permissions.py
@@ -0,0 +1,131 @@
+# gitpilot/permissions.py
+"""Fine-grained permission system for tool execution.
+
+Controls what agents can do based on configurable policies.
+Three modes:
+
+- **NORMAL** β ask before risky operations (default)
+- **PLAN** β read-only; all writes and shell commands blocked
+- **AUTO** β allow everything without confirmation
+
+Permissions live in ``.gitpilot/permissions.json`` or are set via API.
+"""
+from __future__ import annotations
+
+import fnmatch
+import json
+import logging
+from dataclasses import dataclass, field
+from enum import Enum
+from pathlib import Path
+from typing import Any, Dict, List, Optional, Set
+
+logger = logging.getLogger(__name__)
+
+
class PermissionMode(str, Enum):
    """Global execution mode controlling how strictly actions are gated."""

    NORMAL = "normal"  # ask before risky operations (default)
    PLAN = "plan"      # read-only; all writes and shell commands blocked
    AUTO = "auto"      # allow everything without confirmation


class Action(str, Enum):
    """Concrete operations an agent may attempt."""

    READ_FILE = "read_file"
    WRITE_FILE = "write_file"
    DELETE_FILE = "delete_file"
    RUN_COMMAND = "run_command"
    GIT_COMMIT = "git_commit"
    GIT_PUSH = "git_push"
    CREATE_ISSUE = "create_issue"
    CREATE_PR = "create_pr"
    MERGE_PR = "merge_pr"


# Actions that require user confirmation in NORMAL mode.
RISKY_ACTIONS: frozenset[Action] = frozenset([
    Action.DELETE_FILE,
    Action.GIT_PUSH,
    Action.MERGE_PR,
    Action.RUN_COMMAND,
])

# The only actions permitted while in PLAN (read-only) mode.
READ_ONLY_ACTIONS: frozenset[Action] = frozenset([
    Action.READ_FILE,
])


@dataclass
class PermissionPolicy:
    """Configuration bundle consulted by :class:`PermissionManager`."""

    mode: PermissionMode = PermissionMode.NORMAL
    # None means "every action is allowed" (subject to mode and paths).
    allowed_actions: Optional[Set[Action]] = None
    # Glob patterns matched against both the full path and its basename.
    blocked_paths: List[str] = field(default_factory=lambda: [
        ".env", ".env.*", "*.pem", "*.key", "credentials*", "secrets*",
    ])
    # None means "no command restriction".
    allowed_commands: Optional[List[str]] = None
    require_confirmation: Set[Action] = field(
        default_factory=lambda: set(RISKY_ACTIONS),
    )


class PermissionManager:
    """Check and enforce permissions for agent actions.

    Raises ``PermissionError`` when an action is disallowed; otherwise
    ``check`` returns ``True`` so call sites can assert on it.
    """

    def __init__(self, policy: Optional[PermissionPolicy] = None):
        self.policy = policy or PermissionPolicy()

    def check(
        self, action: Action, context: Optional[Dict[str, Any]] = None,
    ) -> bool:
        """Return ``True`` if allowed, raise ``PermissionError`` if blocked.

        ``context`` may carry a ``"path"`` key naming the file the action
        touches; blocked-path patterns are enforced in *every* mode — a
        read-only PLAN mode must still not expose credential files
        (previously the path check was skipped in PLAN mode).
        """
        if context and "path" in context:
            self._check_path(context["path"])

        if self.policy.mode == PermissionMode.PLAN:
            if action not in READ_ONLY_ACTIONS:
                raise PermissionError(
                    f"Action '{action.value}' blocked in plan mode (read-only)"
                )
            return True

        if self.policy.allowed_actions is not None:
            if action not in self.policy.allowed_actions:
                raise PermissionError(
                    f"Action '{action.value}' not in allowed actions"
                )

        return True

    def needs_confirmation(self, action: Action) -> bool:
        """True when the action should be confirmed by the user first.

        AUTO never confirms; PLAN never confirms (risky actions are already
        hard-blocked by :meth:`check` in that mode).
        """
        if self.policy.mode == PermissionMode.AUTO:
            return False
        if self.policy.mode == PermissionMode.PLAN:
            return False
        return action in self.policy.require_confirmation

    def _check_path(self, path: str):
        """Raise ``PermissionError`` if *path* matches a blocked pattern.

        Patterns are tried against both the full path and the basename.
        """
        # Path(...).name understands the platform's separators; the previous
        # manual split("/") missed Windows-style backslash paths.
        basename = Path(path).name
        for pattern in self.policy.blocked_paths:
            if fnmatch.fnmatch(path, pattern) or fnmatch.fnmatch(basename, pattern):
                raise PermissionError(
                    f"Access to '{path}' blocked by policy (pattern '{pattern}')"
                )

    def to_dict(self) -> Dict[str, Any]:
        """Serializable snapshot of the active policy (for the API layer)."""
        return {
            "mode": self.policy.mode.value,
            "blocked_paths": self.policy.blocked_paths,
            "allowed_commands": self.policy.allowed_commands,
        }

    def load_from_file(self, path: Path):
        """Merge settings from a JSON file into the current policy.

        Missing file is a no-op; a malformed file is logged and ignored so a
        broken config never disables the agent entirely.
        """
        if not path.exists():
            return
        try:
            data = json.loads(path.read_text())
            if "mode" in data:
                self.policy.mode = PermissionMode(data["mode"])
            if "blocked_paths" in data:
                self.policy.blocked_paths = data["blocked_paths"]
            if "allowed_commands" in data:
                self.policy.allowed_commands = data["allowed_commands"]
        except Exception as e:
            logger.warning("Failed to load permissions: %s", e)
diff --git a/gitpilot/plugins.py b/gitpilot/plugins.py
new file mode 100644
index 0000000000000000000000000000000000000000..f97d100f21b90de0c598498f98ab8c5c465691ef
--- /dev/null
+++ b/gitpilot/plugins.py
@@ -0,0 +1,253 @@
+# gitpilot/plugins.py
+"""Plugin system for GitPilot.
+
+Plugins extend GitPilot with additional skills, hooks, MCP configs,
+and custom agent types. Plugins are installed from git URLs or local
+directories into ``~/.gitpilot/plugins/``.
+
+A plugin is a directory containing a ``plugin.json`` manifest:
+
+.. code-block:: json
+
+ {
+ "name": "my-plugin",
+ "version": "1.0.0",
+ "description": "Does amazing things",
+ "skills": ["skills/*.md"],
+ "hooks": ["hooks.json"],
+ "mcp": ["mcp.json"]
+ }
+"""
+from __future__ import annotations
+
+import json
+import logging
+import shutil
+import subprocess
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+logger = logging.getLogger(__name__)
+
+PLUGINS_DIR = Path.home() / ".gitpilot" / "plugins"
+MANIFEST_FILE = "plugin.json"
+
+
@dataclass
class PluginInfo:
    """Metadata about an installed plugin."""

    name: str
    version: str = "0.0.0"
    description: str = ""
    author: str = ""
    source: str = ""  # git URL or local path
    skills: List[str] = field(default_factory=list)
    hooks: List[str] = field(default_factory=list)
    mcp_configs: List[str] = field(default_factory=list)
    path: Optional[Path] = None

    @classmethod
    def from_manifest(cls, manifest_path: Path) -> "PluginInfo":
        """Parse a ``plugin.json`` manifest file into a PluginInfo.

        Only ``name`` is mandatory; every other field falls back to its
        dataclass default. ``path`` is set to the manifest's directory.
        """
        raw = json.loads(manifest_path.read_text())
        optional = {
            "version": raw.get("version", "0.0.0"),
            "description": raw.get("description", ""),
            "author": raw.get("author", ""),
            "source": raw.get("source", ""),
            "skills": raw.get("skills", []),
            "hooks": raw.get("hooks", []),
            # Manifest key is "mcp"; the attribute is named mcp_configs.
            "mcp_configs": raw.get("mcp", []),
        }
        return cls(name=raw["name"], path=manifest_path.parent, **optional)

    def to_dict(self) -> Dict[str, Any]:
        """JSON-friendly representation (``path`` becomes str or None)."""
        payload: Dict[str, Any] = {
            "name": self.name,
            "version": self.version,
            "description": self.description,
            "author": self.author,
            "source": self.source,
            "skills": self.skills,
            "hooks": self.hooks,
            "mcp_configs": self.mcp_configs,
        }
        payload["path"] = None if self.path is None else str(self.path)
        return payload
+
+
class PluginManager:
    """Discover, install, and manage GitPilot plugins.

    Plugins live as directories under ``plugins_dir`` (default
    ``~/.gitpilot/plugins``), each containing a ``plugin.json`` manifest.
    Loaded manifests are cached in-memory by plugin name.
    """

    def __init__(self, plugins_dir: Optional[Path] = None) -> None:
        self.plugins_dir = plugins_dir or PLUGINS_DIR
        # Create the directory eagerly so install/list never hit ENOENT.
        self.plugins_dir.mkdir(parents=True, exist_ok=True)
        self._cache: Dict[str, PluginInfo] = {}

    # ------------------------------------------------------------------
    # Install / Uninstall
    # ------------------------------------------------------------------

    def install(self, source: str) -> PluginInfo:
        """Install a plugin from a git URL or local directory path.

        Returns the installed PluginInfo.
        """
        source_path = Path(source)

        # An existing local directory wins; otherwise the string must look
        # like a git remote (http(s) or ssh-style git@).
        if source_path.is_dir():
            return self._install_from_local(source_path)

        if source.startswith(("http://", "https://", "git@")):
            return self._install_from_git(source)

        raise ValueError(f"Invalid plugin source: {source}. Provide a git URL or local path.")

    def _install_from_git(self, url: str) -> PluginInfo:
        """Clone (or fast-forward update) a plugin repo into plugins_dir."""
        # Derive plugin name from URL
        name = url.rstrip("/").split("/")[-1]
        if name.endswith(".git"):
            name = name[:-4]
        dest = self.plugins_dir / name

        # check=True raises CalledProcessError on git failure; output is
        # captured so git noise does not leak to the caller's stdout.
        if dest.exists():
            # Update existing
            subprocess.run(
                ["git", "pull", "--ff-only"],
                cwd=str(dest),
                capture_output=True,
                check=True,
            )
        else:
            subprocess.run(
                ["git", "clone", "--depth=1", url, str(dest)],
                capture_output=True,
                check=True,
            )

        return self._load_plugin(dest)

    def _install_from_local(self, source: Path) -> PluginInfo:
        """Copy a local plugin directory into plugins_dir (replace if present)."""
        manifest = source / MANIFEST_FILE
        if not manifest.exists():
            raise FileNotFoundError(f"No {MANIFEST_FILE} found in {source}")
        info = PluginInfo.from_manifest(manifest)
        dest = self.plugins_dir / info.name
        # Remove any previous copy first: copytree requires a fresh target.
        if dest.exists():
            shutil.rmtree(dest)
        shutil.copytree(source, dest)
        return self._load_plugin(dest)

    def uninstall(self, plugin_name: str) -> bool:
        """Uninstall a plugin by name. Returns True if removed."""
        # NOTE(review): plugin_name is joined onto plugins_dir unvalidated;
        # a name like "../x" would escape the plugins directory — confirm
        # callers only pass names from list_installed().
        dest = self.plugins_dir / plugin_name
        if not dest.exists():
            return False
        shutil.rmtree(dest)
        # Drop the cached entry so a stale PluginInfo is never returned.
        self._cache.pop(plugin_name, None)
        logger.info("Uninstalled plugin: %s", plugin_name)
        return True

    # ------------------------------------------------------------------
    # Discovery
    # ------------------------------------------------------------------

    def list_installed(self) -> List[PluginInfo]:
        """List all installed plugins."""
        plugins = []
        if not self.plugins_dir.exists():
            return plugins
        for child in sorted(self.plugins_dir.iterdir()):
            # Only directories that carry a manifest count as plugins.
            if child.is_dir() and (child / MANIFEST_FILE).exists():
                try:
                    plugins.append(self._load_plugin(child))
                except Exception as e:
                    # A corrupt manifest skips that plugin but never aborts listing.
                    logger.warning("Bad plugin at %s: %s", child, e)
        return plugins

    def get_plugin(self, name: str) -> Optional[PluginInfo]:
        """Get a specific plugin by name."""
        if name in self._cache:
            return self._cache[name]
        path = self.plugins_dir / name
        if path.exists() and (path / MANIFEST_FILE).exists():
            return self._load_plugin(path)
        return None

    def _load_plugin(self, path: Path) -> PluginInfo:
        """Parse the manifest at *path*, cache the result, and return it."""
        info = PluginInfo.from_manifest(path / MANIFEST_FILE)
        info.path = path
        self._cache[info.name] = info
        return info

    # ------------------------------------------------------------------
    # Skill loading
    # ------------------------------------------------------------------

    def load_all_skills(self) -> List[Dict[str, Any]]:
        """Load skill definitions from all installed plugins.

        Returns raw skill dicts (name, description, prompt_template, etc.)
        that can be passed to the SkillManager.
        """
        # Imported lazily to avoid a circular import at module load time.
        from .skills import Skill

        skills: List[Dict[str, Any]] = []
        for plugin in self.list_installed():
            if not plugin.path:
                continue
            # Each manifest entry is a glob pattern relative to the plugin root.
            for pattern in plugin.skills:
                for skill_file in plugin.path.glob(pattern):
                    try:
                        skill = Skill.from_file(skill_file)
                        skills.append({
                            "skill": skill,
                            "plugin": plugin.name,
                        })
                    except Exception as e:
                        logger.warning("Failed to load skill %s: %s", skill_file, e)
        return skills

    # ------------------------------------------------------------------
    # Hook loading
    # ------------------------------------------------------------------

    def load_all_hooks(self) -> List[Dict[str, Any]]:
        """Load hook definitions from all installed plugins."""
        all_hooks: List[Dict[str, Any]] = []
        for plugin in self.list_installed():
            if not plugin.path:
                continue
            for hook_file_pattern in plugin.hooks:
                for hook_file in plugin.path.glob(hook_file_pattern):
                    try:
                        # Each hook file holds a JSON list of hook objects;
                        # the owning plugin is stamped onto every entry.
                        hooks = json.loads(hook_file.read_text())
                        for h in hooks:
                            h["plugin"] = plugin.name
                        all_hooks.append(h)
                    except Exception as e:
                        logger.warning("Failed to load hooks from %s: %s", hook_file, e)
        return all_hooks

    # ------------------------------------------------------------------
    # MCP config loading
    # ------------------------------------------------------------------

    def load_all_mcp_configs(self) -> List[Dict[str, Any]]:
        """Load MCP server configs from all installed plugins."""
        configs: List[Dict[str, Any]] = []
        for plugin in self.list_installed():
            if not plugin.path:
                continue
            for mcp_pattern in plugin.mcp_configs:
                for mcp_file in plugin.path.glob(mcp_pattern):
                    try:
                        # Accept either a bare JSON list of servers or an
                        # object with a "servers" key.
                        data = json.loads(mcp_file.read_text())
                        servers = data if isinstance(data, list) else data.get("servers", [])
                        for s in servers:
                            s["plugin"] = plugin.name
                            configs.append(s)
                    except Exception as e:
                        logger.warning("Failed to load MCP config from %s: %s", mcp_file, e)
        return configs
diff --git a/gitpilot/pr_tools.py b/gitpilot/pr_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..c64407024949ebaa2028e38f6521e5383a791e5a
--- /dev/null
+++ b/gitpilot/pr_tools.py
@@ -0,0 +1,174 @@
+"""CrewAI tools for GitHub Pull Request management.
+
+These tools allow agents to create, list, review, and merge pull requests.
+"""
+import asyncio
+from typing import Optional
+
+from crewai.tools import tool
+
+from .agent_tools import get_repo_context
+from . import github_pulls as gp
+
+
+def _run_async(coro):
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+ try:
+ return loop.run_until_complete(coro)
+ finally:
+ loop.close()
+
+
+def _fmt_pr(pr: dict) -> str:
+ return (
+ f"#{pr.get('number')} [{pr.get('state')}] {pr.get('title')}\n"
+ f" {pr.get('head', {}).get('ref', '?')} -> {pr.get('base', {}).get('ref', '?')}\n"
+ f" Author: {pr.get('user', {}).get('login', 'unknown')} | "
+ f"Draft: {pr.get('draft', False)}\n"
+ f" URL: {pr.get('html_url', '')}"
+ )
+
+
@tool("List pull requests")
def list_pull_requests(state: str = "open", per_page: int = 20) -> str:
    """Lists pull requests in the current repository. state: open/closed/all."""
    # NOTE: the docstring above doubles as the tool description CrewAI shows
    # the LLM for tool selection — treat it as part of the interface.
    try:
        owner, repo, token, _branch = get_repo_context()
        prs = _run_async(
            gp.list_pull_requests(owner, repo, state=state, per_page=per_page, token=token)
        )
        if not prs:
            return f"No {state} pull requests in {owner}/{repo}."
        header = f"Pull requests in {owner}/{repo} (state={state}):\n"
        return header + "\n".join(_fmt_pr(p) for p in prs)
    except Exception as e:
        # Tools never raise: agents consume the error text as the result.
        return f"Error listing PRs: {e}"


@tool("Get pull request details")
def get_pull_request(pull_number: int) -> str:
    """Gets full details of a pull request by number."""
    try:
        owner, repo, token, _branch = get_repo_context()
        pr = _run_async(gp.get_pull_request(owner, repo, pull_number, token=token))
        # Body truncated to 500 chars to keep tool output compact for the LLM.
        body = (pr.get("body") or "")[:500]
        return (
            f"PR #{pr.get('number')}: {pr.get('title')}\n"
            f"State: {pr.get('state')} | Mergeable: {pr.get('mergeable')}\n"
            f"Head: {pr.get('head', {}).get('ref')} -> Base: {pr.get('base', {}).get('ref')}\n"
            f"Author: {pr.get('user', {}).get('login', 'unknown')}\n"
            f"Additions: {pr.get('additions', 0)} | Deletions: {pr.get('deletions', 0)} | "
            f"Changed files: {pr.get('changed_files', 0)}\n"
            f"Body:\n{body}\n"
            f"URL: {pr.get('html_url', '')}"
        )
    except Exception as e:
        return f"Error getting PR: {e}"


@tool("Create a pull request")
def create_pull_request(
    title: str,
    head: str,
    base: str,
    body: str = "",
    draft: bool = False,
) -> str:
    """Creates a new pull request. head=source branch, base=target branch."""
    try:
        owner, repo, token, _branch = get_repo_context()
        # Empty body is normalised to None so the API call omits the field.
        pr = _run_async(
            gp.create_pull_request(
                owner, repo, title=title, head=head, base=base,
                body=body or None, draft=draft, token=token,
            )
        )
        return (
            f"Created PR #{pr.get('number')}: {pr.get('title')}\n"
            f"URL: {pr.get('html_url', '')}"
        )
    except Exception as e:
        return f"Error creating PR: {e}"


@tool("Merge a pull request")
def merge_pull_request(
    pull_number: int,
    merge_method: str = "merge",
    commit_title: str = "",
) -> str:
    """Merges a pull request. merge_method: merge, squash, or rebase."""
    try:
        owner, repo, token, _branch = get_repo_context()
        result = _run_async(
            gp.merge_pull_request(
                owner, repo, pull_number,
                merge_method=merge_method,
                # Empty title -> None: let GitHub generate the default title.
                commit_title=commit_title or None,
                token=token,
            )
        )
        # Defensive: only dict responses carry the merge-commit SHA.
        sha = result.get("sha", "unknown") if isinstance(result, dict) else "unknown"
        return f"PR #{pull_number} merged successfully. Merge commit: {sha}"
    except Exception as e:
        return f"Error merging PR: {e}"


@tool("List files changed in a pull request")
def list_pr_files(pull_number: int) -> str:
    """Lists all files changed in a pull request with status and patch info."""
    try:
        owner, repo, token, _branch = get_repo_context()
        files = _run_async(gp.list_pr_files(owner, repo, pull_number, token=token))
        if not files:
            return f"No files changed in PR #{pull_number}."
        lines = [f"Files changed in PR #{pull_number}:"]
        for f in files:
            # One row per file: [status] name (+adds -dels)
            lines.append(
                f" [{f.get('status', '?')}] {f.get('filename', '?')} "
                f"(+{f.get('additions', 0)} -{f.get('deletions', 0)})"
            )
        return "\n".join(lines)
    except Exception as e:
        return f"Error listing PR files: {e}"


@tool("Add a review to a pull request")
def create_pr_review(
    pull_number: int,
    body: str,
    event: str = "COMMENT",
) -> str:
    """Adds a review to a PR. event: APPROVE, REQUEST_CHANGES, or COMMENT."""
    # event is passed through verbatim; GitHub rejects values outside the
    # three documented ones, and the error surfaces via the except below.
    try:
        owner, repo, token, _branch = get_repo_context()
        review = _run_async(
            gp.create_pr_review(owner, repo, pull_number, body=body, event=event, token=token)
        )
        return f"Review submitted on PR #{pull_number} (event={event})\nURL: {review.get('html_url', '')}"
    except Exception as e:
        return f"Error creating review: {e}"


@tool("Comment on a pull request")
def add_pr_comment(pull_number: int, body: str) -> str:
    """Adds a general comment to a pull request."""
    try:
        owner, repo, token, _branch = get_repo_context()
        comment = _run_async(gp.add_pr_comment(owner, repo, pull_number, body, token=token))
        return f"Comment added to PR #{pull_number}\nURL: {comment.get('html_url', '')}"
    except Exception as e:
        return f"Error commenting on PR: {e}"


# Export all PR tools
# Registered with agents as a group; order here is the order presented.
PR_TOOLS = [
    list_pull_requests,
    get_pull_request,
    create_pull_request,
    merge_pull_request,
    list_pr_files,
    create_pr_review,
    add_pr_comment,
]
diff --git a/gitpilot/predictions.py b/gitpilot/predictions.py
new file mode 100644
index 0000000000000000000000000000000000000000..b4e4c788535d44ceade26026b0fe46c45e4d23a1
--- /dev/null
+++ b/gitpilot/predictions.py
@@ -0,0 +1,235 @@
+# gitpilot/predictions.py
+"""Predictive workflow engine β suggest next actions proactively.
+
+Analyses the current session state and recent actions to predict what
+the user likely needs next. Suggestions are scored by relevance and
+presented as actionable prompts.
+
+Based on the concept of *proactive assistance* from HCI research
+(Horvitz, 1999) and GitHub's own next-action prediction patterns.
+
+Trigger rules::
+
+ After merging a PR β suggest updating changelog
+ After creating issue β suggest assigning and labeling
+ After test failure β suggest debugging approach
+ After dep update β suggest full test suite
+ Before release β suggest version bump
+"""
+from __future__ import annotations
+
+import re
+from dataclasses import dataclass, field
+from enum import Enum
+from typing import Any, Dict, List, Optional
+
+
class ActionCategory(str, Enum):
    """Broad grouping used to bucket suggested next actions."""

    TESTING = "testing"
    DOCUMENTATION = "documentation"
    RELEASE = "release"
    REVIEW = "review"
    ISSUE_MGMT = "issue_management"
    CLEANUP = "cleanup"
    SECURITY = "security"
    DEPLOYMENT = "deployment"


@dataclass
class SuggestedAction:
    """A suggested next action for the user."""

    title: str
    description: str
    category: ActionCategory
    prompt: str  # Ready-to-use prompt if the user accepts
    relevance_score: float = 0.5  # 0.0 - 1.0
    auto_executable: bool = False  # Can be run without confirmation

    def to_dict(self) -> Dict[str, Any]:
        """JSON-friendly representation (the enum collapses to its value)."""
        payload: Dict[str, Any] = {
            "title": self.title,
            "description": self.description,
            "prompt": self.prompt,
            "relevance_score": self.relevance_score,
            "auto_executable": self.auto_executable,
        }
        payload["category"] = self.category.value
        return payload
+
+
+# ---------------------------------------------------------------------------
+# Prediction rules
+# ---------------------------------------------------------------------------
+
@dataclass
class PredictionRule:
    """A rule that fires when certain conditions are met.

    A rule matches when any of its ``trigger_patterns`` is found (regex
    search) in the lower-cased context string given to the engine.
    """

    name: str
    trigger_patterns: List[str]  # Regex patterns to match against context
    action: SuggestedAction
    cooldown_actions: int = 3  # Don't re-suggest within N actions
+
+
# Built-in rules; custom rules registered via PredictiveEngine.add_rule are
# consulted before these. Patterns are matched against lower-cased context.
_DEFAULT_RULES: List[PredictionRule] = [
    PredictionRule(
        name="post_merge_changelog",
        trigger_patterns=[r"\bmerge\b.*\bpr\b", r"\bmerged\b", r"\bpull request.*merged\b"],
        action=SuggestedAction(
            title="Update Changelog",
            description="A PR was just merged. Consider updating the changelog.",
            category=ActionCategory.DOCUMENTATION,
            prompt="Update the CHANGELOG.md with the latest merged PR changes.",
            relevance_score=0.85,
        ),
    ),
    PredictionRule(
        name="post_issue_create_label",
        trigger_patterns=[r"\bcreate.*issue\b", r"\bnew issue\b", r"\bissue.*created\b"],
        action=SuggestedAction(
            title="Label and Assign Issue",
            description="A new issue was created. Consider adding labels and assignees.",
            category=ActionCategory.ISSUE_MGMT,
            prompt="Add appropriate labels and assign the newly created issue.",
            relevance_score=0.75,
        ),
    ),
    PredictionRule(
        name="post_test_failure_debug",
        trigger_patterns=[r"\btest.*fail\b", r"\bfailed\b.*\btest\b", r"\berror.*pytest\b"],
        action=SuggestedAction(
            title="Debug Test Failures",
            description="Tests failed. Let me help identify the root cause.",
            category=ActionCategory.TESTING,
            prompt="Analyze the test failures and suggest fixes for each failing test.",
            relevance_score=0.95,
        ),
    ),
    PredictionRule(
        name="post_dep_update_test",
        trigger_patterns=[r"\bdependenc\w+.*update\b", r"\bupgrade\b.*\bpackage\b", r"\bnpm update\b"],
        action=SuggestedAction(
            title="Run Full Test Suite",
            description="Dependencies were updated. Run the full test suite to verify compatibility.",
            category=ActionCategory.TESTING,
            prompt="Run the complete test suite to verify no regressions from the dependency update.",
            relevance_score=0.90,
        ),
    ),
    PredictionRule(
        name="pre_release_version",
        trigger_patterns=[r"\brelease\b", r"\bversion bump\b", r"\btag\b.*\brelease\b"],
        action=SuggestedAction(
            title="Version Bump & Tag",
            description="Preparing a release. Consider bumping the version number.",
            category=ActionCategory.RELEASE,
            prompt="Bump the version number, update the changelog, and create a release tag.",
            relevance_score=0.80,
        ),
    ),
    PredictionRule(
        name="post_edit_lint",
        trigger_patterns=[r"\bedit\b.*\bfile\b", r"\bmodif\w+\b.*\bcode\b", r"\bwrote\b.*\bfile\b"],
        action=SuggestedAction(
            title="Run Linter",
            description="Files were modified. Run the linter to check for style issues.",
            category=ActionCategory.CLEANUP,
            prompt="Run the project linter on the modified files and fix any issues.",
            relevance_score=0.65,
            # The only default rule safe to run without user confirmation.
            auto_executable=True,
        ),
    ),
    PredictionRule(
        name="security_scan_suggestion",
        trigger_patterns=[r"\bauth\w*\b", r"\bpassword\b", r"\bsecret\b", r"\btoken\b.*\bhandl\b"],
        action=SuggestedAction(
            title="Security Review",
            description="Security-sensitive code was touched. Consider a security review.",
            category=ActionCategory.SECURITY,
            prompt="Review the security-sensitive changes for potential vulnerabilities.",
            relevance_score=0.88,
        ),
    ),
    PredictionRule(
        name="post_commit_pr",
        trigger_patterns=[r"\bcommit\w*\b.*\bbranch\b", r"\bpush\w*\b.*\bfeature\b"],
        action=SuggestedAction(
            title="Create Pull Request",
            description="Changes were committed to a feature branch. Create a PR for review.",
            category=ActionCategory.REVIEW,
            prompt="Create a pull request for the current feature branch with a description of changes.",
            relevance_score=0.70,
        ),
    ),
]
+
+
class PredictiveEngine:
    """Predict what the user needs next based on context.

    Usage::

        engine = PredictiveEngine()
        suggestions = engine.predict("Tests failed in auth module")
        for s in suggestions:
            print(f"[{s.relevance_score}] {s.title}: {s.prompt}")
    """

    def __init__(
        self,
        custom_rules: Optional[List[PredictionRule]] = None,
        min_score: float = 0.5,
    ) -> None:
        """``custom_rules`` are consulted before the built-in defaults;
        ``min_score`` filters weak suggestions out of :meth:`predict`."""
        self.rules = (custom_rules or []) + _DEFAULT_RULES
        self.min_score = min_score
        # Names of recently fired rules, newest last; drives per-rule cooldown.
        self._recent_suggestions: List[str] = []

    def predict(self, context: str) -> List[SuggestedAction]:
        """Predict next actions based on the given context string.

        The context can be:
        - The last user message
        - A summary of recent session activity
        - An agent's output/result
        """
        matches: List[SuggestedAction] = []
        context_lower = context.lower()

        for rule in self.rules:
            # Honour each rule's own cooldown window instead of a hardcoded 3
            # (the cooldown_actions field was previously declared but ignored).
            # Guard window == 0: list[-0:] would be the *whole* history.
            window = rule.cooldown_actions
            if window > 0 and rule.name in self._recent_suggestions[-window:]:
                continue
            for pattern in rule.trigger_patterns:
                if re.search(pattern, context_lower):
                    matches.append(rule.action)
                    self._recent_suggestions.append(rule.name)
                    break  # first matching pattern is enough

        return self.score_and_sort(matches)

    def score_and_sort(
        self, actions: List[SuggestedAction],
    ) -> List[SuggestedAction]:
        """Filter by minimum score and sort by relevance (descending)."""
        filtered = [a for a in actions if a.relevance_score >= self.min_score]
        return sorted(filtered, key=lambda a: a.relevance_score, reverse=True)

    def add_rule(self, rule: PredictionRule) -> None:
        """Add a custom prediction rule."""
        self.rules.insert(0, rule)  # Custom rules take priority

    def clear_history(self) -> None:
        """Clear the recent suggestion history (resets all cooldowns)."""
        self._recent_suggestions.clear()

    def list_rules(self) -> List[Dict[str, Any]]:
        """List all prediction rules."""
        return [
            {
                "name": r.name,
                "trigger_patterns": r.trigger_patterns,
                "action_title": r.action.title,
                "category": r.action.category.value,
            }
            for r in self.rules
        ]
diff --git a/gitpilot/py.typed b/gitpilot/py.typed
new file mode 100644
index 0000000000000000000000000000000000000000..cb5707d7ddc4ba1712aadae9fc34250aa1795291
--- /dev/null
+++ b/gitpilot/py.typed
@@ -0,0 +1,2 @@
+# Marker file for PEP 561
+# This package supports type checking
diff --git a/gitpilot/search_tools.py b/gitpilot/search_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..8cb5837f893ed475a90d3939cd75bcfe77198933
--- /dev/null
+++ b/gitpilot/search_tools.py
@@ -0,0 +1,212 @@
+"""CrewAI tools for GitHub Search operations.
+
+Provides tools for searching code, issues, repositories, and users
+across GitHub.
+"""
+import asyncio
+from typing import Optional
+
+from crewai.tools import tool
+
+from .agent_tools import get_repo_context
+from . import github_search as gs
+
+
+def _run_async(coro):
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+ try:
+ return loop.run_until_complete(coro)
+ finally:
+ loop.close()
+
+
@tool("Search code in repository")
def search_code(
    query: str,
    language: str = "",
    path: str = "",
    per_page: int = 20,
) -> str:
    """Searches for code by keywords, symbols, or patterns. Scoped to the current repository by default. Optional: language filter, path filter."""
    # NOTE: docstrings on these tools are the descriptions CrewAI shows the
    # LLM for tool selection — treat them as part of the interface.
    try:
        owner, repo, token, _branch = get_repo_context()
        # Empty-string filters are normalised to None so they are omitted
        # from the search qualifiers entirely.
        result = _run_async(
            gs.search_code(
                query,
                owner=owner,
                repo=repo,
                language=language or None,
                path=path or None,
                per_page=per_page,
                token=token,
            )
        )
        items = result.get("items", [])
        total = result.get("total_count", 0)
        if not items:
            return f"No code matches found for '{query}' in {owner}/{repo}."
        lines = [f"Code search results for '{query}' in {owner}/{repo} ({total} total):"]
        for item in items:
            lines.append(
                f" {item.get('path', '?')} "
                f"(score: {item.get('score', '?')})\n"
                f" URL: {item.get('html_url', '')}"
            )
        return "\n".join(lines)
    except Exception as e:
        # Tools never raise: agents consume the error text as the result.
        return f"Error searching code: {e}"


@tool("Search code globally")
def search_code_global(
    query: str,
    language: str = "",
    per_page: int = 10,
) -> str:
    """Searches for code across ALL of GitHub (not scoped to a repo). Use for finding examples or patterns globally."""
    try:
        # Repo context is fetched only for the token; owner/repo are unused
        # because this search is intentionally unscoped.
        _owner, _repo, token, _branch = get_repo_context()
        result = _run_async(
            gs.search_code(query, language=language or None, per_page=per_page, token=token)
        )
        items = result.get("items", [])
        total = result.get("total_count", 0)
        if not items:
            return f"No global code matches found for '{query}'."
        lines = [f"Global code search for '{query}' ({total} total):"]
        for item in items:
            repo_name = item.get("repository", {}).get("full_name", "?")
            lines.append(
                f" [{repo_name}] {item.get('path', '?')}\n"
                f" URL: {item.get('html_url', '')}"
            )
        return "\n".join(lines)
    except Exception as e:
        return f"Error searching code globally: {e}"


@tool("Search issues and pull requests")
def search_issues(
    query: str,
    state: str = "",
    label: str = "",
    is_pr: str = "",
    per_page: int = 20,
) -> str:
    """Searches issues/PRs by keywords. Scoped to current repo. is_pr: 'true' for PRs only, 'false' for issues only, empty for both."""
    try:
        owner, repo, token, _branch = get_repo_context()
        # is_pr arrives as a string from the LLM; map it to a tristate bool
        # (None = both kinds).
        pr_flag = None
        if is_pr.lower() == "true":
            pr_flag = True
        elif is_pr.lower() == "false":
            pr_flag = False
        result = _run_async(
            gs.search_issues(
                query,
                owner=owner,
                repo=repo,
                state=state or None,
                label=label or None,
                is_pr=pr_flag,
                per_page=per_page,
                token=token,
            )
        )
        items = result.get("items", [])
        total = result.get("total_count", 0)
        if not items:
            return f"No matching issues/PRs for '{query}' in {owner}/{repo}."
        lines = [f"Issue/PR search for '{query}' ({total} total):"]
        for item in items:
            # The search API returns PRs as issues; the pull_request key is
            # the documented way to tell them apart.
            kind = "PR" if "pull_request" in item else "Issue"
            lines.append(
                f" [{kind}] #{item.get('number')} [{item.get('state')}] {item.get('title')}\n"
                f" URL: {item.get('html_url', '')}"
            )
        return "\n".join(lines)
    except Exception as e:
        return f"Error searching issues: {e}"


@tool("Search GitHub users and organizations")
def search_users(
    query: str,
    type_filter: str = "",
    location: str = "",
    language: str = "",
    per_page: int = 10,
) -> str:
    """Searches for GitHub users or organizations. type_filter: 'user' or 'org'. Optional: location, language."""
    try:
        _owner, _repo, token, _branch = get_repo_context()
        result = _run_async(
            gs.search_users(
                query,
                type_filter=type_filter or None,
                location=location or None,
                language=language or None,
                per_page=per_page,
                token=token,
            )
        )
        items = result.get("items", [])
        total = result.get("total_count", 0)
        if not items:
            return f"No users/orgs found for '{query}'."
        lines = [f"User search for '{query}' ({total} total):"]
        for item in items:
            lines.append(
                f" @{item.get('login', '?')} ({item.get('type', '?')})\n"
                f" URL: {item.get('html_url', '')}"
            )
        return "\n".join(lines)
    except Exception as e:
        return f"Error searching users: {e}"
+
+
@tool("Search repositories")
def search_repositories(
    query: str,
    language: str = "",
    sort: str = "",
    per_page: int = 10,
) -> str:
    """Searches for repositories across GitHub. Optional: language filter, sort (stars/forks/updated)."""
    try:
        # Repo context fetched only for the token; this search is unscoped.
        _owner, _repo, token, _branch = get_repo_context()
        result = _run_async(
            gs.search_repositories(
                query,
                language=language or None,
                sort=sort or None,
                per_page=per_page,
                token=token,
            )
        )
        items = result.get("items", [])
        total = result.get("total_count", 0)
        if not items:
            return f"No repositories found for '{query}'."
        lines = [f"Repository search for '{query}' ({total} total):"]
        for item in items:
            # GitHub returns "description": null for many repos; .get() with a
            # default does NOT cover that (the key exists), and slicing None
            # raised TypeError before. `or` handles both missing and null.
            description = (item.get("description") or "No description")[:100]
            lines.append(
                f" {item.get('full_name', '?')} "
                f"({item.get('stargazers_count', 0)} stars)\n"
                f" {description}\n"
                f" URL: {item.get('html_url', '')}"
            )
        return "\n".join(lines)
    except Exception as e:
        return f"Error searching repositories: {e}"
+
+
# Export all search tools
# Registered with agents as a group; order here is the order presented.
SEARCH_TOOLS = [
    search_code,
    search_code_global,
    search_issues,
    search_users,
    search_repositories,
]
diff --git a/gitpilot/security.py b/gitpilot/security.py
new file mode 100644
index 0000000000000000000000000000000000000000..8509b139908c9c44b37f2c965ef70f5d7798e24b
--- /dev/null
+++ b/gitpilot/security.py
@@ -0,0 +1,513 @@
+# gitpilot/security.py
+"""AI-powered security scanner β beyond traditional SAST.
+
+Combines pattern-based detection with semantic analysis to find
+vulnerabilities that static analysis tools typically miss:
+
+β’ **Secret detection** β API keys, tokens, passwords in code and config
+β’ **Dependency audit** β known CVEs in transitive dependency trees
+β’ **Code-flow analysis** β injection, XSS, SSRF via taint-style tracking
+β’ **Configuration review** β insecure defaults, overly permissive CORS, etc.
+β’ **AI reasoning** β uses LLM to evaluate context-dependent risks
+
+Inspired by:
+- OWASP Top 10 (2021) categorisation
+- Semgrep's rule-based approach with semantic matching
+- GitHub Advanced Security's CodeQL data-flow analysis
+- Google's *"Fixing a Trillion Bugs"* (2024) on AI-assisted vulnerability detection
+"""
+from __future__ import annotations
+
+import os
+import re
+from dataclasses import dataclass, field
+from enum import Enum
+from pathlib import Path
+from typing import Any
+
+# ---------------------------------------------------------------------------
+# Enums & data models
+# ---------------------------------------------------------------------------
+
+class Severity(str, Enum):
+    """Finding severity levels, ordered from most to least urgent."""
+
+    CRITICAL = "critical"
+    HIGH = "high"
+    MEDIUM = "medium"
+    LOW = "low"
+    INFO = "info"
+
+
+class VulnerabilityCategory(str, Enum):
+    """Vulnerability classes used to tag findings (roughly OWASP-aligned)."""
+
+    SECRET_LEAK = "secret_leak"
+    INJECTION = "injection"
+    XSS = "xss"
+    SSRF = "ssrf"
+    AUTH_ISSUE = "auth_issue"
+    CRYPTO_WEAKNESS = "crypto_weakness"
+    INSECURE_CONFIG = "insecure_config"
+    DEPENDENCY_CVE = "dependency_cve"
+    PATH_TRAVERSAL = "path_traversal"
+    SENSITIVE_DATA = "sensitive_data"
+
+
+@dataclass
+class Finding:
+    """A single security finding.
+
+    Produced by the pattern matchers in SecurityScanner and serialised via
+    to_dict() for JSON/API consumers.
+    """
+
+    rule_id: str  # stable rule identifier, e.g. "SEC001"
+    title: str  # short human-readable rule title
+    description: str  # one-line description of the suspected issue
+    severity: Severity
+    category: VulnerabilityCategory
+    file_path: str = ""  # offending file path ("" for diff-only scans)
+    line_number: int = 0  # 1-based line number; 0 when unknown
+    snippet: str = ""  # offending line, possibly redacted/truncated
+    recommendation: str = ""  # suggested remediation text
+    cwe_id: str | None = None  # CWE identifier, e.g. "CWE-798", when known
+    confidence: float = 0.8  # 0.0 - 1.0
+
+    def to_dict(self) -> dict[str, Any]:
+        """Serialise to a JSON-friendly dict (enums flattened to their values)."""
+        return {
+            "rule_id": self.rule_id,
+            "title": self.title,
+            "description": self.description,
+            "severity": self.severity.value,
+            "category": self.category.value,
+            "file_path": self.file_path,
+            "line_number": self.line_number,
+            "snippet": self.snippet,
+            "recommendation": self.recommendation,
+            "cwe_id": self.cwe_id,
+            "confidence": self.confidence,
+        }
+
+
+@dataclass
+class ScanResult:
+    """Aggregate result of a security scan."""
+
+    findings: list[Finding] = field(default_factory=list)  # all findings, in scan order
+    files_scanned: int = 0  # number of files actually read
+    scan_duration_ms: float = 0.0  # wall-clock duration of the scan
+    summary: dict[str, int] = field(default_factory=dict)  # severity value -> count
+
+    def to_dict(self) -> dict[str, Any]:
+        """Serialise to a JSON-friendly dict, adding a total_findings count."""
+        return {
+            "findings": [f.to_dict() for f in self.findings],
+            "files_scanned": self.files_scanned,
+            "scan_duration_ms": self.scan_duration_ms,
+            "summary": self.summary,
+            "total_findings": len(self.findings),
+        }
+
+    def by_severity(self, severity: Severity) -> list[Finding]:
+        """Return only the findings with the given severity."""
+        return [f for f in self.findings if f.severity == severity]
+
+
+# ---------------------------------------------------------------------------
+# Secret detection patterns
+# ---------------------------------------------------------------------------
+
+# Each entry: rule_id (stable ID), title, regex pattern, severity, CWE.
+# Matched line-by-line against *every* scannable file type (see
+# SecurityScanner._check_secrets); matches are partially redacted in snippets.
+_SECRET_PATTERNS: list[dict[str, Any]] = [
+    {
+        "rule_id": "SEC001",
+        "title": "AWS Access Key",
+        "pattern": r"(?:AKIA|ABIA|ACCA|ASIA)[0-9A-Z]{16}",
+        "severity": Severity.CRITICAL,
+        "cwe": "CWE-798",
+    },
+    {
+        "rule_id": "SEC002",
+        "title": "GitHub Token",
+        # Covers ghp_/gho_/ghu_/ghs_/ghr_ token prefixes.
+        "pattern": r"gh[pousr]_[A-Za-z0-9_]{36,255}",
+        "severity": Severity.CRITICAL,
+        "cwe": "CWE-798",
+    },
+    {
+        "rule_id": "SEC003",
+        "title": "Generic API Key",
+        "pattern": r"(?i)(?:api[_-]?key|apikey)\s*[:=]\s*['\"]([A-Za-z0-9_\-]{20,})['\"]",
+        "severity": Severity.HIGH,
+        "cwe": "CWE-798",
+    },
+    {
+        "rule_id": "SEC004",
+        "title": "Password in Code",
+        "pattern": r"(?i)(?:password|passwd|pwd)\s*[:=]\s*['\"]([^'\"]{8,})['\"]",
+        "severity": Severity.HIGH,
+        "cwe": "CWE-798",
+    },
+    {
+        "rule_id": "SEC005",
+        "title": "Private Key",
+        "pattern": r"-----BEGIN (?:RSA |EC |DSA )?PRIVATE KEY-----",
+        "severity": Severity.CRITICAL,
+        "cwe": "CWE-321",
+    },
+    {
+        "rule_id": "SEC006",
+        "title": "JWT Secret",
+        "pattern": r"(?i)(?:jwt[_-]?secret|token[_-]?secret)\s*[:=]\s*['\"]([^'\"]{8,})['\"]",
+        "severity": Severity.HIGH,
+        "cwe": "CWE-798",
+    },
+    {
+        "rule_id": "SEC007",
+        "title": "Slack Token",
+        "pattern": r"xox[bporas]-[0-9]{10,13}-[0-9]{10,13}[a-zA-Z0-9-]*",
+        "severity": Severity.HIGH,
+        "cwe": "CWE-798",
+    },
+]
+
+# ---------------------------------------------------------------------------
+# Code vulnerability patterns
+# ---------------------------------------------------------------------------
+
+# Like _SECRET_PATTERNS, but each entry additionally carries a category,
+# a remediation hint, and an optional "file_types" set restricting which
+# file extensions the rule applies to (empty/missing = all extensions).
+_CODE_PATTERNS: list[dict[str, Any]] = [
+    {
+        "rule_id": "SEC100",
+        "title": "SQL Injection Risk",
+        "pattern": r"(?i)(?:execute|cursor\.execute|\.query)\s*\(\s*[f'\"].*\{.*\}",
+        "severity": Severity.HIGH,
+        "category": VulnerabilityCategory.INJECTION,
+        "cwe": "CWE-89",
+        "recommendation": "Use parameterised queries instead of string interpolation.",
+        "file_types": {".py", ".rb", ".js", ".ts"},
+    },
+    {
+        "rule_id": "SEC101",
+        "title": "Command Injection Risk",
+        "pattern": r"(?i)(?:os\.system|subprocess\.call|subprocess\.Popen|exec|eval)\s*\(.*[\+f\{]",
+        "severity": Severity.CRITICAL,
+        "category": VulnerabilityCategory.INJECTION,
+        "cwe": "CWE-78",
+        "recommendation": "Use subprocess with a list of arguments instead of shell=True.",
+        "file_types": {".py"},
+    },
+    {
+        "rule_id": "SEC102",
+        "title": "Cross-Site Scripting (XSS)",
+        "pattern": r"(?i)(?:innerHTML|outerHTML|document\.write|\.html\()\s*[=\(]\s*[^'\"]*[\+`\$]",
+        "severity": Severity.HIGH,
+        "category": VulnerabilityCategory.XSS,
+        "cwe": "CWE-79",
+        "recommendation": "Sanitise user input before inserting into the DOM.",
+        "file_types": {".js", ".ts", ".jsx", ".tsx", ".html"},
+    },
+    {
+        "rule_id": "SEC103",
+        "title": "Path Traversal",
+        "pattern": r"(?i)(?:open|read_file|send_file|join)\s*\(.*(?:request\.|params\[|input\()",
+        "severity": Severity.HIGH,
+        "category": VulnerabilityCategory.PATH_TRAVERSAL,
+        "cwe": "CWE-22",
+        "recommendation": "Validate and canonicalise file paths. Reject path traversal sequences.",
+        "file_types": {".py", ".js", ".ts", ".rb", ".go"},
+    },
+    {
+        "rule_id": "SEC104",
+        "title": "Insecure Random",
+        "pattern": r"(?i)\brandom\b\.(?:random|randint|choice|seed)\b",
+        "severity": Severity.MEDIUM,
+        "category": VulnerabilityCategory.CRYPTO_WEAKNESS,
+        "cwe": "CWE-330",
+        "recommendation": "Use secrets module or os.urandom() for security-sensitive randomness.",
+        "file_types": {".py"},
+    },
+    {
+        "rule_id": "SEC105",
+        "title": "SSRF Risk",
+        "pattern": r"(?i)(?:requests\.get|httpx\.get|fetch|urllib\.request)\s*\(.*(?:request\.|params|input)",
+        "severity": Severity.HIGH,
+        "category": VulnerabilityCategory.SSRF,
+        "cwe": "CWE-918",
+        "recommendation": "Validate and allowlist URLs before making HTTP requests.",
+        "file_types": {".py", ".js", ".ts"},
+    },
+    {
+        "rule_id": "SEC106",
+        "title": "Weak Cryptographic Algorithm",
+        "pattern": r"(?i)(?:md5|sha1)\s*\(",
+        "severity": Severity.MEDIUM,
+        "category": VulnerabilityCategory.CRYPTO_WEAKNESS,
+        "cwe": "CWE-327",
+        "recommendation": "Use SHA-256 or stronger for cryptographic purposes.",
+        "file_types": {".py", ".js", ".ts", ".go", ".java"},
+    },
+    {
+        "rule_id": "SEC107",
+        "title": "Insecure CORS Configuration",
+        "pattern": r"""(?i)(?:access-control-allow-origin|cors_allow_origins?)\s*[:=]\s*['"]\*['"]""",
+        "severity": Severity.MEDIUM,
+        "category": VulnerabilityCategory.INSECURE_CONFIG,
+        "cwe": "CWE-942",
+        "recommendation": "Restrict CORS to specific trusted origins instead of '*'.",
+        "file_types": {".py", ".js", ".ts", ".json", ".yaml", ".yml"},
+    },
+    {
+        "rule_id": "SEC108",
+        "title": "Disabled SSL Verification",
+        "pattern": r"(?i)(?:verify\s*=\s*False|rejectUnauthorized\s*[:=]\s*false|InsecureSkipVerify\s*[:=]\s*true)",
+        "severity": Severity.HIGH,
+        "category": VulnerabilityCategory.INSECURE_CONFIG,
+        "cwe": "CWE-295",
+        "recommendation": "Never disable SSL certificate verification in production.",
+        "file_types": {".py", ".js", ".ts", ".go"},
+    },
+]
+
+# Directories to always skip
+# (VCS metadata, dependency caches, build output — never worth scanning).
+_SKIP_DIRS = {
+    ".git", "node_modules", "__pycache__", ".tox", ".venv",
+    "venv", "dist", "build", ".eggs", ".mypy_cache", ".ruff_cache",
+}
+
+# File extensions to scan
+# Files with any other extension are ignored by SecurityScanner._walk().
+_SCANNABLE_EXTENSIONS = {
+    ".py", ".js", ".ts", ".jsx", ".tsx", ".rb", ".go", ".java",
+    ".rs", ".c", ".cpp", ".h", ".cs", ".php", ".sh", ".bash",
+    ".yaml", ".yml", ".json", ".toml", ".ini", ".cfg", ".env",
+    ".html", ".xml", ".sql",
+}
+
+
+# ---------------------------------------------------------------------------
+# Security scanner
+# ---------------------------------------------------------------------------
+
+class SecurityScanner:
+ """AI-powered security scanner with pattern and semantic analysis.
+
+ Usage::
+
+ scanner = SecurityScanner()
+ result = scanner.scan_directory("/path/to/repo")
+ for finding in result.findings:
+ print(f"[{finding.severity.value}] {finding.title} in {finding.file_path}:{finding.line_number}")
+ """
+
+ def __init__(
+ self,
+ extra_secret_patterns: list[dict[str, Any]] | None = None,
+ extra_code_patterns: list[dict[str, Any]] | None = None,
+ min_confidence: float = 0.5,
+ ) -> None:
+ self.secret_patterns = _SECRET_PATTERNS + (extra_secret_patterns or [])
+ self.code_patterns = _CODE_PATTERNS + (extra_code_patterns or [])
+ self.min_confidence = min_confidence
+
+ # --- Public API -------------------------------------------------------
+
+ def scan_directory(self, directory: str) -> ScanResult:
+ """Recursively scan a directory for security issues."""
+ import time
+
+ start = time.monotonic()
+ result = ScanResult()
+ root = Path(directory)
+
+ if not root.is_dir():
+ return result
+
+ for path in self._walk(root):
+ findings = self.scan_file(str(path))
+ result.findings.extend(findings)
+ result.files_scanned += 1
+
+ result.scan_duration_ms = (time.monotonic() - start) * 1000
+ result.summary = self._build_summary(result.findings)
+ return result
+
+ def scan_file(self, file_path: str) -> list[Finding]:
+ """Scan a single file for security issues."""
+ findings: list[Finding] = []
+ path = Path(file_path)
+ suffix = path.suffix.lower()
+
+ try:
+ content = path.read_text(errors="replace")
+ except (OSError, UnicodeDecodeError):
+ return findings
+
+ lines = content.splitlines()
+
+ # Secret detection (all file types)
+ findings.extend(self._check_secrets(lines, file_path))
+
+ # Code pattern detection (filtered by file type)
+ findings.extend(self._check_code_patterns(lines, file_path, suffix))
+
+ # Filter by confidence threshold
+ return [f for f in findings if f.confidence >= self.min_confidence]
+
+ def scan_diff(self, diff_text: str) -> list[Finding]:
+ """Scan a git diff for security issues in added lines only.
+
+ This is useful for CI/CD pipelines to check only new changes.
+ """
+ findings: list[Finding] = []
+ current_file = ""
+ current_line = 0
+
+ for line in diff_text.splitlines():
+ # Track file name
+ if line.startswith("+++ b/"):
+ current_file = line[6:]
+ continue
+ # Track line numbers from hunk headers
+ if line.startswith("@@"):
+ match = re.search(r"\+(\d+)", line)
+ if match:
+ current_line = int(match.group(1)) - 1
+ continue
+ # Only scan added lines
+ if line.startswith("+") and not line.startswith("+++"):
+ current_line += 1
+ added_text = line[1:]
+ suffix = Path(current_file).suffix.lower() if current_file else ""
+
+ for sp in self.secret_patterns:
+ if re.search(sp["pattern"], added_text):
+ findings.append(Finding(
+ rule_id=sp["rule_id"],
+ title=sp["title"],
+ description=f"Potential {sp['title'].lower()} found in diff.",
+ severity=sp["severity"],
+ category=VulnerabilityCategory.SECRET_LEAK,
+ file_path=current_file,
+ line_number=current_line,
+ snippet=added_text.strip()[:200],
+ recommendation="Remove the secret and rotate it immediately.",
+ cwe_id=sp.get("cwe"),
+ ))
+
+ for cp in self.code_patterns:
+ file_types = cp.get("file_types", set())
+ if file_types and suffix not in file_types:
+ continue
+ if re.search(cp["pattern"], added_text):
+ findings.append(Finding(
+ rule_id=cp["rule_id"],
+ title=cp["title"],
+ description=f"Potential {cp['title'].lower()} in new code.",
+ severity=cp["severity"],
+ category=cp["category"],
+ file_path=current_file,
+ line_number=current_line,
+ snippet=added_text.strip()[:200],
+ recommendation=cp.get("recommendation", ""),
+ cwe_id=cp.get("cwe"),
+ ))
+ elif not line.startswith("-"):
+ current_line += 1
+
+ return findings
+
+ # --- Internal helpers -------------------------------------------------
+
+ def _walk(self, root: Path):
+ """Walk directory, skipping non-scannable paths."""
+ for entry in sorted(root.iterdir()):
+ if entry.name.startswith(".") and entry.name in _SKIP_DIRS:
+ continue
+ if entry.is_dir():
+ if entry.name in _SKIP_DIRS:
+ continue
+ yield from self._walk(entry)
+ elif entry.is_file() and entry.suffix.lower() in _SCANNABLE_EXTENSIONS:
+ yield entry
+
+ def _check_secrets(self, lines: list[str], file_path: str) -> list[Finding]:
+ """Check all lines for secret patterns."""
+ findings: list[Finding] = []
+ # Skip likely test/fixture files for lower confidence
+ is_test = "test" in file_path.lower() or "fixture" in file_path.lower()
+
+ for i, line in enumerate(lines, start=1):
+ for sp in self.secret_patterns:
+ if re.search(sp["pattern"], line):
+ findings.append(Finding(
+ rule_id=sp["rule_id"],
+ title=sp["title"],
+ description=f"Potential {sp['title'].lower()} detected.",
+ severity=sp["severity"],
+ category=VulnerabilityCategory.SECRET_LEAK,
+ file_path=file_path,
+ line_number=i,
+ snippet=self._redact(line.strip(), sp["pattern"]),
+ recommendation="Remove the secret from source code and rotate it.",
+ cwe_id=sp.get("cwe"),
+ confidence=0.6 if is_test else 0.9,
+ ))
+ return findings
+
+ def _check_code_patterns(
+ self, lines: list[str], file_path: str, suffix: str,
+ ) -> list[Finding]:
+ """Check lines against code vulnerability patterns."""
+ findings: list[Finding] = []
+ for i, line in enumerate(lines, start=1):
+ for cp in self.code_patterns:
+ file_types = cp.get("file_types", set())
+ if file_types and suffix not in file_types:
+ continue
+ if re.search(cp["pattern"], line):
+ findings.append(Finding(
+ rule_id=cp["rule_id"],
+ title=cp["title"],
+ description=f"Potential {cp['title'].lower()} vulnerability.",
+ severity=cp["severity"],
+ category=cp["category"],
+ file_path=file_path,
+ line_number=i,
+ snippet=line.strip()[:200],
+ recommendation=cp.get("recommendation", ""),
+ cwe_id=cp.get("cwe"),
+ ))
+ return findings
+
+ @staticmethod
+ def _redact(text: str, pattern: str) -> str:
+ """Partially redact matched secrets in snippets."""
+ def _mask(m: re.Match) -> str:
+ val = m.group(0)
+ if len(val) <= 8:
+ return val[:2] + "***"
+ return val[:4] + "***" + val[-4:]
+
+ return re.sub(pattern, _mask, text)[:200]
+
+ @staticmethod
+ def _build_summary(findings: list[Finding]) -> dict[str, int]:
+ """Build a severity summary dict."""
+ summary: dict[str, int] = {}
+ for f in findings:
+ key = f.severity.value
+ summary[key] = summary.get(key, 0) + 1
+ return summary
+
+
+def scan_current_workspace(path: str) -> dict:
+    """Lightweight API-friendly entry point for quick action security scan.
+
+    Returns normalized diagnostics suitable for the extension quick action.
+
+    Args:
+        path: Directory to scan.
+
+    Returns:
+        On success: {"success": True, "files_scanned", "scan_duration_ms",
+        "findings" (list of finding dicts), "summary" (severity -> count)}.
+        On failure (path is not a directory): {"success": False, "error",
+        "findings": [], "summary": {}}.
+    """
+
+    if not os.path.isdir(path):
+        return {
+            "success": False,
+            "error": f"Path is not a directory: {path}",
+            "findings": [],
+            "summary": {},
+        }
+
+    scanner = SecurityScanner()
+    result = scanner.scan_directory(path)
+    return {
+        "success": True,
+        "files_scanned": result.files_scanned,
+        "scan_duration_ms": result.scan_duration_ms,
+        "findings": [f.to_dict() for f in result.findings],
+        "summary": result.summary,
+    }
diff --git a/gitpilot/session.py b/gitpilot/session.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9401083a2297bdf139837881fe43ae98e22f2f5
--- /dev/null
+++ b/gitpilot/session.py
@@ -0,0 +1,282 @@
+# gitpilot/session.py
+"""Session persistence, resumption, and checkpoint management.
+
+Sessions track the full conversation and workspace state. Checkpoints
+snapshot the workspace at key moments so users can rewind.
+"""
+from __future__ import annotations
+
+import json
+import logging
+import os
+import shutil
+import uuid
+from dataclasses import asdict, dataclass, field
+from datetime import UTC, datetime
+from pathlib import Path
+from typing import Any
+
+logger = logging.getLogger(__name__)
+
+SESSION_ROOT = Path.home() / ".gitpilot" / "sessions"
+
+
+@dataclass
+class Message:
+    """A single chat message in a session transcript."""
+
+    role: str  # user | assistant | system
+    content: str
+    timestamp: str = field(
+        default_factory=lambda: datetime.now(UTC).isoformat(),
+    )
+    metadata: dict[str, Any] = field(default_factory=dict)  # free-form extras (tool info, etc.)
+
+
+@dataclass
+class Checkpoint:
+    """A point-in-time marker in a session, optionally with a workspace snapshot."""
+
+    id: str = field(default_factory=lambda: uuid.uuid4().hex[:12])
+    message_index: int = 0  # number of messages present when the checkpoint was taken
+    description: str = ""
+    timestamp: str = field(
+        default_factory=lambda: datetime.now(UTC).isoformat(),
+    )
+    snapshot_path: str | None = None  # path to a .tar.gz workspace archive, if one was taken
+
+
+@dataclass
+class Session:
+ id: str = field(default_factory=lambda: uuid.uuid4().hex[:16])
+ name: str | None = None
+ repo_full_name: str | None = None
+ branch: str | None = None
+ messages: list[Message] = field(default_factory=list)
+ checkpoints: list[Checkpoint] = field(default_factory=list)
+ created_at: str = field(
+ default_factory=lambda: datetime.now(UTC).isoformat(),
+ )
+ updated_at: str = field(
+ default_factory=lambda: datetime.now(UTC).isoformat(),
+ )
+ pr_number: int | None = None
+ status: str = "active" # active | paused | completed
+ metadata: dict[str, Any] = field(default_factory=dict)
+
+ # Session mode fields
+ mode: str | None = None # "folder" | "local_git" | "github"
+ folder_path: str | None = None
+ repo_root: str | None = None
+
+ # Multi-repo context support
+ # Each entry: {"full_name": "owner/repo", "branch": "main", "mode": "read"|"write"}
+ repos: list[dict[str, Any]] = field(default_factory=list)
+ active_repo: str | None = None # full_name of the write-target repo
+
+ def add_message(self, role: str, content: str, **meta):
+ self.messages.append(Message(role=role, content=content, metadata=meta))
+ self.updated_at = datetime.now(UTC).isoformat()
+
+ def to_dict(self) -> dict[str, Any]:
+ return asdict(self)
+
+ @classmethod
+ def from_dict(cls, data: dict[str, Any]) -> Session:
+ data = dict(data) # shallow copy
+ data["messages"] = [Message(**m) for m in data.get("messages", [])]
+ data["checkpoints"] = [Checkpoint(**c) for c in data.get("checkpoints", [])]
+
+ # Backwards-compatible migration: populate repos from legacy single-repo
+ if not data.get("repos") and data.get("repo_full_name"):
+ data["repos"] = [{
+ "full_name": data["repo_full_name"],
+ "branch": data.get("branch", "main"),
+ "mode": "write",
+ }]
+ data.setdefault("active_repo", data["repo_full_name"])
+ data.setdefault("repos", [])
+ data.setdefault("active_repo", None)
+
+ # Session mode fields (backwards-compatible)
+ data.setdefault("mode", None)
+ data.setdefault("folder_path", None)
+ data.setdefault("repo_root", None)
+
+ return cls(**data)
+
+
+class SessionManager:
+    """Manage session lifecycle: create, save, load, list, fork, rewind.
+
+    Sessions are stored as one JSON file per session under ``root``
+    (default: ~/.gitpilot/sessions). Checkpoint workspace snapshots live
+    under ``root/snapshots/<session_id>/``.
+    """
+
+    def __init__(self, root: Path | None = None):
+        # Create the storage directory eagerly so later save() calls
+        # never fail on a missing directory.
+        self.root = root or SESSION_ROOT
+        self.root.mkdir(parents=True, exist_ok=True)
+
+    def _session_path(self, session_id: str) -> Path:
+        """Path of the JSON file backing the given session id."""
+        return self.root / f"{session_id}.json"
+
+    def create(
+        self,
+        repo_full_name: str | None = None,
+        branch: str | None = None,
+        name: str | None = None,
+    ) -> Session:
+        """Create and immediately persist a new (generic) session."""
+        session = Session(
+            repo_full_name=repo_full_name, branch=branch, name=name,
+        )
+        self.save(session)
+        return session
+
+    def create_folder_session(
+        self, folder_path: str, name: str | None = None,
+    ) -> Session:
+        """Create a session for folder-only mode (no git required)."""
+        folder_name = os.path.basename(os.path.normpath(folder_path))
+        session_name = name or f"Folder: {folder_name}"
+        session = self.create(name=session_name)
+        session.mode = "folder"
+        session.folder_path = folder_path
+        self.save(session)
+        return session
+
+    def create_local_git_session(
+        self, repo_root: str, branch: str | None = None, name: str | None = None,
+    ) -> Session:
+        """Create a session for local git mode."""
+        repo_name = os.path.basename(os.path.normpath(repo_root))
+        session_name = name or f"Local Git: {repo_name}"
+        if branch:
+            session_name += f" ({branch})"
+        session = self.create(name=session_name)
+        session.mode = "local_git"
+        session.repo_root = repo_root
+        # folder_path mirrors repo_root so folder-based tooling works too.
+        session.folder_path = repo_root
+        session.branch = branch
+        self.save(session)
+        return session
+
+    def create_github_session(
+        self, repo_full_name: str, branch: str | None = None, name: str | None = None,
+    ) -> Session:
+        """Create a session for GitHub mode."""
+        session_name = name or f"GitHub: {repo_full_name}"
+        if branch:
+            session_name += f" ({branch})"
+        session = self.create(
+            name=session_name,
+            repo_full_name=repo_full_name
+        )
+        session.mode = "github"
+        session.branch = branch
+        self.save(session)
+        return session
+
+    def save(self, session: Session):
+        """Persist the session to its JSON file.
+
+        NOTE(review): plain overwrite, no file locking — assumes a single
+        writer per session; confirm against callers.
+        """
+        path = self._session_path(session.id)
+        path.write_text(json.dumps(session.to_dict(), indent=2))
+
+    def load(self, session_id: str) -> Session:
+        """Load a session by id.
+
+        Raises:
+            FileNotFoundError: if no session file exists for the id.
+        """
+        path = self._session_path(session_id)
+        if not path.exists():
+            raise FileNotFoundError(f"Session not found: {session_id}")
+        return Session.from_dict(json.loads(path.read_text()))
+
+    def list_sessions(
+        self,
+        repo_full_name: str | None = None,
+        limit: int = 50,
+    ) -> list[dict[str, Any]]:
+        """Return lightweight summaries of stored sessions.
+
+        Ordered by file name descending (hex session ids) — NOTE(review):
+        this is not chronological; consider sorting by updated_at.
+        Unreadable or corrupt session files are skipped with a debug log.
+        """
+        sessions = []
+        for path in sorted(self.root.glob("*.json"), reverse=True):
+            try:
+                data = json.loads(path.read_text())
+                if repo_full_name and data.get("repo_full_name") != repo_full_name:
+                    continue
+                sessions.append({
+                    "id": data["id"],
+                    "name": data.get("name"),
+                    "repo": data.get("repo_full_name"),
+                    "branch": data.get("branch"),
+                    "message_count": len(data.get("messages", [])),
+                    "status": data.get("status", "active"),
+                    "updated_at": data.get("updated_at"),
+                    "pr_number": data.get("pr_number"),
+                    "repos": data.get("repos", []),
+                    "active_repo": data.get("active_repo"),
+                })
+                if len(sessions) >= limit:
+                    break
+            except Exception:
+                logger.debug("Failed to read session file %s", path, exc_info=True)
+                continue
+        return sessions
+
+    def delete(self, session_id: str) -> bool:
+        """Delete a session file; return True if it existed."""
+        path = self._session_path(session_id)
+        if path.exists():
+            path.unlink()
+            return True
+        return False
+
+    def fork(self, session_id: str, at_message: int | None = None) -> Session:
+        """Create a new session copying messages from an existing one.
+
+        Args:
+            at_message: if given, copy messages up to and including this index.
+        """
+        original = self.load(session_id)
+        messages = original.messages
+        if at_message is not None:
+            messages = messages[: at_message + 1]
+
+        forked = Session(
+            repo_full_name=original.repo_full_name,
+            branch=original.branch,
+            name=f"Fork of {original.name or original.id}",
+            # Shallow copy: Message objects are shared with the original
+            # in memory until either session is re-loaded from disk.
+            messages=list(messages),
+            metadata={"forked_from": original.id},
+        )
+        self.save(forked)
+        return forked
+
+    def create_checkpoint(
+        self,
+        session: Session,
+        workspace_path: Path | None = None,
+        description: str = "",
+    ) -> Checkpoint:
+        """Snapshot the session (and optionally the workspace) at this point.
+
+        When ``workspace_path`` exists, a gzipped tar archive of it is
+        written to root/snapshots/<session_id>/<checkpoint_id>.tar.gz.
+        """
+        checkpoint = Checkpoint(
+            message_index=len(session.messages),
+            description=description or f"Checkpoint at message {len(session.messages)}",
+        )
+        if workspace_path and workspace_path.exists():
+            snap_dir = self.root / "snapshots" / session.id
+            snap_dir.mkdir(parents=True, exist_ok=True)
+            archive_base = str(snap_dir / checkpoint.id)
+            shutil.make_archive(archive_base, "gztar", root_dir=str(workspace_path))
+            checkpoint.snapshot_path = archive_base + ".tar.gz"
+
+        session.checkpoints.append(checkpoint)
+        self.save(session)
+        return checkpoint
+
+    def rewind_to_checkpoint(
+        self,
+        session: Session,
+        checkpoint_id: str,
+        workspace_path: Path | None = None,
+    ) -> Session:
+        """Rewind messages (and optionally the workspace) to a checkpoint.
+
+        Truncates the message list to the checkpoint's index, restores the
+        workspace from the snapshot archive when available, and discards
+        any checkpoints taken after the target one.
+
+        Raises:
+            ValueError: if the checkpoint id is unknown.
+        """
+        checkpoint = None
+        for cp in session.checkpoints:
+            if cp.id == checkpoint_id:
+                checkpoint = cp
+                break
+        if checkpoint is None:
+            raise ValueError(f"Checkpoint not found: {checkpoint_id}")
+
+        session.messages = session.messages[: checkpoint.message_index]
+
+        if checkpoint.snapshot_path and workspace_path:
+            snap = Path(checkpoint.snapshot_path)
+            if snap.exists():
+                # Replace the workspace wholesale with the archived state.
+                if workspace_path.exists():
+                    shutil.rmtree(workspace_path)
+                workspace_path.mkdir(parents=True, exist_ok=True)
+                shutil.unpack_archive(str(snap), str(workspace_path))
+
+        idx = session.checkpoints.index(checkpoint)
+        session.checkpoints = session.checkpoints[: idx + 1]
+        self.save(session)
+        return session
diff --git a/gitpilot/settings.py b/gitpilot/settings.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e49255d183cf21bf1a8e4dba9b7ca653ed34b36
--- /dev/null
+++ b/gitpilot/settings.py
@@ -0,0 +1,286 @@
+from __future__ import annotations
+
+import contextlib
+import enum
+import json
+import logging
+import os
+from pathlib import Path
+
+from dotenv import load_dotenv
+from pydantic import BaseModel, Field
+
+from gitpilot.models import (
+ ProviderConnectionType,
+ ProviderName,
+ ProviderSummary,
+)
+
+# Load .env file if it exists (from project root or current directory)
+load_dotenv()
+
+CONFIG_DIR = Path.home() / ".gitpilot"
+CONFIG_FILE = CONFIG_DIR / "settings.json"
+
+
+class LLMProvider(enum.StrEnum):
+    """Supported LLM backends (values match the GITPILOT_PROVIDER env var)."""
+
+    openai = "openai"
+    claude = "claude"
+    watsonx = "watsonx"
+    ollama = "ollama"
+    ollabridge = "ollabridge"
+
+
+class OpenAIConfig(BaseModel):
+    """Connection settings for the OpenAI provider."""
+
+    api_key: str = Field(default="")
+    model: str = Field(default="gpt-4o-mini")
+    base_url: str = Field(default="")  # Optional: for Azure OpenAI or proxies
+
+
+class ClaudeConfig(BaseModel):
+    """Connection settings for the Anthropic Claude provider."""
+
+    api_key: str = Field(default="")
+    model: str = Field(default="claude-sonnet-4-5")
+    base_url: str = Field(default="")  # Optional: for proxies
+
+
+class WatsonxConfig(BaseModel):
+    """Connection settings for IBM watsonx.ai (requires api_key + project_id)."""
+
+    api_key: str = Field(default="")
+    project_id: str = Field(default="")
+    model_id: str = Field(default="meta-llama/llama-3-3-70b-instruct")
+    base_url: str = Field(default="https://api.watsonx.ai/v1")
+
+
+class OllamaConfig(BaseModel):
+    """Connection settings for a local Ollama server (no API key needed)."""
+
+    base_url: str = Field(default="http://localhost:11434")
+    model: str = Field(default="llama3")
+
+
+class OllaBridgeConfig(BaseModel):
+    """Connection settings for OllaBridge / OllaBridge Cloud."""
+
+    base_url: str = Field(default="http://localhost:8000")
+    model: str = Field(default="qwen2.5:1.5b")
+    api_key: str = Field(default="")  # Optional: for authenticated endpoints
+
+
+class AppSettings(BaseModel):
+ provider: LLMProvider = Field(default=LLMProvider.ollabridge)
+
+ openai: OpenAIConfig = Field(default_factory=OpenAIConfig)
+ claude: ClaudeConfig = Field(default_factory=ClaudeConfig)
+ watsonx: WatsonxConfig = Field(default_factory=WatsonxConfig)
+ ollama: OllamaConfig = Field(default_factory=OllamaConfig)
+ ollabridge: OllaBridgeConfig = Field(default_factory=OllaBridgeConfig)
+
+ langflow_url: str = Field(default="http://localhost:7860")
+ langflow_api_key: str | None = None
+ langflow_plan_flow_id: str | None = None
+
+ @classmethod
+ def from_disk(cls) -> AppSettings:
+ """Load settings from disk and merge with environment variables.
+
+ On Vercel or serverless environments, relies entirely on environment variables
+ since the filesystem is ephemeral.
+ """
+ # Start with defaults or saved settings
+ if CONFIG_FILE.exists():
+ data = json.loads(CONFIG_FILE.read_text("utf-8"))
+ settings = cls.model_validate(data)
+ else:
+ settings = cls()
+
+ # Override with environment variables (they take precedence)
+ env_provider = os.getenv("GITPILOT_PROVIDER")
+ if env_provider:
+ with contextlib.suppress(ValueError):
+ settings.provider = LLMProvider(env_provider.lower())
+
+ # Merge environment variables into provider configs
+ # OpenAI
+ if os.getenv("OPENAI_API_KEY"):
+ settings.openai.api_key = os.getenv("OPENAI_API_KEY")
+ if os.getenv("GITPILOT_OPENAI_MODEL"):
+ settings.openai.model = os.getenv("GITPILOT_OPENAI_MODEL")
+ if os.getenv("OPENAI_BASE_URL"):
+ settings.openai.base_url = os.getenv("OPENAI_BASE_URL")
+
+ # Claude
+ if os.getenv("ANTHROPIC_API_KEY"):
+ settings.claude.api_key = os.getenv("ANTHROPIC_API_KEY")
+ if os.getenv("GITPILOT_CLAUDE_MODEL"):
+ settings.claude.model = os.getenv("GITPILOT_CLAUDE_MODEL")
+ if os.getenv("ANTHROPIC_BASE_URL"):
+ settings.claude.base_url = os.getenv("ANTHROPIC_BASE_URL")
+
+ # Watsonx
+ if os.getenv("WATSONX_API_KEY"):
+ settings.watsonx.api_key = os.getenv("WATSONX_API_KEY")
+ if os.getenv("WATSONX_PROJECT_ID") or os.getenv("PROJECT_ID"):
+ settings.watsonx.project_id = os.getenv(
+ "WATSONX_PROJECT_ID", os.getenv("PROJECT_ID", "")
+ )
+ if os.getenv("GITPILOT_WATSONX_MODEL"):
+ settings.watsonx.model_id = os.getenv("GITPILOT_WATSONX_MODEL")
+ if os.getenv("WATSONX_BASE_URL"):
+ settings.watsonx.base_url = os.getenv("WATSONX_BASE_URL")
+
+ # Ollama
+ if os.getenv("OLLAMA_BASE_URL"):
+ settings.ollama.base_url = os.getenv("OLLAMA_BASE_URL")
+ if os.getenv("GITPILOT_OLLAMA_MODEL"):
+ settings.ollama.model = os.getenv("GITPILOT_OLLAMA_MODEL")
+
+ # OllaBridge / OllaBridge Cloud
+ if os.getenv("OLLABRIDGE_BASE_URL"):
+ settings.ollabridge.base_url = os.getenv("OLLABRIDGE_BASE_URL")
+ if os.getenv("GITPILOT_OLLABRIDGE_MODEL"):
+ settings.ollabridge.model = os.getenv("GITPILOT_OLLABRIDGE_MODEL")
+ if os.getenv("OLLABRIDGE_API_KEY"):
+ settings.ollabridge.api_key = os.getenv("OLLABRIDGE_API_KEY")
+
+ # LangFlow (optional)
+ if os.getenv("GITPILOT_LANGFLOW_URL"):
+ settings.langflow_url = os.getenv("GITPILOT_LANGFLOW_URL")
+ if os.getenv("GITPILOT_LANGFLOW_API_KEY"):
+ settings.langflow_api_key = os.getenv("GITPILOT_LANGFLOW_API_KEY")
+ if os.getenv("GITPILOT_LANGFLOW_PLAN_FLOW_ID"):
+ settings.langflow_plan_flow_id = os.getenv("GITPILOT_LANGFLOW_PLAN_FLOW_ID")
+
+ return settings
+
+ def save(self) -> None:
+ """Save settings to disk. Skipped on Vercel (ephemeral filesystem)."""
+ # Skip saving on Vercel - filesystem is ephemeral
+ if os.getenv("GITPILOT_VERCEL_DEPLOYMENT") or os.getenv("VERCEL"):
+ logging.warning(
+ "Settings persistence disabled on Vercel. "
+ "Use environment variables for configuration."
+ )
+ return
+
+ CONFIG_DIR.mkdir(parents=True, exist_ok=True)
+ CONFIG_FILE.write_text(self.model_dump_json(indent=2), "utf-8")
+
+ # ββ Provider introspection helpers ββββββββββββββββββββ
+
+ def is_provider_configured(self) -> bool:
+ """Return True if the active provider has the required configuration."""
+ p = self.provider
+ if p == LLMProvider.openai:
+ return bool(self.openai.api_key)
+ if p == LLMProvider.claude:
+ return bool(self.claude.api_key)
+ if p == LLMProvider.watsonx:
+ return bool(self.watsonx.api_key and self.watsonx.project_id)
+ if p == LLMProvider.ollama:
+ return True
+ return p == LLMProvider.ollabridge
+
+ def get_effective_model(self) -> str | None:
+ """Return the model string for the active provider."""
+ p = self.provider
+ if p == LLMProvider.openai:
+ return self.openai.model or None
+ if p == LLMProvider.claude:
+ return self.claude.model or None
+ if p == LLMProvider.watsonx:
+ return self.watsonx.model_id or None
+ if p == LLMProvider.ollama:
+ return self.ollama.model or None
+ if p == LLMProvider.ollabridge:
+ return self.ollabridge.model or None
+ return None
+
+ def get_provider_summary(self) -> ProviderSummary:
+ """Build a :class:`ProviderSummary` for the active provider."""
+ p = self.provider
+
+ # --- source detection (.env vs settings) ---
+ env_key_map = {
+ LLMProvider.openai: "OPENAI_API_KEY",
+ LLMProvider.claude: "ANTHROPIC_API_KEY",
+ LLMProvider.watsonx: "WATSONX_API_KEY",
+ LLMProvider.ollama: "OLLAMA_BASE_URL",
+ LLMProvider.ollabridge: "OLLABRIDGE_BASE_URL",
+ }
+ source: str = (
+ ".env" if os.getenv(env_key_map.get(p, "")) else "settings"
+ )
+
+ # --- per-provider fields ---
+ if p == LLMProvider.openai:
+ model = self.openai.model
+ base_url = self.openai.base_url or None
+ conn = ProviderConnectionType.api_key
+ has_key = bool(self.openai.api_key)
+ elif p == LLMProvider.claude:
+ model = self.claude.model
+ base_url = self.claude.base_url or None
+ conn = ProviderConnectionType.api_key
+ has_key = bool(self.claude.api_key)
+ elif p == LLMProvider.watsonx:
+ model = self.watsonx.model_id
+ base_url = self.watsonx.base_url or None
+ conn = ProviderConnectionType.api_key
+ has_key = bool(self.watsonx.api_key)
+ elif p == LLMProvider.ollama:
+ model = self.ollama.model
+ base_url = self.ollama.base_url or None
+ conn = ProviderConnectionType.local
+ has_key = False
+ elif p == LLMProvider.ollabridge:
+ model = self.ollabridge.model
+ base_url = self.ollabridge.base_url or None
+ conn = ProviderConnectionType.local
+ has_key = bool(self.ollabridge.api_key)
+ else:
+ model = None
+ base_url = None
+ conn = None
+ has_key = False
+
+ return ProviderSummary(
+ configured=self.is_provider_configured(),
+ name=ProviderName(p.value),
+ source=source,
+ model=model,
+ base_url=base_url,
+ connection_type=conn,
+ has_api_key=has_key,
+ )
+
+
+# Process-wide singleton, loaded once at import time.
+_settings = AppSettings.from_disk()
+
+
+def get_settings() -> AppSettings:
+    """Return the process-wide AppSettings singleton."""
+    return _settings
+
+
+def set_provider(provider: LLMProvider) -> AppSettings:
+    """Switch the active LLM provider, persist the change, and return settings."""
+    _settings.provider = provider
+    _settings.save()
+    return _settings
+
+
+def update_settings(updates: dict) -> AppSettings:
+ """Update settings with partial or full configuration."""
+ global _settings # noqa: PLW0602
+
+ # Update provider if present
+ if "provider" in updates:
+ _settings.provider = LLMProvider(updates["provider"])
+
+ # Update provider-specific configs
+ if "openai" in updates:
+ _settings.openai = OpenAIConfig(**updates["openai"])
+ if "claude" in updates:
+ _settings.claude = ClaudeConfig(**updates["claude"])
+ if "watsonx" in updates:
+ _settings.watsonx = WatsonxConfig(**updates["watsonx"])
+ if "ollama" in updates:
+ _settings.ollama = OllamaConfig(**updates["ollama"])
+ if "ollabridge" in updates:
+ _settings.ollabridge = OllaBridgeConfig(**updates["ollabridge"])
+
+ _settings.save()
+ return _settings
diff --git a/gitpilot/skills.py b/gitpilot/skills.py
new file mode 100644
index 0000000000000000000000000000000000000000..dbd22046ce06194211c5391b455f0c002710dc90
--- /dev/null
+++ b/gitpilot/skills.py
@@ -0,0 +1,237 @@
+# gitpilot/skills.py
+"""Skill system for GitPilot β reusable, invocable workflows.
+
+Skills are markdown files that define prompt templates. Users invoke
+them via ``/skill-name`` in chat. They live in:
+
+- ``.gitpilot/skills/*.md`` β project-level skills
+- ``~/.gitpilot/skills/*.md`` β global user skills
+- Plugin skills (discovered via PluginManager)
+
+Each markdown file has YAML front-matter followed by a prompt template::
+
+ ---
+ name: review
+ description: Review code quality for the current branch
+ auto_trigger: false
+ required_tools:
+ - git_diff
+ - read_local_file
+ ---
+
+ Review the code changes on the current branch.
+ Focus on: security issues, performance, and maintainability.
+ Use git_diff to see what changed, then read relevant files.
+ Provide a structured review with severity ratings.
+"""
+from __future__ import annotations
+
+import logging
+import re
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+logger = logging.getLogger(__name__)
+
# Name of the skills subdirectory under .gitpilot/ (project) and ~/.gitpilot/ (user).
SKILLS_DIR_NAME = "skills"
# Matches a leading YAML front-matter fence ("---\n...\n---\n") at the very
# start of a skill file; group(1) is the raw front-matter text.
_FRONT_MATTER_RE = re.compile(r"^---\s*\n(.*?)\n---\s*\n", re.DOTALL)
+
+
@dataclass
class Skill:
    """A reusable, invocable workflow template.

    A skill couples optional metadata (name, description, auto-trigger
    flag, required tool names) with a prompt template whose ``{{var}}``
    placeholders are filled in at invocation time.
    """

    name: str
    description: str = ""
    prompt_template: str = ""
    auto_trigger: bool = False
    required_tools: List[str] = field(default_factory=list)
    source_file: Optional[Path] = None

    @classmethod
    def from_file(cls, path: Path) -> "Skill":
        """Parse a skill from a markdown file with YAML front-matter.

        If the file starts with a ``---`` front-matter fence, the fence
        supplies the metadata and everything after it becomes the prompt
        template; otherwise the whole file is the template and the file
        stem becomes the skill name.
        """
        raw = path.read_text(encoding="utf-8")
        front = _FRONT_MATTER_RE.match(raw)
        if front is not None:
            meta: Dict[str, Any] = _parse_yaml_simple(front.group(1))
            body = raw[front.end():]
        else:
            meta = {}
            body = raw

        return cls(
            name=meta.get("name", path.stem),
            description=meta.get("description", ""),
            prompt_template=body.strip(),
            auto_trigger=meta.get("auto_trigger", False),
            required_tools=meta.get("required_tools", []),
            source_file=path,
        )

    def render(self, context: Optional[Dict[str, str]] = None) -> str:
        """Substitute ``{{var_name}}`` placeholders and return the prompt.

        Placeholders with no matching key are left untouched; values are
        coerced to str.
        """
        rendered = self.prompt_template
        for key, value in (context or {}).items():
            rendered = rendered.replace("{{%s}}" % key, str(value))
        return rendered

    def to_dict(self) -> Dict[str, Any]:
        """Serializable summary of the skill (omits the template body)."""
        source = str(self.source_file) if self.source_file else None
        return {
            "name": self.name,
            "description": self.description,
            "auto_trigger": self.auto_trigger,
            "required_tools": self.required_tools,
            "source": source,
        }
+
+
class SkillManager:
    """Discover and manage skills from project, user, and plugin sources."""

    def __init__(
        self,
        workspace_path: Optional[Path] = None,
        user_dir: Optional[Path] = None,
    ) -> None:
        self.workspace_path = workspace_path
        # Fall back to ~/.gitpilot when no (truthy) user dir is supplied.
        self.user_dir = user_dir or (Path.home() / ".gitpilot")
        self._skills: Dict[str, Skill] = {}

    def load_all(self) -> int:
        """Load skills from all sources. Returns count of skills loaded."""
        total = 0

        # Project-level skills first, then global user skills (later loads
        # overwrite earlier ones on name collision).
        if self.workspace_path:
            total += self._load_from_dir(
                self.workspace_path / ".gitpilot" / SKILLS_DIR_NAME,
                prefix="project",
            )
        total += self._load_from_dir(self.user_dir / SKILLS_DIR_NAME, prefix="user")

        logger.info("Loaded %d skills total", total)
        return total

    def register(self, skill: Skill) -> None:
        """Register a skill object directly (e.g. supplied by a plugin)."""
        self._skills[skill.name] = skill

    def get(self, name: str) -> Optional[Skill]:
        """Look up a skill by name (used for /command invocation)."""
        return self._skills.get(name)

    def list_skills(self) -> List[Dict[str, Any]]:
        """Return serializable summaries of every loaded skill."""
        return [skill.to_dict() for skill in self._skills.values()]

    def find_auto_triggers(self, context: str) -> List[Skill]:
        """Return auto-trigger skills relevant to *context*.

        A skill matches when its name, or any single word of its
        description, appears in the context (case-insensitive).

        NOTE(review): per-word description matching is very broad — a
        description containing common words ("the", "a") will match
        almost any context; confirm this is intended.
        """
        haystack = context.lower()

        def _is_relevant(candidate: "Skill") -> bool:
            if candidate.name.lower() in haystack:
                return True
            return any(word in haystack for word in candidate.description.lower().split())

        return [
            candidate
            for candidate in self._skills.values()
            if candidate.auto_trigger and _is_relevant(candidate)
        ]

    def invoke(
        self,
        name: str,
        context: Optional[Dict[str, str]] = None,
    ) -> Optional[str]:
        """Render the named skill's prompt, or None when it is unknown."""
        skill = self.get(name)
        return skill.render(context) if skill else None

    def _load_from_dir(self, skills_dir: Path, prefix: str = "") -> int:
        """Load every ``*.md`` file under *skills_dir*.

        Unparseable files are logged and skipped. Returns the number of
        skills loaded successfully.
        """
        if not skills_dir.is_dir():
            return 0
        loaded = 0
        for candidate in sorted(skills_dir.glob("*.md")):
            try:
                parsed = Skill.from_file(candidate)
            except Exception as exc:
                logger.warning("Failed to load skill %s: %s", candidate, exc)
                continue
            self._skills[parsed.name] = parsed
            loaded += 1
        return loaded
+
+
+def _parse_yaml_simple(text: str) -> Dict[str, Any]:
+ """Minimal YAML front-matter parser (no external dependency).
+
+ Handles:
+ key: value
+ key: true/false
+ key:
+ - item1
+ - item2
+ """
+ result: Dict[str, Any] = {}
+ lines = text.strip().split("\n")
+ current_key: Optional[str] = None
+ current_list: Optional[List[str]] = None
+
+ for line in lines:
+ stripped = line.strip()
+ if not stripped or stripped.startswith("#"):
+ continue
+
+ # List item
+ if stripped.startswith("- ") and current_key:
+ if current_list is None:
+ current_list = []
+ current_list.append(stripped[2:].strip())
+ result[current_key] = current_list
+ continue
+
+ # Key-value
+ if ":" in stripped:
+ if current_list is not None:
+ current_list = None
+
+ key, _, value = stripped.partition(":")
+ key = key.strip()
+ value = value.strip()
+ current_key = key
+
+ if not value:
+ # Next lines might be a list
+ current_list = []
+ result[key] = current_list
+ continue
+
+ # Parse value types
+ if value.lower() == "true":
+ result[key] = True
+ elif value.lower() == "false":
+ result[key] = False
+ elif value.isdigit():
+ result[key] = int(value)
+ else:
+ result[key] = value
+
+ current_list = None
+
+ return result
diff --git a/gitpilot/skills/fix-hf-space.md b/gitpilot/skills/fix-hf-space.md
new file mode 100644
index 0000000000000000000000000000000000000000..6d22492471c2f770326ae578e504bd624a9e78df
--- /dev/null
+++ b/gitpilot/skills/fix-hf-space.md
@@ -0,0 +1,49 @@
+# /fix-hf-space
+
+Analyze and repair a broken HuggingFace Space
+
+## Description
+
+This skill diagnoses and fixes broken HuggingFace Spaces by:
+1. Cloning the Space repository
+2. Analyzing for dead dependencies, deprecated APIs, and SDK issues
+3. Generating a complete fix using OllaBridge LLM (or template fallback)
+4. Pushing the fix and managing ZeroGPU hardware if needed
+
+Works with RepoGuardian's Space analyzer for structured diagnosis.
+
+## Arguments
+
+- `space_id` (required): HuggingFace Space ID, e.g. `ruslanmv/Logo-Creator`
+- `--push`: Push fixes to the Space repo (default: dry run)
+- `--hardware`: Also manage ZeroGPU hardware allocation
+
+## Prompt
+
+Fix the broken HuggingFace Space `{space_id}`.
+
+Steps:
+1. Clone the Space: `clone_hf_space("{space_id}")`
+2. Get runtime info: `get_space_runtime_info("{space_id}")`
+3. Analyze for issues: `analyze_hf_space(repo_dir)`
+4. Generate fix: `generate_space_fix("{space_id}", diagnosis, app_content)`
+5. Push fix: `push_space_fix(repo_dir, fix)`
+6. Manage hardware: `manage_space_hardware("{space_id}", token, "zero-a10g")`
+
+Use OllaBridge Cloud ({ollabridge_url}) for intelligent analysis.
+Report all issues found and actions taken.
+
+## Example
+
+```bash
+gitpilot skill fix-hf-space ruslanmv/Logo-Creator --push --hardware
+```
+
+## Required Tools
+
+- `clone_hf_space`
+- `analyze_hf_space`
+- `generate_space_fix`
+- `push_space_fix`
+- `manage_space_hardware`
+- `get_space_runtime_info`
diff --git a/gitpilot/smart_model_router.py b/gitpilot/smart_model_router.py
new file mode 100644
index 0000000000000000000000000000000000000000..9776a2c061265112eb1b590b6c98128a5d16c298
--- /dev/null
+++ b/gitpilot/smart_model_router.py
@@ -0,0 +1,268 @@
+# gitpilot/smart_model_router.py
+"""Smart multi-model routing for GitPilot.
+
+Routes different tasks to different LLM models based on complexity,
+task type, and cost constraints. This allows GitPilot to use cheap
+models for simple tasks and powerful models for complex reasoning.
+
+Complexity is estimated from the request text using heuristics:
+
+- **low** β simple queries, listings, status checks β fast/cheap model
+- **medium** β code generation, edits, reviews β balanced model
+- **high** β complex reasoning, architecture, security analysis β strongest model
+
+The router respects a configurable cost budget and tracks usage.
+"""
+from __future__ import annotations
+
+import logging
+import re
+from dataclasses import dataclass, field
+from enum import Enum
+from typing import Any, Dict, List, Optional
+
+logger = logging.getLogger(__name__)
+
+
class Complexity(str, Enum):
    """Heuristic difficulty of a request; see ModelRouter.estimate_complexity."""

    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"


class TaskTier(str, Enum):
    """Maps complexity to model tier."""

    FAST = "fast"  # cheap, low-latency
    BALANCED = "balanced"  # good quality/cost ratio
    POWERFUL = "powerful"  # strongest available
+
+
# Default model mapping per provider + tier
# NOTE(review): these model ids are point-in-time snapshots; verify against
# each provider's current catalog before relying on them.
DEFAULT_MODEL_MAP: Dict[str, Dict[str, str]] = {
    "openai": {
        "fast": "gpt-4o-mini",
        "balanced": "gpt-4o",
        "powerful": "o1",
    },
    "claude": {
        "fast": "claude-haiku-4-5-20251001",
        "balanced": "claude-sonnet-4-5-20250929",
        "powerful": "claude-opus-4-6",
    },
    "ollama": {
        "fast": "llama3",
        "balanced": "llama3",
        "powerful": "llama3:70b",
    },
    "watsonx": {
        "fast": "meta-llama/llama-3-1-8b-instruct",
        "balanced": "meta-llama/llama-3-3-70b-instruct",
        "powerful": "meta-llama/llama-3-3-70b-instruct",
    },
}

# Patterns for complexity estimation
# (case-insensitive; a single hit in either list decides the estimate)
_HIGH_COMPLEXITY_PATTERNS = [
    re.compile(r"\b(architect|design|refactor|migrate|security audit|threat model)\b", re.I),
    re.compile(r"\b(analyze .*across|cross-repo|monorepo|dependency graph)\b", re.I),
    re.compile(r"\b(implement .*system|build .*from scratch|rewrite)\b", re.I),
    re.compile(r"\b(debug.*complex|race condition|memory leak|deadlock)\b", re.I),
]

_LOW_COMPLEXITY_PATTERNS = [
    re.compile(r"\b(list|show|status|version|help|what is|how do)\b", re.I),
    re.compile(r"\b(rename|typo|comment|format|lint)\b", re.I),
    re.compile(r"\b(git status|git log|git diff)\b", re.I),
]

# Category -> default tier
# (keys presumably match RequestCategory values used elsewhere — confirm)
_CATEGORY_TIER_MAP: Dict[str, TaskTier] = {
    "plan_execute": TaskTier.POWERFUL,
    "issue_management": TaskTier.FAST,
    "pr_management": TaskTier.BALANCED,
    "code_search": TaskTier.FAST,
    "code_review": TaskTier.POWERFUL,
    "learning": TaskTier.BALANCED,
    "conversational": TaskTier.FAST,
    "local_edit": TaskTier.BALANCED,
    "terminal": TaskTier.FAST,
}
+
+
@dataclass
class ModelSelection:
    """Result of model selection."""

    model: str  # concrete model identifier to call
    tier: TaskTier  # tier the model was drawn from
    complexity: Complexity  # estimated request complexity
    provider: str  # provider namespace the model id belongs to
    reason: str  # human-readable explanation of the choice


@dataclass
class UsageRecord:
    """Track model usage for budgeting."""

    model: str  # model identifier the usage belongs to
    tokens_in: int = 0  # prompt tokens consumed
    tokens_out: int = 0  # completion tokens produced
    estimated_cost_usd: float = 0.0  # caller-estimated cost of this call


@dataclass
class ModelRouterConfig:
    """Configuration for the model router."""

    provider: str = "openai"
    # Per-provider {tier: model} overrides; falls back to DEFAULT_MODEL_MAP.
    model_map: Dict[str, Dict[str, str]] = field(default_factory=dict)
    # Override: force a specific model for all tasks
    force_model: Optional[str] = None
    # Budget: max estimated cost per session (USD, 0 = unlimited)
    budget_usd: float = 0.0
    # Override tier for specific categories
    category_overrides: Dict[str, str] = field(default_factory=dict)
+
+
class ModelRouter:
    """Select models by task complexity while tracking spend.

    Selection order: forced model (if configured) → per-category override →
    category default tier (with a FAST→BALANCED upgrade when complexity is
    HIGH) → pure complexity-to-tier mapping. Recorded usage feeds an
    optional per-session budget.

    Example::

        router = ModelRouter(config=ModelRouterConfig(provider="openai"))
        router.select("list open issues").tier          # TaskTier.FAST
        router.select("Rewrite the auth system").tier   # TaskTier.POWERFUL
    """

    def __init__(self, config: Optional[ModelRouterConfig] = None) -> None:
        self.config = config if config is not None else ModelRouterConfig()
        self._usage: List[UsageRecord] = []

    def select(
        self,
        request: str,
        category: Optional[str] = None,
    ) -> ModelSelection:
        """Pick the best model for a request.

        Args:
            request: The user's request text.
            category: Optional RequestCategory value (e.g. "code_review").
        """
        forced = self.config.force_model
        if forced:
            # A forced model short-circuits all heuristics.
            return ModelSelection(
                model=forced,
                tier=TaskTier.BALANCED,
                complexity=Complexity.MEDIUM,
                provider=self.config.provider,
                reason="Force model override",
            )

        level = self.estimate_complexity(request)
        chosen_tier = self._select_tier(level, category)
        return ModelSelection(
            model=self._resolve_model(chosen_tier),
            tier=chosen_tier,
            complexity=level,
            provider=self.config.provider,
            reason=self._explain(level, chosen_tier, category),
        )

    def estimate_complexity(self, request: str) -> Complexity:
        """Heuristic estimate: pattern match first, then word count."""
        text = request.strip()

        if any(p.search(text) for p in _HIGH_COMPLEXITY_PATTERNS):
            return Complexity.HIGH
        if any(p.search(text) for p in _LOW_COMPLEXITY_PATTERNS):
            return Complexity.LOW

        # No pattern hit: fall back to request length.
        words = len(text.split())
        if words > 100:
            return Complexity.HIGH
        return Complexity.LOW if words < 15 else Complexity.MEDIUM

    def record_usage(self, record: UsageRecord) -> None:
        """Append one usage record for budget tracking."""
        self._usage.append(record)

    def get_total_cost(self) -> float:
        """Total estimated cost across all recorded usage."""
        return sum(rec.estimated_cost_usd for rec in self._usage)

    def is_budget_exceeded(self) -> bool:
        """True when a positive session budget has been reached."""
        budget = self.config.budget_usd
        return budget > 0 and self.get_total_cost() >= budget

    def get_usage_summary(self) -> Dict[str, Any]:
        """Aggregate recorded usage per model plus budget status."""
        per_model: Dict[str, Dict[str, Any]] = {}
        for rec in self._usage:
            bucket = per_model.setdefault(
                rec.model,
                {"calls": 0, "tokens_in": 0, "tokens_out": 0, "cost": 0.0},
            )
            bucket["calls"] += 1
            bucket["tokens_in"] += rec.tokens_in
            bucket["tokens_out"] += rec.tokens_out
            bucket["cost"] += rec.estimated_cost_usd
        return {
            "total_cost_usd": self.get_total_cost(),
            "budget_usd": self.config.budget_usd,
            "budget_exceeded": self.is_budget_exceeded(),
            "models": per_model,
        }

    def _select_tier(self, complexity: Complexity, category: Optional[str]) -> TaskTier:
        """Map (complexity, category) to a tier; overrides win over defaults."""
        overrides = self.config.category_overrides
        if category and category in overrides:
            return TaskTier(overrides[category])

        if category and category in _CATEGORY_TIER_MAP:
            base = _CATEGORY_TIER_MAP[category]
            # High-complexity work never runs on the cheapest tier.
            if base == TaskTier.FAST and complexity == Complexity.HIGH:
                return TaskTier.BALANCED
            return base

        # No category information: purely complexity-driven.
        if complexity == Complexity.LOW:
            return TaskTier.FAST
        if complexity == Complexity.HIGH:
            return TaskTier.POWERFUL
        return TaskTier.BALANCED

    def _resolve_model(self, tier: TaskTier) -> str:
        """Resolve a tier to a model id: user map first, then defaults."""
        provider = self.config.provider
        custom = self.config.model_map.get(provider, {}).get(tier.value)
        if custom:
            return custom
        defaults = DEFAULT_MODEL_MAP.get(provider, DEFAULT_MODEL_MAP["openai"])
        return defaults.get(tier.value, defaults["balanced"])

    def _explain(
        self, complexity: Complexity, tier: TaskTier, category: Optional[str],
    ) -> str:
        """Build the human-readable reason string for a selection."""
        pieces = [f"Complexity={complexity.value}"]
        if category:
            pieces.append(f"category={category}")
        pieces.append(f"β tier={tier.value}")
        return ", ".join(pieces)
diff --git a/gitpilot/terminal.py b/gitpilot/terminal.py
new file mode 100644
index 0000000000000000000000000000000000000000..bcfe9e04d7ae2fc1fd7997b6bda13df90334d2e1
--- /dev/null
+++ b/gitpilot/terminal.py
@@ -0,0 +1,200 @@
+# gitpilot/terminal.py
+"""Sandboxed terminal command executor.
+
+Runs shell commands within the workspace directory with configurable
+timeout, size limits, and directory restrictions.
+"""
+from __future__ import annotations
+
+import asyncio
+import logging
+import os
+import signal
+import time
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any, AsyncIterator, Dict, List, Optional
+
+logger = logging.getLogger(__name__)
+
DEFAULT_TIMEOUT_SEC = 120  # default per-command wall-clock limit (seconds)
MAX_OUTPUT_BYTES = 512_000  # stdout and stderr are each capped at this size

# Substring blocklist checked case-insensitively against every command.
# NOTE(review): substring matching is a coarse safety net, not a sandbox —
# it is trivially bypassable; real isolation needs containers/seccomp.
BLOCKED_PATTERNS = [
    "rm -rf /",
    "mkfs",
    "dd if=/dev/zero",
    ":(){ :|:& };:",
]


@dataclass
class CommandResult:
    """Result of a terminal command execution."""

    command: str  # the command string exactly as submitted
    exit_code: int  # process exit status; -1 on timeout or spawn failure
    stdout: str  # captured stdout (UTF-8, undecodable bytes replaced)
    stderr: str  # captured stderr (UTF-8, undecodable bytes replaced)
    duration_ms: int  # wall-clock duration in milliseconds
    truncated: bool = False  # True if either stream was capped at MAX_OUTPUT_BYTES
    timed_out: bool = False  # True if the command hit its timeout and was killed


@dataclass
class TerminalSession:
    """An active terminal session bound to a workspace."""

    workspace_path: Path  # sandbox root for this session
    env: Dict[str, str] = field(default_factory=dict)  # extra vars layered over os.environ
    history: List[CommandResult] = field(default_factory=list)  # past command results
    cwd: Optional[Path] = None  # current directory; defaults to the workspace root

    def __post_init__(self):
        # Start every session at the workspace root unless told otherwise.
        if self.cwd is None:
            self.cwd = self.workspace_path


class TerminalExecutor:
    """Execute shell commands safely within a workspace directory.

    Security:
    - Commands run via subprocess (never os.system)
    - Working directory locked to workspace
    - Configurable timeout; the spawned shell is killed on expiry
      (note: children of the shell are not killed — no process group)
    - Output size capping
    - Blocked command patterns
    """

    def __init__(
        self,
        allowed_commands: Optional[List[str]] = None,
        blocked_patterns: Optional[List[str]] = None,
    ):
        # allowed_commands: optional allowlist of executable names; None = allow all.
        self.allowed_commands = allowed_commands
        # Copy the module default so per-instance mutation cannot leak globally.
        self.blocked_patterns = blocked_patterns or list(BLOCKED_PATTERNS)

    def _validate_command(self, command: str):
        """Raise PermissionError if the command is blocked or not allowlisted."""
        cmd_lower = command.lower().strip()
        for blocked in self.blocked_patterns:
            if blocked in cmd_lower:
                raise PermissionError(f"Command blocked: {command}")
        if self.allowed_commands is not None:
            base_cmd = cmd_lower.split()[0] if cmd_lower else ""
            if base_cmd not in self.allowed_commands:
                raise PermissionError(f"Command not in allowlist: {base_cmd}")

    async def execute(
        self,
        session: TerminalSession,
        command: str,
        timeout: int = DEFAULT_TIMEOUT_SEC,
        env: Optional[Dict[str, str]] = None,
    ) -> CommandResult:
        """Execute a command and return captured output.

        Args:
            session: Session whose cwd/env are used; the result is appended
                to its history.
            command: Shell command line to run.
            timeout: Seconds before the process is killed.
            env: Extra environment variables for this call only.

        Returns:
            CommandResult; spawn failures are reported via exit_code=-1 and
            stderr rather than raised.

        Raises:
            PermissionError: if the command is blocked or not allowlisted.
        """
        self._validate_command(command)

        # Confine cwd to the workspace. Path.relative_to() is used instead
        # of a string-prefix test: "/ws-evil".startswith("/ws") is True, so
        # a prefix check would accept sibling directories outside the sandbox.
        resolved_cwd = session.cwd.resolve()
        ws_resolved = session.workspace_path.resolve()
        try:
            resolved_cwd.relative_to(ws_resolved)
        except ValueError:
            session.cwd = session.workspace_path

        full_env = {**os.environ, **session.env, **(env or {})}
        start = time.monotonic()

        try:
            proc = await asyncio.create_subprocess_shell(
                command,
                cwd=str(session.cwd),
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
                env=full_env,
            )

            try:
                stdout_bytes, stderr_bytes = await asyncio.wait_for(
                    proc.communicate(), timeout=timeout,
                )
                timed_out = False
            except asyncio.TimeoutError:
                try:
                    proc.kill()
                except ProcessLookupError:
                    pass  # process exited between the timeout and the kill
                await proc.wait()  # reap the killed process (avoid zombies/warnings)
                stdout_bytes, stderr_bytes = b"", b""
                timed_out = True

            duration_ms = int((time.monotonic() - start) * 1000)

            # Cap each stream independently so one huge stream cannot mask
            # truncation of the other.
            truncated = False
            if len(stdout_bytes) > MAX_OUTPUT_BYTES:
                stdout_bytes = stdout_bytes[:MAX_OUTPUT_BYTES]
                truncated = True
            if len(stderr_bytes) > MAX_OUTPUT_BYTES:
                stderr_bytes = stderr_bytes[:MAX_OUTPUT_BYTES]
                truncated = True

            result = CommandResult(
                command=command,
                exit_code=proc.returncode if not timed_out else -1,
                stdout=stdout_bytes.decode("utf-8", errors="replace"),
                stderr=stderr_bytes.decode("utf-8", errors="replace"),
                duration_ms=duration_ms,
                truncated=truncated,
                timed_out=timed_out,
            )
        except Exception as e:
            # Spawn/IO failure: surface the error through the result object
            # so callers always receive a uniform CommandResult.
            duration_ms = int((time.monotonic() - start) * 1000)
            result = CommandResult(
                command=command, exit_code=-1,
                stdout="", stderr=str(e),
                duration_ms=duration_ms,
            )

        session.history.append(result)
        return result

    async def execute_streaming(
        self,
        session: TerminalSession,
        command: str,
        timeout: int = DEFAULT_TIMEOUT_SEC,
    ) -> AsyncIterator[Dict[str, Any]]:
        """Execute command and yield output lines as they arrive.

        Yields {"type": "stdout", "data": line} per line, {"type": "error",
        ...} on timeout, and finally {"type": "exit", "exit_code": ...,
        "duration_ms": ...}. stderr is interleaved into stdout.

        NOTE(review): unlike execute(), this path does not re-confine
        session.cwd to the workspace — confirm whether that is intended.
        """
        self._validate_command(command)

        full_env = {**os.environ, **session.env}
        proc = await asyncio.create_subprocess_shell(
            command,
            cwd=str(session.cwd),
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.STDOUT,  # merge stderr into the stdout stream
            env=full_env,
        )

        start = time.monotonic()
        try:
            while True:
                # The deadline is only checked between reads, so expiry can
                # be detected up to 5s late (the per-read wait below).
                if time.monotonic() - start > timeout:
                    proc.kill()
                    yield {"type": "error", "data": "Command timed out"}
                    break
                try:
                    line = await asyncio.wait_for(
                        proc.stdout.readline(), timeout=5.0,
                    )
                except asyncio.TimeoutError:
                    continue  # no output yet; loop to re-check the deadline
                if not line:
                    break  # EOF: the process closed its stdout
                yield {
                    "type": "stdout",
                    "data": line.decode("utf-8", errors="replace"),
                }
        finally:
            await proc.wait()
            yield {
                "type": "exit",
                "exit_code": proc.returncode,
                "duration_ms": int((time.monotonic() - start) * 1000),
            }
diff --git a/gitpilot/topology_registry.py b/gitpilot/topology_registry.py
new file mode 100644
index 0000000000000000000000000000000000000000..ddf524e3c7d3971a47494e1dbd703249163e54b9
--- /dev/null
+++ b/gitpilot/topology_registry.py
@@ -0,0 +1,928 @@
+# gitpilot/topology_registry.py
+"""Topology Registry β switchable agent workflow presets for GitPilot.
+
+A topology controls three things simultaneously:
+ 1. **Visualization** β the ReactFlow node/edge graph shown in Agent Workflow view.
+ 2. **Routing** β which agent(s) are selected for a given user request.
+ 3. **Execution** β the runtime pattern (single_task, crew_pipeline, or react_loop).
+
+There are 7 built-in topologies:
+ - T1 ``default`` β Fan-out CrewAI routing (the original architecture)
+ - T2 ``gitpilot_code`` β Hub-and-spoke ReAct loop with on-demand subagents
+ - T3 ``feature_builder`` β 5-agent pipeline: explore > plan > implement > review > PR
+ - T4 ``bug_hunter`` β 4-agent pipeline: explore > fix > verify > PR
+ - T5 ``code_inspector`` β 2-agent read-only: explore > review
+ - T6 ``architect_mode`` β 2-agent read-only: explore > plan
+ - T7 ``quick_fix`` β 2-agent fast path: implement > git
+
+This module is **purely additive** β it does not modify any existing routing,
+agent, or execution logic. The existing ``get_flow_definition()`` and
+``dispatch_request()`` continue to work unchanged. New code paths can opt-in
+to topology-aware behaviour by importing from this module.
+"""
+from __future__ import annotations
+
+import logging
+import re
+from dataclasses import dataclass, field
+from enum import Enum
+from typing import Any, Dict, List, Optional, Tuple
+
+logger = logging.getLogger(__name__)
+
+
+# ---------------------------------------------------------------------------
+# Enums
+# ---------------------------------------------------------------------------
+
class TopologyCategory(str, Enum):
    """Whether a topology is a system-level architecture or a task pipeline."""
    system = "system"  # overall routing architecture (e.g. the T1 default)
    pipeline = "pipeline"  # fixed task-specific agent chain (e.g. T3-T7)


class ExecutionStyle(str, Enum):
    """How the topology's agents are orchestrated at runtime."""
    single_task = "single_task"  # One agent, one task (T1 default)
    react_loop = "react_loop"  # while(tool_use) main agent loop (T2)
    crew_pipeline = "crew_pipeline"  # Sequential multi-agent CrewAI crew (T3-T7)


class RoutingStrategy(str, Enum):
    """How a request is mapped to agents within a topology."""
    classify_and_dispatch = "classify_and_dispatch"  # Regex/LLM picks one agent
    always_main_agent = "always_main_agent"  # Everything goes to one agent
    fixed_sequence = "fixed_sequence"  # Ordered chain of agents
+
+
+# ---------------------------------------------------------------------------
+# Data classes
+# ---------------------------------------------------------------------------
+
@dataclass
class RoutingPolicy:
    """Defines how a topology selects agents for a request."""
    strategy: RoutingStrategy
    # Single target agent — presumably used when strategy=always_main_agent; confirm.
    primary_agent: Optional[str] = None
    # Ordered agent chain — presumably used when strategy=fixed_sequence; confirm.
    sequence: Optional[List[str]] = None
    # Extra cues for the classifier when strategy=classify_and_dispatch.
    classifier_hints: List[str] = field(default_factory=list)
+
+
@dataclass
class TopologyMeta:
    """Lightweight summary of a topology (no graph data).

    Mirrors Topology's scalar fields; produced by ``Topology.to_meta()``
    for listings where the flow graph would be dead weight.
    """
    id: str
    name: str
    description: str
    category: TopologyCategory
    icon: str
    agents_used: List[str]
    execution_style: ExecutionStyle
+
+
@dataclass
class Topology:
    """Complete topology definition, including the ReactFlow graph."""
    id: str
    name: str
    description: str
    category: TopologyCategory
    icon: str
    agents_used: List[str]
    execution_style: ExecutionStyle
    routing_policy: RoutingPolicy
    flow_graph: Dict[str, Any]  # {"nodes": [...], "edges": [...]}

    def to_meta(self) -> TopologyMeta:
        """Summary without the (potentially large) flow graph."""
        return TopologyMeta(
            id=self.id,
            name=self.name,
            description=self.description,
            category=self.category,
            icon=self.icon,
            agents_used=self.agents_used,
            execution_style=self.execution_style,
        )

    def to_dict(self) -> Dict[str, Any]:
        """JSON-ready representation with enums flattened to their values."""
        policy = self.routing_policy
        payload: Dict[str, Any] = {
            "id": self.id,
            "name": self.name,
            "description": self.description,
            "category": self.category.value,
            "icon": self.icon,
            "agents_used": self.agents_used,
            "execution_style": self.execution_style.value,
            "routing_policy": {
                "strategy": policy.strategy.value,
                "primary_agent": policy.primary_agent,
                "sequence": policy.sequence,
                "classifier_hints": policy.classifier_hints,
            },
            "flow_graph": self.flow_graph,
        }
        return payload
+
+
+# ---------------------------------------------------------------------------
+# T1 β Default CrewAI Routing
+# ---------------------------------------------------------------------------
+
+_T1_FLOW_GRAPH: Dict[str, Any] = {
+ "nodes": [
+ {
+ "id": "user_request",
+ "type": "user",
+ "data": {
+ "label": "User Request",
+ "description": "Incoming task from user",
+ },
+ "position": {"x": 400, "y": 0},
+ },
+ {
+ "id": "router",
+ "type": "router",
+ "data": {
+ "label": "Task Router",
+ "description": "Classifies request type and dispatches to the best agent",
+ "model": "regex + heuristics",
+ },
+ "position": {"x": 400, "y": 100},
+ },
+ {
+ "id": "repo_explorer",
+ "type": "agent",
+ "data": {
+ "label": "Repo Explorer",
+ "model": "Haiku 4.5",
+ "mode": "read-only",
+ "tools": ["Glob", "Grep", "Read", "LS", "Bash(ro)"],
+ "description": "Searches and maps codebase structure",
+ },
+ "position": {"x": 0, "y": 250},
+ },
+ {
+ "id": "planner",
+ "type": "agent",
+ "data": {
+ "label": "Planner",
+ "model": "Sonnet 4.5",
+ "mode": "read-only",
+ "tools": ["Glob", "Grep", "Read", "Bash(ro)"],
+ "description": "Designs implementation plans and strategies",
+ },
+ "position": {"x": 160, "y": 250},
+ },
+ {
+ "id": "code_writer",
+ "type": "agent",
+ "data": {
+ "label": "Code Writer",
+ "model": "Sonnet 4.5",
+ "mode": "read-write",
+ "tools": ["Read", "Write", "Edit", "MultiEdit", "Bash", "Glob", "Grep"],
+ "description": "Implements code changes, creates files, runs tests",
+ },
+ "position": {"x": 320, "y": 250},
+ },
+ {
+ "id": "reviewer",
+ "type": "agent",
+ "data": {
+ "label": "Reviewer",
+ "model": "Sonnet 4.5",
+ "mode": "read-only",
+ "tools": ["Read", "Grep", "Glob", "Bash(git diff)"],
+ "description": "Reviews code for quality, security, and best practices",
+ },
+ "position": {"x": 480, "y": 250},
+ },
+ {
+ "id": "issue_manager",
+ "type": "agent",
+ "data": {
+ "label": "Issue Manager",
+ "model": "Sonnet 4.5",
+ "mode": "read-write",
+ "tools": ["GitHub API", "Read"],
+ "description": "Creates, updates, and manages GitHub issues",
+ },
+ "position": {"x": 640, "y": 250},
+ },
+ {
+ "id": "pr_manager",
+ "type": "agent",
+ "data": {
+ "label": "PR Manager",
+ "model": "Sonnet 4.5",
+ "mode": "git-ops",
+ "tools": ["Bash(git)", "Bash(gh)", "Read"],
+ "description": "Creates branches, commits, pushes, opens PRs",
+ },
+ "position": {"x": 800, "y": 250},
+ },
+ {
+ "id": "search_agent",
+ "type": "agent",
+ "data": {
+ "label": "Search Agent",
+ "model": "Sonnet 4.5",
+ "mode": "read-only",
+ "tools": ["WebSearch", "WebFetch", "Read"],
+ "description": "Researches external documentation and APIs",
+ },
+ "position": {"x": 160, "y": 400},
+ },
+ {
+ "id": "learning_agent",
+ "type": "agent",
+ "data": {
+ "label": "Learning Agent",
+ "model": "Sonnet 4.5",
+ "mode": "read-only",
+ "tools": ["WebSearch", "WebFetch", "Read"],
+ "description": "Explains concepts, generates tutorials",
+ },
+ "position": {"x": 320, "y": 400},
+ },
+ {
+ "id": "local_editor",
+ "type": "agent",
+ "data": {
+ "label": "Local Editor",
+ "model": "Sonnet 4.5",
+ "mode": "read-write",
+ "tools": ["Read", "Write", "Edit", "Glob"],
+ "description": "Edits local files without git operations",
+ },
+ "position": {"x": 480, "y": 400},
+ },
+ {
+ "id": "terminal_agent",
+ "type": "agent",
+ "data": {
+ "label": "Terminal Agent",
+ "model": "Sonnet 4.5",
+ "mode": "read-write",
+ "tools": ["Bash"],
+ "description": "Runs shell commands, manages environment",
+ },
+ "position": {"x": 640, "y": 400},
+ },
+ {
+ "id": "github_tools",
+ "type": "tool_group",
+ "data": {
+ "label": "GitHub Tools",
+ "tools": ["GitHub API", "Bash(gh)"],
+ "description": "GitHub REST/GraphQL API and CLI",
+ },
+ "position": {"x": 0, "y": 400},
+ },
+ {
+ "id": "local_tools",
+ "type": "tool_group",
+ "data": {
+ "label": "Local Tools",
+ "tools": ["Read", "Write", "Edit", "Bash", "Glob", "Grep"],
+ "description": "Filesystem and shell tools",
+ },
+ "position": {"x": 800, "y": 400},
+ },
+ {
+ "id": "output",
+ "type": "output",
+ "data": {
+ "label": "Result",
+ "description": "Response returned to user",
+ },
+ "position": {"x": 400, "y": 550},
+ },
+ ],
+ "edges": [
+ {"id": "e-user-router", "source": "user_request", "target": "router", "animated": True},
+ {"id": "e-router-explorer", "source": "router", "target": "repo_explorer", "label": "explore"},
+ {"id": "e-router-planner", "source": "router", "target": "planner", "label": "plan"},
+ {"id": "e-router-codewriter", "source": "router", "target": "code_writer", "label": "implement"},
+ {"id": "e-router-reviewer", "source": "router", "target": "reviewer", "label": "review"},
+ {"id": "e-router-issue", "source": "router", "target": "issue_manager", "label": "issue"},
+ {"id": "e-router-pr", "source": "router", "target": "pr_manager", "label": "pr"},
+ {"id": "e-router-search", "source": "router", "target": "search_agent", "label": "search"},
+ {"id": "e-router-learning", "source": "router", "target": "learning_agent", "label": "learn"},
+ {"id": "e-router-editor", "source": "router", "target": "local_editor", "label": "edit"},
+ {"id": "e-router-terminal", "source": "router", "target": "terminal_agent", "label": "terminal"},
+ {"id": "e-explorer-output", "source": "repo_explorer", "target": "output"},
+ {"id": "e-planner-output", "source": "planner", "target": "output"},
+ {"id": "e-codewriter-output", "source": "code_writer", "target": "output"},
+ {"id": "e-reviewer-output", "source": "reviewer", "target": "output"},
+ {"id": "e-issue-output", "source": "issue_manager", "target": "output"},
+ {"id": "e-pr-output", "source": "pr_manager", "target": "output"},
+ {"id": "e-search-output", "source": "search_agent", "target": "output"},
+ {"id": "e-learning-output", "source": "learning_agent", "target": "output"},
+ {"id": "e-editor-output", "source": "local_editor", "target": "output"},
+ {"id": "e-terminal-output", "source": "terminal_agent", "target": "output"},
+ {"id": "e-explorer-github", "source": "repo_explorer", "target": "github_tools", "type": "bidirectional", "animated": False},
+ {"id": "e-pr-github", "source": "pr_manager", "target": "github_tools", "type": "bidirectional", "animated": False},
+ {"id": "e-codewriter-local", "source": "code_writer", "target": "local_tools", "type": "bidirectional", "animated": False},
+ {"id": "e-terminal-local", "source": "terminal_agent", "target": "local_tools", "type": "bidirectional", "animated": False},
+ ],
+}
+
+T1_DEFAULT = Topology(
+    id="default",
+    name="Default (CrewAI Routing)",
+    description="Router dispatches to specialized agents based on task type",
+    category=TopologyCategory.system,
+    icon="\U0001f500",  # shuffle arrows
+    agents_used=[
+        "repo_explorer", "planner", "code_writer", "reviewer",
+        "issue_manager", "pr_manager", "search_agent",
+        "learning_agent", "local_editor", "terminal_agent",
+    ],
+    execution_style=ExecutionStyle.single_task,
+    routing_policy=RoutingPolicy(
+        strategy=RoutingStrategy.classify_and_dispatch,
+        classifier_hints=[],  # empty: system topology, never selected by keyword match
+    ),
+    flow_graph=_T1_FLOW_GRAPH,  # nodes/edges dict defined above
+)
+
+# ---------------------------------------------------------------------------
+# T2 β GitPilot Code (ReAct Loop + Subagents)
+# ---------------------------------------------------------------------------
+
+_T2_FLOW_GRAPH: Dict[str, Any] = {  # ReAct loop: one main agent, tool groups, five subagents
+    "nodes": [
+        {
+            "id": "user_request",
+            "type": "user",
+            "data": {"label": "User Request", "description": "Incoming task or feedback"},
+            "position": {"x": 400, "y": 0},
+        },
+        {
+            "id": "main_react_agent",
+            "type": "agent",
+            "data": {
+                "label": "Main ReAct Agent",
+                "model": "Opus 4.6",
+                "mode": "read-write",
+                "tools": ["ALL"],
+                "description": "Central agent running in a while(tool_use) loop. Reasons, acts, observes, repeats. Delegates complex subtasks to subagents.",
+            },
+            "position": {"x": 400, "y": 150},
+        },
+        {
+            "id": "todo_write",
+            "type": "tool",
+            "data": {
+                "label": "TodoWrite",
+                "tools": ["TodoWrite"],
+                "description": "Creates and tracks step-by-step TODO lists for complex tasks",
+            },
+            "position": {"x": 150, "y": 150},
+        },
+        {
+            "id": "fs_tools",
+            "type": "tool_group",
+            "data": {
+                "label": "File Tools",
+                "tools": ["Read", "Write", "Edit", "MultiEdit"],
+                "description": "Read, create, and edit files in the repository",
+            },
+            "position": {"x": 650, "y": 80},
+        },
+        {
+            "id": "search_tools",
+            "type": "tool_group",
+            "data": {
+                "label": "Search Tools",
+                "tools": ["Glob", "Grep", "LS"],
+                "description": "Find files by pattern, search contents, list directories",
+            },
+            "position": {"x": 650, "y": 160},
+        },
+        {
+            "id": "bash_tool",
+            "type": "tool",
+            "data": {
+                "label": "Bash",
+                "tools": ["Bash"],
+                "description": "Execute shell commands (git, npm, pytest, etc.)",
+            },
+            "position": {"x": 650, "y": 240},
+        },
+        {
+            "id": "web_tools",
+            "type": "tool_group",
+            "data": {
+                "label": "Web Tools",
+                "tools": ["WebSearch", "WebFetch"],
+                "description": "Search the web and fetch page contents",
+            },
+            "position": {"x": 150, "y": 240},
+        },
+        {
+            "id": "subagent_explore",
+            "type": "agent",
+            "data": {
+                "label": "Explore Subagent",
+                "model": "Haiku 4.5",
+                "mode": "read-only",
+                "tools": ["Glob", "Grep", "Read", "LS", "Bash(ro)"],
+                "description": "Fast, cheap codebase exploration. Returns concise summary without polluting main context.",
+            },
+            "position": {"x": 100, "y": 400},
+        },
+        {
+            "id": "subagent_plan",
+            "type": "agent",
+            "data": {
+                "label": "Plan Subagent",
+                "model": "Sonnet 4.5",
+                "mode": "read-only",
+                "tools": ["Glob", "Grep", "Read", "Bash(ro)"],
+                "description": "Researches codebase and designs implementation plans before execution.",
+            },
+            "position": {"x": 270, "y": 400},
+        },
+        {
+            "id": "subagent_review",
+            "type": "agent",
+            "data": {
+                "label": "Review Subagent",
+                "model": "Sonnet 4.5",
+                "mode": "read-only",
+                "tools": ["Read", "Grep", "Glob", "Bash(git diff)"],
+                "description": "Reviews code changes for security, quality, and best practices.",
+            },
+            "position": {"x": 440, "y": 400},
+        },
+        {
+            "id": "subagent_research",
+            "type": "agent",
+            "data": {
+                "label": "Research Subagent",
+                "model": "Sonnet 4.5",
+                "mode": "read-only",
+                "tools": ["WebSearch", "WebFetch", "Read"],
+                "description": "Gathers external knowledge from documentation, APIs, and examples.",
+            },
+            "position": {"x": 610, "y": 400},
+        },
+        {
+            "id": "subagent_gitops",
+            "type": "agent",
+            "data": {
+                "label": "GitOps Subagent",
+                "model": "Sonnet 4.5",
+                "mode": "git-ops",
+                "tools": ["Bash(git)", "Bash(gh)", "Read"],
+                "description": "Handles git operations: commit, push, create PR.",
+            },
+            "position": {"x": 780, "y": 400},
+        },
+        {
+            "id": "output",
+            "type": "output",
+            "data": {"label": "Result", "description": "Response returned to user (when loop ends)"},
+            "position": {"x": 400, "y": 550},
+        },
+    ],
+    "edges": [  # tool edges are bidirectional; each Task(...) edge has a paired return edge
+        {"id": "e-user-main", "source": "user_request", "target": "main_react_agent", "animated": True},
+        {"id": "e-main-todo", "source": "main_react_agent", "target": "todo_write", "type": "bidirectional"},
+        {"id": "e-main-fs", "source": "main_react_agent", "target": "fs_tools", "type": "bidirectional"},
+        {"id": "e-main-search", "source": "main_react_agent", "target": "search_tools", "type": "bidirectional"},
+        {"id": "e-main-bash", "source": "main_react_agent", "target": "bash_tool", "type": "bidirectional"},
+        {"id": "e-main-web", "source": "main_react_agent", "target": "web_tools", "type": "bidirectional"},
+        {"id": "e-main-explore", "source": "main_react_agent", "target": "subagent_explore", "label": "Task(explore)"},
+        {"id": "e-explore-main", "source": "subagent_explore", "target": "main_react_agent", "label": "summary", "animated": True},
+        {"id": "e-main-plan", "source": "main_react_agent", "target": "subagent_plan", "label": "Task(plan)"},
+        {"id": "e-plan-main", "source": "subagent_plan", "target": "main_react_agent", "label": "plan", "animated": True},
+        {"id": "e-main-review", "source": "main_react_agent", "target": "subagent_review", "label": "Task(review)"},
+        {"id": "e-review-main", "source": "subagent_review", "target": "main_react_agent", "label": "findings", "animated": True},
+        {"id": "e-main-research", "source": "main_react_agent", "target": "subagent_research","label": "Task(research)"},
+        {"id": "e-research-main", "source": "subagent_research", "target": "main_react_agent", "label": "info", "animated": True},
+        {"id": "e-main-gitops", "source": "main_react_agent", "target": "subagent_gitops", "label": "Task(gitops)"},
+        {"id": "e-gitops-main", "source": "subagent_gitops", "target": "main_react_agent", "label": "PR URL", "animated": True},
+        {"id": "e-main-output", "source": "main_react_agent", "target": "output", "label": "no tool calls = done"},
+    ],
+}
+
+T2_CLAUDE_CODE = Topology(
+    id="gitpilot_code",
+    name="GitPilot Code (ReAct + Subagents)",
+    description="Single main agent in a ReAct loop with on-demand subagents",
+    category=TopologyCategory.system,
+    icon="\U0001f9e0",  # brain
+    agents_used=[
+        "main_react_agent", "subagent_explore", "subagent_plan",
+        "subagent_review", "subagent_research", "subagent_gitops",
+    ],
+    execution_style=ExecutionStyle.react_loop,
+    routing_policy=RoutingPolicy(
+        strategy=RoutingStrategy.always_main_agent,
+        primary_agent="main_react_agent",  # every request enters through the main loop
+        classifier_hints=[],  # empty: system topology, never selected by keyword match
+    ),
+    flow_graph=_T2_FLOW_GRAPH,  # nodes/edges dict defined above
+)
+
+# ---------------------------------------------------------------------------
+# T3 β Feature Builder (5-agent pipeline)
+# ---------------------------------------------------------------------------
+
+_T3_FLOW_GRAPH: Dict[str, Any] = {  # strictly linear 5-agent pipeline, left to right
+    "nodes": [
+        {"id": "user_request", "type": "user", "data": {"label": "User Request", "description": "New feature or enhancement request"}, "position": {"x": 400, "y": 0}},
+        {"id": "explorer", "type": "agent", "data": {"label": "Explorer", "model": "Haiku 4.5", "mode": "read-only", "tools": ["Glob","Grep","Read","LS","Bash(ro)"], "description": "Maps codebase structure and discovers relevant files"}, "position": {"x": 100, "y": 150}},
+        {"id": "planner", "type": "agent", "data": {"label": "Planner", "model": "Sonnet 4.5", "mode": "read-only", "tools": ["Glob","Grep","Read","Bash(ro)"], "description": "Designs step-by-step implementation plan"}, "position": {"x": 250, "y": 150}},
+        {"id": "developer", "type": "agent", "data": {"label": "Developer", "model": "Sonnet 4.5", "mode": "read-write", "tools": ["Read","Write","Edit","MultiEdit","Bash","Glob","Grep"], "description": "Implements code changes and runs tests"}, "position": {"x": 400, "y": 150}},
+        {"id": "reviewer", "type": "agent", "data": {"label": "Reviewer", "model": "Sonnet 4.5", "mode": "read-only", "tools": ["Read","Grep","Glob","Bash(git diff)"], "description": "Reviews code for quality, security, and best practices"}, "position": {"x": 550, "y": 150}},
+        {"id": "git_agent", "type": "agent", "data": {"label": "Git Agent", "model": "Sonnet 4.5", "mode": "git-ops", "tools": ["Bash(git)","Bash(gh)","Read"], "description": "Creates branch, commits, pushes, opens PR"}, "position": {"x": 700, "y": 150}},
+        {"id": "output", "type": "output", "data": {"label": "PR Created", "description": "Feature implemented and PR opened"}, "position": {"x": 700, "y": 300}},
+    ],
+    "edges": [  # each edge hands the previous agent's artifact to the next stage
+        {"id": "e-user-exp", "source": "user_request", "target": "explorer", "animated": True},
+        {"id": "e-exp-plan", "source": "explorer", "target": "planner", "label": "analysis", "animated": True},
+        {"id": "e-plan-dev", "source": "planner", "target": "developer", "label": "plan", "animated": True},
+        {"id": "e-dev-rev", "source": "developer", "target": "reviewer", "label": "changes", "animated": True},
+        {"id": "e-rev-git", "source": "reviewer", "target": "git_agent", "label": "approved", "animated": True},
+        {"id": "e-git-output", "source": "git_agent", "target": "output", "label": "PR URL", "animated": True},
+    ],
+}
+
+T3_FEATURE_BUILDER = Topology(
+    id="feature_builder",
+    name="Feature Builder",
+    description="Full pipeline: explore > plan > implement > review > PR",
+    category=TopologyCategory.pipeline,
+    icon="\U0001f680",  # rocket
+    agents_used=["explorer", "planner", "developer", "reviewer", "git_agent"],
+    execution_style=ExecutionStyle.crew_pipeline,
+    routing_policy=RoutingPolicy(
+        strategy=RoutingStrategy.fixed_sequence,
+        sequence=["explorer", "planner", "developer", "reviewer", "git_agent"],
+        classifier_hints=[  # keywords that steer classify_message() toward this pipeline
+            "add", "create", "implement", "build", "new feature",
+            "endpoint", "component", "module", "integrate", "migration",
+            "refactor", "rewrite", "enhance", "upgrade",
+        ],
+    ),
+    flow_graph=_T3_FLOW_GRAPH,
+)
+
+# ---------------------------------------------------------------------------
+# T4 β Bug Hunter (4-agent pipeline)
+# ---------------------------------------------------------------------------
+
+_T4_FLOW_GRAPH: Dict[str, Any] = {  # four-stage hotfix pipeline (like T3 but with no planner stage)
+    "nodes": [
+        {"id": "user_request", "type": "user", "data": {"label": "Bug Report", "description": "Bug description or error report"}, "position": {"x": 400, "y": 0}},
+        {"id": "explorer", "type": "agent", "data": {"label": "Explorer", "model": "Haiku 4.5", "mode": "read-only", "tools": ["Glob","Grep","Read","LS","Bash(ro)"], "description": "Traces error patterns and locates root cause"}, "position": {"x": 175, "y": 150}},
+        {"id": "developer", "type": "agent", "data": {"label": "Developer", "model": "Sonnet 4.5", "mode": "read-write", "tools": ["Read","Write","Edit","MultiEdit","Bash","Glob","Grep"], "description": "Applies targeted fix and runs tests"}, "position": {"x": 350, "y": 150}},
+        {"id": "reviewer", "type": "agent", "data": {"label": "Reviewer", "model": "Sonnet 4.5", "mode": "read-only", "tools": ["Read","Grep","Glob","Bash(git diff)"], "description": "Verifies fix and checks for regressions"}, "position": {"x": 525, "y": 150}},
+        {"id": "git_agent", "type": "agent", "data": {"label": "Git Agent", "model": "Sonnet 4.5", "mode": "git-ops", "tools": ["Bash(git)","Bash(gh)","Read"], "description": "Commits fix, pushes, opens hotfix PR"}, "position": {"x": 700, "y": 150}},
+        {"id": "output", "type": "output", "data": {"label": "Hotfix PR", "description": "Bug fixed and hotfix PR opened"}, "position": {"x": 700, "y": 300}},
+    ],
+    "edges": [
+        {"id": "e-user-exp", "source": "user_request", "target": "explorer", "animated": True},
+        {"id": "e-exp-dev", "source": "explorer", "target": "developer", "label": "root cause", "animated": True},
+        {"id": "e-dev-rev", "source": "developer", "target": "reviewer", "label": "fix applied","animated": True},
+        {"id": "e-rev-git", "source": "reviewer", "target": "git_agent", "label": "verified", "animated": True},
+        {"id": "e-git-output", "source": "git_agent", "target": "output", "label": "PR URL", "animated": True},
+    ],
+}
+
+T4_BUG_HUNTER = Topology(
+    id="bug_hunter",
+    name="Bug Hunter",
+    description="Diagnose > fix > verify > ship hotfix",
+    category=TopologyCategory.pipeline,
+    icon="\U0001f41b",  # bug
+    agents_used=["explorer", "developer", "reviewer", "git_agent"],
+    execution_style=ExecutionStyle.crew_pipeline,
+    routing_policy=RoutingPolicy(
+        strategy=RoutingStrategy.fixed_sequence,
+        sequence=["explorer", "developer", "reviewer", "git_agent"],
+        classifier_hints=[  # keywords that steer classify_message() toward this pipeline
+            "fix", "bug", "error", "broken", "failing", "crash", "exception",
+            "debug", "traceback", "500", "403", "404", "timeout", "leak",
+            "regression", "hotfix", "patch", "not working", "tests failing",
+        ],
+    ),
+    flow_graph=_T4_FLOW_GRAPH,
+)
+
+# ---------------------------------------------------------------------------
+# T5 β Code Inspector (2-agent read-only)
+# ---------------------------------------------------------------------------
+
+_T5_FLOW_GRAPH: Dict[str, Any] = {  # read-only pipeline: both agents run in read-only mode
+    "nodes": [
+        {"id": "user_request", "type": "user", "data": {"label": "Review Request", "description": "Code review or audit request"}, "position": {"x": 300, "y": 0}},
+        {"id": "explorer", "type": "agent", "data": {"label": "Explorer", "model": "Haiku 4.5", "mode": "read-only", "tools": ["Glob","Grep","Read","LS","Bash(ro)"], "description": "Discovers modified files and gathers context"}, "position": {"x": 200, "y": 150}},
+        {"id": "reviewer", "type": "agent", "data": {"label": "Reviewer", "model": "Sonnet 4.5", "mode": "read-only", "tools": ["Read","Grep","Glob","Bash(git diff)"], "description": "Deep analysis: security, quality, performance"}, "position": {"x": 400, "y": 150}},
+        {"id": "output", "type": "output", "data": {"label": "Review Report", "description": "Structured review with severity levels"}, "position": {"x": 400, "y": 300}},
+    ],
+    "edges": [
+        {"id": "e-user-exp", "source": "user_request", "target": "explorer", "animated": True},
+        {"id": "e-exp-rev", "source": "explorer", "target": "reviewer", "label": "scope + context", "animated": True},
+        {"id": "e-rev-output", "source": "reviewer", "target": "output", "label": "report", "animated": True},
+    ],
+}
+
+T5_CODE_INSPECTOR = Topology(
+    id="code_inspector",
+    name="Code Inspector",
+    description="Read-only analysis: explore changes > review for issues",
+    category=TopologyCategory.pipeline,
+    icon="\U0001f50d",  # magnifying glass
+    agents_used=["explorer", "reviewer"],
+    execution_style=ExecutionStyle.crew_pipeline,
+    routing_policy=RoutingPolicy(
+        strategy=RoutingStrategy.fixed_sequence,
+        sequence=["explorer", "reviewer"],
+        classifier_hints=[  # keywords that steer classify_message() toward this pipeline
+            "review", "audit", "security", "inspect", "analyze code",
+            "vulnerabilities", "quality", "what changed", "diff",
+            "pre-merge", "check quality", "code smell", "coverage",
+        ],
+    ),
+    flow_graph=_T5_FLOW_GRAPH,
+)
+
+# ---------------------------------------------------------------------------
+# T6 β Architect Mode (2-agent read-only)
+# ---------------------------------------------------------------------------
+
+_T6_FLOW_GRAPH: Dict[str, Any] = {  # read-only research + planning; output is a plan, not code changes
+    "nodes": [
+        {"id": "user_request", "type": "user", "data": {"label": "Architecture Question", "description": "Design or strategy question"}, "position": {"x": 300, "y": 0}},
+        {"id": "explorer", "type": "agent", "data": {"label": "Explorer", "model": "Haiku 4.5", "mode": "read-only", "tools": ["Glob","Grep","Read","LS","Bash(ro)"], "description": "Deep codebase research: structure, deps, patterns"}, "position": {"x": 200, "y": 150}},
+        {"id": "planner", "type": "agent", "data": {"label": "Planner", "model": "Sonnet 4.5", "mode": "read-only", "tools": ["Glob","Grep","Read","Bash(ro)"], "description": "Synthesizes findings into actionable plan"}, "position": {"x": 400, "y": 150}},
+        {"id": "output", "type": "output", "data": {"label": "Implementation Plan", "description": "Plan awaiting user approval before execution"}, "position": {"x": 400, "y": 300}},
+    ],
+    "edges": [
+        {"id": "e-user-exp", "source": "user_request", "target": "explorer", "animated": True},
+        {"id": "e-exp-plan", "source": "explorer", "target": "planner", "label": "deep analysis", "animated": True},
+        {"id": "e-plan-output", "source": "planner", "target": "output", "label": "plan + approval", "animated": True},
+    ],
+}
+
+T6_ARCHITECT_MODE = Topology(
+    id="architect_mode",
+    name="Architect Mode",
+    description="Research codebase > design plan (no code changes)",
+    category=TopologyCategory.pipeline,
+    icon="\U0001f4d0",  # triangular ruler
+    agents_used=["explorer", "planner"],
+    execution_style=ExecutionStyle.crew_pipeline,
+    routing_policy=RoutingPolicy(
+        strategy=RoutingStrategy.fixed_sequence,
+        sequence=["explorer", "planner"],
+        classifier_hints=[  # keywords that steer classify_message() toward this pipeline
+            "plan", "design", "architect", "strategy", "how should",
+            "approach", "migration", "refactor plan", "proposal",
+            "trade-offs", "options", "recommend", "evaluate",
+        ],
+    ),
+    flow_graph=_T6_FLOW_GRAPH,
+)
+
+# ---------------------------------------------------------------------------
+# T7 β Quick Fix (2-agent fast path)
+# ---------------------------------------------------------------------------
+
+_T7_FLOW_GRAPH: Dict[str, Any] = {  # two-stage fast path: no explorer, planner, or reviewer
+    "nodes": [
+        {"id": "user_request", "type": "user", "data": {"label": "Quick Edit", "description": "Trivial change request"}, "position": {"x": 300, "y": 0}},
+        {"id": "developer", "type": "agent", "data": {"label": "Developer", "model": "Sonnet 4.5", "mode": "read-write", "tools": ["Read","Write","Edit","MultiEdit","Bash","Glob","Grep"], "description": "Makes targeted change, verifies with quick test"}, "position": {"x": 200, "y": 150}},
+        {"id": "git_agent", "type": "agent", "data": {"label": "Git Agent", "model": "Sonnet 4.5", "mode": "git-ops", "tools": ["Bash(git)","Bash(gh)","Read"], "description": "Commits and pushes the change"}, "position": {"x": 400, "y": 150}},
+        {"id": "output", "type": "output", "data": {"label": "Committed & Pushed", "description": "Change committed and pushed"}, "position": {"x": 400, "y": 300}},
+    ],
+    "edges": [
+        {"id": "e-user-dev", "source": "user_request", "target": "developer", "animated": True},
+        {"id": "e-dev-git", "source": "developer", "target": "git_agent", "label": "changes ready", "animated": True},
+        {"id": "e-git-output", "source": "git_agent", "target": "output", "label": "pushed", "animated": True},
+    ],
+}
+
+T7_QUICK_FIX = Topology(
+    id="quick_fix",
+    name="Quick Fix",
+    description="Minimal pipeline: edit > commit > done",
+    category=TopologyCategory.pipeline,
+    icon="\u26a1",  # lightning bolt
+    agents_used=["developer", "git_agent"],
+    execution_style=ExecutionStyle.crew_pipeline,
+    routing_policy=RoutingPolicy(
+        strategy=RoutingStrategy.fixed_sequence,
+        sequence=["developer", "git_agent"],
+        classifier_hints=[  # keywords that steer classify_message() toward this pipeline
+            "typo", "rename", "update readme", "config", "small change",
+            "one-liner", "documentation", "comment", "formatting",
+            "version bump", "update dependency", "quick",
+        ],
+    ),
+    flow_graph=_T7_FLOW_GRAPH,
+)
+
+
+# ===========================================================================
+# Registry singleton
+# ===========================================================================
+
+TOPOLOGY_REGISTRY: Dict[str, Topology] = {  # keyed by Topology.id; insertion order drives list_topologies() ordering
+    t.id: t
+    for t in [
+        T1_DEFAULT,
+        T2_CLAUDE_CODE,
+        T3_FEATURE_BUILDER,
+        T4_BUG_HUNTER,
+        T5_CODE_INSPECTOR,
+        T6_ARCHITECT_MODE,
+        T7_QUICK_FIX,
+    ]
+}
+
+DEFAULT_TOPOLOGY_ID = "default"  # id of T1_DEFAULT; used as the universal fallback
+
+
+def list_topologies() -> List[Dict[str, Any]]:
+ """Return lightweight summaries of all registered topologies."""
+ result = []
+ for t in TOPOLOGY_REGISTRY.values():
+ meta = t.to_meta()
+ result.append({
+ "id": meta.id,
+ "name": meta.name,
+ "description": meta.description,
+ "category": meta.category.value,
+ "icon": meta.icon,
+ "agents_used": meta.agents_used,
+ "execution_style": meta.execution_style.value,
+ })
+ return result
+
+
+def get_topology(topology_id: str) -> Optional[Topology]:
+ """Look up a topology by ID. Returns None if not found."""
+ return TOPOLOGY_REGISTRY.get(topology_id)
+
+
+def get_topology_graph(topology_id: Optional[str] = None) -> Dict[str, Any]:
+ """Return the flow graph for a given topology.
+
+ If *topology_id* is ``None`` or unrecognised, falls back to ``"default"``.
+ The returned dict is the same shape as the legacy ``get_flow_definition()``
+ output so the frontend ``FlowViewer`` can consume it without changes.
+ """
+ tid = topology_id or DEFAULT_TOPOLOGY_ID
+ topo = TOPOLOGY_REGISTRY.get(tid)
+ if topo is None:
+ topo = TOPOLOGY_REGISTRY[DEFAULT_TOPOLOGY_ID]
+
+ # Build the response β include topology metadata alongside the graph
+ # so the frontend can display the name/description even without a
+ # separate metadata call.
+ graph = topo.flow_graph.copy()
+
+ # For backward compat with the legacy FlowViewer which expects flat
+ # ``nodes`` with ``label``/``type``/``description`` keys, we normalise
+ # the new richer node format. New FlowViewer uses the ``data`` sub-key
+ # directly but old code reads top-level keys.
+ legacy_nodes = []
+ for n in graph.get("nodes", []):
+ ln = dict(n) # shallow copy
+ # Hoist data.label / data.description to top level for legacy compat
+ d = n.get("data", {})
+ ln.setdefault("label", d.get("label", n["id"]))
+ ln.setdefault("description", d.get("description", ""))
+ legacy_nodes.append(ln)
+
+ return {
+ "topology_id": topo.id,
+ "topology_name": topo.name,
+ "topology_icon": topo.icon,
+ "topology_description": topo.description,
+ "execution_style": topo.execution_style.value,
+ "nodes": legacy_nodes,
+ "edges": graph.get("edges", []),
+ }
+
+
+# ===========================================================================
+# Topology classifier β keyword-based auto-detection
+# ===========================================================================
+
+@dataclass
+class ClassificationResult:
+ """Result of classifying a user message against topology hints."""
+ recommended: str
+ confidence: float
+ alternatives: List[Dict[str, Any]]
+
+ def to_dict(self) -> Dict[str, Any]:
+ return {
+ "recommended_topology": self.recommended,
+ "confidence": round(self.confidence, 3),
+ "alternatives": self.alternatives,
+ }
+
+
+def classify_message(message: str) -> ClassificationResult:
+ """Classify a user message and recommend the best topology.
+
+ Uses a two-pass approach:
+ 1. Keyword hit scoring against each topology's ``classifier_hints``.
+ 2. Tie-breaking heuristics (message length, question marks, etc.).
+
+ System topologies (T1, T2) act as fallbacks β they are only recommended
+ when no pipeline topology scores above the confidence threshold.
+ """
+ msg_lower = message.lower().strip()
+ scores: List[Tuple[str, float]] = []
+
+ for tid, topo in TOPOLOGY_REGISTRY.items():
+ hints = topo.routing_policy.classifier_hints
+ if not hints:
+ continue # system topologies have no hints
+
+ hit_count = 0
+ for hint in hints:
+ # Match whole-word (ish) β allow the hint to appear as a substring
+ # but prefer word boundaries.
+ pattern = r"(?:^|\W)" + re.escape(hint) + r"(?:\W|$)"
+ if re.search(pattern, msg_lower):
+ hit_count += 1
+
+ if hit_count > 0:
+ # Normalise: score = hits / total_hints, capped at 1.0
+ raw_score = min(hit_count / max(len(hints), 1), 1.0)
+ # Boost by number of distinct hits so more-specific matches win
+ boosted = raw_score * (1 + 0.1 * min(hit_count, 5))
+ scores.append((tid, min(boosted, 1.0)))
+
+ # Sort descending by score
+ scores.sort(key=lambda x: x[1], reverse=True)
+
+ if not scores or scores[0][1] < 0.05:
+ # Nothing matched β fall back to default
+ return ClassificationResult(
+ recommended="default",
+ confidence=0.5,
+ alternatives=[
+ {"id": "gitpilot_code", "confidence": 0.45},
+ ],
+ )
+
+ best_id, best_score = scores[0]
+ alternatives = [
+ {"id": tid, "confidence": round(sc, 3)}
+ for tid, sc in scores[1:4]
+ ]
+
+ # Always include the two system topologies as alternatives if not already present
+ present_ids = {best_id} | {a["id"] for a in alternatives}
+ for sys_id, sys_conf in [("default", 0.3), ("gitpilot_code", 0.35)]:
+ if sys_id not in present_ids:
+ alternatives.append({"id": sys_id, "confidence": sys_conf})
+
+ return ClassificationResult(
+ recommended=best_id,
+ confidence=round(best_score, 3),
+ alternatives=alternatives,
+ )
+
+
+# ===========================================================================
+# User preference persistence
+# ===========================================================================
+
+_TOPOLOGY_PREF_KEY = "active_topology"
+
+
+def get_saved_topology_preference() -> Optional[str]:
+ """Read the user's saved topology preference from settings file."""
+ import json
+ from .settings import CONFIG_DIR
+ pref_file = CONFIG_DIR / "topology_pref.json"
+ if pref_file.exists():
+ try:
+ data = json.loads(pref_file.read_text("utf-8"))
+ return data.get(_TOPOLOGY_PREF_KEY)
+ except Exception:
+ pass
+ return None
+
+
+def save_topology_preference(topology_id: str) -> None:
+ """Persist the user's selected topology preference."""
+ import json
+ from .settings import CONFIG_DIR
+ CONFIG_DIR.mkdir(parents=True, exist_ok=True)
+ pref_file = CONFIG_DIR / "topology_pref.json"
+ pref_file.write_text(
+ json.dumps({_TOPOLOGY_PREF_KEY: topology_id}, indent=2),
+ "utf-8",
+ )
diff --git a/gitpilot/use_case.py b/gitpilot/use_case.py
new file mode 100644
index 0000000000000000000000000000000000000000..036e9136bff476a56f0e67edfdf04d48403839ed
--- /dev/null
+++ b/gitpilot/use_case.py
@@ -0,0 +1,407 @@
+# gitpilot/use_case.py
+"""Use Case manager — guided requirement clarification and spec generation.
+
+Non-destructive, additive feature. Stores use cases under:
+ ~/.gitpilot/workspaces/{owner}/{repo}/.gitpilot/context/use_cases/
+
+Each use case is a JSON file with structured spec + message history.
+"""
+from __future__ import annotations
+
+import json
+import logging
+import time
+import uuid
+from dataclasses import asdict, dataclass, field
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+logger = logging.getLogger(__name__)
+
+
+# ---------------------------------------------------------------------------
+# Data classes
+# ---------------------------------------------------------------------------
@dataclass
class UseCaseSpec:
    """Structured spec extracted from use-case conversation."""
    # Short name for the use case (seeded from UseCase.title at creation).
    title: str = ""
    # One-or-two sentence description of what is being built.
    summary: str = ""
    # The problem this use case solves.
    problem: str = ""
    # Target users / personas, free text.
    users: str = ""
    # Functional requirements (bullet items, deduplicated on insert).
    requirements: List[str] = field(default_factory=list)
    # How to verify the feature works (bullet items, deduplicated on insert).
    acceptance_criteria: List[str] = field(default_factory=list)
    # Technical or business constraints (deduplicated on insert).
    constraints: List[str] = field(default_factory=list)
    # Points still to be clarified with the user.
    open_questions: List[str] = field(default_factory=list)
    # Free-form notes (seeded from create_use_case's initial_notes).
    notes: str = ""
+
+
@dataclass
class UseCaseMessage:
    """One turn of the guided use-case conversation."""
    role: str  # "user" or "assistant"
    content: str  # message text; assistant turns contain markdown
    ts: str = ""  # ISO-8601 UTC timestamp, as produced by UseCaseManager._now()
+
@dataclass
class UseCase:
    """A stored use case: identity, lifecycle flags, spec, and chat history."""
    use_case_id: str
    title: str
    created_at: str
    updated_at: str
    is_active: bool = False
    is_finalized: bool = False
    spec: UseCaseSpec = field(default_factory=UseCaseSpec)
    messages: List[UseCaseMessage] = field(default_factory=list)

    def to_dict(self) -> dict:
        """Full JSON-serializable representation (spec and messages nested)."""
        return asdict(self)

    def to_summary(self) -> dict:
        """Lightweight listing entry: metadata only, no spec or messages."""
        summary_keys = (
            "use_case_id",
            "title",
            "created_at",
            "updated_at",
            "is_active",
            "is_finalized",
        )
        return {key: getattr(self, key) for key in summary_keys}
+
+
+# ---------------------------------------------------------------------------
+# Guided assistant prompts
+# ---------------------------------------------------------------------------
+GUIDED_SYSTEM_PROMPT = """\
+You are a requirements analyst helping clarify a software use case.
+Your job is to ask structured questions and extract a clear spec.
+
+After each user message, do TWO things:
+1. Respond conversationally (acknowledge, ask follow-up questions)
+2. Update the structured spec with any new information
+
+Focus on extracting:
+- Summary: what is being built
+- Problem: what problem it solves
+- Users/Personas: who will use it
+- Requirements: functional requirements (bullet list)
+- Acceptance Criteria: how to verify it works (bullet list)
+- Constraints: technical or business constraints
+- Open Questions: anything still unclear
+
+Be concise but thorough. Ask one or two questions at a time.
+"""
+
+INITIAL_ASSISTANT_MESSAGE = (
+ "Welcome! Let's define this use case together.\n\n"
+ "To get started, could you describe:\n"
+ "1. **What** you want to build (high-level summary)\n"
+ "2. **Who** will use it (target users)\n"
+ "3. **Why** it's needed (the problem it solves)\n\n"
+ "You can paste meeting notes, transcripts, or just describe it in your own words."
+)
+
+
+# ---------------------------------------------------------------------------
+# Use Case Manager
+# ---------------------------------------------------------------------------
+class UseCaseManager:
+ """Manages use cases stored as JSON files."""
+
+ def __init__(self, workspace_path: Path):
+ self.workspace_path = workspace_path
+ self.use_cases_dir = workspace_path / ".gitpilot" / "context" / "use_cases"
+
+ def _ensure_dir(self):
+ self.use_cases_dir.mkdir(parents=True, exist_ok=True)
+
+ def _uc_path(self, use_case_id: str) -> Path:
+ return self.use_cases_dir / f"{use_case_id}.json"
+
+ def _now(self) -> str:
+ return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
+
+ # ------------------------------------------------------------------
+ # CRUD
+ # ------------------------------------------------------------------
+ def list_use_cases(self) -> List[dict]:
+ """Return summary list of all use cases."""
+ self._ensure_dir()
+ results = []
+ for f in sorted(self.use_cases_dir.glob("*.json")):
+ try:
+ data = json.loads(f.read_text(encoding="utf-8"))
+ results.append({
+ "use_case_id": data.get("use_case_id", f.stem),
+ "title": data.get("title", "(untitled)"),
+ "created_at": data.get("created_at", ""),
+ "updated_at": data.get("updated_at", ""),
+ "is_active": data.get("is_active", False),
+ "is_finalized": data.get("is_finalized", False),
+ })
+ except Exception:
+ logger.warning("Skipping corrupt use case: %s", f)
+ return results
+
+ def create_use_case(self, title: str = "New Use Case", initial_notes: str = "") -> UseCase:
+ """Create a new use case with initial assistant message."""
+ self._ensure_dir()
+ uc_id = uuid.uuid4().hex[:12]
+ now = self._now()
+
+ uc = UseCase(
+ use_case_id=uc_id,
+ title=title,
+ created_at=now,
+ updated_at=now,
+ spec=UseCaseSpec(title=title, notes=initial_notes),
+ messages=[
+ UseCaseMessage(role="assistant", content=INITIAL_ASSISTANT_MESSAGE, ts=now),
+ ],
+ )
+ self._save(uc)
+ return uc
+
+ def get_use_case(self, use_case_id: str) -> Optional[UseCase]:
+ """Load a use case by ID."""
+ path = self._uc_path(use_case_id)
+ if not path.exists():
+ return None
+ try:
+ data = json.loads(path.read_text(encoding="utf-8"))
+ return self._from_dict(data)
+ except Exception as e:
+ logger.warning("Failed to load use case %s: %s", use_case_id, e)
+ return None
+
    def chat(self, use_case_id: str, user_message: str) -> Optional[UseCase]:
        """Process a user message and return updated use case with assistant response.

        This is the guided chat: we parse the user message, update the spec,
        and generate an assistant response with follow-up questions.  The
        whole exchange is rule-based — no LLM is called here.

        Returns None when the use case ID is unknown.
        """
        uc = self.get_use_case(use_case_id)
        if not uc:
            return None

        # One timestamp is shared by the user message, the assistant reply,
        # and updated_at, so the exchange sorts as a single event.
        now = self._now()

        # Add user message
        uc.messages.append(UseCaseMessage(role="user", content=user_message, ts=now))

        # Parse and update spec from user message (heuristic keyword matching)
        self._update_spec_from_message(uc.spec, user_message)

        # Generate assistant response
        response = self._generate_response(uc)
        uc.messages.append(UseCaseMessage(role="assistant", content=response, ts=now))

        uc.updated_at = now
        self._save(uc)
        return uc
+
+ def finalize(self, use_case_id: str) -> Optional[UseCase]:
+ """Mark a use case as finalized and active, export markdown."""
+ uc = self.get_use_case(use_case_id)
+ if not uc:
+ return None
+
+ now = self._now()
+
+ # Deactivate all others
+ for f in self.use_cases_dir.glob("*.json"):
+ try:
+ data = json.loads(f.read_text(encoding="utf-8"))
+ if data.get("is_active"):
+ data["is_active"] = False
+ f.write_text(json.dumps(data, indent=2), encoding="utf-8")
+ except Exception:
+ pass
+
+ # Mark this one as active + finalized
+ uc.is_active = True
+ uc.is_finalized = True
+ uc.updated_at = now
+ self._save(uc)
+
+ # Export markdown
+ self._export_markdown(uc)
+
+ return uc
+
+ def get_active_use_case(self) -> Optional[UseCase]:
+ """Return the currently active use case, if any."""
+ self._ensure_dir()
+ for f in self.use_cases_dir.glob("*.json"):
+ try:
+ data = json.loads(f.read_text(encoding="utf-8"))
+ if data.get("is_active"):
+ return self._from_dict(data)
+ except Exception:
+ continue
+ return None
+
+ # ------------------------------------------------------------------
+ # Internal helpers
+ # ------------------------------------------------------------------
+ def _save(self, uc: UseCase):
+ path = self._uc_path(uc.use_case_id)
+ path.write_text(json.dumps(uc.to_dict(), indent=2), encoding="utf-8")
+
    def _from_dict(self, data: dict) -> UseCase:
        """Rehydrate a UseCase from its JSON dict, tolerating missing keys.

        Every field falls back to an empty default, so files written by an
        older version (or hand-edited) still load instead of raising.
        """
        spec_data = data.get("spec", {})
        spec = UseCaseSpec(
            title=spec_data.get("title", ""),
            summary=spec_data.get("summary", ""),
            problem=spec_data.get("problem", ""),
            users=spec_data.get("users", ""),
            requirements=spec_data.get("requirements", []),
            acceptance_criteria=spec_data.get("acceptance_criteria", []),
            constraints=spec_data.get("constraints", []),
            open_questions=spec_data.get("open_questions", []),
            notes=spec_data.get("notes", ""),
        )
        messages = [
            UseCaseMessage(
                role=m.get("role", "user"),
                content=m.get("content", ""),
                ts=m.get("ts", ""),
            )
            for m in data.get("messages", [])
        ]
        return UseCase(
            use_case_id=data.get("use_case_id", ""),
            title=data.get("title", ""),
            created_at=data.get("created_at", ""),
            updated_at=data.get("updated_at", ""),
            is_active=data.get("is_active", False),
            is_finalized=data.get("is_finalized", False),
            spec=spec,
            messages=messages,
        )
+
+ def _update_spec_from_message(self, spec: UseCaseSpec, message: str):
+ """Parse user message and update spec fields heuristically."""
+ msg_lower = message.lower()
+ lines = [l.strip() for l in message.split("\n") if l.strip()]
+
+ for line in lines:
+ ll = line.lower()
+
+ # Detect labeled sections
+ if ll.startswith("summary:"):
+ spec.summary = line.split(":", 1)[1].strip()
+ elif ll.startswith("problem:"):
+ spec.problem = line.split(":", 1)[1].strip()
+ elif ll.startswith("users:") or ll.startswith("personas:"):
+ spec.users = line.split(":", 1)[1].strip()
+ elif ll.startswith("notes:"):
+ spec.notes = line.split(":", 1)[1].strip()
+ elif ll.startswith("constraint:") or ll.startswith("constraints:"):
+ val = line.split(":", 1)[1].strip()
+ if val and val not in spec.constraints:
+ spec.constraints.append(val)
+ elif ll.startswith("- ") or ll.startswith("* "):
+ # Bullet items β classify by context
+ item = line[2:].strip()
+ if not item:
+ continue
+ # If it looks like acceptance criteria
+ if any(kw in item.lower() for kw in ["should", "must", "verify", "test", "given", "when", "then"]):
+ if item not in spec.acceptance_criteria:
+ spec.acceptance_criteria.append(item)
+ else:
+ if item not in spec.requirements:
+ spec.requirements.append(item)
+
+ # If no summary yet and message is substantial, use first sentence
+ if not spec.summary and len(message) > 20:
+ first_sentence = message.split(".")[0].strip()
+ if len(first_sentence) > 10:
+ spec.summary = first_sentence[:200]
+
+ def _generate_response(self, uc: UseCase) -> str:
+ """Generate a guided assistant response based on current spec state."""
+ spec = uc.spec
+ missing = []
+
+ if not spec.summary:
+ missing.append("a **summary** of what you're building")
+ if not spec.problem:
+ missing.append("the **problem** this solves")
+ if not spec.users:
+ missing.append("the **target users/personas**")
+ if not spec.requirements:
+ missing.append("**functional requirements** (as bullet points)")
+ if not spec.acceptance_criteria:
+ missing.append("**acceptance criteria** (how to verify it works)")
+
+ if missing:
+ items = "\n".join(f"- {m}" for m in missing[:3])
+ return (
+ f"Thanks for the details! I've updated the spec preview.\n\n"
+ f"To make the spec more complete, could you provide:\n{items}\n\n"
+ f"You can paste structured info or just describe it naturally."
+ )
+
+ # Spec is reasonably complete
+ if spec.open_questions:
+ q_list = "\n".join(f"- {q}" for q in spec.open_questions[:3])
+ return (
+ f"The spec is taking shape nicely. There are some open questions:\n{q_list}\n\n"
+ f"Would you like to address these, or shall we **Finalize** the use case?"
+ )
+
+ return (
+ "The spec looks fairly complete! Here's what we have:\n\n"
+ f"**Summary:** {spec.summary}\n"
+ f"**Requirements:** {len(spec.requirements)} items\n"
+ f"**Acceptance Criteria:** {len(spec.acceptance_criteria)} items\n\n"
+ "You can add more details or click **Finalize** to save this as the active use case."
+ )
+
    def _export_markdown(self, uc: UseCase):
        """Export use case as a markdown file.

        Writes ``{use_case_id}.md`` next to the JSON file.  Section blocks
        (summary, problem, users, requirements, acceptance criteria,
        constraints, open questions, notes) are emitted only when non-empty.
        """
        spec = uc.spec
        lines = [
            f"# Use Case: {spec.title or uc.title}",
            "",
            f"**ID:** {uc.use_case_id}",
            f"**Created:** {uc.created_at}",
            # updated_at was just stamped by finalize(), hence "Finalized".
            f"**Finalized:** {uc.updated_at}",
            f"**Status:** {'Active' if uc.is_active else 'Inactive'}",
            "",
        ]

        if spec.summary:
            lines.extend(["## Summary", "", spec.summary, ""])
        if spec.problem:
            lines.extend(["## Problem", "", spec.problem, ""])
        if spec.users:
            lines.extend(["## Users / Personas", "", spec.users, ""])
        if spec.requirements:
            lines.extend(["## Requirements", ""])
            for r in spec.requirements:
                lines.append(f"- {r}")
            lines.append("")
        if spec.acceptance_criteria:
            lines.extend(["## Acceptance Criteria", ""])
            for ac in spec.acceptance_criteria:
                lines.append(f"- {ac}")
            lines.append("")
        if spec.constraints:
            lines.extend(["## Constraints", ""])
            for c in spec.constraints:
                lines.append(f"- {c}")
            lines.append("")
        if spec.open_questions:
            lines.extend(["## Open Questions", ""])
            for q in spec.open_questions:
                lines.append(f"- {q}")
            lines.append("")
        if spec.notes:
            lines.extend(["## Notes", "", spec.notes, ""])

        md_path = self.use_cases_dir / f"{uc.use_case_id}.md"
        md_path.write_text("\n".join(lines), encoding="utf-8")
        logger.info("Exported use case markdown: %s", md_path)
diff --git a/gitpilot/version.py b/gitpilot/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae7362549b3c09d7077c3159fd4e5f35ab0e2292
--- /dev/null
+++ b/gitpilot/version.py
@@ -0,0 +1 @@
+__version__ = "0.1.3"
diff --git a/gitpilot/vision.py b/gitpilot/vision.py
new file mode 100644
index 0000000000000000000000000000000000000000..e1ca2d1d66519e509566c7104469a9d04c0b3a4b
--- /dev/null
+++ b/gitpilot/vision.py
@@ -0,0 +1,298 @@
+# gitpilot/vision.py
+"""Vision & image analysis for GitPilot.
+
+Uses multimodal LLM capabilities to analyse screenshots, architecture
+diagrams, error images, and design mockups. Supports multiple providers:
+
+- **OpenAI** (GPT-4o, GPT-4o-mini) — via base64 image in messages
+- **Anthropic** (Claude) — via base64 image in messages
+- **Ollama** (LLaVA, etc.) — local multimodal models
+
+The module reads images, encodes them, and sends them alongside a
+text prompt to the configured LLM provider.
+"""
+from __future__ import annotations
+
+import base64
+import logging
+import mimetypes
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+import httpx
+
+logger = logging.getLogger(__name__)
+
+MAX_IMAGE_SIZE_BYTES = 20 * 1024 * 1024 # 20 MB
+SUPPORTED_FORMATS = {".png", ".jpg", ".jpeg", ".gif", ".webp", ".bmp", ".svg"}
+
+
@dataclass
class ImageAnalysisResult:
    """Result of an image analysis."""

    # Model-produced description / answer text.
    description: str
    # high | medium | low; never changed by this module after construction.
    confidence: str = "high"
    # Extra context (model name, image path(s)); None is normalized to {}.
    metadata: Optional[Dict[str, Any]] = None

    def __post_init__(self):
        # Avoid a shared mutable default: each instance gets its own dict.
        if self.metadata is None:
            self.metadata = {}

    def to_dict(self) -> Dict[str, Any]:
        """JSON-serializable representation of the result."""
        return {
            "description": self.description,
            "confidence": self.confidence,
            "metadata": self.metadata,
        }
+
+
def _encode_image(image_path: Path) -> tuple:
    """Read and base64-encode an image. Returns (base64_str, mime_type).

    Raises FileNotFoundError for a missing path, ValueError for an
    unsupported extension or a file exceeding MAX_IMAGE_SIZE_BYTES.
    Falls back to "image/png" when the MIME type cannot be guessed.
    """
    if not image_path.exists():
        raise FileNotFoundError(f"Image not found: {image_path}")

    ext = image_path.suffix.lower()
    if ext not in SUPPORTED_FORMATS:
        raise ValueError(f"Unsupported image format: {ext}")

    byte_count = image_path.stat().st_size
    if byte_count > MAX_IMAGE_SIZE_BYTES:
        raise ValueError(f"Image too large: {byte_count} bytes (max {MAX_IMAGE_SIZE_BYTES})")

    mime = mimetypes.guess_type(str(image_path))[0] or "image/png"
    return base64.b64encode(image_path.read_bytes()).decode("utf-8"), mime
+
+
+class VisionAnalyzer:
+ """Analyze images using multimodal LLM capabilities.
+
+ Usage::
+
+ analyzer = VisionAnalyzer(provider="openai", api_key="sk-...")
+ result = await analyzer.analyze_image(
+ Path("screenshot.png"),
+ prompt="Describe this UI and identify any bugs",
+ )
+ """
+
+ def __init__(
+ self,
+ provider: str = "openai",
+ api_key: Optional[str] = None,
+ model: Optional[str] = None,
+ base_url: Optional[str] = None,
+ ) -> None:
+ self.provider = provider.lower()
+ self.api_key = api_key
+ self.model = model or self._default_model()
+ self.base_url = base_url
+
+ def _default_model(self) -> str:
+ defaults = {
+ "openai": "gpt-4o",
+ "claude": "claude-sonnet-4-5-20250929",
+ "ollama": "llava",
+ }
+ return defaults.get(self.provider, "gpt-4o")
+
+ async def analyze_image(
+ self,
+ image_path: Path,
+ prompt: str = "Describe this image in detail.",
+ ) -> ImageAnalysisResult:
+ """Analyze a single image with a text prompt."""
+ b64, mime = _encode_image(image_path)
+
+ if self.provider == "openai":
+ text = await self._call_openai(b64, mime, prompt)
+ elif self.provider in ("claude", "anthropic"):
+ text = await self._call_anthropic(b64, mime, prompt)
+ elif self.provider == "ollama":
+ text = await self._call_ollama(b64, prompt)
+ else:
+ raise ValueError(f"Unsupported vision provider: {self.provider}")
+
+ return ImageAnalysisResult(
+ description=text,
+ metadata={"model": self.model, "image": str(image_path)},
+ )
+
    async def compare_screenshots(
        self,
        before: Path,
        after: Path,
        prompt: str = "Compare these two screenshots and describe the differences.",
    ) -> ImageAnalysisResult:
        """Compare two screenshots and describe differences.

        OpenAI and Anthropic get both images in a single multi-image
        request; any other provider falls back to two independent analyses
        whose descriptions are concatenated.
        """
        b64_before, mime_before = _encode_image(before)
        b64_after, mime_after = _encode_image(after)

        combined_prompt = (
            f"{prompt}\n\n"
            "The first image is the 'before' state and the second is the 'after' state."
        )

        if self.provider == "openai":
            text = await self._call_openai_multi(
                [(b64_before, mime_before), (b64_after, mime_after)],
                combined_prompt,
            )
        elif self.provider in ("claude", "anthropic"):
            text = await self._call_anthropic_multi(
                [(b64_before, mime_before), (b64_after, mime_after)],
                combined_prompt,
            )
        else:
            # Fallback: analyze each separately (no multi-image path here,
            # e.g. for ollama).
            r1 = await self.analyze_image(before, "Describe this screenshot.")
            r2 = await self.analyze_image(after, "Describe this screenshot.")
            text = f"Before: {r1.description}\n\nAfter: {r2.description}"

        return ImageAnalysisResult(
            description=text,
            metadata={
                "model": self.model,
                "before": str(before),
                "after": str(after),
            },
        )

    async def extract_text(self, image_path: Path) -> str:
        """Extract text from an image (OCR via multimodal LLM).

        Thin wrapper over analyze_image with an OCR-oriented prompt; returns
        only the description string.
        """
        result = await self.analyze_image(
            image_path,
            prompt=(
                "Extract ALL text visible in this image. "
                "Return only the extracted text, preserving layout where possible. "
                "If there are code snippets, preserve formatting and indentation."
            ),
        )
        return result.description
+
+ # ------------------------------------------------------------------
+ # Provider implementations
+ # ------------------------------------------------------------------
+
    async def _call_openai(self, b64: str, mime: str, prompt: str) -> str:
        """Single-image request to the OpenAI chat-completions endpoint.

        The image is inlined as a base64 data: URL — no file upload.
        NOTE(review): self.api_key may be None here, yielding an invalid
        Authorization header — confirm callers always configure a key.
        """
        url = self.base_url or "https://api.openai.com/v1"
        async with httpx.AsyncClient(timeout=60) as client:
            resp = await client.post(
                f"{url}/chat/completions",
                headers={"Authorization": f"Bearer {self.api_key}"},
                json={
                    "model": self.model,
                    "messages": [{
                        "role": "user",
                        "content": [
                            {"type": "text", "text": prompt},
                            {"type": "image_url", "image_url": {
                                "url": f"data:{mime};base64,{b64}",
                            }},
                        ],
                    }],
                    "max_tokens": 4096,
                },
            )
            # Raises httpx.HTTPStatusError on 4xx/5xx.
            resp.raise_for_status()
            return resp.json()["choices"][0]["message"]["content"]

    async def _call_openai_multi(
        self, images: List[tuple], prompt: str,
    ) -> str:
        """Multi-image variant of _call_openai; *images* is [(b64, mime), ...].

        The prompt text comes first, followed by every image, all in one
        user message.
        """
        url = self.base_url or "https://api.openai.com/v1"
        content: List[Dict[str, Any]] = [{"type": "text", "text": prompt}]
        for b64, mime in images:
            content.append({
                "type": "image_url",
                "image_url": {"url": f"data:{mime};base64,{b64}"},
            })
        async with httpx.AsyncClient(timeout=60) as client:
            resp = await client.post(
                f"{url}/chat/completions",
                headers={"Authorization": f"Bearer {self.api_key}"},
                json={
                    "model": self.model,
                    "messages": [{"role": "user", "content": content}],
                    "max_tokens": 4096,
                },
            )
            resp.raise_for_status()
            return resp.json()["choices"][0]["message"]["content"]
+
    async def _call_anthropic(self, b64: str, mime: str, prompt: str) -> str:
        """Single-image request to the Anthropic Messages endpoint.

        The image block is sent before the text block, as a base64 "source"
        object (not a data: URL as in the OpenAI path).
        """
        url = self.base_url or "https://api.anthropic.com/v1"
        media_type = mime or "image/png"
        async with httpx.AsyncClient(timeout=60) as client:
            resp = await client.post(
                f"{url}/messages",
                headers={
                    "x-api-key": self.api_key,
                    "anthropic-version": "2023-06-01",
                    "content-type": "application/json",
                },
                json={
                    "model": self.model,
                    "max_tokens": 4096,
                    "messages": [{
                        "role": "user",
                        "content": [
                            {"type": "image", "source": {
                                "type": "base64",
                                "media_type": media_type,
                                "data": b64,
                            }},
                            {"type": "text", "text": prompt},
                        ],
                    }],
                },
            )
            resp.raise_for_status()
            data = resp.json()
            # First content block carries the text answer.
            return data["content"][0]["text"]

    async def _call_anthropic_multi(
        self, images: List[tuple], prompt: str,
    ) -> str:
        """Multi-image variant of _call_anthropic; all images precede the prompt."""
        url = self.base_url or "https://api.anthropic.com/v1"
        content: List[Dict[str, Any]] = []
        for b64, mime in images:
            content.append({
                "type": "image",
                "source": {"type": "base64", "media_type": mime or "image/png", "data": b64},
            })
        content.append({"type": "text", "text": prompt})
        async with httpx.AsyncClient(timeout=60) as client:
            resp = await client.post(
                f"{url}/messages",
                headers={
                    "x-api-key": self.api_key,
                    "anthropic-version": "2023-06-01",
                    "content-type": "application/json",
                },
                json={
                    "model": self.model,
                    "max_tokens": 4096,
                    "messages": [{"role": "user", "content": content}],
                },
            )
            resp.raise_for_status()
            data = resp.json()
            return data["content"][0]["text"]

    async def _call_ollama(self, b64: str, prompt: str) -> str:
        """Request to a local Ollama server's /api/generate endpoint.

        Uses a longer timeout (120s) than the hosted providers since local
        multimodal models can be slow; returns "" if the reply has no
        "response" field.
        """
        url = self.base_url or "http://localhost:11434"
        async with httpx.AsyncClient(timeout=120) as client:
            resp = await client.post(
                f"{url}/api/generate",
                json={
                    "model": self.model,
                    "prompt": prompt,
                    "images": [b64],
                    "stream": False,
                },
            )
            resp.raise_for_status()
            return resp.json().get("response", "")
diff --git a/gitpilot/web/index.html b/gitpilot/web/index.html
new file mode 100644
index 0000000000000000000000000000000000000000..3de2667ee02871f5dcc75133d2bc6cc192ad7028
--- /dev/null
+++ b/gitpilot/web/index.html
@@ -0,0 +1,13 @@
+
+
+
+
+ GitPilot
+
+
+
+
+
+
+
+
diff --git a/gitpilot/workspace.py b/gitpilot/workspace.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc49a2779b93aeeced224991e787699b7ad2e25b
--- /dev/null
+++ b/gitpilot/workspace.py
@@ -0,0 +1,387 @@
+# gitpilot/workspace.py
+"""Local workspace manager — clone, sync, and operate on repositories locally.
+
+Manages a workspace directory (~/.gitpilot/workspaces/{owner}/{repo}) where
+repositories are cloned and kept in sync. All local file operations go through
+this module to ensure path-traversal safety and consistency.
+"""
+from __future__ import annotations
+
+import asyncio
+import logging
+import os
+import shutil
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Any
+
+from gitpilot.models import WorkspaceSummary
+
+logger = logging.getLogger(__name__)
+
+WORKSPACE_ROOT = Path.home() / ".gitpilot" / "workspaces"
+
+
@dataclass
class WorkspaceInfo:
    """Metadata about an active workspace."""

    owner: str        # GitHub owner / organization
    repo: str         # repository name
    path: Path        # local checkout directory under the workspace root
    branch: str       # currently checked-out branch (updated by create_branch)
    remote_url: str   # authenticated https remote; embeds the access token
    # NOTE(review): is_dirty and last_sync are never updated anywhere in this
    # module — confirm they are populated elsewhere or intended for later use.
    is_dirty: bool = False
    last_sync: str | None = None
+
+
+class WorkspaceManager:
+ """Manages local git clones for repository operations.
+
+ Responsibilities:
+ - Clone repositories on first access (shallow for speed)
+ - Checkout and track branches
+ - Provide safe file read / write / delete / search
+ - Sync with remote (pull / push)
+ - Create feature branches, commit, and push
+ """
+
+ def __init__(self, root: Path | None = None):
+ self.root = root or WORKSPACE_ROOT
+ self.root.mkdir(parents=True, exist_ok=True)
+ self._active: dict[str, WorkspaceInfo] = {}
+
+ def workspace_path(self, owner: str, repo: str) -> Path:
+ return self.root / owner / repo
+
+ # ------------------------------------------------------------------
+ # Workspace lifecycle
+ # ------------------------------------------------------------------
+
    async def ensure_workspace(
        self,
        owner: str,
        repo: str,
        token: str,
        branch: str | None = None,
    ) -> WorkspaceInfo:
        """Clone if absent, fetch if present, checkout *branch*.

        Args:
            owner: GitHub owner/organization.
            repo: Repository name.
            token: Access token used to build the authenticated remote URL.
            branch: Branch to check out; defaults to the remote HEAD branch
                (falling back to "main" — see _default_branch).

        Returns:
            The WorkspaceInfo, also registered in self._active under
            "{owner}/{repo}".

        NOTE(review): the token is embedded in the remote URL and is
        therefore persisted in the clone's .git/config on disk — confirm
        this is acceptable, or switch to a credential helper.
        """
        ws_path = self.workspace_path(owner, repo)
        remote_url = f"https://x-access-token:{token}@github.com/{owner}/{repo}.git"

        if not (ws_path / ".git").exists():
            # First access: shallow clone for speed.
            ws_path.mkdir(parents=True, exist_ok=True)
            await self._run_git(
                ["git", "clone", "--depth=1", remote_url, str(ws_path)],
                cwd=ws_path.parent,
            )
        else:
            # Existing clone: refresh remote refs; never prompt for credentials.
            await self._run_git(
                ["git", "fetch", "origin", "--prune"],
                cwd=ws_path,
                env={"GIT_TERMINAL_PROMPT": "0"},
            )

        target_branch = branch or await self._default_branch(ws_path)
        await self._checkout(ws_path, target_branch)

        info = WorkspaceInfo(
            owner=owner,
            repo=repo,
            path=ws_path,
            branch=target_branch,
            remote_url=remote_url,
        )
        self._active[f"{owner}/{repo}"] = info
        return info
+
+ async def cleanup(self, owner: str, repo: str) -> bool:
+ ws_path = self.workspace_path(owner, repo)
+ if ws_path.exists():
+ shutil.rmtree(ws_path)
+ self._active.pop(f"{owner}/{repo}", None)
+ return True
+ return False
+
+ # ------------------------------------------------------------------
+ # File operations
+ # ------------------------------------------------------------------
+
+ def _safe_resolve(self, ws: WorkspaceInfo, file_path: str) -> Path:
+ full = (ws.path / file_path).resolve()
+ if not str(full).startswith(str(ws.path.resolve())):
+ raise PermissionError(f"Path traversal blocked: {file_path}")
+ return full
+
+ async def read_file(self, ws: WorkspaceInfo, file_path: str) -> str:
+ full = self._safe_resolve(ws, file_path)
+ return full.read_text(encoding="utf-8", errors="replace")
+
+ async def write_file(
+ self, ws: WorkspaceInfo, file_path: str, content: str
+ ) -> dict[str, Any]:
+ full = self._safe_resolve(ws, file_path)
+ full.parent.mkdir(parents=True, exist_ok=True)
+ full.write_text(content, encoding="utf-8")
+ return {"path": file_path, "size": len(content)}
+
+ async def delete_file(self, ws: WorkspaceInfo, file_path: str) -> bool:
+ full = self._safe_resolve(ws, file_path)
+ if full.exists():
+ full.unlink()
+ return True
+ return False
+
    async def list_files(
        self, ws: WorkspaceInfo, directory: str = "."
    ) -> list[str]:
        """List tracked plus untracked-but-not-ignored files under *directory*.

        Uses `git ls-files --cached --others --exclude-standard`, so files
        matched by .gitignore are excluded.
        """
        result = await self._run_git(
            ["git", "ls-files", "--cached", "--others",
             "--exclude-standard", directory],
            cwd=ws.path,
        )
        return [f for f in result.stdout.strip().split("\n") if f]

    async def search_files(
        self, ws: WorkspaceInfo, pattern: str, path: str = "."
    ) -> list[dict[str, Any]]:
        """Search tracked text files for *pattern* via `git grep -n -I`.

        Returns a list of {"file", "line", "content"} dicts.  Any failure
        (including an invalid pattern) is deliberately swallowed and yields
        an empty list — this is a best-effort search, never an error.
        """
        try:
            result = await self._run_git(
                ["git", "grep", "-n", "--no-color", "-I", pattern, "--", path],
                cwd=ws.path, check=False,
            )
            matches = []
            for line in result.stdout.strip().split("\n"):
                if ":" in line and line:
                    # git grep output format: file:lineno:content
                    parts = line.split(":", 2)
                    if len(parts) >= 3:
                        matches.append({
                            "file": parts[0],
                            "line": int(parts[1]) if parts[1].isdigit() else 0,
                            "content": parts[2],
                        })
            return matches
        except Exception:
            return []
+
+ # ------------------------------------------------------------------
+ # Git operations
+ # ------------------------------------------------------------------
+
    async def create_branch(
        self, ws: WorkspaceInfo, branch_name: str
    ) -> str:
        """Create and switch to *branch_name* (`git checkout -b`); updates ws.branch."""
        await self._run_git(
            ["git", "checkout", "-b", branch_name], cwd=ws.path,
        )
        ws.branch = branch_name
        return branch_name

    async def commit(
        self, ws: WorkspaceInfo, message: str, files: list[str] | None = None,
    ) -> dict[str, str]:
        """Stage *files* (or everything via `-A`) and commit.

        Returns {"sha": <new HEAD sha>, "message": message}.  A commit with
        nothing staged exits non-zero, so _run_git raises RuntimeError.
        """
        if files:
            # "--" prevents file names from being parsed as options or refs.
            await self._run_git(["git", "add", "--"] + files, cwd=ws.path)
        else:
            await self._run_git(["git", "add", "-A"], cwd=ws.path)

        await self._run_git(["git", "commit", "-m", message], cwd=ws.path)
        sha_result = await self._run_git(
            ["git", "rev-parse", "HEAD"], cwd=ws.path,
        )
        return {"sha": sha_result.stdout.strip(), "message": message}

    async def push(
        self, ws: WorkspaceInfo, force: bool = False,
    ) -> dict[str, str]:
        """Push the current branch, setting upstream (`-u origin <branch>`).

        force=True uses --force-with-lease (not --force), so the push is
        refused if the remote moved since our last fetch.
        """
        cmd = ["git", "push", "-u", "origin", ws.branch]
        if force:
            cmd.insert(2, "--force-with-lease")
        await self._run_git(cmd, cwd=ws.path)
        return {"branch": ws.branch, "status": "pushed"}

    async def diff(self, ws: WorkspaceInfo, staged: bool = False) -> str:
        """Return `git diff` output; staged=True diffs the index instead."""
        cmd = ["git", "diff"]
        if staged:
            cmd.append("--staged")
        result = await self._run_git(cmd, cwd=ws.path)
        return result.stdout
+
    async def status(self, ws: WorkspaceInfo) -> dict[str, Any]:
        """Parsed working-tree status (porcelain v2 + branch header).

        Returned keys: branch, modified, added, deleted, untracked, clean.
        """
        result = await self._run_git(
            ["git", "status", "--porcelain=v2", "--branch"], cwd=ws.path,
        )
        return self._parse_status(result.stdout)
+
+ async def log(
+ self, ws: WorkspaceInfo, count: int = 10,
+ ) -> list[dict[str, str]]:
+ result = await self._run_git(
+ ["git", "log", f"-{count}", "--format=%H|%an|%ae|%s|%aI"],
+ cwd=ws.path,
+ )
+ commits: list[dict[str, str]] = []
+ for line in result.stdout.strip().split("\n"):
+ if "|" in line:
+ parts = line.split("|", 4)
+ commits.append({
+ "sha": parts[0],
+ "author": parts[1],
+ "email": parts[2],
+ "message": parts[3],
+ "date": parts[4] if len(parts) > 4 else "",
+ })
+ return commits
+
    async def stash(self, ws: WorkspaceInfo, pop: bool = False) -> str:
        """`git stash push` (or `pop` when pop=True); returns git's stdout, stripped."""
        cmd = ["git", "stash", "pop" if pop else "push"]
        result = await self._run_git(cmd, cwd=ws.path)
        return result.stdout.strip()

    async def merge(
        self, ws: WorkspaceInfo, branch: str,
    ) -> dict[str, Any]:
        """Merge *branch* into the current branch.

        Never raises on failure (check=False); instead reports
        {"success": bool, "output": str, "conflicts": bool}, where any
        non-zero exit is treated as a conflict.
        """
        result = await self._run_git(
            ["git", "merge", branch], cwd=ws.path, check=False,
        )
        return {
            "success": result.returncode == 0,
            "output": result.stdout,
            "conflicts": result.returncode != 0,
        }
+
+ # ------------------------------------------------------------------
+ # Internal helpers
+ # ------------------------------------------------------------------
+
+ async def _run_git(self, cmd, cwd=None, env=None, check=True):
+ full_env = {**os.environ, **(env or {})}
+ proc = await asyncio.create_subprocess_exec(
+ *cmd,
+ cwd=cwd,
+ stdout=asyncio.subprocess.PIPE,
+ stderr=asyncio.subprocess.PIPE,
+ env=full_env,
+ )
+ stdout, stderr = await proc.communicate()
+
+ class _Result:
+ pass
+
+ r = _Result()
+ r.stdout = stdout.decode("utf-8", errors="replace")
+ r.stderr = stderr.decode("utf-8", errors="replace")
+ r.returncode = proc.returncode
+ if check and proc.returncode != 0:
+ raise RuntimeError(
+ f"Git command failed ({proc.returncode}): {' '.join(cmd)}\n{r.stderr}"
+ )
+ return r
+
    async def _default_branch(self, ws_path: Path) -> str:
        """Branch name that refs/remotes/origin/HEAD points at; fallback "main".

        The symbolic-ref lookup exits non-zero on clones where origin/HEAD
        was never set, hence the hard-coded fallback.
        """
        result = await self._run_git(
            ["git", "symbolic-ref", "refs/remotes/origin/HEAD"],
            cwd=ws_path, check=False,
        )
        if result.returncode == 0:
            return result.stdout.strip().split("/")[-1]
        return "main"

    async def _checkout(self, ws_path: Path, branch: str):
        """Checkout *branch*; if absent locally, create it from origin/<branch>.

        Both attempts use check=False, so a checkout that fails twice is
        silent here. NOTE(review): a typo'd branch name therefore surfaces
        only on the next git operation — confirm that is intended.
        """
        result = await self._run_git(
            ["git", "checkout", branch], cwd=ws_path, check=False,
        )
        if result.returncode != 0:
            await self._run_git(
                ["git", "checkout", "-b", branch, f"origin/{branch}"],
                cwd=ws_path, check=False,
            )
+
+ @staticmethod
+ def _parse_status(raw: str) -> dict[str, Any]:
+ modified, added, deleted, untracked = [], [], [], []
+ branch_name = "unknown"
+ for line in raw.split("\n"):
+ if line.startswith("# branch.head"):
+ branch_name = line.split()[-1]
+ elif line.startswith("1 "):
+ parts = line.split()
+ xy = parts[1] if len(parts) > 1 else ""
+ path = parts[-1] if parts else ""
+ if "M" in xy:
+ modified.append(path)
+ elif "A" in xy:
+ added.append(path)
+ elif "D" in xy:
+ deleted.append(path)
+ elif line.startswith("? "):
+ untracked.append(line[2:])
+ return {
+ "branch": branch_name,
+ "modified": modified,
+ "added": added,
+ "deleted": deleted,
+ "untracked": untracked,
+ "clean": not any([modified, added, deleted, untracked]),
+ }
+
+
async def summarize_workspace(folder_path: str) -> WorkspaceSummary:
    """Summarize workspace state for the redesigned UI status endpoint.

    Reports whether *folder_path* is an open folder, whether it contains a
    git repository, and (best-effort) its current branch and remote names.
    Git lookups never raise; failures are logged at debug level.
    """
    # Empty/None input means "no folder open".  This must be decided BEFORE
    # abspath(): os.path.abspath("") returns the current working directory,
    # which previously made an empty request report the server's CWD as an
    # open folder.
    if not folder_path:
        return WorkspaceSummary(
            folder_open=False,
            folder_path=folder_path,
            folder_name=None,
        )

    folder_path = os.path.abspath(folder_path)
    folder_name = os.path.basename(folder_path)
    folder_open = os.path.isdir(folder_path)

    summary = WorkspaceSummary(
        folder_open=folder_open,
        folder_path=folder_path,
        folder_name=folder_name,
    )

    if not folder_open:
        return summary

    # A .git entry (directory, or file for worktrees/submodules) marks a repo.
    if not os.path.exists(os.path.join(folder_path, ".git")):
        return summary

    summary.git_detected = True
    summary.repo_root = folder_path
    summary.repo_name = folder_name

    # Current branch (HEAD) — best-effort.
    try:
        proc = await asyncio.create_subprocess_exec(
            "git", "rev-parse", "--abbrev-ref", "HEAD",
            cwd=folder_path,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
        stdout, _ = await proc.communicate()
        if proc.returncode == 0:
            summary.branch = stdout.decode().strip()
    except Exception:
        logger.debug("Branch detection failed", exc_info=True)

    # Remote names (deduplicated, sorted) — best-effort.
    try:
        proc = await asyncio.create_subprocess_exec(
            "git", "remote", "-v",
            cwd=folder_path,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
        stdout, _ = await proc.communicate()
        if proc.returncode == 0:
            remotes = set()
            for line in stdout.decode().strip().splitlines():
                parts = line.split()
                if parts:
                    remotes.add(parts[0])
            summary.remotes = sorted(remotes)
    except Exception:
        logger.debug("Remote detection failed", exc_info=True)

    return summary
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000000000000000000000000000000000000..13e51cdec80f50d9ea68b192c72a4e16fc18838e
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,105 @@
+[build-system]
+requires = ["setuptools>=64", "wheel"]
+build-backend = "setuptools.build_meta"
+
+[project]
+# PyPI project name — MUST match what you configured in PyPI Trusted Publishers
+name = "gitcopilot"
+version = "0.2.3"
+description = "Production-ready agentic AI assistant for GitHub repositories with multi-LLM support and visual workflow insights"
+requires-python = ">=3.11,<3.13"
+readme = "README.md"
+
+# Use a simple SPDX license string, not a table
+license = "MIT"
+
+authors = [{ name = "Ruslan Magana Vsevolodovna" }]
+keywords = [
+ "ai",
+ "github",
+ "copilot",
+ "agentic",
+ "crewai",
+ "llm",
+ "openai",
+ "claude",
+ "watsonx",
+ "ollama",
+ "fastapi",
+ "react",
+]
+classifiers = [
+ "Development Status :: 4 - Beta",
+ "Intended Audience :: Developers",
+ # License classifier removed to avoid deprecation warning
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.11",
+ "Programming Language :: Python :: 3.12",
+ "Programming Language :: JavaScript",
+ "Framework :: FastAPI",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ "Topic :: Software Development :: Version Control :: Git",
+ "Topic :: Scientific/Engineering :: Artificial Intelligence",
+ "Operating System :: OS Independent",
+]
+dependencies = [
+ "fastapi>=0.111.0",
+ "uvicorn[standard]>=0.30.0",
+ "httpx>=0.27.0",
+ "python-dotenv>=1.1.0",
+ "typer>=0.12.0",
+ "pydantic>=2.7.0",
+ "crewai[anthropic]>=0.76.9",
+ "anthropic>=0.39.0",
+ "crewai-tools>=0.13.4",
+ "ibm-watsonx-ai>=1.1.0",
+ "langchain-ibm>=0.3.0",
+ "rich>=13.0.0",
+ "pyjwt[crypto]>=2.8.0",
+ "litellm>=1.80.5",
+]
+
+[project.urls]
+Homepage = "https://github.com/ruslanmv/gitpilot"
+Documentation = "https://github.com/ruslanmv/gitpilot#readme"
+Repository = "https://github.com/ruslanmv/gitpilot"
+Issues = "https://github.com/ruslanmv/gitpilot/issues"
+
+[project.optional-dependencies]
+dev = [
+ "ruff>=0.6",
+ "pytest>=8.2",
+ "pytest-asyncio>=0.23",
+ "build>=1.2.1",
+ "twine>=5.0.0",
+]
+
+[project.scripts]
+# CLI entry points: these remain based on the python package `gitpilot`
+gitpilot = "gitpilot.cli:main"
+gitpilot-api = "gitpilot.cli:serve_only"
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["gitpilot*"] # package directory is gitpilot/
+
+[tool.setuptools.package-data]
+gitpilot = ["web/*", "web/**/*", "py.typed"]
+
+[tool.setuptools]
+include-package-data = true
+
+[tool.ruff]
+line-length = 100
+# Target the minimum supported Python (requires-python = ">=3.11"), so the
+# UP/pyupgrade rules never suggest 3.12-only syntax that breaks 3.11.
+target-version = "py311"
+
+[tool.ruff.lint]
+select = ["E", "F", "W", "I", "N", "UP", "S", "B", "A", "C4", "DTZ", "ICN", "PIE", "PT", "RET", "SIM", "ARG", "PL"]
+ignore = ["S101", "PLR0913", "PLR2004"]
+
+[tool.pytest.ini_options]
+testpaths = ["tests"]
+python_files = ["test_*.py"]
+python_classes = ["Test*"]
+python_functions = ["test_*"]
+asyncio_mode = "auto"