Commit
·
92f3410
1
Parent(s):
95dd43a
Add production-ready enhancements: progress tracking, error handling, exports
Browse files- Real-time progress tracking with agent status indicators
- Comprehensive error handling with retries and fallbacks
- Multi-framework/platform auto-detection (Next.js, Django, FastAPI, React, Express, etc.)
- Export utilities for JSON (CI/CD) and Markdown (documentation)
- Enhanced UI with progress display and export options
- Partial result handling for graceful degradation
- Visual status indicators throughout the pipeline
- app.py +60 -7
- docs_agent.py +58 -9
- error_handler.py +162 -0
- export_utils.py +125 -0
- orchestrator.py +150 -17
- progress_tracker.py +100 -0
app.py
CHANGED
|
@@ -1,4 +1,4 @@
|
|
| 1 |
-
"""Enhanced Gradio interface with
|
| 2 |
|
| 3 |
from __future__ import annotations
|
| 4 |
|
|
@@ -6,6 +6,7 @@ from typing import Dict, Tuple
|
|
| 6 |
|
| 7 |
import gradio as gr
|
| 8 |
|
|
|
|
| 9 |
from orchestrator import ReadinessOrchestrator
|
| 10 |
|
| 11 |
|
|
@@ -18,7 +19,7 @@ def run_pipeline(
|
|
| 18 |
code_summary: str,
|
| 19 |
infra_notes: str,
|
| 20 |
stakeholders: str,
|
| 21 |
-
) -> Tuple[Dict, str, str, str]:
|
| 22 |
payload = {
|
| 23 |
"project_name": project_name or "Unnamed Service",
|
| 24 |
"release_goal": release_goal or "Ship stable build",
|
|
@@ -28,6 +29,23 @@ def run_pipeline(
|
|
| 28 |
}
|
| 29 |
result = orchestrator.run_dict(payload)
|
| 30 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 31 |
# Extract sponsor synthesis
|
| 32 |
sponsor_text = ""
|
| 33 |
if "sponsor_synthesis" in result:
|
|
@@ -49,10 +67,12 @@ def run_pipeline(
|
|
| 49 |
for lookup in lookups[:5]: # Show first 5
|
| 50 |
lookup_type = lookup.get("type", "unknown")
|
| 51 |
status = lookup.get("status", "unknown")
|
| 52 |
-
|
|
|
|
| 53 |
|
| 54 |
# Extract deployment actions
|
| 55 |
deploy_text = ""
|
|
|
|
| 56 |
if "deployment" in result and result["deployment"]:
|
| 57 |
deploy = result["deployment"]
|
| 58 |
repo = deploy.get("repo", "Not configured")
|
|
@@ -60,14 +80,22 @@ def run_pipeline(
|
|
| 60 |
ready = deploy.get("ready", False)
|
| 61 |
actions = deploy.get("actions", [])
|
| 62 |
|
| 63 |
-
|
|
|
|
| 64 |
deploy_text += "**Deployment Actions**:\n"
|
| 65 |
for action in actions[:5]: # Show first 5
|
| 66 |
action_type = action.get("type", "unknown")
|
| 67 |
message = action.get("message", action.get("title", ""))
|
| 68 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 69 |
|
| 70 |
-
return result, sponsor_text, docs_text, deploy_text
|
| 71 |
|
| 72 |
|
| 73 |
def build_interface() -> gr.Blocks:
|
|
@@ -97,6 +125,16 @@ def build_interface() -> gr.Blocks:
|
|
| 97 |
|
| 98 |
run_button = gr.Button("🔍 Run Readiness Pipeline", variant="primary", size="lg")
|
| 99 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 100 |
with gr.Row():
|
| 101 |
with gr.Column(scale=2):
|
| 102 |
gr.Markdown("### 📋 Full Results")
|
|
@@ -124,11 +162,26 @@ def build_interface() -> gr.Blocks:
|
|
| 124 |
lines=10,
|
| 125 |
interactive=False
|
| 126 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 127 |
|
| 128 |
run_button.click(
|
| 129 |
fn=run_pipeline,
|
| 130 |
inputs=[project_name, release_goal, code_summary, infra_notes, stakeholders],
|
| 131 |
-
outputs=[output, sponsor_output, docs_output, deploy_output],
|
| 132 |
)
|
| 133 |
|
| 134 |
return demo
|
|
|
|
| 1 |
+
"""Enhanced Gradio interface with progress tracking, exports, and deployment actions."""
|
| 2 |
|
| 3 |
from __future__ import annotations
|
| 4 |
|
|
|
|
| 6 |
|
| 7 |
import gradio as gr
|
| 8 |
|
| 9 |
+
from export_utils import export_json, export_markdown
|
| 10 |
from orchestrator import ReadinessOrchestrator
|
| 11 |
|
| 12 |
|
|
|
|
| 19 |
code_summary: str,
|
| 20 |
infra_notes: str,
|
| 21 |
stakeholders: str,
|
| 22 |
+
) -> Tuple[Dict, str, str, str, str, str, str]:
|
| 23 |
payload = {
|
| 24 |
"project_name": project_name or "Unnamed Service",
|
| 25 |
"release_goal": release_goal or "Ship stable build",
|
|
|
|
| 29 |
}
|
| 30 |
result = orchestrator.run_dict(payload)
|
| 31 |
|
| 32 |
+
# Extract progress information
|
| 33 |
+
progress_text = ""
|
| 34 |
+
if "progress" in result:
|
| 35 |
+
progress = result["progress"]
|
| 36 |
+
overall = progress.get("overall_progress", 0.0)
|
| 37 |
+
status_msg = progress.get("status_message", "Processing...")
|
| 38 |
+
agents = progress.get("agents", [])
|
| 39 |
+
|
| 40 |
+
progress_text = f"**Overall Progress**: {overall:.0%}\n**Status**: {status_msg}\n\n"
|
| 41 |
+
progress_text += "**Agent Status**:\n"
|
| 42 |
+
for agent in agents:
|
| 43 |
+
status = agent.get("status", "unknown")
|
| 44 |
+
name = agent.get("name", "Unknown")
|
| 45 |
+
message = agent.get("message", "")
|
| 46 |
+
icon = "✅" if status == "completed" else "⏳" if status == "running" else "❌" if status == "failed" else "⏭️"
|
| 47 |
+
progress_text += f"{icon} **{name}**: {message}\n"
|
| 48 |
+
|
| 49 |
# Extract sponsor synthesis
|
| 50 |
sponsor_text = ""
|
| 51 |
if "sponsor_synthesis" in result:
|
|
|
|
| 67 |
for lookup in lookups[:5]: # Show first 5
|
| 68 |
lookup_type = lookup.get("type", "unknown")
|
| 69 |
status = lookup.get("status", "unknown")
|
| 70 |
+
status_icon = "✅" if status == "found" else "⚠️" if status == "not_found" else "ℹ️"
|
| 71 |
+
docs_text += f"{status_icon} **{lookup_type}**: {status}\n"
|
| 72 |
|
| 73 |
# Extract deployment actions
|
| 74 |
deploy_text = ""
|
| 75 |
+
deploy_actions = []
|
| 76 |
if "deployment" in result and result["deployment"]:
|
| 77 |
deploy = result["deployment"]
|
| 78 |
repo = deploy.get("repo", "Not configured")
|
|
|
|
| 80 |
ready = deploy.get("ready", False)
|
| 81 |
actions = deploy.get("actions", [])
|
| 82 |
|
| 83 |
+
ready_icon = "✅" if ready else "❌"
|
| 84 |
+
deploy_text = f"**Repository**: {repo}\n**Branch**: {branch}\n**Ready**: {ready_icon} {ready}\n\n"
|
| 85 |
deploy_text += "**Deployment Actions**:\n"
|
| 86 |
for action in actions[:5]: # Show first 5
|
| 87 |
action_type = action.get("type", "unknown")
|
| 88 |
message = action.get("message", action.get("title", ""))
|
| 89 |
+
actionable = action.get("actionable", False)
|
| 90 |
+
action_icon = "🚀" if actionable else "ℹ️"
|
| 91 |
+
deploy_text += f"{action_icon} **{action_type}**: {message}\n"
|
| 92 |
+
deploy_actions.append(action)
|
| 93 |
+
|
| 94 |
+
# Generate export formats
|
| 95 |
+
json_export = export_json(result)
|
| 96 |
+
markdown_export = export_markdown(result)
|
| 97 |
|
| 98 |
+
return result, progress_text, sponsor_text, docs_text, deploy_text, json_export, markdown_export
|
| 99 |
|
| 100 |
|
| 101 |
def build_interface() -> gr.Blocks:
|
|
|
|
| 125 |
|
| 126 |
run_button = gr.Button("🔍 Run Readiness Pipeline", variant="primary", size="lg")
|
| 127 |
|
| 128 |
+
# Progress tracking
|
| 129 |
+
with gr.Row():
|
| 130 |
+
gr.Markdown("### 📊 Pipeline Progress")
|
| 131 |
+
progress_output = gr.Textbox(
|
| 132 |
+
label="Real-time Progress",
|
| 133 |
+
lines=8,
|
| 134 |
+
interactive=False,
|
| 135 |
+
value="Click 'Run Readiness Pipeline' to start..."
|
| 136 |
+
)
|
| 137 |
+
|
| 138 |
with gr.Row():
|
| 139 |
with gr.Column(scale=2):
|
| 140 |
gr.Markdown("### 📋 Full Results")
|
|
|
|
| 162 |
lines=10,
|
| 163 |
interactive=False
|
| 164 |
)
|
| 165 |
+
|
| 166 |
+
# Export options
|
| 167 |
+
with gr.Row():
|
| 168 |
+
gr.Markdown("### 📥 Export Reports")
|
| 169 |
+
with gr.Row():
|
| 170 |
+
json_export = gr.Textbox(
|
| 171 |
+
label="JSON Export (for CI/CD)",
|
| 172 |
+
lines=5,
|
| 173 |
+
interactive=True
|
| 174 |
+
)
|
| 175 |
+
markdown_export = gr.Textbox(
|
| 176 |
+
label="Markdown Export (for documentation)",
|
| 177 |
+
lines=5,
|
| 178 |
+
interactive=True
|
| 179 |
+
)
|
| 180 |
|
| 181 |
run_button.click(
|
| 182 |
fn=run_pipeline,
|
| 183 |
inputs=[project_name, release_goal, code_summary, infra_notes, stakeholders],
|
| 184 |
+
outputs=[output, progress_output, sponsor_output, docs_output, deploy_output, json_export, markdown_export],
|
| 185 |
)
|
| 186 |
|
| 187 |
return demo
|
docs_agent.py
CHANGED
|
@@ -16,51 +16,100 @@ class DocumentationLookupAgent:
|
|
| 16 |
self.mcp_client = EnhancedMCPClient()
|
| 17 |
|
| 18 |
async def extract_framework_from_request(self, request: ReadinessRequest) -> Optional[str]:
|
| 19 |
-
"""Extract framework/library from code summary."""
|
| 20 |
code_lower = request.code_summary.lower()
|
|
|
|
|
|
|
| 21 |
|
| 22 |
-
#
|
| 23 |
frameworks = {
|
|
|
|
| 24 |
"next.js": "next.js",
|
| 25 |
"nextjs": "next.js",
|
| 26 |
-
"
|
|
|
|
|
|
|
| 27 |
"django": "django",
|
| 28 |
"fastapi": "fastapi",
|
| 29 |
"flask": "flask",
|
|
|
|
|
|
|
|
|
|
| 30 |
"express": "express",
|
| 31 |
"nestjs": "nestjs",
|
|
|
|
|
|
|
|
|
|
|
|
|
| 32 |
"vue": "vue",
|
| 33 |
"angular": "angular",
|
| 34 |
"svelte": "svelte",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 35 |
}
|
| 36 |
|
|
|
|
|
|
|
| 37 |
for key, framework in frameworks.items():
|
| 38 |
-
if key in
|
| 39 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 40 |
|
| 41 |
return None
|
| 42 |
|
| 43 |
async def extract_platform_from_request(self, request: ReadinessRequest) -> Optional[str]:
|
| 44 |
-
"""Extract deployment platform from infra notes."""
|
| 45 |
infra_lower = (request.infra_notes or "").lower()
|
|
|
|
|
|
|
| 46 |
|
|
|
|
| 47 |
platforms = {
|
|
|
|
| 48 |
"vercel": "vercel",
|
|
|
|
|
|
|
|
|
|
|
|
|
| 49 |
"aws": "aws",
|
|
|
|
| 50 |
"azure": "azure",
|
|
|
|
| 51 |
"gcp": "gcp",
|
| 52 |
"google cloud": "gcp",
|
| 53 |
-
"
|
|
|
|
| 54 |
"railway": "railway",
|
| 55 |
"render": "render",
|
| 56 |
"fly.io": "fly.io",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 57 |
"kubernetes": "kubernetes",
|
| 58 |
"k8s": "kubernetes",
|
|
|
|
|
|
|
|
|
|
| 59 |
}
|
| 60 |
|
|
|
|
|
|
|
| 61 |
for key, platform in platforms.items():
|
| 62 |
-
if key in
|
| 63 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 64 |
|
| 65 |
return None
|
| 66 |
|
|
|
|
| 16 |
self.mcp_client = EnhancedMCPClient()
|
| 17 |
|
| 18 |
async def extract_framework_from_request(self, request: ReadinessRequest) -> Optional[str]:
|
| 19 |
+
"""Extract framework/library from code summary with multi-framework support."""
|
| 20 |
code_lower = request.code_summary.lower()
|
| 21 |
+
infra_lower = (request.infra_notes or "").lower()
|
| 22 |
+
combined = f"{code_lower} {infra_lower}"
|
| 23 |
|
| 24 |
+
# Extended framework detection with priority order
|
| 25 |
frameworks = {
|
| 26 |
+
# React ecosystem (highest priority)
|
| 27 |
"next.js": "next.js",
|
| 28 |
"nextjs": "next.js",
|
| 29 |
+
"remix": "remix",
|
| 30 |
+
"gatsby": "gatsby",
|
| 31 |
+
# Python frameworks
|
| 32 |
"django": "django",
|
| 33 |
"fastapi": "fastapi",
|
| 34 |
"flask": "flask",
|
| 35 |
+
"fastapi": "fastapi",
|
| 36 |
+
"starlette": "starlette",
|
| 37 |
+
# Node.js frameworks
|
| 38 |
"express": "express",
|
| 39 |
"nestjs": "nestjs",
|
| 40 |
+
"koa": "koa",
|
| 41 |
+
"hapi": "hapi",
|
| 42 |
+
# Frontend frameworks
|
| 43 |
+
"react": "react",
|
| 44 |
"vue": "vue",
|
| 45 |
"angular": "angular",
|
| 46 |
"svelte": "svelte",
|
| 47 |
+
"nuxt": "nuxt",
|
| 48 |
+
# Other
|
| 49 |
+
"spring": "spring",
|
| 50 |
+
"rails": "rails",
|
| 51 |
+
"laravel": "laravel",
|
| 52 |
}
|
| 53 |
|
| 54 |
+
# Check for multiple frameworks (return the most specific one)
|
| 55 |
+
detected = []
|
| 56 |
for key, framework in frameworks.items():
|
| 57 |
+
if key in combined:
|
| 58 |
+
detected.append((len(key), framework)) # Prioritize longer matches
|
| 59 |
+
|
| 60 |
+
if detected:
|
| 61 |
+
# Return the most specific (longest) match
|
| 62 |
+
detected.sort(reverse=True, key=lambda x: x[0])
|
| 63 |
+
return detected[0][1]
|
| 64 |
|
| 65 |
return None
|
| 66 |
|
| 67 |
async def extract_platform_from_request(self, request: ReadinessRequest) -> Optional[str]:
|
| 68 |
+
"""Extract deployment platform from infra notes with multi-platform support."""
|
| 69 |
infra_lower = (request.infra_notes or "").lower()
|
| 70 |
+
code_lower = (request.code_summary or "").lower()
|
| 71 |
+
combined = f"{infra_lower} {code_lower}"
|
| 72 |
|
| 73 |
+
# Extended platform detection
|
| 74 |
platforms = {
|
| 75 |
+
# Serverless/Edge
|
| 76 |
"vercel": "vercel",
|
| 77 |
+
"netlify": "netlify",
|
| 78 |
+
"cloudflare": "cloudflare",
|
| 79 |
+
"cloudflare pages": "cloudflare",
|
| 80 |
+
# Cloud providers
|
| 81 |
"aws": "aws",
|
| 82 |
+
"amazon web services": "aws",
|
| 83 |
"azure": "azure",
|
| 84 |
+
"microsoft azure": "azure",
|
| 85 |
"gcp": "gcp",
|
| 86 |
"google cloud": "gcp",
|
| 87 |
+
"google cloud platform": "gcp",
|
| 88 |
+
# PaaS
|
| 89 |
"railway": "railway",
|
| 90 |
"render": "render",
|
| 91 |
"fly.io": "fly.io",
|
| 92 |
+
"flyio": "fly.io",
|
| 93 |
+
"heroku": "heroku",
|
| 94 |
+
"digitalocean": "digitalocean",
|
| 95 |
+
"digital ocean": "digitalocean",
|
| 96 |
+
# Containers/Orchestration
|
| 97 |
"kubernetes": "kubernetes",
|
| 98 |
"k8s": "kubernetes",
|
| 99 |
+
"docker": "docker",
|
| 100 |
+
"docker compose": "docker",
|
| 101 |
+
"docker-compose": "docker",
|
| 102 |
}
|
| 103 |
|
| 104 |
+
# Check for multiple platforms (return the most specific one)
|
| 105 |
+
detected = []
|
| 106 |
for key, platform in platforms.items():
|
| 107 |
+
if key in combined:
|
| 108 |
+
detected.append((len(key), platform))
|
| 109 |
+
|
| 110 |
+
if detected:
|
| 111 |
+
detected.sort(reverse=True, key=lambda x: x[0])
|
| 112 |
+
return detected[0][1]
|
| 113 |
|
| 114 |
return None
|
| 115 |
|
error_handler.py
ADDED
|
@@ -0,0 +1,162 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Comprehensive error handling with retries and fallbacks."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import asyncio
|
| 6 |
+
import logging
|
| 7 |
+
import time
|
| 8 |
+
from functools import wraps
|
| 9 |
+
from typing import Any, Callable, Dict, Optional, TypeVar, Union
|
| 10 |
+
|
| 11 |
+
logger = logging.getLogger(__name__)
|
| 12 |
+
|
| 13 |
+
T = TypeVar('T')
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class RetryConfig:
|
| 17 |
+
"""Configuration for retry logic."""
|
| 18 |
+
|
| 19 |
+
def __init__(
|
| 20 |
+
self,
|
| 21 |
+
max_retries: int = 3,
|
| 22 |
+
initial_delay: float = 1.0,
|
| 23 |
+
backoff_factor: float = 2.0,
|
| 24 |
+
max_delay: float = 60.0,
|
| 25 |
+
retryable_exceptions: tuple = (Exception,)
|
| 26 |
+
):
|
| 27 |
+
self.max_retries = max_retries
|
| 28 |
+
self.initial_delay = initial_delay
|
| 29 |
+
self.backoff_factor = backoff_factor
|
| 30 |
+
self.max_delay = max_delay
|
| 31 |
+
self.retryable_exceptions = retryable_exceptions
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def retry_with_backoff(config: Optional[RetryConfig] = None):
|
| 35 |
+
"""Decorator for retrying functions with exponential backoff."""
|
| 36 |
+
if config is None:
|
| 37 |
+
config = RetryConfig()
|
| 38 |
+
|
| 39 |
+
def decorator(func: Callable[..., T]) -> Callable[..., T]:
|
| 40 |
+
@wraps(func)
|
| 41 |
+
def sync_wrapper(*args, **kwargs) -> T:
|
| 42 |
+
last_exception = None
|
| 43 |
+
delay = config.initial_delay
|
| 44 |
+
|
| 45 |
+
for attempt in range(config.max_retries + 1):
|
| 46 |
+
try:
|
| 47 |
+
return func(*args, **kwargs)
|
| 48 |
+
except config.retryable_exceptions as e:
|
| 49 |
+
last_exception = e
|
| 50 |
+
if attempt < config.max_retries:
|
| 51 |
+
logger.warning(
|
| 52 |
+
f"{func.__name__} failed (attempt {attempt + 1}/{config.max_retries + 1}): {e}. "
|
| 53 |
+
f"Retrying in {delay:.1f}s..."
|
| 54 |
+
)
|
| 55 |
+
time.sleep(delay)
|
| 56 |
+
delay = min(delay * config.backoff_factor, config.max_delay)
|
| 57 |
+
else:
|
| 58 |
+
logger.error(f"{func.__name__} failed after {config.max_retries + 1} attempts: {e}")
|
| 59 |
+
|
| 60 |
+
raise last_exception
|
| 61 |
+
|
| 62 |
+
@wraps(func)
|
| 63 |
+
async def async_wrapper(*args, **kwargs) -> T:
|
| 64 |
+
last_exception = None
|
| 65 |
+
delay = config.initial_delay
|
| 66 |
+
|
| 67 |
+
for attempt in range(config.max_retries + 1):
|
| 68 |
+
try:
|
| 69 |
+
return await func(*args, **kwargs)
|
| 70 |
+
except config.retryable_exceptions as e:
|
| 71 |
+
last_exception = e
|
| 72 |
+
if attempt < config.max_retries:
|
| 73 |
+
logger.warning(
|
| 74 |
+
f"{func.__name__} failed (attempt {attempt + 1}/{config.max_retries + 1}): {e}. "
|
| 75 |
+
f"Retrying in {delay:.1f}s..."
|
| 76 |
+
)
|
| 77 |
+
await asyncio.sleep(delay)
|
| 78 |
+
delay = min(delay * config.backoff_factor, config.max_delay)
|
| 79 |
+
else:
|
| 80 |
+
logger.error(f"{func.__name__} failed after {config.max_retries + 1} attempts: {e}")
|
| 81 |
+
|
| 82 |
+
raise last_exception
|
| 83 |
+
|
| 84 |
+
if asyncio.iscoroutinefunction(func):
|
| 85 |
+
return async_wrapper
|
| 86 |
+
return sync_wrapper
|
| 87 |
+
|
| 88 |
+
return decorator
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def safe_execute(
|
| 92 |
+
func: Callable[..., T],
|
| 93 |
+
*args,
|
| 94 |
+
default: Optional[T] = None,
|
| 95 |
+
error_message: str = "Operation failed",
|
| 96 |
+
**kwargs
|
| 97 |
+
) -> Union[T, Dict[str, Any]]:
|
| 98 |
+
"""Safely execute a function with fallback."""
|
| 99 |
+
try:
|
| 100 |
+
return func(*args, **kwargs)
|
| 101 |
+
except Exception as e:
|
| 102 |
+
logger.error(f"{error_message}: {e}")
|
| 103 |
+
if default is not None:
|
| 104 |
+
return default
|
| 105 |
+
return {
|
| 106 |
+
"success": False,
|
| 107 |
+
"error": str(e),
|
| 108 |
+
"error_type": type(e).__name__
|
| 109 |
+
}
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
async def safe_execute_async(
|
| 113 |
+
func: Callable[..., T],
|
| 114 |
+
*args,
|
| 115 |
+
default: Optional[T] = None,
|
| 116 |
+
error_message: str = "Operation failed",
|
| 117 |
+
**kwargs
|
| 118 |
+
) -> Union[T, Dict[str, Any]]:
|
| 119 |
+
"""Safely execute an async function with fallback."""
|
| 120 |
+
try:
|
| 121 |
+
return await func(*args, **kwargs)
|
| 122 |
+
except Exception as e:
|
| 123 |
+
logger.error(f"{error_message}: {e}")
|
| 124 |
+
if default is not None:
|
| 125 |
+
return default
|
| 126 |
+
return {
|
| 127 |
+
"success": False,
|
| 128 |
+
"error": str(e),
|
| 129 |
+
"error_type": type(e).__name__
|
| 130 |
+
}
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
class PartialResult:
|
| 134 |
+
"""Container for partial results when some operations fail."""
|
| 135 |
+
|
| 136 |
+
def __init__(self):
|
| 137 |
+
self.results: Dict[str, Any] = {}
|
| 138 |
+
self.errors: Dict[str, str] = {}
|
| 139 |
+
self.success_count: int = 0
|
| 140 |
+
self.failure_count: int = 0
|
| 141 |
+
|
| 142 |
+
def add_result(self, key: str, value: Any):
|
| 143 |
+
"""Add a successful result."""
|
| 144 |
+
self.results[key] = value
|
| 145 |
+
self.success_count += 1
|
| 146 |
+
|
| 147 |
+
def add_error(self, key: str, error: str):
|
| 148 |
+
"""Add an error."""
|
| 149 |
+
self.errors[key] = error
|
| 150 |
+
self.failure_count += 1
|
| 151 |
+
|
| 152 |
+
def to_dict(self) -> Dict[str, Any]:
|
| 153 |
+
"""Convert to dictionary."""
|
| 154 |
+
return {
|
| 155 |
+
"results": self.results,
|
| 156 |
+
"errors": self.errors,
|
| 157 |
+
"success_count": self.success_count,
|
| 158 |
+
"failure_count": self.failure_count,
|
| 159 |
+
"has_errors": self.failure_count > 0,
|
| 160 |
+
"is_partial": self.failure_count > 0 and self.success_count > 0
|
| 161 |
+
}
|
| 162 |
+
|
export_utils.py
ADDED
|
@@ -0,0 +1,125 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Export utilities for reports in multiple formats."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import json
|
| 6 |
+
from datetime import datetime
|
| 7 |
+
from typing import Any, Dict, Optional
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def export_json(data: Dict[str, Any], filename: Optional[str] = None) -> str:
|
| 11 |
+
"""Export data as JSON."""
|
| 12 |
+
output = json.dumps(data, indent=2, default=str)
|
| 13 |
+
if filename:
|
| 14 |
+
with open(filename, 'w') as f:
|
| 15 |
+
f.write(output)
|
| 16 |
+
return output
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def export_markdown(data: Dict[str, Any], filename: Optional[str] = None) -> str:
|
| 20 |
+
"""Export readiness report as Markdown."""
|
| 21 |
+
md_lines = [
|
| 22 |
+
f"# Deployment Readiness Report",
|
| 23 |
+
f"**Generated**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
|
| 24 |
+
"",
|
| 25 |
+
"## Summary",
|
| 26 |
+
""
|
| 27 |
+
]
|
| 28 |
+
|
| 29 |
+
# Plan summary
|
| 30 |
+
if "plan" in data:
|
| 31 |
+
plan = data["plan"]
|
| 32 |
+
md_lines.extend([
|
| 33 |
+
f"### Deployment Plan",
|
| 34 |
+
plan.get("summary", "No summary available"),
|
| 35 |
+
"",
|
| 36 |
+
"#### Checklist Items:",
|
| 37 |
+
""
|
| 38 |
+
])
|
| 39 |
+
for item in plan.get("items", [])[:10]:
|
| 40 |
+
status_icon = "✅" if item.get("status") == "done" else "⏳"
|
| 41 |
+
md_lines.append(f"- {status_icon} **{item.get('title', 'Untitled')}**")
|
| 42 |
+
md_lines.append(f" - Category: {item.get('category', 'general')}")
|
| 43 |
+
md_lines.append(f" - {item.get('description', '')}")
|
| 44 |
+
md_lines.append("")
|
| 45 |
+
|
| 46 |
+
# Review decision
|
| 47 |
+
if "review" in data:
|
| 48 |
+
review = data["review"]
|
| 49 |
+
decision = review.get("decision", "unknown")
|
| 50 |
+
confidence = review.get("confidence", 0.0)
|
| 51 |
+
md_lines.extend([
|
| 52 |
+
"## Review Decision",
|
| 53 |
+
f"**Decision**: {decision.upper()}",
|
| 54 |
+
f"**Confidence**: {confidence:.1%}",
|
| 55 |
+
"",
|
| 56 |
+
"### Findings:",
|
| 57 |
+
""
|
| 58 |
+
])
|
| 59 |
+
for finding in review.get("findings", [])[:5]:
|
| 60 |
+
severity = finding.get("severity", "medium")
|
| 61 |
+
severity_icon = "🔴" if severity == "high" else "🟡" if severity == "medium" else "🟢"
|
| 62 |
+
md_lines.append(f"- {severity_icon} **{severity.upper()}**: {finding.get('note', '')}")
|
| 63 |
+
md_lines.append("")
|
| 64 |
+
|
| 65 |
+
# Documentation references
|
| 66 |
+
if "docs_references" in data and data["docs_references"]:
|
| 67 |
+
docs_refs = data["docs_references"]
|
| 68 |
+
md_lines.extend([
|
| 69 |
+
"## Documentation References",
|
| 70 |
+
f"**Framework**: {docs_refs.get('framework', 'Unknown')}",
|
| 71 |
+
f"**Platform**: {docs_refs.get('platform', 'Unknown')}",
|
| 72 |
+
"",
|
| 73 |
+
"### Lookups:",
|
| 74 |
+
""
|
| 75 |
+
])
|
| 76 |
+
for lookup in docs_refs.get("lookups", [])[:5]:
|
| 77 |
+
lookup_type = lookup.get("type", "unknown")
|
| 78 |
+
status = lookup.get("status", "unknown")
|
| 79 |
+
md_lines.append(f"- **{lookup_type}**: {status}")
|
| 80 |
+
md_lines.append("")
|
| 81 |
+
|
| 82 |
+
# Deployment actions
|
| 83 |
+
if "deployment" in data and data["deployment"]:
|
| 84 |
+
deploy = data["deployment"]
|
| 85 |
+
md_lines.extend([
|
| 86 |
+
"## Deployment Actions",
|
| 87 |
+
f"**Repository**: {deploy.get('repo', 'Not configured')}",
|
| 88 |
+
f"**Branch**: {deploy.get('branch', 'main')}",
|
| 89 |
+
f"**Ready**: {'✅' if deploy.get('ready') else '❌'}",
|
| 90 |
+
"",
|
| 91 |
+
"### Actions:",
|
| 92 |
+
""
|
| 93 |
+
])
|
| 94 |
+
for action in deploy.get("actions", [])[:5]:
|
| 95 |
+
action_type = action.get("type", "unknown")
|
| 96 |
+
message = action.get("message", action.get("title", ""))
|
| 97 |
+
md_lines.append(f"- **{action_type}**: {message}")
|
| 98 |
+
md_lines.append("")
|
| 99 |
+
|
| 100 |
+
# Sponsor synthesis
|
| 101 |
+
if "sponsor_synthesis" in data:
|
| 102 |
+
md_lines.extend([
|
| 103 |
+
"## Sponsor LLM Synthesis",
|
| 104 |
+
""
|
| 105 |
+
])
|
| 106 |
+
for key, value in data["sponsor_synthesis"].items():
|
| 107 |
+
md_lines.append(f"### {key}")
|
| 108 |
+
md_lines.append(str(value))
|
| 109 |
+
md_lines.append("")
|
| 110 |
+
|
| 111 |
+
output = "\n".join(md_lines)
|
| 112 |
+
if filename:
|
| 113 |
+
with open(filename, 'w') as f:
|
| 114 |
+
f.write(output)
|
| 115 |
+
return output
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
def export_pdf(data: Dict[str, Any], filename: Optional[str] = None) -> bytes:
|
| 119 |
+
"""Export as PDF (requires markdown2pdf or similar)."""
|
| 120 |
+
# For now, return markdown as PDF would require additional dependencies
|
| 121 |
+
# In production, use libraries like weasyprint or reportlab
|
| 122 |
+
md_content = export_markdown(data)
|
| 123 |
+
# Convert markdown to PDF would go here
|
| 124 |
+
return md_content.encode('utf-8')
|
| 125 |
+
|
orchestrator.py
CHANGED
|
@@ -4,7 +4,7 @@ from __future__ import annotations
|
|
| 4 |
|
| 5 |
import asyncio
|
| 6 |
from dataclasses import asdict, field
|
| 7 |
-
from typing import Dict
|
| 8 |
|
| 9 |
from agents import (
|
| 10 |
DocumentationAgent,
|
|
@@ -15,6 +15,8 @@ from agents import (
|
|
| 15 |
)
|
| 16 |
from deployment_agent import DeploymentAgent
|
| 17 |
from docs_agent import DocumentationLookupAgent
|
|
|
|
|
|
|
| 18 |
from schemas import (
|
| 19 |
DeploymentActions,
|
| 20 |
DocumentationReferences,
|
|
@@ -55,30 +57,161 @@ class ReadinessOrchestrator:
|
|
| 55 |
deployment=DeploymentActions(**deployment_config),
|
| 56 |
)
|
| 57 |
|
| 58 |
-
def run_dict(self, payload: Dict) -> Dict:
|
| 59 |
-
"""Convenience wrapper for UI usage with plain dicts."""
|
|
|
|
|
|
|
|
|
|
| 60 |
|
| 61 |
request = ReadinessRequest(**payload)
|
| 62 |
-
|
| 63 |
-
evidence = self.evidence.run(plan, project_name=request.project_name)
|
| 64 |
-
sponsor_synthesis = self.synthesis.run(evidence, plan.summary)
|
| 65 |
-
docs = self.documentation.run(request, evidence)
|
| 66 |
-
review = self.reviewer.run(plan, evidence, docs, sponsor_synthesis)
|
| 67 |
|
| 68 |
-
#
|
| 69 |
-
|
| 70 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 71 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 72 |
response = ReadinessResponse(
|
| 73 |
-
plan=plan,
|
| 74 |
-
evidence=evidence,
|
| 75 |
-
documentation=docs,
|
| 76 |
-
review=review,
|
| 77 |
-
docs_references=DocumentationReferences(**docs_refs),
|
| 78 |
-
deployment=DeploymentActions(**deployment_config),
|
| 79 |
)
|
| 80 |
result = asdict(response)
|
| 81 |
result["sponsor_synthesis"] = sponsor_synthesis
|
|
|
|
|
|
|
| 82 |
return result
|
| 83 |
|
| 84 |
async def execute_deployment(self, payload: Dict) -> Dict:
|
|
|
|
| 4 |
|
| 5 |
import asyncio
|
| 6 |
from dataclasses import asdict, field
|
| 7 |
+
from typing import Dict, Optional
|
| 8 |
|
| 9 |
from agents import (
|
| 10 |
DocumentationAgent,
|
|
|
|
| 15 |
)
|
| 16 |
from deployment_agent import DeploymentAgent
|
| 17 |
from docs_agent import DocumentationLookupAgent
|
| 18 |
+
from error_handler import PartialResult, safe_execute, safe_execute_async
|
| 19 |
+
from progress_tracker import AgentStatus, PipelineProgress
|
| 20 |
from schemas import (
|
| 21 |
DeploymentActions,
|
| 22 |
DocumentationReferences,
|
|
|
|
| 57 |
deployment=DeploymentActions(**deployment_config),
|
| 58 |
)
|
| 59 |
|
| 60 |
+
def run_dict(self, payload: Dict, progress: Optional[PipelineProgress] = None) -> Dict:
|
| 61 |
+
"""Convenience wrapper for UI usage with plain dicts and progress tracking."""
|
| 62 |
+
|
| 63 |
+
if progress is None:
|
| 64 |
+
progress = PipelineProgress()
|
| 65 |
|
| 66 |
request = ReadinessRequest(**payload)
|
| 67 |
+
partial = PartialResult()
|
|
|
|
|
|
|
|
|
|
|
|
|
| 68 |
|
| 69 |
+
# Planner
|
| 70 |
+
progress.update_agent("Planner", AgentStatus.RUNNING, "Analyzing project context...")
|
| 71 |
+
plan = safe_execute(
|
| 72 |
+
self.planner.run,
|
| 73 |
+
request,
|
| 74 |
+
default=None,
|
| 75 |
+
error_message="Planner agent failed"
|
| 76 |
+
)
|
| 77 |
+
if plan:
|
| 78 |
+
progress.update_agent("Planner", AgentStatus.COMPLETED, "Plan generated", 1.0)
|
| 79 |
+
partial.add_result("plan", plan)
|
| 80 |
+
else:
|
| 81 |
+
progress.update_agent("Planner", AgentStatus.FAILED, "Failed to generate plan", 0.0, "Planner returned None")
|
| 82 |
+
partial.add_error("plan", "Planner agent failed")
|
| 83 |
+
plan = None # Will need fallback
|
| 84 |
+
|
| 85 |
+
# Evidence
|
| 86 |
+
if plan:
|
| 87 |
+
progress.update_agent("Evidence", AgentStatus.RUNNING, "Gathering deployment signals...")
|
| 88 |
+
evidence = safe_execute(
|
| 89 |
+
self.evidence.run,
|
| 90 |
+
plan,
|
| 91 |
+
request.project_name,
|
| 92 |
+
default=None,
|
| 93 |
+
error_message="Evidence agent failed"
|
| 94 |
+
)
|
| 95 |
+
if evidence:
|
| 96 |
+
progress.update_agent("Evidence", AgentStatus.COMPLETED, "Evidence gathered", 1.0)
|
| 97 |
+
partial.add_result("evidence", evidence)
|
| 98 |
+
else:
|
| 99 |
+
progress.update_agent("Evidence", AgentStatus.FAILED, "Failed to gather evidence", 0.0)
|
| 100 |
+
partial.add_error("evidence", "Evidence agent failed")
|
| 101 |
+
evidence = None
|
| 102 |
+
else:
|
| 103 |
+
evidence = None
|
| 104 |
+
progress.update_agent("Evidence", AgentStatus.SKIPPED, "Skipped (no plan)", 0.0)
|
| 105 |
+
|
| 106 |
+
# Synthesis
|
| 107 |
+
if evidence and plan:
|
| 108 |
+
progress.update_agent("Synthesis", AgentStatus.RUNNING, "Cross-validating with sponsor LLMs...")
|
| 109 |
+
sponsor_synthesis = safe_execute(
|
| 110 |
+
self.synthesis.run,
|
| 111 |
+
evidence,
|
| 112 |
+
plan.summary,
|
| 113 |
+
default={},
|
| 114 |
+
error_message="Synthesis agent failed"
|
| 115 |
+
)
|
| 116 |
+
progress.update_agent("Synthesis", AgentStatus.COMPLETED, "Synthesis complete", 1.0)
|
| 117 |
+
partial.add_result("sponsor_synthesis", sponsor_synthesis)
|
| 118 |
+
else:
|
| 119 |
+
sponsor_synthesis = {}
|
| 120 |
+
progress.update_agent("Synthesis", AgentStatus.SKIPPED, "Skipped (no evidence)", 0.0)
|
| 121 |
|
| 122 |
+
# Documentation
|
| 123 |
+
if evidence:
|
| 124 |
+
progress.update_agent("Documentation", AgentStatus.RUNNING, "Generating deployment docs...")
|
| 125 |
+
docs = safe_execute(
|
| 126 |
+
self.documentation.run,
|
| 127 |
+
request,
|
| 128 |
+
evidence,
|
| 129 |
+
default=None,
|
| 130 |
+
error_message="Documentation agent failed"
|
| 131 |
+
)
|
| 132 |
+
if docs:
|
| 133 |
+
progress.update_agent("Documentation", AgentStatus.COMPLETED, "Documentation generated", 1.0)
|
| 134 |
+
partial.add_result("documentation", docs)
|
| 135 |
+
else:
|
| 136 |
+
progress.update_agent("Documentation", AgentStatus.FAILED, "Failed to generate docs", 0.0)
|
| 137 |
+
partial.add_error("documentation", "Documentation agent failed")
|
| 138 |
+
docs = None
|
| 139 |
+
else:
|
| 140 |
+
docs = None
|
| 141 |
+
progress.update_agent("Documentation", AgentStatus.SKIPPED, "Skipped (no evidence)", 0.0)
|
| 142 |
+
|
| 143 |
+
# Reviewer
|
| 144 |
+
if plan and evidence and docs:
|
| 145 |
+
progress.update_agent("Reviewer", AgentStatus.RUNNING, "Performing risk assessment...")
|
| 146 |
+
review = safe_execute(
|
| 147 |
+
self.reviewer.run,
|
| 148 |
+
plan,
|
| 149 |
+
evidence,
|
| 150 |
+
docs,
|
| 151 |
+
sponsor_synthesis,
|
| 152 |
+
default=None,
|
| 153 |
+
error_message="Reviewer agent failed"
|
| 154 |
+
)
|
| 155 |
+
if review:
|
| 156 |
+
progress.update_agent("Reviewer", AgentStatus.COMPLETED, "Review complete", 1.0)
|
| 157 |
+
partial.add_result("review", review)
|
| 158 |
+
else:
|
| 159 |
+
progress.update_agent("Reviewer", AgentStatus.FAILED, "Failed to review", 0.0)
|
| 160 |
+
partial.add_error("review", "Reviewer agent failed")
|
| 161 |
+
review = None
|
| 162 |
+
else:
|
| 163 |
+
review = None
|
| 164 |
+
progress.update_agent("Reviewer", AgentStatus.SKIPPED, "Skipped (missing inputs)", 0.0)
|
| 165 |
+
|
| 166 |
+
# Docs Lookup (async)
|
| 167 |
+
progress.update_agent("Docs Lookup", AgentStatus.RUNNING, "Looking up framework/platform docs...")
|
| 168 |
+
docs_refs = asyncio.run(
|
| 169 |
+
safe_execute_async(
|
| 170 |
+
self.docs_lookup.lookup_deployment_docs,
|
| 171 |
+
request,
|
| 172 |
+
plan if plan else None,
|
| 173 |
+
default={"framework": None, "platform": None, "lookups": []},
|
| 174 |
+
error_message="Documentation lookup failed"
|
| 175 |
+
)
|
| 176 |
+
)
|
| 177 |
+
if docs_refs and docs_refs.get("lookups"):
|
| 178 |
+
progress.update_agent("Docs Lookup", AgentStatus.COMPLETED, "Documentation found", 1.0)
|
| 179 |
+
partial.add_result("docs_references", docs_refs)
|
| 180 |
+
else:
|
| 181 |
+
progress.update_agent("Docs Lookup", AgentStatus.COMPLETED, "No docs found (framework may be unknown)", 1.0)
|
| 182 |
+
partial.add_result("docs_references", docs_refs or {"framework": None, "platform": None, "lookups": []})
|
| 183 |
+
|
| 184 |
+
# Deployment (async)
|
| 185 |
+
progress.update_agent("Deployment", AgentStatus.RUNNING, "Preparing deployment actions...")
|
| 186 |
+
deployment_config = asyncio.run(
|
| 187 |
+
safe_execute_async(
|
| 188 |
+
self.deployment.prepare_deployment,
|
| 189 |
+
request,
|
| 190 |
+
plan if plan else None,
|
| 191 |
+
default={"repo": None, "branch": "main", "ready": False, "actions": []},
|
| 192 |
+
error_message="Deployment preparation failed"
|
| 193 |
+
)
|
| 194 |
+
)
|
| 195 |
+
if deployment_config and deployment_config.get("ready"):
|
| 196 |
+
progress.update_agent("Deployment", AgentStatus.COMPLETED, "Deployment ready", 1.0)
|
| 197 |
+
partial.add_result("deployment", deployment_config)
|
| 198 |
+
else:
|
| 199 |
+
progress.update_agent("Deployment", AgentStatus.COMPLETED, "Deployment not configured", 1.0)
|
| 200 |
+
partial.add_result("deployment", deployment_config or {"repo": None, "branch": "main", "ready": False, "actions": []})
|
| 201 |
+
|
| 202 |
+
# Build response
|
| 203 |
response = ReadinessResponse(
|
| 204 |
+
plan=plan or partial.results.get("plan"),
|
| 205 |
+
evidence=evidence or partial.results.get("evidence"),
|
| 206 |
+
documentation=docs or partial.results.get("documentation"),
|
| 207 |
+
review=review or partial.results.get("review"),
|
| 208 |
+
docs_references=DocumentationReferences(**docs_refs) if docs_refs else None,
|
| 209 |
+
deployment=DeploymentActions(**deployment_config) if deployment_config else None,
|
| 210 |
)
|
| 211 |
result = asdict(response)
|
| 212 |
result["sponsor_synthesis"] = sponsor_synthesis
|
| 213 |
+
result["progress"] = progress.to_dict()
|
| 214 |
+
result["partial_results"] = partial.to_dict()
|
| 215 |
return result
|
| 216 |
|
| 217 |
async def execute_deployment(self, payload: Dict) -> Dict:
|
progress_tracker.py
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Real-time progress tracking for multi-agent pipeline."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from dataclasses import dataclass, field
|
| 6 |
+
from enum import Enum
|
| 7 |
+
from typing import Dict, List, Optional
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class AgentStatus(str, Enum):
    """Lifecycle states an agent passes through during a pipeline run.

    Subclasses ``str`` so members compare equal to their plain string
    values and serialize directly to JSON.
    """

    PENDING = "pending"      # registered but not yet started
    RUNNING = "running"      # currently executing
    COMPLETED = "completed"  # finished (successfully or best-effort)
    FAILED = "failed"        # raised or produced no usable result
    SKIPPED = "skipped"      # prerequisites missing; never ran
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
@dataclass
class AgentProgress:
    """Progress information for a single agent in the pipeline.

    Instances are created and mutated by PipelineProgress.update_agent;
    timestamps are epoch seconds from time.time().
    """
    name: str                          # display name of the agent (e.g. "Planner")
    status: AgentStatus                # current lifecycle state
    progress: float  # 0.0 to 1.0
    message: str                       # short human-readable status line
    start_time: Optional[float] = None  # set when the agent is first reported
    end_time: Optional[float] = None    # set on COMPLETED/FAILED/SKIPPED
    error: Optional[str] = None         # error detail when status is FAILED
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
@dataclass
class PipelineProgress:
    """Overall pipeline progress tracking across all reported agents."""

    # Per-agent entries, in the order agents were first reported.
    agents: List[AgentProgress] = field(default_factory=list)
    current_step: int = 0
    total_steps: int = 7
    # Fraction of reported agents that COMPLETED (0.0 - 1.0).
    overall_progress: float = 0.0
    status_message: str = "Initializing..."

    def update_agent(self, name: str, status: AgentStatus, message: str = "", progress: float = 0.0, error: Optional[str] = None):
        """Create or update the progress entry for agent *name*.

        Sets start_time on first report, end_time when the status becomes
        terminal (COMPLETED/FAILED/SKIPPED), then recomputes the overall
        progress and status message.
        """
        import time

        agent = next((a for a in self.agents if a.name == name), None)
        if not agent:
            agent = AgentProgress(name=name, status=status, progress=progress, message=message, error=error)
            self.agents.append(agent)
            agent.start_time = time.time()
        else:
            agent.status = status
            agent.progress = progress
            agent.message = message
            agent.error = error
        if status in (AgentStatus.COMPLETED, AgentStatus.FAILED, AgentStatus.SKIPPED):
            agent.end_time = time.time()

        self._update_overall()

    def _update_overall(self):
        """Recompute overall_progress and status_message from agent states."""
        if not self.agents:
            self.overall_progress = 0.0
            return

        completed = sum(1 for a in self.agents if a.status == AgentStatus.COMPLETED)
        total = len(self.agents)
        self.overall_progress = completed / total if total > 0 else 0.0

        running = next((a for a in self.agents if a.status == AgentStatus.RUNNING), None)
        failed = [a for a in self.agents if a.status == AgentStatus.FAILED]
        # BUGFIX: the previous logic only declared success when every agent
        # COMPLETED, so a run with any SKIPPED agent (a normal degraded run)
        # was stuck on "in progress" forever; and a failure was reported even
        # while other agents were still pending. Decide based on whether any
        # non-terminal (pending/running) agents remain.
        unfinished = [a for a in self.agents if a.status in (AgentStatus.PENDING, AgentStatus.RUNNING)]
        if running:
            self.status_message = f"Running {running.name}... ({running.message})"
        elif unfinished:
            self.status_message = "Pipeline in progress..."
        elif failed:
            self.status_message = f"Pipeline failed: {failed[0].name}"
        else:
            # Every agent is terminal and none failed (completed or skipped).
            self.status_message = "Pipeline completed successfully!"

    def to_dict(self) -> Dict:
        """Convert to a plain dict for JSON serialization."""
        return {
            "agents": [
                {
                    "name": a.name,
                    "status": a.status.value,
                    "progress": a.progress,
                    "message": a.message,
                    "error": a.error
                }
                for a in self.agents
            ],
            "current_step": self.current_step,
            "total_steps": self.total_steps,
            "overall_progress": self.overall_progress,
            "status_message": self.status_message
        }
|
| 100 |
+
|