import asyncio
from typing import Any, AsyncGenerator, Dict, List

import gradio as gr

from utils.langgraph_pipeline import run_pipeline_and_save


class AgentInference:
    def __init__(self):
        self.current_agent = None
        self.chat_log = []
        self.is_running = False

    async def stream_agent_output(self, agent_name: str, prompt: str) -> AsyncGenerator[str, None]:
        """Stream output from a single agent."""
        self.current_agent = agent_name

        # Simulate streaming output with delays
        yield f"🤖 {agent_name} is thinking..."
        await asyncio.sleep(1)

        # Get the agent's output
        result = await self.get_agent_output(agent_name, prompt)

        # Stream the output word by word
        words = result.split()
        for word in words:
            yield f"{word} "
            await asyncio.sleep(0.1)

        self.chat_log.append({"role": agent_name, "content": result})
        yield "\n\n"

    async def get_agent_output(self, agent_name: str, prompt: str) -> str:
        """Get output from a specific agent."""
        # Placeholder responses; this would be replaced with actual agent calls.
        agents = {
            "Product Manager": "Analyzing requirements and defining product specifications...",
            "Project Manager": "Creating project timeline and resource allocation...",
            "Software Architect": "Designing system architecture and technical specifications...",
            "UI Designer": "Creating beautiful and user-friendly interface designs...",
            "Software Engineer": "Implementing the UI components and functionality...",
            "Quality Assurance": "Reviewing and testing the implementation...",
        }
        return agents.get(agent_name, "Processing...")
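
    # Sketch only: backing get_agent_output with the real LangGraph pipeline.
    # This assumes run_pipeline_and_save(prompt) runs every agent, saves the UI
    # bundle, and returns a dict like {"outputs": {agent_name: text, ...},
    # "zip_path": "..."}; that shape is an assumption about
    # utils.langgraph_pipeline, not a documented contract.
    async def get_agent_output_from_pipeline(self, agent_name: str, prompt: str) -> str:
        """Hypothetical variant of get_agent_output backed by the pipeline."""
        if not hasattr(self, "_pipeline_results"):
            # Run the (presumably blocking) pipeline off the event loop so the UI
            # keeps streaming. Naive per-instance cache; a real implementation
            # would key it by prompt.
            self._pipeline_results = await asyncio.to_thread(run_pipeline_and_save, prompt)
        return self._pipeline_results.get("outputs", {}).get(agent_name, "Processing...")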

    async def run_inference(self, prompt: str) -> AsyncGenerator[Dict[str, Any], None]:
        """Run inference through all agents with streaming output."""
        self.is_running = True
        self.chat_log = []

        agents = [
            "Product Manager",
            "Project Manager",
            "Software Architect",
            "UI Designer",
            "Software Engineer",
            "Quality Assurance",
        ]

        for agent in agents:
            if not self.is_running:
                break
            async for output in self.stream_agent_output(agent, prompt):
                yield {
                    "agent": agent,
                    "output": output,
                    "chat_log": self.chat_log,
                }
            # Add a small delay between agents
            await asyncio.sleep(0.5)

        # Generate the final output
        yield {
            "agent": "System",
            "output": "🎉 UI Generation Complete!",
            "chat_log": self.chat_log,
        }


inference_engine = AgentInference()


def format_chat_log(chat_log: List[Dict[str, Any]]) -> List[Dict[str, str]]:
    """Format the chat log as Chatbot messages."""
    # The Chatbot below uses type="messages", which expects dicts with "role"
    # and "content"; agent names are not valid roles, so fold them into the
    # content and report every entry as the assistant.
    formatted_log = []
    for entry in chat_log:
        role = entry["role"]
        content = entry["content"]
        formatted_log.append({"role": "assistant", "content": f"**{role}**: {content}"})
    return formatted_log


async def handle_run(prompt: str) -> AsyncGenerator[tuple, None]:
    """Handle the run button click with streaming output."""
    async for update in inference_engine.run_inference(prompt):
        formatted_log = format_chat_log(update["chat_log"])
        yield formatted_log, None  # None for file_output until complete
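

# Sketch only: a run handler that also populates file_output once the agents
# finish, under the same assumption as above that run_pipeline_and_save(prompt)
# returns a dict containing a "zip_path" key pointing at the saved bundle.
async def handle_run_with_download(prompt: str) -> AsyncGenerator[tuple, None]:
    """Stream the chat log, then yield the ZIP path produced by the pipeline."""
    formatted_log = []
    async for update in inference_engine.run_inference(prompt):
        formatted_log = format_chat_log(update["chat_log"])
        yield formatted_log, None
    results = await asyncio.to_thread(run_pipeline_and_save, prompt)  # assumed return shape
    yield formatted_log, results.get("zip_path")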


with gr.Blocks() as demo:
    gr.Markdown("""
    # 🧠 Multi-Agent UI Generator (Real-time Inference)

    This system uses multiple AI agents working together to generate beautiful UI designs in real time:

    1. Product Manager: Defines requirements
    2. Project Manager: Creates project plan
    3. Software Architect: Designs system architecture
    4. UI Designer: Creates beautiful UI design
    5. Software Engineer: Implements the code
    6. Quality Assurance: Reviews and suggests improvements

    Watch as each agent contributes to the design in real time!
    """)

    with gr.Row():
        with gr.Column(scale=2):
            input_box = gr.Textbox(
                lines=4,
                label="Enter your product idea prompt",
                placeholder="Describe the website or UI you want to create...",
            )
            run_btn = gr.Button("Generate Website", variant="primary")
            stop_btn = gr.Button("Stop Generation", variant="stop")

        with gr.Column(scale=3):
            chatbox = gr.Chatbot(
                label="Agent Conversation Log",
                type="messages",
                height=600,
            )
            file_output = gr.File(label="Download UI ZIP")

    # Handle the run button click
    run_btn.click(
        fn=handle_run,
        inputs=[input_box],
        outputs=[chatbox, file_output],
        api_name="generate",
    )

    # Handle the stop button click
    def stop_generation():
        inference_engine.is_running = False
        # The chatbox expects message dicts, so append the notice to the
        # formatted log instead of returning a bare string.
        stopped_log = format_chat_log(inference_engine.chat_log)
        stopped_log.append({"role": "assistant", "content": "Generation stopped by user"})
        return stopped_log

    stop_btn.click(
        fn=stop_generation,
        outputs=[chatbox],
    )
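
    # Alternative stop wiring (sketch): Gradio event listeners also accept a
    # `cancels` argument, so the streaming event itself could be cancelled
    # instead of only flipping the is_running flag, e.g.
    #   run_event = run_btn.click(fn=handle_run, inputs=[input_box],
    #                             outputs=[chatbox, file_output], api_name="generate")
    #   stop_btn.click(fn=stop_generation, outputs=[chatbox], cancels=[run_event])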

demo.queue()
demo.launch()