import os
import uuid

import gradio as gr
from langchain_core.messages import HumanMessage, AIMessage
from loguru import logger

from mcpc_graph import setup_graph


async def chat_logic(
    message,
    history,
    session_state,
    github_repo,
    github_token,
    trello_api,
    trello_token,
    hf_token=None,
):
    """
    Handles the main chat logic, including environment setup and streaming responses.

    Args:
        message (str): The user's input message.
        history (list): The chat history managed by Gradio.
        session_state (dict): A dictionary to maintain state across calls for a session.
        github_repo (str): The GitHub repository (username/repo).
        github_token (str): The GitHub personal access token.
        trello_api (str): The Trello API key.
        trello_token (str): The Trello API token.
        hf_token (str, optional): The LLM API token; always re-read from the
            NEBIUS_API_KEY environment variable, so callers may omit it.

    Yields:
        str: The bot's streaming response or an interruption message.
    """
    hf_token = os.getenv("NEBIUS_API_KEY")
    if not hf_token:
        yield (
            "Error: LLM token not found. Please set the token as an environment "
            "variable or configure it as a Gradio secret."
        )
        return
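
    # The compiled LangGraph app and the human-in-the-loop resume helper are cached in
    # Gradio's per-session state, so the graph is only built once per browser session.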
    app = session_state.get("app")
    human_resume_node = session_state.get("human_resume_node")

    first_turn = session_state.get("first_turn_done") is None

    if app is None:
        if not all([github_repo, github_token, trello_api, trello_token, hf_token]):
            yield (
                "Error: Please provide all API keys and the GitHub repository in the "
                "'API Configuration' section before starting the chat."
            )
            return

        os.environ["GITHUB_REPO"] = github_repo
        os.environ["NEBIUS_API_KEY"] = hf_token
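
        # setup_graph (from mcpc_graph) is assumed to build the multi-agent graph with the
        # GitHub and Trello credentials and to return the compiled app together with the
        # node used to resume human-in-the-loop interrupts.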
        app, human_resume_node = await setup_graph(
            github_token=github_token, trello_api=trello_api, trello_token=trello_token
        )
        session_state["app"] = app
        session_state["human_resume_node"] = human_resume_node

    thread_id = session_state.get("thread_id")
    if not thread_id:
        thread_id = str(uuid.uuid4())
        session_state["thread_id"] = thread_id

    is_message_command = session_state.get("is_message_command", False)
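
    # The thread_id keys the LangGraph checkpointer, so each browser session resumes its
    # own conversation; recursion_limit caps the number of graph steps per turn.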
    config = {
        "configurable": {"thread_id": thread_id},
        "recursion_limit": 100,
    }

    if first_turn:
        prompt_for_agent = f"{message}\n\nGITHUB REPOSITORY: {github_repo}"
        session_state["first_turn_done"] = True
    else:
        prompt_for_agent = message
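
    # If the previous turn ended in a graph interrupt, this message is treated as the human
    # reply: call_human_interrupt_agent (from mcpc_graph) presumably wraps it into the
    # resume payload expected by the paused graph.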
    if is_message_command:
        app_input = human_resume_node.call_human_interrupt_agent(message)
        session_state["is_message_command"] = False
    else:
        logger.debug(f"Prompt for agent: '{prompt_for_agent}'")
        app_input = {"messages": [HumanMessage(content=prompt_for_agent)]}
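
    # stream_mode="values" makes astream emit the full graph state after every step; the
    # loop keeps the latest AIMessage and watches for "__interrupt__", which signals that
    # the graph paused to ask the human for input.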
    final_reply = None

    async for res in app.astream(app_input, config=config, stream_mode="values"):
        if "__interrupt__" in res:
            session_state["is_message_command"] = True
            yield res["__interrupt__"][0].value
            return

        if "messages" in res:
            last = res["messages"][-1]
            if isinstance(last, AIMessage):
                final_reply = last.content

    if final_reply is not None:
        yield final_reply
    else:
        yield "✅ Done"


def create_gradio_app():
    """Creates and launches the Gradio web application."""
    print("Launching Gradio app...")

    theme = gr.themes.Soft(
        primary_hue="green",
        secondary_hue="teal",
        neutral_hue="slate",
        font=["Arial", "sans-serif"],
    ).set(
        body_background_fill="linear-gradient(135deg,#e8f5e9 0%,#f4fcf4 100%)",
        block_background_fill="white",
        block_border_width="1px",
        block_shadow="*shadow_drop_lg",
        button_primary_background_fill="#02B900",
        button_primary_text_color="white",
        button_secondary_background_fill="#35C733",
        button_secondary_text_color="white",
    )

    custom_css = """
    body { font-family: 'Inter', sans-serif; }
    #header { text-align:center; margin-bottom: 1.25rem; }
    #header h1 { font-size:2.25rem; font-weight:700; background:linear-gradient(90deg,#02B900 0%,#35C733 100%); -webkit-background-clip:text; color:transparent; }
    #chatbot .message.user { background:#4F814E; }
    #chatbot .message.assistant { background:#F9FDF9; }
    """

    with gr.Blocks(
        theme=theme,
        title="LangGraph Multi-Agent Chat",
        css=custom_css,
        fill_height=True,
    ) as demo:
        session_state = gr.State({})

        gr.HTML(
            """
            <div id='header'>
                <h1>PMCP — Agentic Project Management</h1>
                <p class='tagline'>Manage your projects with PMCP, a multi-agent system capable of interacting with Trello and GitHub.</p>
            </div>
            """
        )

        with gr.Row():
            with gr.Column(scale=1):
                with gr.Accordion("🔑 API Configuration", open=True):
                    gr.Markdown(
                        "We set up a [Trello public board](https://trello.com/b/Z2MAnn7H/pmcp-agent-ai) "
                        "and a [GitHub repository](https://github.com/PMCPAgentAI/brainrot_image_generation) "
                        "so you can experiment with this agent. If you want to try it with your own "
                        "account, edit this configuration with your API keys."
                    )
                    github_repo = gr.Textbox(
                        label="📁 GitHub Repo",
                        placeholder="e.g., username/repository",
                        info="The target repository for GitHub operations.",
                        value=os.getenv("GITHUB_REPO_NAME"),
                    )
                    github_token = gr.Textbox(
                        label="🔐 GitHub Token",
                        placeholder="ghp_xxxxxxxxxxxx",
                        type="password",
                        info="A fine-grained personal access token.",
                        value=os.getenv("GITHUB_API_KEY"),
                    )
                    trello_api = gr.Textbox(
                        label="🗂️ Trello API Key",
                        placeholder="Your Trello API key",
                        info="Your API key from trello.com/power-ups/admin.",
                        value=os.getenv("TRELLO_API_KEY"),
                    )
                    trello_token = gr.Textbox(
                        label="🔐 Trello Token",
                        placeholder="Your Trello token",
                        type="password",
                        info="A token generated from your Trello account.",
                        value=os.getenv("TRELLO_TOKEN"),
                    )

            with gr.Column(scale=2):
                chatbot = gr.Chatbot(
                    [],
                    elem_id="chatbot",
                    bubble_full_width=False,
                    height=600,
                    label="Multi-Agent Chat",
                    show_label=False,
                    avatar_images=(None, None),
                )

        def _reset_agent(state: dict):
            """
            Runs when the user clicks the 🗑 button.

            Keeps the API credentials that live in the Textboxes and the env vars,
            but forgets everything that makes the current LangGraph session unique,
            so the next user message starts from the root node again.
            """
            logger.info("Resetting the agent")
            state["app"] = None
            state["first_turn_done"] = None
            return state

        chatbot.clear(
            _reset_agent,
            inputs=[session_state],
            outputs=[session_state],
        )
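
        # additional_inputs are passed to chat_logic after (message, history), in the order
        # listed below; hf_token is not wired here, so it falls back to its default and
        # chat_logic reads the LLM token from the NEBIUS_API_KEY environment variable.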
        gr.ChatInterface(
            fn=chat_logic,
            chatbot=chatbot,
            additional_inputs=[
                session_state,
                github_repo,
                github_token,
                trello_api,
                trello_token,
            ],
            title=None,
            description="Ask **PMCP** to create tickets, open PRs, or coordinate tasks across your boards and repositories.",
        )

    demo.queue()
    demo.launch(debug=True)


if __name__ == "__main__":
    try:
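        # Editable install of the local package at startup, presumably so that mcpc_graph is
        # importable on hosted runtimes (e.g. Hugging Face Spaces) without a separate build step.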
        import subprocess

        subprocess.run(["pip", "install", "-e", "."])

        create_gradio_app()
    except KeyboardInterrupt:
        print("\nShutting down Gradio app.")
    except Exception as e:
        print(f"An error occurred: {e}")