""" | |
Common functionality for MCP agents. | |
""" | |
import asyncio
import logging
import os
import pathlib
import sys

from dotenv import load_dotenv
from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIModel

# Make the sibling package importable, then import the MCP client
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from pydantic_ai_mcp_agent import mcp_client

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger("mcp-agents")

# Get the directory where the current script is located
SCRIPT_DIR = pathlib.Path(__file__).parent.resolve()
# Define the path to the config file relative to the script directory
CONFIG_FILE = SCRIPT_DIR / "mcp_config.json"

load_dotenv()


def get_model():
    """Get the LLM model to use for the agent."""
    llm = os.getenv('MODEL_CHOICE', 'gpt-4o-mini')
    base_url = os.getenv('BASE_URL', 'https://api.openai.com/v1')
    api_key = os.getenv('LLM_API_KEY', 'no-api-key-provided')
    return OpenAIModel(
        llm,
        base_url=base_url,
        api_key=api_key
    )
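
# The three environment variables read above can be supplied via a .env file
# picked up by load_dotenv(). Illustrative example; the values are
# placeholders, not real credentials:
#
#   MODEL_CHOICE=gpt-4o-mini
#   BASE_URL=https://api.openai.com/v1
#   LLM_API_KEY=sk-...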


async def get_pydantic_ai_agent(system_prompt):
    """
    Initialize and return a Pydantic AI agent with the specified system prompt.

    Args:
        system_prompt (str): The system prompt to use for the agent.

    Returns:
        tuple: A tuple containing the MCP client and the initialized agent.
    """
    client = mcp_client.MCPClient()
    client.load_servers(str(CONFIG_FILE))
    tools = await client.start()
    # Create the agent with the specified system prompt
    agent = Agent(
        model=get_model(),
        tools=tools,
        system_prompt=system_prompt
    )
    return client, agent


async def process_research_questions(agent, research_questions, domain_context=None):
    """
    Process research questions using the specified agent.

    Args:
        agent (Agent): The Pydantic AI agent to use.
        research_questions (list): List of research questions to process.
        domain_context (str, optional): Additional domain context to provide.

    Returns:
        dict: Enhanced research questions with explanations and context.
    """
    # Format the questions as a bulleted list so the prompt does not
    # contain a raw Python list repr
    questions_text = "\n".join(f"- {question}" for question in research_questions)
    prompt = f"""
I need you to enhance and refine the following research questions:

{questions_text}

Please:
1. Evaluate each question for clarity, specificity, and research potential
2. Refine the questions to be more precise and answerable
3. Add 2-3 additional research questions that would complement the existing ones
4. For each question, provide a brief explanation of why it's important and what insights it might yield
5. Suggest methodological approaches for investigating each question

Return your response as structured JSON in the following format:
{{
    "refined_questions": [
        {{
            "question": "The refined research question",
            "explanation": "Why this question is important",
            "methodology": "Suggested research approach"
        }}
    ],
    "additional_questions": [
        {{
            "question": "A new complementary research question",
            "explanation": "Why this question is important",
            "methodology": "Suggested research approach"
        }}
    ],
    "overall_assessment": "A brief assessment of the research direction"
}}
"""
    if domain_context:
        prompt += f"\n\nAdditional domain context:\n{domain_context}"
    result = await agent.run(prompt)
    return result.data
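
# Note: the prompts above request JSON, but agent.run() returns whatever text
# the model produced; result.data is handed back unparsed. Callers that need
# the structured fields would parse it themselves (e.g. json.loads), assuming
# the model complied with the requested format.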


async def enhance_hypotheses(agent, hypotheses, research_goal):
    """
    Enhance hypotheses using the specified agent.

    Args:
        agent (Agent): The Pydantic AI agent to use.
        hypotheses (list): List of hypotheses to enhance.
        research_goal (str): The research goal.

    Returns:
        dict: Enhanced hypotheses with explanations and context.
    """
    # Format the hypotheses as a bulleted list so the prompt does not
    # contain a raw Python list repr
    hypotheses_text = "\n".join(f"- {hypothesis}" for hypothesis in hypotheses)
    prompt = f"""
I need you to analyze and enhance the following research hypotheses related to this research goal:

Research Goal: {research_goal}

Hypotheses:
{hypotheses_text}

Please:
1. Evaluate each hypothesis for novelty, feasibility, and potential impact
2. Suggest refinements to make each hypothesis more testable
3. Identify potential methodological approaches for testing each hypothesis
4. Highlight connections between different hypotheses
5. Suggest potential implications if each hypothesis is confirmed or refuted

Return your response as structured JSON in the following format:
{{
    "enhanced_hypotheses": [
        {{
            "original": "The original hypothesis text",
            "refined": "The refined hypothesis",
            "novelty_assessment": "HIGH/MEDIUM/LOW",
            "feasibility_assessment": "HIGH/MEDIUM/LOW",
            "testing_approach": "Suggested methodology",
            "implications": "Potential implications of findings"
        }}
    ],
    "connections": [
        {{
            "hypothesis_pair": ["id1", "id2"],
            "relationship": "Description of how these hypotheses relate"
        }}
    ],
    "overall_assessment": "A brief assessment of the research direction"
}}
"""
    result = await agent.run(prompt)
    return result.data
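

# Minimal usage sketch. The system prompt and questions below are illustrative,
# and the MCPClient teardown method (cleanup) is an assumption: adapt it to
# whatever your mcp_client implementation actually exposes.
async def _demo():
    client, agent = await get_pydantic_ai_agent(
        "You are a research assistant that refines research questions."
    )
    try:
        result = await process_research_questions(
            agent,
            ["How does X affect Y?", "What factors drive Z?"],
        )
        print(result)
    finally:
        # Hypothetical teardown; replace with your client's real method
        if hasattr(client, "cleanup"):
            await client.cleanup()


if __name__ == "__main__":
    asyncio.run(_demo())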