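# Outage Odyssey: a smolagents CodeAgent that loads its tools from an MCP server over
# SSE and serves them through a custom Gradio chat UI (outage_odyssey_ui.GradioUI).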
import base64
import os
from io import BytesIO

import yaml
from dotenv import load_dotenv
from PIL import Image
from smolagents import CodeAgent, InferenceClientModel, TransformersModel, load_tool, tool
from smolagents.mcp_client import MCPClient

from outage_odyssey_ui import GradioUI

# Load environment variables from .env file
load_dotenv()
MISTRAL_API_KEY = os.getenv("MISTRAL_API_KEY")
ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY")
CODEASTREAL_API_KEY = os.getenv("CODEASTREAL_API_KEY")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
HF_TOKEN = os.getenv("HF_TOKEN")
# Normalize case so "True"/"TRUE" in the environment also select the cloud path.
USE_CLOUD_MODEL = os.getenv("USE_CLOUD_MODEL", "true").lower()
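# The cloud path calls a hosted DeepSeek-V3 endpoint via Hugging Face inference
# providers (Hyperbolic); the local path loads Qwen3-4B with transformers and
# assumes enough GPU memory or RAM on the host.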

if USE_CLOUD_MODEL == "true":
    from smolagents import LiteLLMModel  # kept for the alternative hosted models below

    # Alternative hosted models, swappable by uncommenting one line:
    #model = LiteLLMModel(model_id="gemini/gemini-2.0-flash", api_key=GEMINI_API_KEY)
    #model = LiteLLMModel(model_id="mistral/mistral-large-latest", api_key=MISTRAL_API_KEY)
    #model = LiteLLMModel(model_id="codestral/codestral-latest", api_key=CODEASTREAL_API_KEY)
    #model = LiteLLMModel(model_id="openai/gpt-4o", api_key=OPENAI_API_KEY)
    #model = LiteLLMModel(model_id="anthropic/claude-3-7-sonnet-latest", api_key=ANTHROPIC_API_KEY)
    model = InferenceClientModel(
        model_id="deepseek-ai/DeepSeek-V3-0324",
        provider="hyperbolic",
        api_key=HF_TOKEN,
    )
    model_description = "This agent uses MCP tools and a hosted LLM via InferenceClientModel."
    print(model_description)
else:
    print("Loading local Qwen model...")
    model = TransformersModel(
        model_id="Qwen/Qwen3-4B",  # full Hub repo id so the checkpoint resolves
        device_map="auto",         # let transformers place weights on available GPU/CPU
        max_new_tokens=8192,
        trust_remote_code=True,
    )
    print("Local model loaded successfully.")
    model_description = "This agent uses MCP tools and a locally-run Qwen3-4B model."


@tool
def pil_to_base64(pil_image: Image.Image) -> str:
    """
    Converts a PIL Image object to a base64-encoded PNG data URL.

    This tool takes a PIL Image object and encodes it into a base64 string
    formatted as a data URL, which can be used in HTML or other contexts that
    support embedded images.

    Args:
        pil_image (PIL.Image.Image): A PIL Image object to be converted.

    Returns:
        str: A string representing the image in base64 format, prefixed with the MIME type.
             The format is: 'data:image/png;base64,<base64_string>'

    Example:
        >>> pil_to_base64(Image.open('example.png'))
        'data:image/png;base64,iVBORw0KGgoAAAANSUh...'
    """
    buffer = BytesIO()
    pil_image.save(buffer, format="PNG")
    img_str = base64.b64encode(buffer.getvalue()).decode()
    return f"data:image/png;base64,{img_str}"
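# pil_to_base64 is handed to the CodeAgent below alongside the MCP tools, so generated
# images (e.g., matplotlib figures) can be returned to the chat UI as data URLs.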


mcp_client = None
try:
    # The MCP tool server must already be running at localhost:8000 (SSE transport).
    mcp_client = MCPClient({"url": "http://localhost:8000/sse"})
    tools = mcp_client.get_tools()
    #print(tools.to_json())

    # Summarize tool metadata; `t` avoids shadowing the imported `tool` decorator.
    tools_array = [{
        "name": t.name,
        "description": t.description,
        "inputs": t.inputs,
        "output_type": t.output_type,
        "is_initialized": t.is_initialized,
    } for t in tools]

    tool_names = [t["name"] for t in tools_array]
    print(f"Connected to MCP server. Available tools: {', '.join(tool_names)}")


    # Load the agent's prompt templates (system prompt, planning prompt, etc.).
    with open("prompts.yml", "r", encoding="utf-8") as stream:
        prompt_templates = yaml.safe_load(stream)

    # Optional tool from the Hub:
    #image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

    agent = CodeAgent(
        tools=[pil_to_base64, *tools],
        model=model,
        prompt_templates=prompt_templates,
        max_steps=10,
        planning_interval=5,  # re-plan every 5 steps
        additional_authorized_imports=[
            'time', 'math', 'queue', 're', 'stat', 'collections', 'datetime',
            'statistics', 'itertools', 'unicodedata', 'random', 'matplotlib.pyplot',
            'pandas', 'numpy', 'json', 'yaml', 'plotly', 'PIL', 'base64', 'io',
        ],  # 'open' and 'pillow' removed: neither is an importable module name
    )
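    # mcp_server=True is presumably forwarded to Gradio's launch(), letting the app be
    # consumed as an MCP server while it serves the web UI on port 7860.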

 
    agent.name = "Outage Odyssey Agent"
    GradioUI(agent=agent, file_upload_folder="uploaded_data").launch(
        server_name="0.0.0.0", server_port=7860, share=False, mcp_server=True
    )

except Exception as e:
    print(f"Error starting Gradio: {str(e)}")
finally:
    # mcp_client stays None if the connection itself failed, so guard before disconnecting.
    if mcp_client is not None:
        mcp_client.disconnect()
        print("MCP client disconnected")