import os

import gradio as gr
from openai import OpenAI
from smolagents import CodeAgent, InferenceClientModel, MCPClient

# Modal deployment details for the OpenAI-compatible vLLM endpoint.
workspace = "imessam"
environment = None
app_name = "example-vllm-openai-compatible"
function_name = "serve"

api_key = os.getenv("MODAL_API_KEY")

client = OpenAI(api_key=api_key)

# Modal web endpoints follow "<workspace>[-<environment>]--<app>-<function>.modal.run".
prefix = workspace + (f"-{environment}" if environment else "")

client.base_url = f"https://{prefix}--{app_name}-{function_name}.modal.run/v1"

print(str(client.base_url.host))

# Use whichever model the server exposes.
model_id = client.models.list().data[0].id
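
# Optional sanity check (a sketch; assumes the Modal endpoint is deployed and
# MODAL_API_KEY is set). A single chat request confirms the OpenAI-compatible
# server is reachable:
#
# completion = client.chat.completions.create(
#     model=model_id,
#     messages=[{"role": "user", "content": "Say hello."}],
# )
# print(completion.choices[0].message.content)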


def generate_podcast(prompt: str, history: list) -> str:
    """Run a CodeAgent with tools from a remote MCP server and return its answer."""
    response = ""

    # Connect to the web-search MCP Server we created in the previous section.
    mcp_client = MCPClient(
        {
            "url": "https://agents-mcp-hackathon-websearch.hf.space/gradio_api/mcp/sse",
            "transport": "sse",
        }
    )
    try:
        # Pull the server's tools and hand them to a fresh CodeAgent.
        tools = mcp_client.get_tools()

        model = InferenceClientModel()
        agent = CodeAgent(tools=[*tools], model=model)

        response = str(agent.run(prompt))
    finally:
        # Always close the MCP connection, even if the agent run fails.
        mcp_client.disconnect()

    return response

demo = gr.ChatInterface(
    fn=generate_podcast,
    type="messages",
    examples=["Generate a podcast about AI"],
    title="Podcast Generator Agent and MCP Server",
    description="This is an agent that uses MCP tools to generate a podcast, and can be used as an MCP server.",
)

if __name__ == "__main__":
    # mcp_server=True exposes generate_podcast as an MCP tool in addition to the UI.
    demo.launch(mcp_server=True)
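
# Usage sketch (an assumption, not part of the original app): with
# mcp_server=True, Gradio also serves this app's function as an MCP tool at
# /gradio_api/mcp/sse, so another agent can consume it just as this app
# consumes the web-search server. "<your-space>" below is a hypothetical
# placeholder for wherever this Space is hosted.
#
# podcast_client = MCPClient(
#     {"url": "https://<your-space>.hf.space/gradio_api/mcp/sse", "transport": "sse"}
# )
# podcast_tools = podcast_client.get_tools()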