Update app.py
app.py CHANGED

@@ -6,20 +6,19 @@ import math
import requests
from deep_translator import GoogleTranslator

-# Framework 1: LlamaIndex
-from llama_index.core.agent import
+# Framework 1: LlamaIndex (Core components for Agent Construction)
+from llama_index.core.agent import AgentRunner, ReActAgentWorker
from llama_index.core.tools import FunctionTool
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI

-# Framework 2: smolagents
-# FIXED IMPORT: HfApiModel is now InferenceClientModel
+# Framework 2: smolagents (Updated Model Class)
from smolagents import CodeAgent, DuckDuckGoSearchTool, tool, InferenceClientModel

# 0. SHARED CONFIG
HF_TOKEN = os.getenv("HF_TOKEN")

# ==========================================
-# PART 1: LLAMAINDEX AGENT
+# PART 1: LLAMAINDEX AGENT (Fixed Initialization)
# ==========================================
li_llm = HuggingFaceInferenceAPI(
    model_name="Qwen/Qwen2.5-7B-Instruct",
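
The next hunk picks up inside get_tokyo_time, the tool registered for the LlamaIndex agent; its body is unchanged and therefore not part of the diff. For reference, a minimal implementation compatible with FunctionTool.from_defaults could look like the sketch below (the use of pytz and datetime is an assumption, suggested only by the imports whitelisted for smolagents later in this commit):

# Illustrative sketch only; the real get_tokyo_time in app.py is unchanged and not shown in this diff.
from datetime import datetime
import pytz

def get_tokyo_time() -> str:
    """Return the current time in Tokyo; FunctionTool.from_defaults reads this docstring and the type hints."""
    return datetime.now(pytz.timezone("Asia/Tokyo")).strftime("%Y-%m-%d %H:%M:%S")
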
@@ -34,12 +33,14 @@ def get_tokyo_time() -> str:

li_tools = [FunctionTool.from_defaults(fn=get_tokyo_time)]

-#
-
-
-
+# FIX: Using ReActAgentWorker + AgentRunner instead of ReActAgent.from_tools
+# This bypasses the Pydantic AttributeError issue
+worker = ReActAgentWorker.from_tools(
+    tools=li_tools,
+    llm=li_llm,
    verbose=True
)
+li_agent = AgentRunner(worker)

def chat_llama(message, history):
    try:
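
Only the first two lines of chat_llama appear above as diff context; the rest of the handler is unchanged by this commit. For reference, a minimal sketch of how such a Gradio handler typically drives an AgentRunner built this way (the error-message text is illustrative, not taken from app.py):

# Illustrative sketch only; the real chat_llama in app.py is unchanged and not shown here.
def chat_llama(message, history):
    try:
        # AgentRunner.chat runs the full ReAct loop (reason, call tools, observe) and returns the final answer
        response = li_agent.chat(message)
        return str(response)
    except Exception as e:
        return f"LlamaIndex agent error: {e}"
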
@@ -51,7 +52,6 @@ def chat_llama(message, history):
# ==========================================
# PART 2: SMOLAGENTS
# ==========================================
-# Using the corrected model class
smol_model = InferenceClientModel(
    model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
    token=HF_TOKEN

@@ -68,7 +68,7 @@ def weather_tool(location: str) -> str:
smol_agent = CodeAgent(
    model=smol_model,
    tools=[weather_tool, DuckDuckGoSearchTool()],
-    additional_authorized_imports=['math', 'requests']
+    additional_authorized_imports=['math', 'requests', 'pytz', 'datetime']
)

def chat_smol(message, history):
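
additional_authorized_imports is the whitelist of modules that code written by the CodeAgent may import when it runs; adding 'pytz' and 'datetime' lets the agent answer time-zone questions by generating code instead of failing with an unauthorized-import error. A sketch of a call that exercises the wider whitelist (the prompt string is illustrative):

# Illustrative sketch only; the prompt is an example, not taken from app.py.
# With the wider whitelist, code the agent generates for this task may legally
# contain "import pytz" and "import datetime".
result = smol_agent.run("What is the current local time in Tokyo, and is it before noon there?")
print(result)
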
@@ -83,14 +83,12 @@ def chat_smol(message, history):
# ==========================================
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🤖 Multi-Framework Agent Space")
-    gr.Markdown("Compare
+    gr.Markdown("Compare the two leading agent frameworks side-by-side.")

    with gr.Tab("LlamaIndex (ReAct Agent)"):
-        gr.Markdown("This agent uses the classic **Reasoning + Acting** text loop.")
        gr.ChatInterface(fn=chat_llama)

    with gr.Tab("smolagents (Code Agent)"):
-        gr.Markdown("This agent solves tasks by writing and executing **Python code**.")
        gr.ChatInterface(fn=chat_smol)

if __name__ == "__main__":
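
gr.ChatInterface calls its fn as fn(message, history) and renders the returned string, which matches the chat_llama and chat_smol signatures above. The launch call under if __name__ == "__main__": falls outside the changed hunks; a conventional Spaces entry point, assuming nothing else happens at startup, would be:

# Sketch of the usual Gradio entry point; the actual launch line in app.py is outside this diff.
if __name__ == "__main__":
    demo.launch()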