YeeJun02 committed on
Commit
f5342cd
·
verified ·
1 Parent(s): e34cf5b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -14
app.py CHANGED
@@ -2,10 +2,10 @@ import os
2
  import gradio as gr
3
  import datetime
4
  import pytz
5
- import requests
6
 
7
- # Framework 1: LlamaIndex - Using the most direct, stable imports
8
- from llama_index.core.agent import ReActAgent
9
  from llama_index.core.tools import FunctionTool
10
  from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
11
 
@@ -16,31 +16,30 @@ from smolagents import CodeAgent, DuckDuckGoSearchTool, tool, InferenceClientMod
16
  HF_TOKEN = os.getenv("HF_TOKEN")
17
 
18
  # ==========================================
19
- # PART 1: LLAMAINDEX AGENT
20
  # ==========================================
21
  li_llm = HuggingFaceInferenceAPI(
22
  model_name="Qwen/Qwen2.5-7B-Instruct",
23
  token=HF_TOKEN,
24
- task="conversational"
25
  )
26
 
27
  def get_tokyo_time() -> str:
28
  """Returns the current time in Tokyo, Japan."""
29
  tz = pytz.timezone('Asia/Tokyo')
30
- return f"The current time is {datetime.datetime.now(tz).strftime('%H:%M:%S')}"
31
 
32
  li_tools = [FunctionTool.from_defaults(fn=get_tokyo_time)]
33
 
34
- # Using the class-based initialization which is now corrected in latest 0.10+ versions
35
- li_agent = ReActAgent.from_tools(
36
  tools=li_tools,
37
  llm=li_llm,
38
- verbose=True
39
  )
40
 
41
  def chat_llama(message, history):
42
  try:
43
- response = li_agent.chat(message)
 
44
  return str(response)
45
  except Exception as e:
46
  return f"LlamaIndex Error: {str(e)}"
@@ -74,15 +73,15 @@ def chat_smol(message, history):
74
  return f"Smolagents Error: {str(e)}"
75
 
76
  # ==========================================
77
- # PART 3: UNIFIED GRADIO UI
78
  # ==========================================
79
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
80
- gr.Markdown("# 🤖 Multi-Framework AI Space")
81
 
82
- with gr.Tab("LlamaIndex (ReAct)"):
83
  gr.ChatInterface(fn=chat_llama)
84
 
85
- with gr.Tab("smolagents (Code)"):
86
  gr.ChatInterface(fn=chat_smol)
87
 
88
  if __name__ == "__main__":
 
2
  import gradio as gr
3
  import datetime
4
  import pytz
5
+ import asyncio
6
 
7
+ # Framework 1: LlamaIndex (Updated to use Workflow for stability)
8
+ from llama_index.core.agent.workflow import AgentWorkflow
9
  from llama_index.core.tools import FunctionTool
10
  from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
11
 
 
16
  HF_TOKEN = os.getenv("HF_TOKEN")
17
 
18
  # ==========================================
19
+ # PART 1: LLAMAINDEX AGENT (Using Workflow)
20
  # ==========================================
21
  li_llm = HuggingFaceInferenceAPI(
22
  model_name="Qwen/Qwen2.5-7B-Instruct",
23
  token=HF_TOKEN,
 
24
  )
25
 
26
  def get_tokyo_time() -> str:
27
  """Returns the current time in Tokyo, Japan."""
28
  tz = pytz.timezone('Asia/Tokyo')
29
+ return f"The current time in Tokyo is {datetime.datetime.now(tz).strftime('%H:%M:%S')}"
30
 
31
  li_tools = [FunctionTool.from_defaults(fn=get_tokyo_time)]
32
 
33
+ # The Workflow approach is much more stable against Pydantic errors
34
+ li_agent = AgentWorkflow.from_tools_or_functions(
35
  tools=li_tools,
36
  llm=li_llm,
 
37
  )
38
 
39
  def chat_llama(message, history):
40
  try:
41
+ # AgentWorkflow uses async .run()
42
+ response = asyncio.run(li_agent.run(input=message))
43
  return str(response)
44
  except Exception as e:
45
  return f"LlamaIndex Error: {str(e)}"
 
73
  return f"Smolagents Error: {str(e)}"
74
 
75
  # ==========================================
76
+ # PART 3: GRADIO UI
77
  # ==========================================
78
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
79
+ gr.Markdown("# 🤖 Dual-Agent Testing Space")
80
 
81
+ with gr.Tab("LlamaIndex (AgentWorkflow)"):
82
  gr.ChatInterface(fn=chat_llama)
83
 
84
+ with gr.Tab("smolagents (CodeAgent)"):
85
  gr.ChatInterface(fn=chat_smol)
86
 
87
  if __name__ == "__main__":