mjschock committed (unverified)
Commit a60b872 · 1 Parent(s): c43be1d

Refactor app.py to import main from main_v2.py, enhancing modularity. Introduce main_v2.py with a new agent implementation, including OpenTelemetry integration and YAML-based prompt templates. Update requirements.txt to reflect the latest smolagents version. Add tasks.json for VSCode to streamline development workflows.

Files changed (5)
  1. .vscode/tasks.json +17 -0
  2. app.py +2 -1
  3. main.py +8 -3
  4. main_v2.py +120 -0
  5. requirements.txt +1 -1
.vscode/tasks.json ADDED
@@ -0,0 +1,17 @@
+ {
+     // See https://go.microsoft.com/fwlink/?LinkId=733558
+     // for the documentation about the tasks.json format
+     "version": "2.0.0",
+     "tasks": [
+         {
+             "label": "serve",
+             "type": "shell",
+             "command": "uv run python -m phoenix.server.main serve"
+         },
+         {
+             "label": "run",
+             "type": "shell",
+             "command": "uv run python -m main_v2"
+         }
+     ]
+ }
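These two tasks only wrap shell commands: `serve` starts the local Phoenix server (the collector and trace UI that the OpenTelemetry changes below report to), and `run` executes the new agent module. As an illustration only, the `run` task is roughly what the standard library's runpy does from Python; the commit itself drives everything through `uv` and the VS Code task runner.

import runpy

# Roughly equivalent to `python -m main_v2`: execute the module as a script so
# its `if __name__ == "__main__":` block runs.
runpy.run_module("main_v2", run_name="__main__")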
app.py CHANGED
@@ -6,7 +6,8 @@ import gradio as gr
  import pandas as pd
  import requests

- from main import main
+ # from main import main
+ from main_v2 import main

  global question_counter
  question_counter = 0
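The only change to app.py is which module supplies `main`. A minimal sketch of the call path the app now exercises, with an illustrative question string that is not part of the commit:

# app.py now resolves `main` from the new module
from main_v2 import main

# main() runs the CodeAgent on the task and returns the extracted final answer
answer = main("What is the capital of France?")
print(answer)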
main.py CHANGED
@@ -11,6 +11,9 @@ import yaml
  from dotenv import find_dotenv, load_dotenv
  from langgraph.checkpoint.memory import MemorySaver
  from langgraph.graph import END, START, StateGraph
+ from openinference.instrumentation.smolagents import SmolagentsInstrumentor
+ from opentelemetry.sdk.trace.export import BatchSpanProcessor
+ from phoenix.otel import register
  from smolagents import CodeAgent, LiteLLMModel
  from smolagents.memory import ActionStep, FinalAnswerStep
  from smolagents.monitoring import LogLevel
@@ -19,12 +22,14 @@ from agents import create_data_analysis_agent, create_media_agent, create_web_agent
  from prompts import MANAGER_SYSTEM_PROMPT
  from tools import perform_calculation, web_search
  from utils import extract_final_answer
- from phoenix.otel import register
- from openinference.instrumentation.smolagents import SmolagentsInstrumentor

  litellm._turn_on_debug()
+
+ # Configure OpenTelemetry with BatchSpanProcessor
  register()
- SmolagentsInstrumentor().instrument()
+ tracer_provider = register()
+ tracer_provider.add_span_processor(BatchSpanProcessor())
+ SmolagentsInstrumentor().instrument(tracer_provider=tracer_provider)

  # Configure logging
  logging.basicConfig(
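For context, a minimal sketch of the tracing setup this hunk wires up: Phoenix's `register()` returns an OpenTelemetry TracerProvider, an extra BatchSpanProcessor can be attached to it, and the smolagents instrumentor is pointed at that provider. The explicit OTLP exporter and endpoint below are assumptions added for illustration (the commit constructs `BatchSpanProcessor()` without arguments); they correspond to Phoenix's default local OTLP/HTTP endpoint.

from openinference.instrumentation.smolagents import SmolagentsInstrumentor
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from phoenix.otel import register

# register() configures and returns a TracerProvider aimed at the Phoenix collector
tracer_provider = register()

# Batch spans before export; the exporter/endpoint here are assumed defaults,
# not taken from the commit
tracer_provider.add_span_processor(
    BatchSpanProcessor(OTLPSpanExporter(endpoint="http://localhost:6006/v1/traces"))
)

# Route smolagents spans through this provider
SmolagentsInstrumentor().instrument(tracer_provider=tracer_provider)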
main_v2.py ADDED
@@ -0,0 +1,120 @@
+ import importlib
+ import logging
+ import os
+
+ import requests
+ import yaml
+ from dotenv import find_dotenv, load_dotenv
+ from litellm._logging import _disable_debugging
+ from openinference.instrumentation.smolagents import SmolagentsInstrumentor
+ from phoenix.otel import register
+
+ # from smolagents import CodeAgent, LiteLLMModel, LiteLLMRouterModel
+ from smolagents import CodeAgent, LiteLLMModel
+ from smolagents.default_tools import DuckDuckGoSearchTool, VisitWebpageTool
+ from smolagents.monitoring import LogLevel
+
+ from agents import create_data_analysis_agent, create_media_agent, create_web_agent
+ from utils import extract_final_answer
+
+ _disable_debugging()
+
+ # Configure OpenTelemetry with Phoenix
+ register()
+ SmolagentsInstrumentor().instrument()
+
+ logging.basicConfig(
+     level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+ )
+ logger = logging.getLogger(__name__)
+
+ load_dotenv(find_dotenv())
+
+ API_BASE = os.getenv("API_BASE")
+ API_KEY = os.getenv("API_KEY")
+ MODEL_ID = os.getenv("MODEL_ID")
+
+ model = LiteLLMModel(
+     api_base=API_BASE,
+     api_key=API_KEY,
+     model_id=MODEL_ID,
+ )
+
+ web_agent = create_web_agent(model)
+ data_agent = create_data_analysis_agent(model)
+ media_agent = create_media_agent(model)
+
+ prompt_templates = yaml.safe_load(
+     importlib.resources.files("smolagents.prompts")
+     .joinpath("code_agent.yaml")
+     .read_text()
+ )
+
+ agent = CodeAgent(
+     # add_base_tools=True,
+     additional_authorized_imports=[
+         "json",
+         "pandas",
+         "numpy",
+         "re",
+         # "requests"
+         # "urllib.request",
+     ],
+     # max_steps=10,
+     # managed_agents=[web_agent, data_agent, media_agent],
+     model=model,
+     prompt_templates=prompt_templates,
+     tools=[
+         # web_search,
+         # perform_calculation,
+         DuckDuckGoSearchTool(max_results=1),
+         VisitWebpageTool(max_output_length=256),
+     ],
+     step_callbacks=None,
+     verbosity_level=LogLevel.ERROR,
+ )
+
+ agent.visualize()
+
+
+ def main(task: str):
+     result = agent.run(
+         additional_args=None,
+         images=None,
+         max_steps=3,
+         reset=True,
+         stream=False,
+         task=task,
+     )
+
+     logger.info(f"Result: {result}")
+
+     return extract_final_answer(result)
+
+
+ if __name__ == "__main__":
+     DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
+
+     api_url = DEFAULT_API_URL
+     questions_url = f"{api_url}/questions"
+     submit_url = f"{api_url}/submit"
+
+     response = requests.get(questions_url, timeout=15)
+     response.raise_for_status()
+     questions_data = response.json()
+
+     for question_data in questions_data[:1]:
+         file_name = question_data["file_name"]
+         level = question_data["Level"]
+         question = question_data["question"]
+         task_id = question_data["task_id"]
+
+         logger.info(f"Question: {question}")
+         # logger.info(f"Level: {level}")
+         if file_name:
+             logger.info(f"File Name: {file_name}")
+         # logger.info(f"Task ID: {task_id}")
+
+         final_answer = main(question)
+         logger.info(f"Final Answer: {final_answer}")
+         logger.info("--------------------------------")
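main_v2.py builds its CodeAgent from the stock prompt templates shipped inside the smolagents package. A minimal sketch of loading and lightly customizing those templates before constructing the agent; the appended instruction is purely illustrative and assumes the packaged code_agent.yaml keeps a top-level `system_prompt` key:

import importlib.resources

import yaml

# Load the default CodeAgent prompt templates bundled with smolagents
prompt_templates = yaml.safe_load(
    importlib.resources.files("smolagents.prompts")
    .joinpath("code_agent.yaml")
    .read_text()
)

# Hypothetical tweak: append a house rule to the default system prompt
prompt_templates["system_prompt"] += (
    "\nAlways reply with a single short phrase, with no extra commentary."
)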
requirements.txt CHANGED
@@ -8,6 +8,6 @@ pytest>=8.3.5
  pytest-cov>=6.1.1
  python-dotenv>=1.0.0
  requests>=2.32.3
- smolagents[litellm,telemetry]>=0.1.3
+ smolagents[litellm,telemetry]>=1.14.0
  typing-extensions>=4.5.0
  wikipedia-api>=0.8.1