MathFrenchToast committed
Commit bbc85bc · 1 Parent(s): 7020f76

refactor: code organisation + info about local services on the gradio home page

app.py CHANGED
@@ -1,11 +1,10 @@
 import os
 import gradio as gr
 import requests
-import inspect
 import pandas as pd
 from dotenv import load_dotenv
 
-from myagent import BasicAgent # Import your agent class from myagent.py
+from myagent import BasicAgent # Unused basic single agent
 from multiagents import MultiAgent
 
 from phoenix.otel import register
@@ -100,7 +99,7 @@ def run_and_submit_all(nb_questions: int, profile: gr.OAuthProfile | None):
 agent_question += f"\n\nFile URL: {file_question_url}"
 
 submitted_answer = agent(agent_question)
-
+
 answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
 results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
 except Exception as e:
@@ -163,6 +162,10 @@ def run_and_submit_all(nb_questions: int, profile: gr.OAuthProfile | None):
 # --- Build Gradio Interface using Blocks ---
 with gr.Blocks() as demo:
 gr.Markdown("# Basic Agent Evaluation Runner")
+gr.Markdown("""
+*Special Considerations*: Due to limitation issues, this code depends on a local search engine and a local speech-to-text model. Both run through Docker; see the README file.
+One can achieve similar results by using the Google Search API and the OpenAI Whisper API.
+""")
 gr.Markdown(
 """
 **Instructions:**
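The new home-page note points to the Google Search API and the OpenAI Whisper API as hosted alternatives to the local Docker services. As a rough sketch (not part of this commit), a hosted replacement for the local speech-to-text tool could look like the following, assuming an `OPENAI_API_KEY` environment variable and the `openai>=1.0` Python SDK; the function name and audio path are hypothetical.

```python
# Sketch only: a hosted alternative to the local speech-to-text service mentioned above.
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

def transcribe_with_whisper_api(audio_path: str) -> str:
    """Return the transcript of an audio file using the hosted Whisper API."""
    with open(audio_path, "rb") as audio_file:
        transcript = client.audio.transcriptions.create(model="whisper-1", file=audio_file)
    return transcript.text
```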
common/__init__.py ADDED
File without changes
{tools → common}/mylogger.py RENAMED
File without changes
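For reference, `mylog` and `save_file_with_timestamp` now live in `common/mylogger.py` (moved unchanged from `tools/`). The module's contents are not shown in this commit; a hypothetical minimal version, consistent with the call sites in the diffs below, might look like this:

```python
# Hypothetical sketch of common/mylogger.py; the real file is unchanged by this commit and not shown.
import time

def mylog(tag: str, message) -> None:
    # Signature inferred from the call mylog("check_final_answer", final_answer) in multiagents.py.
    print(f"[{time.strftime('%Y-%m-%d %H:%M:%S')}] {tag}: {message}")

def save_file_with_timestamp(content: str, prefix: str = "dump") -> str:
    # Assumed helper: write content to a timestamped file and return the path.
    path = f"{prefix}_{time.strftime('%Y%m%d_%H%M%S')}.txt"
    with open(path, "w", encoding="utf-8") as f:
        f.write(content)
    return path
```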
multiagents.py CHANGED
@@ -2,14 +2,14 @@
 # a multi agent proposal to solve HF agent course final assignment
 import os
 import dotenv
-from smolagents import CodeAgent, ToolCallingAgent
+from smolagents import CodeAgent
 from smolagents import OpenAIServerModel
 from tools.fetch import fetch_webpage, search_web
 from smolagents import PythonInterpreterTool
 from tools.yttranscript import get_youtube_transcript, get_youtube_title_description
 from tools.stt import get_text_transcript_from_audio_file
 from tools.image import analyze_image
-from tools.mylogger import save_file_with_timestamp, mylog
+from common.mylogger import mylog
 import myprompts
 
 dotenv.load_dotenv()
@@ -43,8 +43,9 @@ openai_41mini_model = OpenAIServerModel(
 def check_final_answer(final_answer, agent_memory) -> bool:
 """
 Check if the final answer is correct.
-This is a placeholder function. You can implement your own logic here.
+basic check on the length of the answer.
 """
+mylog("check_final_answer", final_answer)
 # if return answer is more than 200 characters, we will assume it is not correct
 if len(str(final_answer)) > 200:
 return False
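Putting the two hunks together, the refactored helper reads roughly as follows; everything after the length check is an assumption, since the rest of the function body falls outside the hunk.

```python
# check_final_answer as reassembled from the hunks above; the final return is assumed.
from common.mylogger import mylog

def check_final_answer(final_answer, agent_memory) -> bool:
    """
    Check if the final answer is correct.
    basic check on the length of the answer.
    """
    mylog("check_final_answer", final_answer)
    # if return answer is more than 200 characters, we will assume it is not correct
    if len(str(final_answer)) > 200:
        return False
    return True  # assumption: short answers pass this heuristic
```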
tools/fetch.py CHANGED
@@ -3,7 +3,7 @@ from smolagents import tool
 import requests
 from markdownify import markdownify as md
 from bs4 import BeautifulSoup
-from tools.mylogger import save_file_with_timestamp, mylog
+from common.mylogger import save_file_with_timestamp, mylog
 
 @tool
 def fetch_webpage(url: str, convert_to_markdown: bool = True) -> str:
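Only the import line changes in `tools/fetch.py`; the body of `fetch_webpage` lies outside the hunk. A minimal sketch consistent with the imports and signature shown here (not the actual implementation) could be:

```python
# Sketch of fetch_webpage based on the signature and imports in the diff; the real body is not shown.
import requests
from bs4 import BeautifulSoup
from markdownify import markdownify as md
from smolagents import tool
from common.mylogger import save_file_with_timestamp, mylog  # new import path after the refactor

@tool
def fetch_webpage(url: str, convert_to_markdown: bool = True) -> str:
    """Fetch a web page and return its content.

    Args:
        url: Address of the page to fetch.
        convert_to_markdown: If True, convert the HTML to Markdown; otherwise return plain text.
    """
    mylog("fetch_webpage", url)
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    if convert_to_markdown:
        return md(response.text)
    return BeautifulSoup(response.text, "html.parser").get_text()
```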
webpage ADDED
The diff for this file is too large to render.