import os
import gradio as gr
import requests
import inspect
import yaml
import pandas as pd
#import smolagents #to test
from smolagents import CodeAgent, InferenceClientModel, load_tool, tool #DuckDuckGoSearchTool,
from huggingface_hub import InferenceClient
import json
from final_answer import FinalAnswerTool
from visit_webpage import VisitWebpageTool
from web_search import web_search_DuckDuckGoSearchTool
from wikipediaLookup import WikipediaLookupTool
# from video_translation import AudioTranscriptionTool
# from youtube_audio_download import YouTubeAudioDownloadTool
api_url = "https://agents-course-unit4-scoring.hf.space"
questions_url = f"{api_url}/questions"
submit_url = f"{api_url}/submit"
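# Placeholder agent kept from the course template; the Gradio handler below uses agent_codeagent instead.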
class BasicAgent:
    def __init__(self):
        print("BasicAgent initialized.")

    def __call__(self, question: str) -> str:
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        fixed_answer = "This is a default answer."
        print(f"Agent returning fixed answer: {fixed_answer}")
        return fixed_answer
def load_questions_from_file(filepath="questions.json"):
    try:
        with open(filepath, "r", encoding="utf-8") as f:
            questions_data = json.load(f)
        if not questions_data:
            print("Loaded file is empty.")
            return "Loaded file is empty.", None
        print(f"Loaded {len(questions_data)} questions from file.")
        return "Loaded questions successfully.", questions_data
    except FileNotFoundError:
        print("File not found. Please run the API fetch first.")
        return "File not found.", None
    except json.JSONDecodeError as e:
        print(f"Error decoding JSON: {e}")
        return f"Error decoding JSON: {e}", None
    except Exception as e:
        print(f"Unexpected error: {e}")
        return f"Unexpected error: {e}", None
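# Sketch of the "API fetch" referenced above (a hypothetical helper, not part of the original
# flow): it would cache the scoring service's questions to questions.json for load_questions_from_file.
def fetch_questions_to_file(filepath="questions.json"):
    response = requests.get(questions_url, timeout=15)
    response.raise_for_status()
    questions_data = response.json()
    with open(filepath, "w", encoding="utf-8") as f:
        json.dump(questions_data, f, ensure_ascii=False, indent=2)
    print(f"Fetched and saved {len(questions_data)} questions to {filepath}.")
    return questions_data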
# Set up
# Token
# Model
model = InferenceClientModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',  # this model may be overloaded at times
    custom_role_conversions=None,
)
#Tools
final_answer = FinalAnswerTool()
#duckDuckGoSearch = DuckDuckGoSearchTool() #smolagent version
visitWebpage = VisitWebpageTool()
wikipediaLookup = WikipediaLookupTool()
webSearch = web_search_DuckDuckGoSearchTool()
# YouTube task
# video_translation = AudioTranscriptionTool()
# youtube_download = YouTubeAudioDownloadTool()
# with open("prompts.yaml", 'r') as stream:
# prompt_templates = yaml.safe_load(stream)
# Agent
agent_codeagent = CodeAgent(
    model=model,
    # add your tools here (don't remove final_answer); DuckDuckGoSearchTool, video_translation
    # and youtube_download were removed
    tools=[final_answer, wikipediaLookup, visitWebpage, webSearch],
    max_steps=3,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    # prompt_templates=prompt_templates
)
# Gradio handler that runs the agent
def run_once(state):
    if state is not None:
        return "Already run once. Refresh to rerun.", state
    status_message, questions_data = load_questions_from_file()
    if questions_data is None or len(questions_data) == 0:
        return "No questions found or failed to load.", None
    question = questions_data[0]
    question_text = question["question"]
    task_id = question["task_id"]
    print(f"\nTask ID: {task_id}")
    print(f"Question: {question_text}")
    try:
        answer = agent_codeagent(question_text)
        output = f"Answer to task {task_id}:\n{answer}"
        return output, output
    except Exception as e:
        return f"Error running agent: {e}", None
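# Submission to submit_url is not wired up in this handler. A hedged sketch (the payload
# keys below are an assumption based on the course template, not confirmed here):
# def submit_answers(username, agent_code_url, answers):
#     # answers: list of {"task_id": ..., "submitted_answer": ...}
#     payload = {"username": username, "agent_code": agent_code_url, "answers": answers}
#     response = requests.post(submit_url, json=payload, timeout=60)
#     response.raise_for_status()
#     return response.json()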
# Create Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("## Run AI Agent Once")
    output_text = gr.Textbox(label="Agent Output", lines=10)
    run_button = gr.Button("Run Agent")
    state = gr.State()  # cache variable to prevent re-runs
    run_button.click(fn=run_once, inputs=state, outputs=[output_text, state])
# Launch the interface
demo.launch()