from smolagents import (
    HfApiModel,
    CodeAgent,
    load_tool,
    Tool,
    InferenceClientModel,
    ToolCallingAgent,
    FinalAnswerTool,
    DuckDuckGoSearchTool,
    VisitWebpageTool,
    GoogleSearchTool,
    PythonInterpreterTool,
)
import os

from dotenv import load_dotenv
from huggingface_hub import login
from langchain.agents import load_tools

# Load HF_API_KEY (and any other secrets) from a local .env file, then
# authenticate against the Hugging Face Hub.
load_dotenv()
login(os.environ["HF_API_KEY"])

from sample_questions import QUESTIONS

# Tools available to the agent
# wikipedia = Tool.from_langchain(load_tools(["wikipedia"])[0])
tools = [
    DuckDuckGoSearchTool(),
    VisitWebpageTool(),
    # PythonInterpreterTool(),
    # FinalAnswerTool(),
    # wikipedia,
]

# Model
model = HfApiModel(
    "Qwen/Qwen2.5-72B-Instruct",
    provider="together",
    # max_tokens=40096,
    temperature=0.1,
    # token=get_huggingface_token(),
)

# Tool-calling agent
tool_calling_agent = ToolCallingAgent(
    model=model,
    tools=tools,
    max_steps=20,
)
# Keep the rich console output narrow so agent logs stay readable.
tool_calling_agent.logger.console.width = 66

# question = QUESTIONS[0]
# answer = tool_calling_agent.run(question)
# print(answer)
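
# A minimal usage sketch, assuming QUESTIONS is a list of plain-text question
# strings: run the tool-calling agent over a few sample questions and print
# each answer. Adjust the slice and output format as needed.
if __name__ == "__main__":
    for question in QUESTIONS[:3]:  # limit to a few questions to keep API usage small
        answer = tool_calling_agent.run(question)
        print(f"Q: {question}\nA: {answer}\n")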