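"""Minimal smolagents demo: a ToolCallingAgent with web-search tools.

Loads a Hugging Face token from .env, builds a Qwen2.5-72B-Instruct model
served through the Together inference provider, and can run the agent over
the questions in sample_questions.QUESTIONS.
"""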
from smolagents import (
    HfApiModel,
    CodeAgent,
    load_tool,
    Tool,
    InferenceClientModel,
    ToolCallingAgent,
    FinalAnswerTool,
    DuckDuckGoSearchTool,
    VisitWebpageTool,
    GoogleSearchTool,
    PythonInterpreterTool,
)
import os

from dotenv import load_dotenv
from huggingface_hub import login
from langchain.agents import load_tools

from sample_questions import QUESTIONS

# Load HF_API_KEY from .env and authenticate with the Hugging Face Hub
load_dotenv()
login(os.environ["HF_API_KEY"])
# Tools available to the agent (extras left commented out for experiments)
# wikipedia = Tool.from_langchain(load_tools(["wikipedia"])[0])
tools = [
    DuckDuckGoSearchTool(),
    VisitWebpageTool(),
    # PythonInterpreterTool(),
    # FinalAnswerTool(),
    # wikipedia,
]
# LLM served through the Hugging Face Inference API (Together provider)
model = HfApiModel(
    "Qwen/Qwen2.5-72B-Instruct",
    provider="together",
    # max_tokens=40096,
    temperature=0.1,
    # token=get_huggingface_token(),
)
# Alternative deterministic model (currently unused; the agent below uses `model`)
llm = HfApiModel("Qwen/Qwen2.5-72B-Instruct", temperature=0)

# Tool-calling agent: plans with the LLM and invokes the tools defined above
toolCallingAgent = ToolCallingAgent(
    model=model,
    tools=tools,
    max_steps=20,
)
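# Narrow the log console so the agent's rich output wraps at 66 columns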
toolCallingAgent.logger.console.width = 66
# question = QUESTIONS[0]
# answer = toolCallingAgent.run(question)
# print(answer)
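
# Example driver, a minimal sketch: assumes sample_questions.QUESTIONS is a
# list of question strings. Guarded by __main__ so the agent only runs when
# this file is executed directly.
if __name__ == "__main__":
    for question in QUESTIONS[:1]:
        answer = toolCallingAgent.run(question)
        print(f"Q: {question}\nA: {answer}\n")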