Spaces:
Runtime error
Runtime error
File size: 40,932 Bytes
5938097 5a72425 5938097 5a72425 5938097 5a72425 5938097 5a72425 5938097 5a72425 96bf2d7 d7ea820 5a72425 96bf2d7 5938097 96bf2d7 5a72425 96bf2d7 5938097 5a72425 d7ea820 5a72425 96bf2d7 5a72425 5938097 5a72425 5938097 5a72425 5938097 5a72425 5938097 5a72425 5938097 5a72425 5938097 5a72425 d7ea820 5a72425 5938097 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 |
{
"cells": [
{
"cell_type": "markdown",
"id": "cb28dbf0",
"metadata": {},
"source": [
"#### Do the imports and prepare the agent code"
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "6cff8644",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Token was found\n"
]
}
],
"source": [
"import os\n",
"token = os.getenv(\"HF_TOKEN\")\n",
"\n",
"if token is None:\n",
" raise ValueError('You must set the HF_TOKEN environment variable')\n",
"else:\n",
" print('Token was found')\n",
" #print('Token:', token)\n"
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "a414cce1",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"BasicAgent initialized.\n"
]
}
],
"source": [
"import os\n",
"import requests\n",
"import inspect\n",
"import pandas as pd\n",
"import json\n",
"import yaml\n",
"\n",
"from smolagents import ToolCallingAgent, Tool, InferenceClientModel, DuckDuckGoSearchTool, WikipediaSearchTool\n",
"from smolagents import OpenAIServerModel\n",
"from tools import fetch_file\n",
"\n",
"#model = InferenceClientModel(model_id=\"Qwen/Qwen3-32B\", provider=\"nscale\")\n",
"model = InferenceClientModel(model_id=\"deepseek-ai/DeepSeek-R1\", provider=\"nebius\")\n",
"\n",
"# Set your Gemini API key in the environment variable GEMINI_API_KEY_1\n",
"#model = OpenAIServerModel(\n",
"# model_id=\"gemini-2.5-flash\", \n",
"# api_base=\"https://generativelanguage.googleapis.com/v1beta\", \n",
"# api_key=os.getenv(\"GEMINI_API_KEY_1\")\n",
"# ) \n",
"#print('API key was found:', os.getenv(\"GEMINI_API_KEY_1\") is not None)\n",
"\n",
"# --- Basic Agent Definition ---\n",
"# ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------\n",
"class BasicAgent:\n",
" def __init__(self):\n",
" print(\"BasicAgent initialized.\")\n",
" def __call__(self, taskid: str, question: str) -> str:\n",
" print(f\"Agent received question (first 50 chars): {question[:50]}...\")\n",
" #fixed_answer = \"This is a default answer.\"\n",
" #print(f\"Agent returning fixed answer: {fixed_answer}\")\n",
"\n",
" prompt = f\"\"\"\n",
" \n",
" You are a general AI assistant. \n",
" I will ask you a question and you can use 9 steps to answer the question.\n",
" You can use the tools I provide you to answer my question. Every tool call reduces the number \n",
" of remaining steps available to answer the question. \n",
" Report your thoughts, and finish your answer with the following template: \n",
" FINAL ANSWER: [YOUR FINAL ANSWER]. \n",
" \n",
" YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings.\n",
" If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise.\n",
" If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise.\n",
" If you are asked for a comma separated list, apply the above rules depending on whether the element to be put in the list is a number or a string. \n",
"\n",
" The taskid is {taskid}. If you need to download a file that comes with the question then use the taskid to fetch the file\n",
" The question is '{question}'.\n",
"\n",
" You have 9 steps to answer the question.\n",
"\n",
" \"\"\"\n",
"\n",
" agent = ToolCallingAgent(\n",
" tools=[\n",
" fetch_file, \n",
" DuckDuckGoSearchTool(), \n",
" WikipediaSearchTool()\n",
" ], \n",
" model=model, \n",
" max_steps=9,\n",
" )\n",
"\n",
" # Run the agent with the prompt\n",
" fixed_answer = agent.run(prompt)\n",
"\n",
" return fixed_answer\n",
"\n",
"agent = BasicAgent()\n"
]
},
{
"cell_type": "markdown",
"id": "9190ad36",
"metadata": {},
"source": [
"Run the first question against the agent"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "09b50aa1",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Agent received question (first 50 chars): How many studio albums were published by Mercedes ...\n"
]
},
{
"data": {
"text/html": [
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"color: #d4b702; text-decoration-color: #d4b702\">โญโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ </span><span style=\"color: #d4b702; text-decoration-color: #d4b702; font-weight: bold\">New run</span><span style=\"color: #d4b702; text-decoration-color: #d4b702\"> โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโฎ</span>\n",
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span>\n",
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span> <span style=\"font-weight: bold\">You are a general AI assistant. </span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span>\n",
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span> <span style=\"font-weight: bold\"> I will ask you a question and you can use 9 steps to answer the question.</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span>\n",
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span> <span style=\"font-weight: bold\"> You can use the tools I provide you to answer my question. Every tool call reduces the number </span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span>\n",
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span> <span style=\"font-weight: bold\"> of remaining steps available to answer the question. </span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span>\n",
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span> <span style=\"font-weight: bold\"> Report your thoughts, and finish your answer with the following template: </span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span>\n",
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span> <span style=\"font-weight: bold\"> FINAL ANSWER: [YOUR FINAL ANSWER\\]. </span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span>\n",
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span> <span style=\"font-weight: bold\"> </span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span>\n",
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span> <span style=\"font-weight: bold\"> YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers </span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span>\n",
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span> <span style=\"font-weight: bold\">and/or strings.</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span>\n",
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span> <span style=\"font-weight: bold\"> If you are asked for a number, donโt use comma to write your number neither use units such as $ or </span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span>\n",
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span> <span style=\"font-weight: bold\">percent sign unless specified otherwise.</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span>\n",
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span> <span style=\"font-weight: bold\"> If you are asked for a string, donโt use articles, neither abbreviations (e.g. for cities), and write </span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span>\n",
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span> <span style=\"font-weight: bold\">the digits in plain text unless specified otherwise.</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span>\n",
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span> <span style=\"font-weight: bold\"> If you are asked for a comma separated list, apply the above rules depending of whether the element to </span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span>\n",
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span> <span style=\"font-weight: bold\">be put in the list is a number or a string. </span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span>\n",
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span>\n",
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span> <span style=\"font-weight: bold\"> The taskid is 8e867cd7-cff9-4e6c-867a-ff5ddc2550be. If you need to download a file that comes with the </span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span>\n",
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span> <span style=\"font-weight: bold\">question then use the taskid to fetch the file</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span>\n",
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span> <span style=\"font-weight: bold\"> The question is 'How many studio albums were published by Mercedes Sosa between 2000 and 2009 </span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span>\n",
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span> <span style=\"font-weight: bold\">(included)? You can use the latest 2022 version of english wikipedia.'.</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span>\n",
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span>\n",
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span> <span style=\"font-weight: bold\"> You have 9 steps to answer the question.</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span>\n",
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">โ</span>\n",
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">โฐโ InferenceClientModel - deepseek-ai/DeepSeek-R1 โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโฏ</span>\n",
"</pre>\n"
],
"text/plain": [
"\u001b[38;2;212;183;2mโญโ\u001b[0m\u001b[38;2;212;183;2mโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ\u001b[0m\u001b[38;2;212;183;2m \u001b[0m\u001b[1;38;2;212;183;2mNew run\u001b[0m\u001b[38;2;212;183;2m \u001b[0m\u001b[38;2;212;183;2mโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ\u001b[0m\u001b[38;2;212;183;2mโโฎ\u001b[0m\n",
"\u001b[38;2;212;183;2mโ\u001b[0m \u001b[38;2;212;183;2mโ\u001b[0m\n",
"\u001b[38;2;212;183;2mโ\u001b[0m \u001b[1mYou are a general AI assistant. \u001b[0m \u001b[38;2;212;183;2mโ\u001b[0m\n",
"\u001b[38;2;212;183;2mโ\u001b[0m \u001b[1m I will ask you a question and you can use 9 steps to answer the question.\u001b[0m \u001b[38;2;212;183;2mโ\u001b[0m\n",
"\u001b[38;2;212;183;2mโ\u001b[0m \u001b[1m You can use the tools I provide you to answer my question. Every tool call reduces the number \u001b[0m \u001b[38;2;212;183;2mโ\u001b[0m\n",
"\u001b[38;2;212;183;2mโ\u001b[0m \u001b[1m of remaining steps available to answer the question. \u001b[0m \u001b[38;2;212;183;2mโ\u001b[0m\n",
"\u001b[38;2;212;183;2mโ\u001b[0m \u001b[1m Report your thoughts, and finish your answer with the following template: \u001b[0m \u001b[38;2;212;183;2mโ\u001b[0m\n",
"\u001b[38;2;212;183;2mโ\u001b[0m \u001b[1m FINAL ANSWER: [YOUR FINAL ANSWER\\]. \u001b[0m \u001b[38;2;212;183;2mโ\u001b[0m\n",
"\u001b[38;2;212;183;2mโ\u001b[0m \u001b[1m \u001b[0m \u001b[38;2;212;183;2mโ\u001b[0m\n",
"\u001b[38;2;212;183;2mโ\u001b[0m \u001b[1m YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers \u001b[0m \u001b[38;2;212;183;2mโ\u001b[0m\n",
"\u001b[38;2;212;183;2mโ\u001b[0m \u001b[1mand/or strings.\u001b[0m \u001b[38;2;212;183;2mโ\u001b[0m\n",
"\u001b[38;2;212;183;2mโ\u001b[0m \u001b[1m If you are asked for a number, donโt use comma to write your number neither use units such as $ or \u001b[0m \u001b[38;2;212;183;2mโ\u001b[0m\n",
"\u001b[38;2;212;183;2mโ\u001b[0m \u001b[1mpercent sign unless specified otherwise.\u001b[0m \u001b[38;2;212;183;2mโ\u001b[0m\n",
"\u001b[38;2;212;183;2mโ\u001b[0m \u001b[1m If you are asked for a string, donโt use articles, neither abbreviations (e.g. for cities), and write \u001b[0m \u001b[38;2;212;183;2mโ\u001b[0m\n",
"\u001b[38;2;212;183;2mโ\u001b[0m \u001b[1mthe digits in plain text unless specified otherwise.\u001b[0m \u001b[38;2;212;183;2mโ\u001b[0m\n",
"\u001b[38;2;212;183;2mโ\u001b[0m \u001b[1m If you are asked for a comma separated list, apply the above rules depending of whether the element to \u001b[0m \u001b[38;2;212;183;2mโ\u001b[0m\n",
"\u001b[38;2;212;183;2mโ\u001b[0m \u001b[1mbe put in the list is a number or a string. \u001b[0m \u001b[38;2;212;183;2mโ\u001b[0m\n",
"\u001b[38;2;212;183;2mโ\u001b[0m \u001b[38;2;212;183;2mโ\u001b[0m\n",
"\u001b[38;2;212;183;2mโ\u001b[0m \u001b[1m The taskid is 8e867cd7-cff9-4e6c-867a-ff5ddc2550be. If you need to download a file that comes with the \u001b[0m \u001b[38;2;212;183;2mโ\u001b[0m\n",
"\u001b[38;2;212;183;2mโ\u001b[0m \u001b[1mquestion then use the taskid to fetch the file\u001b[0m \u001b[38;2;212;183;2mโ\u001b[0m\n",
"\u001b[38;2;212;183;2mโ\u001b[0m \u001b[1m The question is 'How many studio albums were published by Mercedes Sosa between 2000 and 2009 \u001b[0m \u001b[38;2;212;183;2mโ\u001b[0m\n",
"\u001b[38;2;212;183;2mโ\u001b[0m \u001b[1m(included)? You can use the latest 2022 version of english wikipedia.'.\u001b[0m \u001b[38;2;212;183;2mโ\u001b[0m\n",
"\u001b[38;2;212;183;2mโ\u001b[0m \u001b[38;2;212;183;2mโ\u001b[0m\n",
"\u001b[38;2;212;183;2mโ\u001b[0m \u001b[1m You have 9 steps to answer the question.\u001b[0m \u001b[38;2;212;183;2mโ\u001b[0m\n",
"\u001b[38;2;212;183;2mโ\u001b[0m \u001b[38;2;212;183;2mโ\u001b[0m\n",
"\u001b[38;2;212;183;2mโฐโ\u001b[0m\u001b[38;2;212;183;2m InferenceClientModel - deepseek-ai/DeepSeek-R1 \u001b[0m\u001b[38;2;212;183;2mโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ\u001b[0m\u001b[38;2;212;183;2mโโฏ\u001b[0m\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"color: #d4b702; text-decoration-color: #d4b702\">โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ </span><span style=\"font-weight: bold\">Step </span><span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">1</span><span style=\"color: #d4b702; text-decoration-color: #d4b702\"> โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ</span>\n",
"</pre>\n"
],
"text/plain": [
"\u001b[38;2;212;183;2mโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ \u001b[0m\u001b[1mStep \u001b[0m\u001b[1;36m1\u001b[0m\u001b[38;2;212;183;2m โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ\u001b[0m\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"color: #800000; text-decoration-color: #800000; font-weight: bold\">Error while generating output:</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000; font-weight: bold\">(Request ID: </span><span style=\"color: #808000; text-decoration-color: #808000; font-weight: bold\">Root</span><span style=\"color: #800000; text-decoration-color: #800000; font-weight: bold\">=</span><span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">1</span><span style=\"color: #800000; text-decoration-color: #800000; font-weight: bold\">-686d606c-1f8bffb50e7ac4b077a26fe0;</span><span style=\"color: #ffff00; text-decoration-color: #ffff00\">2128d2d1-4e56-4788-a9eb-3015f54f1e44</span><span style=\"color: #800000; text-decoration-color: #800000; font-weight: bold\">)</span>\n",
"\n",
"<span style=\"color: #800000; text-decoration-color: #800000; font-weight: bold\">Bad request:</span>\n",
"</pre>\n"
],
"text/plain": [
"\u001b[1;31mError while generating output:\u001b[0m\n",
"\u001b[1;31m(\u001b[0m\u001b[1;31mRequest ID: \u001b[0m\u001b[1;33mRoot\u001b[0m\u001b[1;31m=\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;31m-686d606c-1f8bffb50e7ac4b077a26fe0;\u001b[0m\u001b[93m2128d2d1-4e56-4788-a9eb-3015f54f1e44\u001b[0m\u001b[1;31m)\u001b[0m\n",
"\n",
"\u001b[1;31mBad request:\u001b[0m\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">[Step 1: Duration 0.80 seconds]</span>\n",
"</pre>\n"
],
"text/plain": [
"\u001b[2m[Step 1: Duration 0.80 seconds]\u001b[0m\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"ename": "AgentGenerationError",
"evalue": "Error while generating output:\n(Request ID: Root=1-686d606c-1f8bffb50e7ac4b077a26fe0;2128d2d1-4e56-4788-a9eb-3015f54f1e44)\n\nBad request:",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mHTTPError\u001b[0m Traceback (most recent call last)",
"File \u001b[1;32m~\\AppData\\Roaming\\Python\\Python313\\site-packages\\huggingface_hub\\utils\\_http.py:409\u001b[0m, in \u001b[0;36mhf_raise_for_status\u001b[1;34m(response, endpoint_name)\u001b[0m\n\u001b[0;32m 408\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m--> 409\u001b[0m \u001b[43mresponse\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mraise_for_status\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 410\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m HTTPError \u001b[38;5;28;01mas\u001b[39;00m e:\n",
"File \u001b[1;32m~\\AppData\\Roaming\\Python\\Python313\\site-packages\\requests\\models.py:1024\u001b[0m, in \u001b[0;36mResponse.raise_for_status\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 1023\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m http_error_msg:\n\u001b[1;32m-> 1024\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m HTTPError(http_error_msg, response\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m)\n",
"\u001b[1;31mHTTPError\u001b[0m: 400 Client Error: Bad Request for url: https://router.huggingface.co/nebius/v1/chat/completions",
"\nThe above exception was the direct cause of the following exception:\n",
"\u001b[1;31mBadRequestError\u001b[0m Traceback (most recent call last)",
"File \u001b[1;32m~\\AppData\\Roaming\\Python\\Python313\\site-packages\\smolagents\\agents.py:1272\u001b[0m, in \u001b[0;36mToolCallingAgent._step_stream\u001b[1;34m(self, memory_step)\u001b[0m\n\u001b[0;32m 1271\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m-> 1272\u001b[0m chat_message: ChatMessage \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmodel\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mgenerate\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m 1273\u001b[0m \u001b[43m \u001b[49m\u001b[43minput_messages\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1274\u001b[0m \u001b[43m \u001b[49m\u001b[43mstop_sequences\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mObservation:\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mCalling tools:\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1275\u001b[0m \u001b[43m \u001b[49m\u001b[43mtools_to_call_from\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtools_and_managed_agents\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1276\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 1278\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlogger\u001b[38;5;241m.\u001b[39mlog_markdown(\n\u001b[0;32m 1279\u001b[0m content\u001b[38;5;241m=\u001b[39mchat_message\u001b[38;5;241m.\u001b[39mcontent \u001b[38;5;28;01mif\u001b[39;00m chat_message\u001b[38;5;241m.\u001b[39mcontent \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mstr\u001b[39m(chat_message\u001b[38;5;241m.\u001b[39mraw),\n\u001b[0;32m 1280\u001b[0m title\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mOutput message of the 
LLM:\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[0;32m 1281\u001b[0m level\u001b[38;5;241m=\u001b[39mLogLevel\u001b[38;5;241m.\u001b[39mDEBUG,\n\u001b[0;32m 1282\u001b[0m )\n",
"File \u001b[1;32m~\\AppData\\Roaming\\Python\\Python313\\site-packages\\smolagents\\models.py:1408\u001b[0m, in \u001b[0;36mInferenceClientModel.generate\u001b[1;34m(self, messages, stop_sequences, response_format, tools_to_call_from, **kwargs)\u001b[0m\n\u001b[0;32m 1399\u001b[0m completion_kwargs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_prepare_completion_kwargs(\n\u001b[0;32m 1400\u001b[0m messages\u001b[38;5;241m=\u001b[39mmessages,\n\u001b[0;32m 1401\u001b[0m stop_sequences\u001b[38;5;241m=\u001b[39mstop_sequences,\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 1406\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs,\n\u001b[0;32m 1407\u001b[0m )\n\u001b[1;32m-> 1408\u001b[0m response \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mclient\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mchat_completion\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mcompletion_kwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 1410\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_last_input_token_count \u001b[38;5;241m=\u001b[39m response\u001b[38;5;241m.\u001b[39musage\u001b[38;5;241m.\u001b[39mprompt_tokens\n",
"File \u001b[1;32m~\\AppData\\Roaming\\Python\\Python313\\site-packages\\huggingface_hub\\inference\\_client.py:924\u001b[0m, in \u001b[0;36mInferenceClient.chat_completion\u001b[1;34m(self, messages, model, stream, frequency_penalty, logit_bias, logprobs, max_tokens, n, presence_penalty, response_format, seed, stop, stream_options, temperature, tool_choice, tool_prompt, tools, top_logprobs, top_p, extra_body)\u001b[0m\n\u001b[0;32m 917\u001b[0m request_parameters \u001b[38;5;241m=\u001b[39m provider_helper\u001b[38;5;241m.\u001b[39mprepare_request(\n\u001b[0;32m 918\u001b[0m inputs\u001b[38;5;241m=\u001b[39mmessages,\n\u001b[0;32m 919\u001b[0m parameters\u001b[38;5;241m=\u001b[39mparameters,\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 922\u001b[0m api_key\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtoken,\n\u001b[0;32m 923\u001b[0m )\n\u001b[1;32m--> 924\u001b[0m data \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_inner_post\u001b[49m\u001b[43m(\u001b[49m\u001b[43mrequest_parameters\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mstream\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstream\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 926\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m stream:\n",
"File \u001b[1;32m~\\AppData\\Roaming\\Python\\Python313\\site-packages\\huggingface_hub\\inference\\_client.py:280\u001b[0m, in \u001b[0;36mInferenceClient._inner_post\u001b[1;34m(self, request_parameters, stream)\u001b[0m\n\u001b[0;32m 279\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m--> 280\u001b[0m \u001b[43mhf_raise_for_status\u001b[49m\u001b[43m(\u001b[49m\u001b[43mresponse\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 281\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m response\u001b[38;5;241m.\u001b[39miter_lines() \u001b[38;5;28;01mif\u001b[39;00m stream \u001b[38;5;28;01melse\u001b[39;00m response\u001b[38;5;241m.\u001b[39mcontent\n",
"File \u001b[1;32m~\\AppData\\Roaming\\Python\\Python313\\site-packages\\huggingface_hub\\utils\\_http.py:465\u001b[0m, in \u001b[0;36mhf_raise_for_status\u001b[1;34m(response, endpoint_name)\u001b[0m\n\u001b[0;32m 462\u001b[0m message \u001b[38;5;241m=\u001b[39m (\n\u001b[0;32m 463\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124mBad request for \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mendpoint_name\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m endpoint:\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m endpoint_name \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124mBad request:\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m 464\u001b[0m )\n\u001b[1;32m--> 465\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m _format(BadRequestError, message, response) \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01me\u001b[39;00m\n\u001b[0;32m 467\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m response\u001b[38;5;241m.\u001b[39mstatus_code \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m403\u001b[39m:\n",
"\u001b[1;31mBadRequestError\u001b[0m: (Request ID: Root=1-686d606c-1f8bffb50e7ac4b077a26fe0;2128d2d1-4e56-4788-a9eb-3015f54f1e44)\n\nBad request:",
"\nThe above exception was the direct cause of the following exception:\n",
"\u001b[1;31mAgentGenerationError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[1;32mIn[22], line 11\u001b[0m\n\u001b[0;32m 8\u001b[0m question_text \u001b[38;5;241m=\u001b[39m item[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mquestion\u001b[39m\u001b[38;5;124m\"\u001b[39m]\n\u001b[0;32m 9\u001b[0m task_id \u001b[38;5;241m=\u001b[39m item[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtask_id\u001b[39m\u001b[38;5;124m\"\u001b[39m]\n\u001b[1;32m---> 11\u001b[0m submitted_answer \u001b[38;5;241m=\u001b[39m \u001b[43magent\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtask_id\u001b[49m\u001b[43m,\u001b[49m\u001b[43mquestion_text\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 13\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mSubmitted answer:\u001b[39m\u001b[38;5;124m\"\u001b[39m, submitted_answer)\n",
"Cell \u001b[1;32mIn[21], line 65\u001b[0m, in \u001b[0;36mBasicAgent.__call__\u001b[1;34m(self, taskid, question)\u001b[0m\n\u001b[0;32m 54\u001b[0m agent \u001b[38;5;241m=\u001b[39m ToolCallingAgent(\n\u001b[0;32m 55\u001b[0m tools\u001b[38;5;241m=\u001b[39m[\n\u001b[0;32m 56\u001b[0m fetch_file, \n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 61\u001b[0m max_steps\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m9\u001b[39m,\n\u001b[0;32m 62\u001b[0m )\n\u001b[0;32m 64\u001b[0m \u001b[38;5;66;03m# Run the agent with the prompt\u001b[39;00m\n\u001b[1;32m---> 65\u001b[0m fixed_answer \u001b[38;5;241m=\u001b[39m \u001b[43magent\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\u001b[43mprompt\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 67\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m fixed_answer\n",
"File \u001b[1;32m~\\AppData\\Roaming\\Python\\Python313\\site-packages\\smolagents\\agents.py:442\u001b[0m, in \u001b[0;36mMultiStepAgent.run\u001b[1;34m(self, task, stream, reset, images, additional_args, max_steps)\u001b[0m\n\u001b[0;32m 439\u001b[0m run_start_time \u001b[38;5;241m=\u001b[39m time\u001b[38;5;241m.\u001b[39mtime()\n\u001b[0;32m 440\u001b[0m \u001b[38;5;66;03m# Outputs are returned only at the end. We only look at the last step.\u001b[39;00m\n\u001b[1;32m--> 442\u001b[0m steps \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mlist\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_run_stream\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtask\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmax_steps\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmax_steps\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mimages\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mimages\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 443\u001b[0m \u001b[38;5;28;01massert\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(steps[\u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m], FinalAnswerStep)\n\u001b[0;32m 444\u001b[0m output \u001b[38;5;241m=\u001b[39m steps[\u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m]\u001b[38;5;241m.\u001b[39moutput\n",
"File \u001b[1;32m~\\AppData\\Roaming\\Python\\Python313\\site-packages\\smolagents\\agents.py:530\u001b[0m, in \u001b[0;36mMultiStepAgent._run_stream\u001b[1;34m(self, task, max_steps, images)\u001b[0m\n\u001b[0;32m 527\u001b[0m final_answer \u001b[38;5;241m=\u001b[39m output\u001b[38;5;241m.\u001b[39moutput\n\u001b[0;32m 528\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m AgentGenerationError \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[0;32m 529\u001b[0m \u001b[38;5;66;03m# Agent generation errors are not caused by a Model error but an implementation error: so we should raise them and exit.\u001b[39;00m\n\u001b[1;32m--> 530\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[0;32m 531\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m AgentError \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[0;32m 532\u001b[0m \u001b[38;5;66;03m# Other AgentError types are caused by the Model, so we should log them and iterate.\u001b[39;00m\n\u001b[0;32m 533\u001b[0m action_step\u001b[38;5;241m.\u001b[39merror \u001b[38;5;241m=\u001b[39m e\n",
"File \u001b[1;32m~\\AppData\\Roaming\\Python\\Python313\\site-packages\\smolagents\\agents.py:517\u001b[0m, in \u001b[0;36mMultiStepAgent._run_stream\u001b[1;34m(self, task, max_steps, images)\u001b[0m\n\u001b[0;32m 515\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlogger\u001b[38;5;241m.\u001b[39mlog_rule(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mStep \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstep_number\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m, level\u001b[38;5;241m=\u001b[39mLogLevel\u001b[38;5;241m.\u001b[39mINFO)\n\u001b[0;32m 516\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m--> 517\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43moutput\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_step_stream\u001b[49m\u001b[43m(\u001b[49m\u001b[43maction_step\u001b[49m\u001b[43m)\u001b[49m\u001b[43m:\u001b[49m\n\u001b[0;32m 518\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;66;43;03m# Yield streaming deltas\u001b[39;49;00m\n\u001b[0;32m 519\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43;01mif\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;129;43;01mnot\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43misinstance\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43moutput\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m(\u001b[49m\u001b[43mActionOutput\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mToolOutput\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\u001b[43m:\u001b[49m\n\u001b[0;32m 520\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43;01myield\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43moutput\u001b[49m\n",
"File \u001b[1;32m~\\AppData\\Roaming\\Python\\Python313\\site-packages\\smolagents\\agents.py:1289\u001b[0m, in \u001b[0;36mToolCallingAgent._step_stream\u001b[1;34m(self, memory_step)\u001b[0m\n\u001b[0;32m 1287\u001b[0m memory_step\u001b[38;5;241m.\u001b[39mtoken_usage \u001b[38;5;241m=\u001b[39m chat_message\u001b[38;5;241m.\u001b[39mtoken_usage\n\u001b[0;32m 1288\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m-> 1289\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m AgentGenerationError(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mError while generating output:\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;132;01m{\u001b[39;00me\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlogger) \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01me\u001b[39;00m\n\u001b[0;32m 1291\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m chat_message\u001b[38;5;241m.\u001b[39mtool_calls \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(chat_message\u001b[38;5;241m.\u001b[39mtool_calls) \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m0\u001b[39m:\n\u001b[0;32m 1292\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n",
"\u001b[1;31mAgentGenerationError\u001b[0m: Error while generating output:\n(Request ID: Root=1-686d606c-1f8bffb50e7ac4b077a26fe0;2128d2d1-4e56-4788-a9eb-3015f54f1e44)\n\nBad request:"
]
}
],
"source": [
"# Single GAIA benchmark item (Level 1, no attached file) used to\n",
"# exercise the agent end-to-end before running the full submission.\n",
"item = {\n",
"    \"task_id\": \"8e867cd7-cff9-4e6c-867a-ff5ddc2550be\",\n",
"    \"question\": \"How many studio albums were published by Mercedes Sosa between 2000 and 2009 (included)? You can use the latest 2022 version of english wikipedia.\",\n",
"    \"Level\": \"1\",\n",
"    \"file_name\": \"\",\n",
"}\n",
"\n",
"# Pull out the two fields the agent call needs.\n",
"task_id = item[\"task_id\"]\n",
"question_text = item[\"question\"]\n",
"\n",
"# Run the agent on this one task and display what would be submitted.\n",
"submitted_answer = agent(task_id, question_text)\n",
"print(\"Submitted answer:\", submitted_answer)\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.5"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
|