update-notebooks
#87
by
sergiopaniego
HF Staff
- opened
- bonus-unit1/bonus-unit1.ipynb +12 -7
- bonus-unit2/{monitoring-and-evaluating-agents-notebook.ipynb β monitoring-and-evaluating-agents.ipynb} +14 -14
- dummy_agent_library.ipynb β unit1/dummy_agent_library.ipynb +20 -16
- unit2/langgraph/agent.ipynb +85 -313
- unit2/langgraph/mail_sorting.ipynb +443 -1
- unit2/llama-index/agents.ipynb +1 -1
- unit2/llama-index/components.ipynb +5 -3
- unit2/llama-index/tools.ipynb +72 -27
- unit2/llama-index/workflows.ipynb +6 -5
- unit2/smolagents/code_agents.ipynb +0 -0
- unit2/smolagents/multiagent_notebook.ipynb +0 -0
- unit2/smolagents/retrieval_agents.ipynb +8 -8
- unit2/smolagents/tool_calling_agents.ipynb +14 -14
- unit2/smolagents/tools.ipynb +19 -19
- unit2/smolagents/vision_agents.ipynb +17 -14
- unit2/smolagents/vision_web_browser.py +1 -1
bonus-unit1/bonus-unit1.ipynb
CHANGED
@@ -23,7 +23,7 @@
|
|
23 |
"id": "gWR4Rvpmjq5T"
|
24 |
},
|
25 |
"source": [
|
26 |
-
"## Prerequisites ποΈ\n",
|
27 |
"\n",
|
28 |
"Before diving into the notebook, you need to:\n",
|
29 |
"\n",
|
@@ -130,7 +130,9 @@
|
|
130 |
"!pip install -q -U peft\n",
|
131 |
"!pip install -q -U trl\n",
|
132 |
"!pip install -q -U tensorboardX\n",
|
133 |
-
"!pip install -q wandb"
|
|
|
|
|
134 |
]
|
135 |
},
|
136 |
{
|
@@ -184,7 +186,7 @@
|
|
184 |
"import torch\n",
|
185 |
"import json\n",
|
186 |
"\n",
|
187 |
-
"from transformers import AutoModelForCausalLM, AutoTokenizer, set_seed\n",
|
188 |
"from datasets import load_dataset\n",
|
189 |
"from trl import SFTConfig, SFTTrainer\n",
|
190 |
"from peft import LoraConfig, TaskType\n",
|
@@ -319,7 +321,10 @@
|
|
319 |
"source": [
|
320 |
"dataset = dataset.map(preprocess, remove_columns=\"messages\")\n",
|
321 |
"dataset = dataset[\"train\"].train_test_split(0.1)\n",
|
322 |
-
"print(dataset)"
|
|
|
|
|
|
|
323 |
]
|
324 |
},
|
325 |
{
|
@@ -650,7 +655,7 @@
|
|
650 |
"source": [
|
651 |
"## Step 9: Let's configure the LoRA\n",
|
652 |
"\n",
|
653 |
-
"This is we are going to define the parameter of our adapter. Those
|
654 |
]
|
655 |
},
|
656 |
{
|
@@ -1191,7 +1196,7 @@
|
|
1191 |
},
|
1192 |
{
|
1193 |
"cell_type": "code",
|
1194 |
-
"execution_count":
|
1195 |
"id": "56b89825-70ac-42c1-934c-26e2d54f3b7b",
|
1196 |
"metadata": {
|
1197 |
"colab": {
|
@@ -1471,7 +1476,7 @@
|
|
1471 |
"device = \"auto\"\n",
|
1472 |
"config = PeftConfig.from_pretrained(peft_model_id)\n",
|
1473 |
"model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path,\n",
|
1474 |
-
" device_map
|
1475 |
" )\n",
|
1476 |
"tokenizer = AutoTokenizer.from_pretrained(peft_model_id)\n",
|
1477 |
"model.resize_token_embeddings(len(tokenizer))\n",
|
|
|
23 |
"id": "gWR4Rvpmjq5T"
|
24 |
},
|
25 |
"source": [
|
26 |
+
"## Exercise Prerequisites ποΈ\n",
|
27 |
"\n",
|
28 |
"Before diving into the notebook, you need to:\n",
|
29 |
"\n",
|
|
|
130 |
"!pip install -q -U peft\n",
|
131 |
"!pip install -q -U trl\n",
|
132 |
"!pip install -q -U tensorboardX\n",
|
133 |
+
"!pip install -q wandb\n",
|
134 |
+
"!pip install -q -U torchvision\n",
|
135 |
+
"!pip install -q -U transformers"
|
136 |
]
|
137 |
},
|
138 |
{
|
|
|
186 |
"import torch\n",
|
187 |
"import json\n",
|
188 |
"\n",
|
189 |
+
"from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, BitsAndBytesConfig, set_seed\n",
|
190 |
"from datasets import load_dataset\n",
|
191 |
"from trl import SFTConfig, SFTTrainer\n",
|
192 |
"from peft import LoraConfig, TaskType\n",
|
|
|
321 |
"source": [
|
322 |
"dataset = dataset.map(preprocess, remove_columns=\"messages\")\n",
|
323 |
"dataset = dataset[\"train\"].train_test_split(0.1)\n",
|
324 |
+
"print(dataset)\n",
|
325 |
+
"\n",
|
326 |
+
"dataset[\"train\"] = dataset[\"train\"].select(range(100))\n",
|
327 |
+
"dataset[\"test\"] = dataset[\"test\"].select(range(10))"
|
328 |
]
|
329 |
},
|
330 |
{
|
|
|
655 |
"source": [
|
656 |
"## Step 9: Let's configure the LoRA\n",
|
657 |
"\n",
|
658 |
+
"This is we are going to define the parameter of our adapter. Those are the most important parameters in LoRA as they define the size and importance of the adapters we are training."
|
659 |
]
|
660 |
},
|
661 |
{
|
|
|
1196 |
},
|
1197 |
{
|
1198 |
"cell_type": "code",
|
1199 |
+
"execution_count": null,
|
1200 |
"id": "56b89825-70ac-42c1-934c-26e2d54f3b7b",
|
1201 |
"metadata": {
|
1202 |
"colab": {
|
|
|
1476 |
"device = \"auto\"\n",
|
1477 |
"config = PeftConfig.from_pretrained(peft_model_id)\n",
|
1478 |
"model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path,\n",
|
1479 |
+
" device_map=device,\n",
|
1480 |
" )\n",
|
1481 |
"tokenizer = AutoTokenizer.from_pretrained(peft_model_id)\n",
|
1482 |
"model.resize_token_embeddings(len(tokenizer))\n",
|
bonus-unit2/{monitoring-and-evaluating-agents-notebook.ipynb β monitoring-and-evaluating-agents.ipynb}
RENAMED
@@ -134,12 +134,12 @@
|
|
134 |
"metadata": {},
|
135 |
"outputs": [],
|
136 |
"source": [
|
137 |
-
"from smolagents import
|
138 |
"\n",
|
139 |
"# Create a simple agent to test instrumentation\n",
|
140 |
"agent = CodeAgent(\n",
|
141 |
" tools=[],\n",
|
142 |
-
" model=
|
143 |
")\n",
|
144 |
"\n",
|
145 |
"agent.run(\"1+1=\")"
|
@@ -173,10 +173,10 @@
|
|
173 |
"metadata": {},
|
174 |
"outputs": [],
|
175 |
"source": [
|
176 |
-
"from smolagents import (CodeAgent, DuckDuckGoSearchTool,
|
177 |
"\n",
|
178 |
"search_tool = DuckDuckGoSearchTool()\n",
|
179 |
-
"agent = CodeAgent(tools=[search_tool], model=
|
180 |
"\n",
|
181 |
"agent.run(\"How many Rubik's Cubes could you fit inside the Notre Dame Cathedral?\")"
|
182 |
]
|
@@ -189,7 +189,7 @@
|
|
189 |
"\n",
|
190 |
"Most observability tools record a **trace** that contains **spans**, which represent each step of your agentβs logic. Here, the trace contains the overall agent run and sub-spans for:\n",
|
191 |
"- The tool calls (DuckDuckGoSearchTool)\n",
|
192 |
-
"- The LLM calls (
|
193 |
"\n",
|
194 |
"You can inspect these to see precisely where time is spent, how many tokens are used, and so on:\n",
|
195 |
"\n",
|
@@ -257,13 +257,13 @@
|
|
257 |
"metadata": {},
|
258 |
"outputs": [],
|
259 |
"source": [
|
260 |
-
"from smolagents import (CodeAgent, DuckDuckGoSearchTool,
|
261 |
"from opentelemetry import trace\n",
|
262 |
"\n",
|
263 |
"search_tool = DuckDuckGoSearchTool()\n",
|
264 |
"agent = CodeAgent(\n",
|
265 |
" tools=[search_tool],\n",
|
266 |
-
" model=
|
267 |
")\n",
|
268 |
"\n",
|
269 |
"with tracer.start_as_current_span(\"Smolagent-Trace\") as span:\n",
|
@@ -300,11 +300,11 @@
|
|
300 |
"source": [
|
301 |
"import gradio as gr\n",
|
302 |
"from opentelemetry.trace import format_trace_id\n",
|
303 |
-
"from smolagents import (CodeAgent,
|
304 |
"from langfuse import Langfuse\n",
|
305 |
"\n",
|
306 |
"langfuse = Langfuse()\n",
|
307 |
-
"model =
|
308 |
"agent = CodeAgent(tools=[], model=model, add_base_tools=True)\n",
|
309 |
"\n",
|
310 |
"formatted_trace_id = None # We'll store the current trace_id globally for demonstration\n",
|
@@ -390,10 +390,10 @@
|
|
390 |
"outputs": [],
|
391 |
"source": [
|
392 |
"# Example: Checking if the agentβs output is toxic or not.\n",
|
393 |
-
"from smolagents import (CodeAgent, DuckDuckGoSearchTool,
|
394 |
"\n",
|
395 |
"search_tool = DuckDuckGoSearchTool()\n",
|
396 |
-
"agent = CodeAgent(tools=[search_tool], model=
|
397 |
"\n",
|
398 |
"agent.run(\"Can eating carrots improve your vision?\")"
|
399 |
]
|
@@ -531,10 +531,10 @@
|
|
531 |
"outputs": [],
|
532 |
"source": [
|
533 |
"from opentelemetry.trace import format_trace_id\n",
|
534 |
-
"from smolagents import (CodeAgent,
|
535 |
"\n",
|
536 |
-
"# Example: using
|
537 |
-
"model =
|
538 |
"\n",
|
539 |
"agent = CodeAgent(\n",
|
540 |
" tools=[],\n",
|
|
|
134 |
"metadata": {},
|
135 |
"outputs": [],
|
136 |
"source": [
|
137 |
+
"from smolagents import InferenceClientModel, CodeAgent\n",
|
138 |
"\n",
|
139 |
"# Create a simple agent to test instrumentation\n",
|
140 |
"agent = CodeAgent(\n",
|
141 |
" tools=[],\n",
|
142 |
+
" model=InferenceClientModel()\n",
|
143 |
")\n",
|
144 |
"\n",
|
145 |
"agent.run(\"1+1=\")"
|
|
|
173 |
"metadata": {},
|
174 |
"outputs": [],
|
175 |
"source": [
|
176 |
+
"from smolagents import (CodeAgent, DuckDuckGoSearchTool, InferenceClientModel)\n",
|
177 |
"\n",
|
178 |
"search_tool = DuckDuckGoSearchTool()\n",
|
179 |
+
"agent = CodeAgent(tools=[search_tool], model=InferenceClientModel())\n",
|
180 |
"\n",
|
181 |
"agent.run(\"How many Rubik's Cubes could you fit inside the Notre Dame Cathedral?\")"
|
182 |
]
|
|
|
189 |
"\n",
|
190 |
"Most observability tools record a **trace** that contains **spans**, which represent each step of your agentβs logic. Here, the trace contains the overall agent run and sub-spans for:\n",
|
191 |
"- The tool calls (DuckDuckGoSearchTool)\n",
|
192 |
+
"- The LLM calls (InferenceClientModel)\n",
|
193 |
"\n",
|
194 |
"You can inspect these to see precisely where time is spent, how many tokens are used, and so on:\n",
|
195 |
"\n",
|
|
|
257 |
"metadata": {},
|
258 |
"outputs": [],
|
259 |
"source": [
|
260 |
+
"from smolagents import (CodeAgent, DuckDuckGoSearchTool, InferenceClientModel)\n",
|
261 |
"from opentelemetry import trace\n",
|
262 |
"\n",
|
263 |
"search_tool = DuckDuckGoSearchTool()\n",
|
264 |
"agent = CodeAgent(\n",
|
265 |
" tools=[search_tool],\n",
|
266 |
+
" model=InferenceClientModel()\n",
|
267 |
")\n",
|
268 |
"\n",
|
269 |
"with tracer.start_as_current_span(\"Smolagent-Trace\") as span:\n",
|
|
|
300 |
"source": [
|
301 |
"import gradio as gr\n",
|
302 |
"from opentelemetry.trace import format_trace_id\n",
|
303 |
+
"from smolagents import (CodeAgent, InferenceClientModel)\n",
|
304 |
"from langfuse import Langfuse\n",
|
305 |
"\n",
|
306 |
"langfuse = Langfuse()\n",
|
307 |
+
"model = InferenceClientModel()\n",
|
308 |
"agent = CodeAgent(tools=[], model=model, add_base_tools=True)\n",
|
309 |
"\n",
|
310 |
"formatted_trace_id = None # We'll store the current trace_id globally for demonstration\n",
|
|
|
390 |
"outputs": [],
|
391 |
"source": [
|
392 |
"# Example: Checking if the agentβs output is toxic or not.\n",
|
393 |
+
"from smolagents import (CodeAgent, DuckDuckGoSearchTool, InferenceClientModel)\n",
|
394 |
"\n",
|
395 |
"search_tool = DuckDuckGoSearchTool()\n",
|
396 |
+
"agent = CodeAgent(tools=[search_tool], model=InferenceClientModel())\n",
|
397 |
"\n",
|
398 |
"agent.run(\"Can eating carrots improve your vision?\")"
|
399 |
]
|
|
|
531 |
"outputs": [],
|
532 |
"source": [
|
533 |
"from opentelemetry.trace import format_trace_id\n",
|
534 |
+
"from smolagents import (CodeAgent, InferenceClientModel, LiteLLMModel)\n",
|
535 |
"\n",
|
536 |
+
"# Example: using InferenceClientModel or LiteLLMModel to access openai, anthropic, gemini, etc. models:\n",
|
537 |
+
"model = InferenceClientModel()\n",
|
538 |
"\n",
|
539 |
"agent = CodeAgent(\n",
|
540 |
" tools=[],\n",
|
dummy_agent_library.ipynb β unit1/dummy_agent_library.ipynb
RENAMED
@@ -40,14 +40,16 @@
|
|
40 |
"\n",
|
41 |
"In the Hugging Face ecosystem, there is a convenient feature called Serverless API that allows you to easily run inference on many models. There's no installation or deployment required.\n",
|
42 |
"\n",
|
43 |
-
"To run this notebook, **you need a Hugging Face token** that you can get from https://hf.co/settings/tokens.
|
|
|
|
|
44 |
"\n",
|
45 |
-
"You also need to request access to [the Meta Llama models](meta-llama/Llama-3.
|
46 |
]
|
47 |
},
|
48 |
{
|
49 |
"cell_type": "code",
|
50 |
-
"execution_count":
|
51 |
"id": "5af6ec14-bb7d-49a4-b911-0cf0ec084df5",
|
52 |
"metadata": {
|
53 |
"id": "5af6ec14-bb7d-49a4-b911-0cf0ec084df5",
|
@@ -58,10 +60,12 @@
|
|
58 |
"import os\n",
|
59 |
"from huggingface_hub import InferenceClient\n",
|
60 |
"\n",
|
61 |
-
"# os.environ
|
62 |
"\n",
|
63 |
-
"
|
64 |
-
"
|
|
|
|
|
65 |
"#client = InferenceClient(\"https://jc26mwg228mkj8dw.us-east-1.aws.endpoints.huggingface.cloud\")"
|
66 |
]
|
67 |
},
|
@@ -114,7 +118,7 @@
|
|
114 |
"id": "T9-6h-eVAWrR"
|
115 |
},
|
116 |
"source": [
|
117 |
-
"If we now add the special tokens related to the <a href=\"https://huggingface.co/meta-llama/Llama-3.
|
118 |
]
|
119 |
},
|
120 |
{
|
@@ -234,7 +238,7 @@
|
|
234 |
"outputs": [],
|
235 |
"source": [
|
236 |
"# This system prompt is a bit more complex and actually contains the function description already appended.\n",
|
237 |
-
"# Here we suppose that the textual description of the tools
|
238 |
"SYSTEM_PROMPT = \"\"\"Answer the following questions as best you can. You have access to the following tools:\n",
|
239 |
"\n",
|
240 |
"get_weather: Get the current weather in a given location\n",
|
@@ -243,7 +247,7 @@
|
|
243 |
"Specifically, this json should have a `action` key (with the name of the tool to use) and a `action_input` key (with the input to the tool going here).\n",
|
244 |
"\n",
|
245 |
"The only values that should be in the \"action\" field are:\n",
|
246 |
-
"get_weather: Get the current weather in a given location, args: {\"location\": {\"type\": \"string\"}}\n",
|
247 |
"example use :\n",
|
248 |
"```\n",
|
249 |
"{{\n",
|
@@ -313,7 +317,7 @@
|
|
313 |
" {\"role\": \"user\", \"content\": \"What's the weather in London ?\"},\n",
|
314 |
"]\n",
|
315 |
"from transformers import AutoTokenizer\n",
|
316 |
-
"tokenizer = AutoTokenizer.from_pretrained(\"meta-llama/Llama-3.
|
317 |
"\n",
|
318 |
"tokenizer.apply_chat_template(messages, tokenize=False,add_generation_prompt=True)\n",
|
319 |
"```"
|
@@ -354,7 +358,7 @@
|
|
354 |
"Specifically, this json should have a `action` key (with the name of the tool to use) and a `action_input` key (with the input to the tool going here).\n",
|
355 |
"\n",
|
356 |
"The only values that should be in the \"action\" field are:\n",
|
357 |
-
"get_weather: Get the current weather in a given location, args: {\"location\": {\"type\": \"string\"}}\n",
|
358 |
"example use :\n",
|
359 |
"```\n",
|
360 |
"{{\n",
|
@@ -456,7 +460,7 @@
|
|
456 |
},
|
457 |
{
|
458 |
"cell_type": "code",
|
459 |
-
"execution_count":
|
460 |
"id": "9fc783f2-66ac-42cf-8a57-51788f81d436",
|
461 |
"metadata": {
|
462 |
"colab": {
|
@@ -488,7 +492,7 @@
|
|
488 |
"# The answer was hallucinated by the model. We need to stop to actually execute the function!\n",
|
489 |
"output = client.text_generation(\n",
|
490 |
" prompt,\n",
|
491 |
-
" max_new_tokens=
|
492 |
" stop=[\"Observation:\"] # Let's stop before any actual function is called\n",
|
493 |
")\n",
|
494 |
"\n",
|
@@ -504,7 +508,7 @@
|
|
504 |
"source": [
|
505 |
"Much Better!\n",
|
506 |
"\n",
|
507 |
-
"Let's now create a **dummy get weather function**. In real situation you could call an API."
|
508 |
]
|
509 |
},
|
510 |
{
|
@@ -579,7 +583,7 @@
|
|
579 |
"Specifically, this json should have a `action` key (with the name of the tool to use) and a `action_input` key (with the input to the tool going here).\n",
|
580 |
"\n",
|
581 |
"The only values that should be in the \"action\" field are:\n",
|
582 |
-
"get_weather: Get the current weather in a given location, args: {\"location\": {\"type\": \"string\"}}\n",
|
583 |
"example use :\n",
|
584 |
"```\n",
|
585 |
"{{\n",
|
@@ -605,7 +609,7 @@
|
|
605 |
"\n",
|
606 |
"Now begin! Reminder to ALWAYS use the exact characters `Final Answer:` when you provide a definitive answer. \n",
|
607 |
"<|eot_id|><|start_header_id|>user<|end_header_id|>\n",
|
608 |
-
"What's the
|
609 |
"<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n",
|
610 |
"Question: What's the weather in London?\n",
|
611 |
"\n",
|
|
|
40 |
"\n",
|
41 |
"In the Hugging Face ecosystem, there is a convenient feature called Serverless API that allows you to easily run inference on many models. There's no installation or deployment required.\n",
|
42 |
"\n",
|
43 |
+
"To run this notebook, **you need a Hugging Face token** that you can get from https://hf.co/settings/tokens. A \"Read\" token type is sufficient. \n",
|
44 |
+
"- If you are running this notebook on Google Colab, you can set it up in the \"settings\" tab under \"secrets\". Make sure to call it \"HF_TOKEN\" and restart the session to load the environment variable (Runtime -> Restart session).\n",
|
45 |
+
"- If you are running this notebook locally, you can set it up as an [environment variable](https://huggingface.co/docs/huggingface_hub/en/package_reference/environment_variables). Make sure you restart the kernel after installing or updating huggingface_hub. You can update huggingface_hub by modifying the above `!pip install -q huggingface_hub -U`\n",
|
46 |
"\n",
|
47 |
+
"You also need to request access to [the Meta Llama models](https://huggingface.co/meta-llama), select [Llama-3.3-70B-Instruct](https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct) if you haven't done it click on Expand to review and access and fill the form. Approval usually takes up to an hour."
|
48 |
]
|
49 |
},
|
50 |
{
|
51 |
"cell_type": "code",
|
52 |
+
"execution_count": null,
|
53 |
"id": "5af6ec14-bb7d-49a4-b911-0cf0ec084df5",
|
54 |
"metadata": {
|
55 |
"id": "5af6ec14-bb7d-49a4-b911-0cf0ec084df5",
|
|
|
60 |
"import os\n",
|
61 |
"from huggingface_hub import InferenceClient\n",
|
62 |
"\n",
|
63 |
+
"# HF_TOKEN = os.environ.get(\"HF_TOKEN\")\n",
|
64 |
"\n",
|
65 |
+
"\n",
|
66 |
+
"\n",
|
67 |
+
"client = InferenceClient(\"meta-llama/Llama-3.3-70B-Instruct\")\n",
|
68 |
+
"# if the outputs for next cells are wrong, the free model may be overloaded. You can also use this public endpoint that contains Llama-3.3-70B-Instruct\n",
|
69 |
"#client = InferenceClient(\"https://jc26mwg228mkj8dw.us-east-1.aws.endpoints.huggingface.cloud\")"
|
70 |
]
|
71 |
},
|
|
|
118 |
"id": "T9-6h-eVAWrR"
|
119 |
},
|
120 |
"source": [
|
121 |
+
"If we now add the special tokens related to the <a href=\"https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct\">Llama-3.3-70B-Instruct model</a> that we're using, the behavior changes and it now produces the expected EOS."
|
122 |
]
|
123 |
},
|
124 |
{
|
|
|
238 |
"outputs": [],
|
239 |
"source": [
|
240 |
"# This system prompt is a bit more complex and actually contains the function description already appended.\n",
|
241 |
+
"# Here we suppose that the textual description of the tools have already been appended\n",
|
242 |
"SYSTEM_PROMPT = \"\"\"Answer the following questions as best you can. You have access to the following tools:\n",
|
243 |
"\n",
|
244 |
"get_weather: Get the current weather in a given location\n",
|
|
|
247 |
"Specifically, this json should have a `action` key (with the name of the tool to use) and a `action_input` key (with the input to the tool going here).\n",
|
248 |
"\n",
|
249 |
"The only values that should be in the \"action\" field are:\n",
|
250 |
+
"get_weather: Get the current weather in a given location, args: {{\"location\": {{\"type\": \"string\"}}}}\n",
|
251 |
"example use :\n",
|
252 |
"```\n",
|
253 |
"{{\n",
|
|
|
317 |
" {\"role\": \"user\", \"content\": \"What's the weather in London ?\"},\n",
|
318 |
"]\n",
|
319 |
"from transformers import AutoTokenizer\n",
|
320 |
+
"tokenizer = AutoTokenizer.from_pretrained(\"meta-llama/Llama-3.3-70B-Instruct\")\n",
|
321 |
"\n",
|
322 |
"tokenizer.apply_chat_template(messages, tokenize=False,add_generation_prompt=True)\n",
|
323 |
"```"
|
|
|
358 |
"Specifically, this json should have a `action` key (with the name of the tool to use) and a `action_input` key (with the input to the tool going here).\n",
|
359 |
"\n",
|
360 |
"The only values that should be in the \"action\" field are:\n",
|
361 |
+
"get_weather: Get the current weather in a given location, args: {{\"location\": {{\"type\": \"string\"}}}}\n",
|
362 |
"example use :\n",
|
363 |
"```\n",
|
364 |
"{{\n",
|
|
|
460 |
},
|
461 |
{
|
462 |
"cell_type": "code",
|
463 |
+
"execution_count": null,
|
464 |
"id": "9fc783f2-66ac-42cf-8a57-51788f81d436",
|
465 |
"metadata": {
|
466 |
"colab": {
|
|
|
492 |
"# The answer was hallucinated by the model. We need to stop to actually execute the function!\n",
|
493 |
"output = client.text_generation(\n",
|
494 |
" prompt,\n",
|
495 |
+
" max_new_tokens=150,\n",
|
496 |
" stop=[\"Observation:\"] # Let's stop before any actual function is called\n",
|
497 |
")\n",
|
498 |
"\n",
|
|
|
508 |
"source": [
|
509 |
"Much Better!\n",
|
510 |
"\n",
|
511 |
+
"Let's now create a **dummy get weather function**. In a real situation you could call an API."
|
512 |
]
|
513 |
},
|
514 |
{
|
|
|
583 |
"Specifically, this json should have a `action` key (with the name of the tool to use) and a `action_input` key (with the input to the tool going here).\n",
|
584 |
"\n",
|
585 |
"The only values that should be in the \"action\" field are:\n",
|
586 |
+
"get_weather: Get the current weather in a given location, args: {{\"location\": {{\"type\": \"string\"}}}}\n",
|
587 |
"example use :\n",
|
588 |
"```\n",
|
589 |
"{{\n",
|
|
|
609 |
"\n",
|
610 |
"Now begin! Reminder to ALWAYS use the exact characters `Final Answer:` when you provide a definitive answer. \n",
|
611 |
"<|eot_id|><|start_header_id|>user<|end_header_id|>\n",
|
612 |
+
"What's the weather in London?\n",
|
613 |
"<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n",
|
614 |
"Question: What's the weather in London?\n",
|
615 |
"\n",
|
unit2/langgraph/agent.ipynb
CHANGED
@@ -1,9 +1,8 @@
|
|
1 |
{
|
2 |
"cells": [
|
3 |
{
|
4 |
-
"cell_type": "markdown",
|
5 |
-
"id": "98f5e36a-da49-4ae2-8c74-b910a2f992fc",
|
6 |
"metadata": {},
|
|
|
7 |
"source": [
|
8 |
"# Agent\n",
|
9 |
"\n",
|
@@ -15,67 +14,50 @@
|
|
15 |
"\n",
|
16 |
"As seen in the Unit 1, an agent needs 3 steps as introduced in the ReAct architecture :\n",
|
17 |
"[ReAct](https://react-lm.github.io/), a general agent architecture.\n",
|
18 |
-
"
|
19 |
-
"* `act` - let the model call specific tools
|
20 |
-
"* `observe` - pass the tool output back to the model
|
21 |
"* `reason` - let the model reason about the tool output to decide what to do next (e.g., call another tool or just respond directly)\n",
|
22 |
"\n",
|
23 |
"\n",
|
24 |
""
|
25 |
-
]
|
|
|
26 |
},
|
27 |
{
|
|
|
28 |
"cell_type": "code",
|
29 |
-
"
|
30 |
-
"
|
31 |
-
"
|
32 |
-
|
33 |
-
},
|
34 |
-
"outputs": [
|
35 |
-
{
|
36 |
-
"name": "stdout",
|
37 |
-
"output_type": "stream",
|
38 |
-
"text": [
|
39 |
-
"Note: you may need to restart the kernel to use updated packages.\n"
|
40 |
-
]
|
41 |
-
}
|
42 |
-
],
|
43 |
-
"source": [
|
44 |
-
"%pip install -q -U langchain_openai langchain_core langgraph"
|
45 |
-
]
|
46 |
},
|
47 |
{
|
|
|
48 |
"cell_type": "code",
|
49 |
-
"execution_count": 2,
|
50 |
-
"id": "356a6482",
|
51 |
-
"metadata": {
|
52 |
-
"tags": []
|
53 |
-
},
|
54 |
"outputs": [],
|
|
|
55 |
"source": [
|
56 |
"import os\n",
|
57 |
"\n",
|
58 |
"# Please setp your own key.\n",
|
59 |
-
"os.environ[\"OPENAI_API_KEY\"]
|
60 |
-
]
|
|
|
61 |
},
|
62 |
{
|
|
|
63 |
"cell_type": "code",
|
64 |
-
"execution_count": 65,
|
65 |
-
"id": "71795ff1-d6a7-448d-8b55-88bbd1ed3dbe",
|
66 |
-
"metadata": {
|
67 |
-
"tags": []
|
68 |
-
},
|
69 |
"outputs": [],
|
|
|
70 |
"source": [
|
71 |
"import base64\n",
|
72 |
-
"from
|
73 |
-
"from langchain.schema import HumanMessage\n",
|
74 |
"from langchain_openai import ChatOpenAI\n",
|
75 |
"\n",
|
76 |
-
"\n",
|
77 |
"vision_llm = ChatOpenAI(model=\"gpt-4o\")\n",
|
78 |
"\n",
|
|
|
79 |
"def extract_text(img_path: str) -> str:\n",
|
80 |
" \"\"\"\n",
|
81 |
" Extract text from an image file using a multimodal model.\n",
|
@@ -88,7 +70,7 @@
|
|
88 |
" \"\"\"\n",
|
89 |
" all_text = \"\"\n",
|
90 |
" try:\n",
|
91 |
-
"
|
92 |
" # Read image and encode as base64\n",
|
93 |
" with open(img_path, \"rb\") as image_file:\n",
|
94 |
" image_bytes = image_file.read()\n",
|
@@ -129,62 +111,60 @@
|
|
129 |
" print(error_msg)\n",
|
130 |
" return \"\"\n",
|
131 |
"\n",
|
|
|
132 |
"llm = ChatOpenAI(model=\"gpt-4o\")\n",
|
133 |
"\n",
|
|
|
134 |
"def divide(a: int, b: int) -> float:\n",
|
135 |
" \"\"\"Divide a and b.\"\"\"\n",
|
136 |
" return a / b\n",
|
137 |
"\n",
|
|
|
138 |
"tools = [\n",
|
139 |
" divide,\n",
|
140 |
" extract_text\n",
|
141 |
"]\n",
|
142 |
"llm_with_tools = llm.bind_tools(tools, parallel_tool_calls=False)"
|
143 |
-
]
|
|
|
144 |
},
|
145 |
{
|
146 |
-
"cell_type": "markdown",
|
147 |
-
"id": "a2cec014-3023-405c-be79-de8fc7adb346",
|
148 |
"metadata": {},
|
149 |
-
"
|
150 |
-
|
151 |
-
|
152 |
},
|
153 |
{
|
|
|
154 |
"cell_type": "code",
|
155 |
-
"execution_count": 66,
|
156 |
-
"id": "deb674bc-49b2-485a-b0c3-4d7b05a0bfac",
|
157 |
-
"metadata": {
|
158 |
-
"tags": []
|
159 |
-
},
|
160 |
"outputs": [],
|
|
|
161 |
"source": [
|
162 |
-
"from typing import TypedDict, Annotated,
|
163 |
"from langchain_core.messages import AnyMessage\n",
|
164 |
"from langgraph.graph.message import add_messages\n",
|
|
|
|
|
165 |
"class AgentState(TypedDict):\n",
|
166 |
" # The input document\n",
|
167 |
-
" input_file:
|
168 |
" messages: Annotated[list[AnyMessage], add_messages]"
|
169 |
-
]
|
|
|
170 |
},
|
171 |
{
|
|
|
172 |
"cell_type": "code",
|
173 |
-
"execution_count": 76,
|
174 |
-
"id": "d061813f-ebc0-432c-91ec-3b42b15c30b6",
|
175 |
-
"metadata": {
|
176 |
-
"tags": []
|
177 |
-
},
|
178 |
"outputs": [],
|
|
|
179 |
"source": [
|
180 |
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
181 |
"from langchain_core.utils.function_calling import convert_to_openai_tool\n",
|
182 |
"\n",
|
183 |
"\n",
|
184 |
-
"# AgentState\n",
|
185 |
"def assistant(state: AgentState):\n",
|
186 |
" # System message\n",
|
187 |
-
" textual_description_of_tool
|
188 |
"extract_text(img_path: str) -> str:\n",
|
189 |
" Extract text from an image file using a multimodal model.\n",
|
190 |
"\n",
|
@@ -196,60 +176,45 @@
|
|
196 |
"divide(a: int, b: int) -> float:\n",
|
197 |
" Divide a and b\n",
|
198 |
"\"\"\"\n",
|
199 |
-
" image=state[\"input_file\"]\n",
|
200 |
" sys_msg = SystemMessage(content=f\"You are an helpful agent that can analyse some images and run some computatio without provided tools :\\n{textual_description_of_tool} \\n You have access to some otpional images. Currently the loaded images is : {image}\")\n",
|
201 |
"\n",
|
202 |
-
"\
|
203 |
-
|
204 |
-
|
205 |
},
|
206 |
{
|
207 |
-
"cell_type": "markdown",
|
208 |
-
"id": "4eb43343-9a6f-42cb-86e6-4380f928633c",
|
209 |
"metadata": {},
|
|
|
210 |
"source": [
|
211 |
-
"We define a `
|
212 |
"\n",
|
213 |
-
"The `
|
214 |
"\n",
|
215 |
-
"We create a graph with `
|
216 |
"\n",
|
217 |
-
"We add `tools_condition` edge, which routes to `End` or to `
|
218 |
"\n",
|
219 |
"Now, we add one new step:\n",
|
220 |
"\n",
|
221 |
-
"We connect the `
|
222 |
"\n",
|
223 |
"* After the `assistant` node executes, `tools_condition` checks if the model's output is a tool call.\n",
|
224 |
"* If it is a tool call, the flow is directed to the `tools` node.\n",
|
225 |
"* The `tools` node connects back to `assistant`.\n",
|
226 |
"* This loop continues as long as the model decides to call tools.\n",
|
227 |
"* If the model response is not a tool call, the flow is directed to END, terminating the process."
|
228 |
-
]
|
|
|
229 |
},
|
230 |
{
|
|
|
231 |
"cell_type": "code",
|
232 |
-
"
|
233 |
-
"
|
234 |
-
"metadata": {
|
235 |
-
"tags": []
|
236 |
-
},
|
237 |
-
"outputs": [
|
238 |
-
{
|
239 |
-
"data": {
|
240 |
-
"image/png": "iVBORw0KGgoAAAANSUhEUgAAANYAAAD5CAIAAADUe1yaAAAAAXNSR0IArs4c6QAAIABJREFUeJztnXdcU1fj/89NQnYChD1kiQgIjooTXFXqI44fUKt11Grr86271tX66GPt0Nplfdo+1rb6WBXrnlgVrKsuXBUVEESmjEBISEJCxk1yf3/EF6UYhpp7zw0571f/sMnNOZ/Am3PvPfcMjCAIgEDAgwE7AMLZQQoiIIMUREAGKYiADFIQARmkIAIyLNgBnge1AlfL8Ua1WdtgMhkdo1uJ5YIxWRhfxOSLWR5+bC6fCTsRXcAc4xcIAABAVqkvuqstydUKxCyzieCLmQIRi81jAEf4BiwOpqk3NTaYG9UmrcoscGWGxgi69RYK3V1gR4OMYyiokuNXj9cxXTB3b3ZoD4FnAAd2ohelskhXkqNVSA1uXuzB4z1YLs57ReQACl4/JS+41TB4gmd4LyHsLPbn7h/Kq+nyISmeMYNdYWeBA90VPPifiph4cWScGHYQcrmRoWhQ4COn+MAOAgH6KkgQxE8riye84+8XyoOdhQryrqtLc7VJb/nBDkI19FXwhxWPZqwOEYgd8p79+ci/qc65qp74biDsIJRCUwUPbqqIT/bwC3GK9q8596+o5FWG4a95ww5CHXS8Ecs6KY8dInZC/wAAsfGufBHzwQ017CDUQTsF62uNj7I13ft28vuPNnhppPuFAzLYKaiDdgpeTZcPHu8BOwVMWC6MvqPcr5+Sww5CEfRSUFqq5/AYYbGdsP/vmeg/WiIt1eNGC+wgVEAvBYvuaSS+bMqqy8nJMRgMsD7eNlwBsyRHS1LhtIJeCpbkakN7CKipKz09febMmTqdDsrH2yU0RoAUpJr6WqNYwnL3oagVfO4GzNqNRV77ZyUsVqCS46RWQRNopKCqDscwjIySy8rK5syZk5CQkJSUtH79eovFkp6evmHDBgDAqFGj4uLi0tPTAQDZ2dkLFixISEhISEh45513Hjx4YP24UqmMi4vbtWvX6tWrExIS/vnPf9r8uH1huTA0SpNWZbJ7yXSDRs8eGtVmvpiUUXSffPJJaWnp0qVLtVrtrVu3GAxGfHz89OnT09LSNm3aJBQKg4KCAABVVVUGg2H27NkMBuPAgQOLFi1KT0/ncrnWQrZt2/baa69t2bKFyWT6+Pg8/XG7IxCztGqTwJVGvyMyoNHX06pNJD2Oq6qqioyMTElJAQBMnz4dACCRSAIDAwEAMTExbm5u1sPGjBmTlJRk/Xd0dPScOXOys7MHDhxofSU2Nnb+/PlNZT79cbsjcGVqVWbQhaTi6QKNFASAYHFIOREnJSX98ssvX3zxxezZsyUSSWuHYRh2/vz5tLS0kpISPp8PAJDL/+qc69+/PxnZ2oDDZRIWOj4+tS80uhbkCVgNClIufebPn79kyZLMzMwJEybs37+/tcO2bt26fPny6OjojRs3Ll68GABgsfzVM8fjUf3AUFln5DvBKA0aKcgXMxvVZjJKxjBs6tSpx44dGzZs2BdffJGdnd30VtMoDYPBsH379uTk5KVLl/bu3Ts2NrYjJZM6yIO8i2NaQSMFRRIXF3JOxNYOFIFAMGfOHABAfn5+U6smkz15GqvT6QwGQ1RUlPV/lUpli1awBS0+TgYiCUvk1vlbQRp9Q68ATuUjnUZpEtr75/7+++8LhcKBAwdevnwZAGD1rFevXkwm86uvvpowYYLBYHj11VfDw8P37t3r4eGh0Wh++uknBoPx6NGj1sp8+uP2zVyap3VhMzAGKX+TtIK5du1a2Bn+QinDcb3FO4hr32IrKiouX758+vRpnU63cOHC4cOHAwDEYrGPj8+ZM2cuXbqkVqvHjRv30ksvXblyZf/+/WVlZQsXLgwODj506NC0adNwHN+5c2dCQkJ0dHRTmU9/3L6Z75xXBoTzvLvY+UdBQ+g1ZLU8X1ucox0+0YkGbLZG+k9VIyZ5Cd06/xRPGp2IAQ
BBkYLrpxTSMr1vsO2/fqVSmZycbPOtwMDAioqKp18fNmzYRx99ZO+kLZk9e7bNs3ZUVFTTU5bm9O3b9+uvv26ttJyrKqEbyxn8o10rCACofKS7flqeusD2/Amz2VxTU2PzLQyz/V14PJ67u7u9Y7ZEJpPhuI1Huq2l4nA4Hh6tDov8aWXxm2uCObzOfztMRwUBAOf313brIwzsxocdBA73r6iMekvfkaT/2dAEGnXKNDFikvfpHVKdhpQ+QppTXtBYfE/jPP7RVEEAwJQVQb9+Xg47BdU01ONn0mr+39wA2EEohY4nYisGnXn3hvJpHwQ5ySVRTZk+M61m2soghhP0BTaHvgpaW4U9Xzye8I6fb2ef0FlwW333D9Wk9zr7qBhb0FpBK2f31Oi05vjxnpQNqKaSisLGK+nywHBe/ARP2Fng4AAKAgBKcrRX0uvCYgU+QdzQGEEnOFXpteaSXG11iV5Vh8eP97D7AyEHwjEUtFJ4p6HwjqYkRxs1QMxiYwIxS+DK5HCZDvEFmExMqzY1qk0alUmtMNWU6UN7CCL6ioK6O2nfUxOOpGATpQ+0qlpcqzZpVWaTyWKxa+8NjuN5eXm9evWyZ6EA8IRMwkLwxSyhK8vDj+3ftZNf3XYch1SQVORy+ZQpUzIzM2EHcRZo2i+IcB6QggjIIAVbgmFYREQE7BROBFKwJQRBPHz4EHYKJwIp2BIMw1xdnXTxeyggBVtCEIRKpYKdwolACtrA19cXdgQnAiloA6lUCjuCE4EUbAmGYc1nyiHIBinYEoIg8vLyYKdwIpCCCMggBVuCYVgbq28h7A5SsCUEQSgUCtgpnAikoA08PZ10ADMUkII2qKurgx3BiUAKIiCDFGwJhmFdu3aFncKJQAq2hCCIoqIi2CmcCKQgAjJIQRs0LfeLoACkoA1srgiIIAmkIAIySMGWoJEyFIMUbAkaKUMxSEEEZJCCLUGTOCkGKdgSNImTYpCCCMggBVuC5hFTDFKwJWgeMcUgBVuCRspQDFKwJWikDMUgBRGQQQrawMfHB3YEJwIpaIPWdlpEkAFS0AZovCCVIAVtgMYLUglSsCVosBbFIAVbggZrUQxS0AaBgbb3hEeQAdr65glvv/22VCplMpkWi6W+vl4ikWAYZjKZTp48CTtaJwe1gk+YNGlSQ0NDVVWVVCo1GAzV1dVVVVUY5vD7LdIfpOATRo8eHRYW1vwVgiD69u0LL5GzgBT8iylTpvD5f+2L6evrO3XqVKiJnAKk4F+MHj06ODjY+m9rExgZGQk7VOcHKfg3ZsyYIRAIrE3glClTYMdxCpCCfyMxMTE4OJggiD59+qDHdNTAgh3ABhYLoZTh6jrcAqO/KPmVd0Dj0X8MfbM4R0t97UwmcPdmiz1cqK8aFrTrF8y/pc69qm7UmP3D+FqVCXYcqhG6s8rzte5e7H6j3f3DnGLndnop+OC6uvCudthrvgyGU3fI6XXmzB2ViVO9vbtwYWchHRpdCxZmawr+1IyY7Ofk/gEAuDzmhDlBp36RKmVG2FlIh0YK3rukjE9Gw5X/YtB471uZ9bBTkA5dFNRpzYpqI5fPhB2ERrh6sssLGmGnIB26KNigwH2CnOLqu+PwRSwun2kyWmAHIRe6KAgApm1wuvvfdlHJ8U4/VII+CiKcFKQgAjJIQQRkkIIIyCAFEZBBCiIggxREQAYpiIAMUhABGaQgAjJIQQRknFrBk6eOJaeOqqmRtnaA2Wy+fz/7xSuSSqurpVUvXk6nxKkVZLM5AoGQwWj1h/Dl159s3LT+BWuprKqYOn1CQQFaKsk2dJy+RBmjRv5j1Mh/tHGA0WB48VrMJhOtZkfQDQdW8P797F1pW+/nZAMAIrv3mDNncfeIKACAXq/f9O2Gq1f/AAD07Nlnwbxlvr5+WVmXf9r6XVVVha+v/4TxE1NTJm/4Ym1GxgkAwJmMLBaLZfOA8xfOAABGjIwDAPy6+7ifr/+p08ePHt1fXPKIx+P37z
dowfxlbm7uAICDh349dz7ztYnTtm37r1xR161b5LIlq4OCQqqlVW/OmggA+OjjDz4CYPTocR+sWAv7J0cvHFhBqbTKYDS8MX02g8E4duzABysX7dmdzuVyf92zPSPjxKyZczw8PDMyT/B4vMbGxrUfvx8SHLZ0yeqSkkdyuQwAkJryusViOXPmJADA5gHTp74lq62prq5c+cHHAAAPiScAIC/vflBQSGJiUn294vCRvdpG7WfrNlnzPHiQs3//rqVLV5tMpo0b1332+Yc//HeHh8Rz1b8+Xbd+9ayZc/r0jnN3l8D+sdEOB1Zw1KgxiYlJ1n937x69ZOmc+znZ/eIGVkureDze1CkzWSzW2KRk69WYwWAYMuTlxFFjmj4e0S0yJPjJOkb1SsXTBwQGBrm6uinq5bGxvZteXPLev5rGkLJYrLTd/zMYDBwOx/rKuk+/kUg8AACpqa9v/uEblVrlKnaN6BYJAAgKCmleDqIJB1YQw7BLl8/vP5BWVlZiXY6oXiEHAIwaOebs2dPvf7Bw/rylYWHhAAB/v4AePXqm7d7G5fLGj0tls9ktimr3gCZwHD98ZO+Z30/W1ko5HK7FYlEq6318fK3vcrlP5h74+PgBAOR1Mlcx2s6uHRz4jnjnrq1rPlzePSJ63Scb57yzGABgISwAgAH9B3+2/j+Kevnb/3z9q68/NZlMGIZtWP/t6FfGbflx04yZqXfv/tmiqHYPsEIQxL9WLd796//G/GPC5xu+TxyV1FRpC1xYLgAAs8VMzlfvVDiqgjiO/7pn+9ik5AXzl8bG9o6Oim3+7oD+g7f9vHfe3Pd+O3l0z94dAAChULj43Q92/HJIIBCu/veSxsaWM9NaO6D5zezdu3/e/vPGu4s+mPjq1OiomLDQcEq+ayfHURU0Go0GgyEi4snKQyq1EgBgsVisbwEAGAzGaxOneXp6FRbmAwAMBoP1hJua8rpGq5E+1VFs8wAul6dQyK3FNtVivbZrUWkbcDhc60mZhB9DZ8BRrwUFAkFYWPjhI3slEg+tRrNj508MBqO4+BEA4PCRvVeuXkwclSSXy+rqZN27R+M4/uasV4cPSwwN6Xrs2AGhQOjv/7cFzVs7oFfPl06dPr7xm/WxMb1FInF0VCybzf556/djx6YUFxf+umc7AKCk+FGAf1vLo3t7+/j7Bew/mMbl8dRq1eRJb7TRGe6EOPDP4t+r1vO4vI8/WbnvwK65c997Y/rbGRnpOI77+wfiRuMPW7757eTR1NTXJ096Q6fX9end7/ezpzZ9u4Hl4rJ+3SYu929rtbR2QGJiUkrypAsXz/y09bvcvHteXt6rV60rfJS/9qMVt29f3/j1jwMHJhw+srftnBiGrV69ns8XfP/fr05npFsbaUQTdFnWqPax4eze2nH/1wV2EHqR9mnR/60PY7p05qnEDtwKIjoHSEEEZJCCCMggBRGQQQoiIIMUREAGKYiADFIQARmkIAIySEEEZJCCCMggBRGQQQoiIEMXBRlMTCxx1MGL5OEVyGEwO/MwGRop6OnPLsnV0mTkGE1QSA24wYLR5VdEFjT6fpH9RNUlOtgpaERNua5bHyHsFKRDIwVHTPK+fLhGp0Ub4AAAQGluQ2lOQ1xi55/6TpdR01YMOvOudeW9R0iEbi5u3mxAo2gUQQCgqNY3yPHyfM1r7wV2+q2XaKeglVu/KyoKdQSBqVrZCtVsNuM43mL+h70gCEKv1/N4FG2Ip9PpOBxO04QmzwAOACA4kheb4EZNAPgQDsjChQvJK3zTpk0JCQnHjx8nr4rm1NbWrlmzhpq66AkdW8E2OHfu3Msvv0xe+dXV1QsXLiwtLY2Kitq1axd5FT3Nzp07R44cGRAQQGWldIBGtyPtMnnyZLJ/QwcOHCgtLQUAlJeXnzhxgtS6WpCUlDR37lyDPVY0dCwcoxWUSqWurq6VlZXh4SSuoVFZWblo0aKysjLr/1LfEFovDe/duxcdHS0SiSiuGhYO0A
oeOHAgKyuLx+OR6h8A4MiRI03+AQDKysqOHTtGao1Pw+PxunXrNn78eI1GQ3HVsHAABcvKypKTk8mupaqq6vz5881f0Wq1u3fvJrvep5FIJBcuXNDr9VJpq+uwdyZoreDVq1cBAMuWLaOgrr1791qbwKZlijAMe/z4MQVV28TT01MoFMbHxzdvmDsnsG/JbWM0GgcPHlxfX0991TKZ7JVXXqG+XpvodLrt27fDTkEudGwFlUplWVnZ2bNn3dwgdM+azebIyEjq67UJl8udOXMmAGDVqlVmc+dcMJN2Ch4/fry0tDQ8PJykhx/tguO4tV+GVsyaNWvx4sWwU5ACvRSUyWR37tzp3RvmsuA6nc7HxwdiAJuEh4d/9913AIALFy7AzmJnaKRgaWkphmEffvgh3BhyudzFxQVuhjbAcXzFihWwU9gTuii4Zs0aHo/n6ekJOwior68PCgqCnaJVEhMTx44dCwAwmTrJqDZaKFhRUTFgwACanP5KSkro8JfQBsOGDQMA7Nu37+HDh7Cz2AH4Cup0OqFQaP3LpgMGg6Fr166wU7TPtGnTPvzww05wmwxZweXLl1+7dg1K50trnDt3LiIiAnaKDrFnzx6TyVRQUAA7yAsBU8Hbt28vWrSI1MFXz4pSqRSLxf7+/rCDdBQOh6NQKHbu3Ak7yPMDTUGFQtGtW7cuXei1vnlWVlZISAjsFM/GoEGD6uvrYad4fuAoePDgwR9//FEsFkOpvQ3++OOPoUOHwk7xzLz77rvWvYBgB3keICgolUrd3NxWrlxJfdXtolKpHFFBAACbzd68eXNaWhrsIM+MYwxZpYaMjIyLFy+uX78edpDn5/r1656eng5xR98E1a3gggULcnJyKK60gxw5ciQlJQV2ihdiwIABwcHB7W6LRysoVfDixYvjx4+PiYmhstIOUlJSwmKx+vXrBzvIi8JisRITE5VKJewgHQWdiJ+wbNmysWPHjhgxAnYQO6BSqU6cODFt2jTYQToEda3gvn37aHsKzs/Pr66u7hz+AQBcXV0dxT/qFCwtLd2/fz89T8EAgG+++Yaa6QFUsnz58rt378JO0T4UKYhh2NatW6mp61k5evRoYGBgnz59YAexM8uXL//2229hp2gfZ78WNJlMo0ePPnv2LOwgzgsVreC5c+c+/vhjCip6DpYsWULbbHYhMzMTdoR2oELBrKysQYMGUVDRs7Jr166wsLD4+HjYQUjk4cOH27dvh52iLZz3RFxYWPjdd985xNXSi2AymdLT0+nc5U6Fgkajkc1mk13Ls9K/f/9r164xmUzYQZwd0k/Eubm5s2fPJruWZ2X69Ok7duxwEv9ycnI2b94MO0WrkK6gRqMhezmiZ+X777+fNm1aVFQU7CAUERMTs3v3br1eDzuIbZzuWnDr1q04js+dOxd2EEqpqKgQCATu7u6wg9iA9FbQZDIZjbaXjKae48ePV1ZWOpt/AIDAwEB6+keFgufOnYM+O93KzZs3c3NzaRKGYmpra+fNmwc7hW1I33PLw8ODDsPX7t27t3nzZpr3kJGHt7d3QUGBUqmk1WRFK05xLVhUVLRy5cr9+/fDDgITi8WCYRgNNzLp/P2CFRUVixYtOnz4MKwAiLah4gFdSkoKrDVrCwsL582bh/yz3or98MMPsFPYgIr9V4cPH/7mm2+azWa1Wu3t7U3ZZgr5+fl79+49fvw4NdXRHJFIVFRUBDuFDUhUcOjQoY2Njda1hK2XIARBREdHk1djc4qKilatWnXo0CFqqqM/Q4YM6dWrF+wUNiDxRPzyyy9bt1ZrugTmcDgDBgwgr8YmcnJyfv75Z+Rfc1gslkRCx309SVRw7dq10dHRzW93vLy8KPhDzM7O/vLLLzds2EB2RY6FTCYbN24c7BQ2IPd25PPPP29aooUgCD6fT/bz4kuXLp04cWLHjh2k1uKIsNls63UR3SBXQR8fn/fee8+6YiSGYWQ3gRkZGYcOHVq9ejWptTgoYrGYnt
N3SO+USUhISE1NFQgEQqGQ1AvBo0ePXrx4cdOmTeRV4dBgGBYWFgY7hQ06dEdswi06zfM/ZJvy2ltlRbVFRUVhQT0a6klZIfn8+fO594sdejkYssFxfOLEidTvqtcu7TwdeXBDfe+SSiE18oQvNLqzqV+GJIxGo3eAsKqoMaynsF+iu4c/h7y6HIvly5efPXu2qVPM2hwSBPHnn3/CjvaEtlrBG5mKuip8SKqvSELfTRCaYzETSpnx5C/SUVN9/ELg7JxDN+bOnZuXl1dTU9O8d4xWy3i2ei14/bRCJTMNSfFxFP8AAAwmJvHlJM8PPruntqacpoOEKSYsLKxv377Nz3UYhtFqDUXbCtbXGusqDQPHeVOexz68PMXvVqYDr31rX2bMmNF8Q43AwMDXX38daqK/YVvBukoDQdBuVE/HEbm7PC5sNBrgj1OkA+Hh4f3797f+myCIIUOG0GSLFyu2FdSozF5dHPtaKjhaoKh2yLWXyeCNN97w9vYGAAQEBNBt0S3bCuIGC6537CZELTcB4MANuX3p2rXrgAEDCIIYNmwYrZpAigZrIZ4Vi4Uoz2/U1Ju0apMJJ3RaO2yx1Mt/ur5Pt+6S+N/31Lx4aVwek81j8MVMsbtLUCT/RYpCCtKLBzfUBbc1FYWN/hFik5FgujAZLiyA2aNTgsHtP2gsbgG4PR4UN2gIM24ym3AXF8PxH6uCowURfYTd40TPURRSkC7kXVdfPlbnFSRiCUQxifQ6V7aNe7CkobYx97b+Srp8SLJHtz7PJiJSED46jfnk9hrczAgbEMhiO94aIxiGiX0EAAiEXuJb5xQPbmrGvu3LZHb0Qhz+TpxOTnmBdue6MmGAxLe7lyP61xw2j+UX7c12d9uyoqj2cUcfDSAFYVLzWH/xsKL70GAOz2EeQbULV8juMSr05PYatbxDq2ggBaFRkqvJTJN16e0wu34+EyH9Ag9vlkrL2m8LkYJw0ChNZ/d0Wv+shMQFHP6u0oS308GMFITD6Z01If0DYKcgna4D/X/7XzvdkEhBCNw6U28GbJaLY998dASOgK3VYrnXVG0cgxSEQNZJuXc4TZdaszveYZIr6Yo2DrCngnkPcl5wV+YLF38fMTKuvLzUfqFox+3fFQHREhouLwQA+PiLcQeP2XnyK4vD9AgS5VxttSG0m4KnM9LnL5ip1+vsVWBn5cFNDdfVsUchPSscITf/lqa1d+2moIPuSk8xagWu11p4Iuea2iL04Mke6/FWhm/a5wHd6Yz0Tf/ZAABITh0FAHh/xYf/GD0eAJCZ+dvuPdurqio8PDzHJqVMmzrLusSHyWTa/suWjMwTKpUyODh05pvvJMQPf7rYrKzLP239rqqqwtfXf8L4iakpk+2SFiKPCxrdA4UkFf6o+PbJM5urpA9FQkl4aNyYxLlikScAYPW6ka+Ofz/nwYW8gis8rnBgv5RXRjzZA8FsNv9+YVvWraNGo65rWF8cJ2u2g2eIqOxBY3hvG9/dPq3ggP7xk16bDgD4bN2mbzdtHdA/HgCQkXHis88/7NYt8t+r1w8flvi/7T/s/vXJIqdfff3pvv27xo1NWfWvT319/f+9Ztm9e3dalNnY2Lj24/fZLuylS1YPHjRULpfZJSpc6qpxgiDlFrCw6ObPOxf5eIdOSl41dPDU4tI7W7bPNxqfKLX38Ef+vhHz3t7yUq8xmed+ziu4Yn39yIkvz1zYFhkxOGXcMrYLV6dvICMbAMBsxuplth+W2KcVdHeX+PsHAgCiomJcXd2sA8S3/u+/sbG9V//rUwDA0CEvNzSo9+7b8WrqlLq62ozMEzPemD3zzXcAAMOGjpw+I+WXHT9u/HpL8zLrlQqDwTBkyMuJo8bYJSQd0KpMLA6PjJKP/vb1wLiUlHFPtrSNCB/w5beTCx5lxUYPBwD0f2nCyGEzAQD+vhE3bh97+Cgrunt8RVV+1q0jI4fNGjNqDgAgrs/YohKyZna6cFiaVqaQkzVSpqKivK
5ONnnSG02v9Os36OSpYxWV5QUFeQCAhIQn+09jGNYvbuCZ30+2KMHfL6BHj55pu7dxubzx41JpuH/Tc6DTmDnu9u8OVNRX18hK6hSPs24dbf66UvWkW5jNfuI9k8l0FXur1DIAwP28CwCAoYOnNB2PYWR10rE4jEY1tQpqtBoAgJvbX6uJiURiAECdrFar1QAA3Ju9JRa7NjY2arXa5iVgGLZh/bdbt32/5cdNBw6mrXz/4169XiIpLWWQtKpyg0YOAEgcMbtn9N82lheJPJ8+mMFgWSxmAIBSKeVyhQK+KymZWkBglla+u52tb5qv6u3lAwBQqZRNb9XXK6wienp6AwDU6r86ihQKOYvF4nJbdlUIhcLF736w45dDAoFw9b+X0HNhqGdC4Mo0GewwCr8FPK4IAIDjBm+vkOb/8bht3foIBO56vQY3UbErjMlgErnbbu/spiCPywMA1NU9uWnw8PD09fG7ceNK0wEXL/7O5XLDw7tHRcVgGJZ1/bL1daPRmHX9co8ePZlMJtuF3dxOa0ePv19AasrrGq1GKq2yV1pYiFxZJqP9FfTyDHJz9b35Z7rB+KRf1mw2mUx4258KDIgEANy5l2H3PE9jMppFbrYVZK5du/bpVyuLdGYT8A15hgtnLo9/7PiB0rJiDGB5D+537x4tEor3HUiTyWpwHD98ZO/vZ09Nm/pWv7iBYpFYKq0+cnQfAFhdneyHH74pKS1avmyNn18Ay8XlyNF9+QW5QUEhnh5eM2am1tXJ5PK6I0f3GQ2Gt9+ax2J19Mqh8I46JIovbOVrw0KjwuVSE8/NznckGIa5u/nduH08L/8SAYiyx/ePnPjabDYGd4kFAJy7tDPQP7J7+JNlzbJuHuVyBX16vuLtGXov9+ztOyd1eo1GW3/t5pGikluB/lHRkQn2jQcA0Ku0odFciY+NC3q7KSgWib28fC5cOHPt2qWGBvXo0ePCwyPc3SXnzmeeOn1cWa+YOnXW9GlvWR9M9YsbpNU8IWSvAAADj0lEQVRqTp0+du5choAvWLZ0db9+gwAAIqHIz9f/zzs3GRgjKjq2oqL88pXzly6f8/Dw+mDF2oCAwI7noaeCfDHrxm91HsH2v/zy8QoJDIguLs2+nX2yvCLXzy+8b+8x1n7B1hRkMBhREQmyurJ7uWeLS7N9vcMU9VU+XqFkKFhyu2bUNB8Gw8ZjSdsra93IUBj1oNdwOi5N3EFObqsYlurpS7/FjX794rFbkAff1YkekDTUNZrUDSnzbQ+OpFcj4QxEDxQ+ytW1oeDDRzd27lv59Os8rqi1ruNxoxcOjEu2V8IHBVd2H1zz9OsEQQBA2Oy4mTPrv4H+ka0VaNAYevQXtPYuUpBqeg91v3aiyD1QzGTZvhcMCeq5ZN6up18nCNDa8Bo+z55n9q6hfW0GsFgsBEHY3EdcLPJqrTSjDldLNVH9Wl1ODikIgfjxHnm3Fb7dbXTaAQDYbK6EDXNAv30D1BXXD0n2aOMANGQVAj2HuPG4ZoOunU6TToC+weDmgbU9uR0pCIcxs3yLsyphpyAXi4UovlGVNMu37cOQgnBgcxjJc/1LbnRmC4uzKqasCGr3MKQgNPxCeakLfEtuVMAOYn/MJkvhlfKp7we6e7c/uAQpCBNXD/b42b45mSU6dedZGVtbry+8XD55SSBf2KGbXaQgZDwDOPM3drVo1JU5NQYtFSMGyEOnNjy+W+1i0cz5vKu4w6vko04Z+GAYNvZtv5Ic7R9HavluXBafI/biMx1nlrHJYFbLtGaDEdcahqd6dol4thUvkYJ0ITRGEBojKLqvKbyjfXRFIQnk4wYLk81icVg0XLGYIAizwWTGTS5sRr1UFxoj6BYvDIl+nmURkYL0omussGusEABQXaLTqsxalclosOjtsdCvfeHwGVw+my/mi9yZPkHtdLu0DVKQpviFkjLFhIbYVpDNxSz0a/yfCVcvF9ImQiDsie3fksjdRVbm2OsilNzTePh1hh
lPnR7bCnp34dByzZOOopQZQ3rwWS6oGXQAWm0FA8K5fxySUp7HPpzdXTUwqa3RGQj60NZ+xLnXVIXZml7DPNx92K0NbqMVOo1JVYf/cVD66sIAtw48GkLQgXa2xC7J1WZfVEpL9EwW3U/MEj+OSmYMi+H3H+MhEKM7fYehHQWbMOjoviUdQQAu3wGaakQLOqogAkESqNlAQAYpiIAMUhABGaQgAjJIQQRkkIIIyPx/ohlWIXXfCHUAAAAASUVORK5CYII=",
|
241 |
-
"text/plain": [
|
242 |
-
"<IPython.core.display.Image object>"
|
243 |
-
]
|
244 |
-
},
|
245 |
-
"metadata": {},
|
246 |
-
"output_type": "display_data"
|
247 |
-
}
|
248 |
-
],
|
249 |
"source": [
|
250 |
"from langgraph.graph import START, StateGraph\n",
|
251 |
-
"from langgraph.prebuilt import tools_condition\n",
|
252 |
-
"from langgraph.prebuilt import ToolNode\n",
|
253 |
"from IPython.display import Image, display\n",
|
254 |
"\n",
|
255 |
"# Graph\n",
|
@@ -272,63 +237,35 @@
|
|
272 |
"\n",
|
273 |
"# Show\n",
|
274 |
"display(Image(react_graph.get_graph(xray=True).draw_mermaid_png()))"
|
275 |
-
]
|
|
|
276 |
},
|
277 |
{
|
|
|
278 |
"cell_type": "code",
|
279 |
-
"execution_count": 78,
|
280 |
-
"id": "75602459-d8ca-47b4-9518-3f38343ebfe4",
|
281 |
-
"metadata": {
|
282 |
-
"tags": []
|
283 |
-
},
|
284 |
"outputs": [],
|
|
|
285 |
"source": [
|
286 |
"messages = [HumanMessage(content=\"Divide 6790 by 5\")]\n",
|
287 |
"\n",
|
288 |
-
"messages = react_graph.invoke({\"messages\": messages
|
289 |
-
]
|
|
|
290 |
},
|
291 |
{
|
|
|
292 |
"cell_type": "code",
|
293 |
-
"
|
294 |
-
"
|
295 |
-
"metadata": {
|
296 |
-
"tags": []
|
297 |
-
},
|
298 |
-
"outputs": [
|
299 |
-
{
|
300 |
-
"name": "stdout",
|
301 |
-
"output_type": "stream",
|
302 |
-
"text": [
|
303 |
-
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
304 |
-
"\n",
|
305 |
-
"Divide 6790 by 5\n",
|
306 |
-
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
307 |
-
"Tool Calls:\n",
|
308 |
-
" divide (call_s0G5ewtIQyHUCOv0fClsCpgh)\n",
|
309 |
-
" Call ID: call_s0G5ewtIQyHUCOv0fClsCpgh\n",
|
310 |
-
" Args:\n",
|
311 |
-
" a: 6790\n",
|
312 |
-
" b: 5\n",
|
313 |
-
"=================================\u001b[1m Tool Message \u001b[0m=================================\n",
|
314 |
-
"Name: divide\n",
|
315 |
-
"\n",
|
316 |
-
"1358.0\n",
|
317 |
-
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
318 |
-
"\n",
|
319 |
-
"The result of dividing 6790 by 5 is 1358.0.\n"
|
320 |
-
]
|
321 |
-
}
|
322 |
-
],
|
323 |
"source": [
|
324 |
"for m in messages['messages']:\n",
|
325 |
" m.pretty_print()"
|
326 |
-
]
|
|
|
327 |
},
|
328 |
{
|
329 |
-
"cell_type": "markdown",
|
330 |
-
"id": "08386393-c270-43a5-bde2-2b4075238971",
|
331 |
"metadata": {},
|
|
|
332 |
"source": [
|
333 |
"## Training program\n",
|
334 |
"MR Wayne left a note with his training program for the week. I came up with a recipe for dinner leaft in a note.\n",
|
@@ -336,204 +273,39 @@
|
|
336 |
"you can find the document [HERE](https://huggingface.co/datasets/agents-course/course-images/blob/main/en/unit2/LangGraph/Batman_training_and_meals.png), so download it and upload it in the local folder.\n",
|
337 |
"\n",
|
338 |
""
|
339 |
-
]
|
|
|
340 |
},
|
341 |
{
|
|
|
342 |
"cell_type": "code",
|
343 |
-
"execution_count": 82,
|
344 |
-
"id": "f6e97e84-3b05-4aaf-a38f-1de9b73cd37f",
|
345 |
-
"metadata": {
|
346 |
-
"tags": []
|
347 |
-
},
|
348 |
"outputs": [],
|
|
|
349 |
"source": [
|
350 |
"messages = [HumanMessage(content=\"According the note provided by MR wayne in the provided images. What's the list of items I should buy for the dinner menu ?\")]\n",
|
351 |
"\n",
|
352 |
-
"messages = react_graph.invoke({\"messages\": messages
|
353 |
-
]
|
|
|
354 |
},
|
355 |
{
|
|
|
356 |
"cell_type": "code",
|
357 |
-
"
|
358 |
-
"
|
359 |
-
"metadata": {
|
360 |
-
"tags": []
|
361 |
-
},
|
362 |
-
"outputs": [
|
363 |
-
{
|
364 |
-
"name": "stdout",
|
365 |
-
"output_type": "stream",
|
366 |
-
"text": [
|
367 |
-
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
368 |
-
"\n",
|
369 |
-
"According the note provided by MR wayne in the provided images. What's the list of tiems I should buy for the dinner menu ?\n",
|
370 |
-
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
371 |
-
"Tool Calls:\n",
|
372 |
-
" extract_text (call_JalVBOR82hwRknFcplnLoTtG)\n",
|
373 |
-
" Call ID: call_JalVBOR82hwRknFcplnLoTtG\n",
|
374 |
-
" Args:\n",
|
375 |
-
" img_path: Batman_training_and_meals.png\n",
|
376 |
-
"=================================\u001b[1m Tool Message \u001b[0m=================================\n",
|
377 |
-
"Name: extract_text\n",
|
378 |
-
"\n",
|
379 |
-
"TRAINING SCHEDULE\n",
|
380 |
-
"For the week of 2/20-2/26\n",
|
381 |
-
"\n",
|
382 |
-
"SUNDAY 2/20\n",
|
383 |
-
"MORNING\n",
|
384 |
-
"30 minute jog\n",
|
385 |
-
"30 minute meditation\n",
|
386 |
-
"\n",
|
387 |
-
"EVENING\n",
|
388 |
-
"clean and jerk liftsβ3 reps/8 sets. 262 lbs.\n",
|
389 |
-
"5 sets metabolic conditioning:\n",
|
390 |
-
"10 mile run\n",
|
391 |
-
"12 kettlebell swings\n",
|
392 |
-
"12 pull-ups\n",
|
393 |
-
"30 minutes flexibility\n",
|
394 |
-
"30 minutes sparring\n",
|
395 |
-
"\n",
|
396 |
-
"MONDAY 2/21\n",
|
397 |
-
"MORNING\n",
|
398 |
-
"30 minute jog\n",
|
399 |
-
"30 minutes traditional kata (focus on Japanese forms)\n",
|
400 |
-
"\n",
|
401 |
-
"EVENING\n",
|
402 |
-
"5 sets 20 foot rope climb\n",
|
403 |
-
"30 minutes gymnastics (work on muscle ups in\n",
|
404 |
-
"particular)\n",
|
405 |
-
"high bar jumpsβ12 reps/8 sets\n",
|
406 |
-
"crunchesβ50 reps/5 sets\n",
|
407 |
-
"30 minutes heavy bag\n",
|
408 |
-
"30 minutes flexibility\n",
|
409 |
-
"20 minutes target practice\n",
|
410 |
-
"\n",
|
411 |
-
"TUESDAY 2/22\n",
|
412 |
-
"MORNING\n",
|
413 |
-
"30 minute jog\n",
|
414 |
-
"30 minutes yoga\n",
|
415 |
-
"\n",
|
416 |
-
"EVENING\n",
|
417 |
-
"off day\n",
|
418 |
-
"leg heavy dead liftβ5 reps/7 sets. 600 lbs.\n",
|
419 |
-
"clean and jerk liftβ3 reps/10 sets\n",
|
420 |
-
"30 minutes sparring\n",
|
421 |
-
"\n",
|
422 |
-
"WEDNESDAY 2/23\n",
|
423 |
-
"OFF DAY\n",
|
424 |
-
"\n",
|
425 |
-
"MORNING\n",
|
426 |
-
"20-mile runβlast weekβs time was 4:50 per mile.\n",
|
427 |
-
"Need to better that time by a half a minute.\n",
|
428 |
-
"\n",
|
429 |
-
"EVENING\n",
|
430 |
-
"skill training only\n",
|
431 |
-
"30 minutes yoga\n",
|
432 |
-
"30 minutes meditation\n",
|
433 |
-
"30 minutes body basics\n",
|
434 |
-
"30 minutes bow basics\n",
|
435 |
-
"30 minutes sword basics\n",
|
436 |
-
"30 minutes observational\n",
|
437 |
-
"exercise\n",
|
438 |
-
"30 minutes kata\n",
|
439 |
-
"30 minutes pressure points\n",
|
440 |
-
"30 minutes modus and pressure points\n",
|
441 |
-
"\n",
|
442 |
-
"THURSDAY 2/24\n",
|
443 |
-
"MORNING\n",
|
444 |
-
"30 minute jog\n",
|
445 |
-
"30 minute meditation\n",
|
446 |
-
"30 minutes traditional kata\n",
|
447 |
-
"(focus on Japanese forms)\n",
|
448 |
-
"\n",
|
449 |
-
"EVENING\n",
|
450 |
-
"squatsβ10 reps/5 sets. 525 lbs.\n",
|
451 |
-
"30 minutes flexibility\n",
|
452 |
-
"crunchesβ50 reps/5 sets\n",
|
453 |
-
"20 minutes target practice\n",
|
454 |
-
"30 minutes heavy bag\n",
|
455 |
-
"\n",
|
456 |
-
"FRIDAY 2/25\n",
|
457 |
-
"MORNING\n",
|
458 |
-
"30 minute jog\n",
|
459 |
-
"30 minute meditation\n",
|
460 |
-
"\n",
|
461 |
-
"EVENING\n",
|
462 |
-
"clean and jerk liftsβ3 reps/8 sets. 262 lbs.\n",
|
463 |
-
"5 sets metabolic conditioning:\n",
|
464 |
-
"10 mile run\n",
|
465 |
-
"12 kettlebell swings\n",
|
466 |
-
"12 pull-ups\n",
|
467 |
-
"30 minutes flexibility\n",
|
468 |
-
"30 minutes sparring\n",
|
469 |
-
"\n",
|
470 |
-
"SATURDAY 2/26)\n",
|
471 |
-
"MORNING\n",
|
472 |
-
"30 minute jog\n",
|
473 |
-
"30 minutes yoga\n",
|
474 |
-
"\n",
|
475 |
-
"EVENING\n",
|
476 |
-
"crunchesβ50 reps/5 sets\n",
|
477 |
-
"squatsβ(5 reps/10 sets. 525 lbs.\n",
|
478 |
-
"push-upsβ60 reps/sets\n",
|
479 |
-
"30 minutes monkey bars\n",
|
480 |
-
"30 minute pommel horse\n",
|
481 |
-
"30 minutes heavy bag\n",
|
482 |
-
"2 mile swim\n",
|
483 |
-
"\n",
|
484 |
-
"In an effort to inspire the all- important Dark Knight to take time out of his busy schedule and actually consume a reasonable amount of sustenance, I have taken the liberty of composing a menu for today's scheduled natal to its my hope that these elegantly prepared courses will not share the fate of their predecessors -mated cold and untouched on a computer console.\n",
|
485 |
-
"-A\n",
|
486 |
-
"\n",
|
487 |
-
"W A Y N E M A N O R\n",
|
488 |
-
"\n",
|
489 |
-
"Tuesday's Menu\n",
|
490 |
-
"\n",
|
491 |
-
"Breakfast\n",
|
492 |
-
"six poached eggs laid over artichoke bottoms with a sage pesto sauce\n",
|
493 |
-
"thinly sliced baked ham\n",
|
494 |
-
"mixed organic fresh fruit bowl\n",
|
495 |
-
"freshly squeezed orange juice\n",
|
496 |
-
"organic, grass-fed milk\n",
|
497 |
-
"4 grams branched-chain amino acid\n",
|
498 |
-
"2 grams fish oil\n",
|
499 |
-
"\n",
|
500 |
-
"Lunch\n",
|
501 |
-
"local salmon with a ginger glaze\n",
|
502 |
-
"organic asparagus with lemon garlic dusting\n",
|
503 |
-
"Asian yam soup with diced onions\n",
|
504 |
-
"2 grams fish oil\n",
|
505 |
-
"\n",
|
506 |
-
"Dinner\n",
|
507 |
-
"grass-fed local sirloin steak\n",
|
508 |
-
"bed of organic spinach and piquillo peppers\n",
|
509 |
-
"oven-baked golden herb potato\n",
|
510 |
-
"2 grams fish oil\n",
|
511 |
-
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
512 |
-
"\n",
|
513 |
-
"For the dinner menu, you should buy the following items:\n",
|
514 |
-
"\n",
|
515 |
-
"1. Grass-fed local sirloin steak\n",
|
516 |
-
"2. Organic spinach\n",
|
517 |
-
"3. Piquillo peppers\n",
|
518 |
-
"4. Potatoes (for oven-baked golden herb potato)\n",
|
519 |
-
"5. Fish oil (2 grams)\n",
|
520 |
-
"\n",
|
521 |
-
"Ensure the steak is grass-fed and the spinach and peppers are organic for the best quality meal.\n"
|
522 |
-
]
|
523 |
-
}
|
524 |
-
],
|
525 |
"source": [
|
526 |
"for m in messages['messages']:\n",
|
527 |
" m.pretty_print()"
|
528 |
-
]
|
|
|
529 |
},
|
530 |
{
|
531 |
-
"cell_type": "code",
|
532 |
-
"execution_count": null,
|
533 |
-
"id": "b96c8456-4093-4cd6-bc5a-f611967ab709",
|
534 |
"metadata": {},
|
|
|
535 |
"outputs": [],
|
536 |
-
"
|
|
|
|
|
537 |
}
|
538 |
],
|
539 |
"metadata": {
|
|
|
1 |
{
|
2 |
"cells": [
|
3 |
{
|
|
|
|
|
4 |
"metadata": {},
|
5 |
+
"cell_type": "markdown",
|
6 |
"source": [
|
7 |
"# Agent\n",
|
8 |
"\n",
|
|
|
14 |
"\n",
|
15 |
"As seen in the Unit 1, an agent needs 3 steps as introduced in the ReAct architecture :\n",
|
16 |
"[ReAct](https://react-lm.github.io/), a general agent architecture.\n",
|
17 |
+
"\n",
|
18 |
+
"* `act` - let the model call specific tools\n",
|
19 |
+
"* `observe` - pass the tool output back to the model\n",
|
20 |
"* `reason` - let the model reason about the tool output to decide what to do next (e.g., call another tool or just respond directly)\n",
|
21 |
"\n",
|
22 |
"\n",
|
23 |
""
|
24 |
+
],
|
25 |
+
"id": "89791f21c171372a"
|
26 |
},
|
27 |
{
|
28 |
+
"metadata": {},
|
29 |
"cell_type": "code",
|
30 |
+
"outputs": [],
|
31 |
+
"execution_count": null,
|
32 |
+
"source": "%pip install -q -U langchain_openai langchain_core langgraph",
|
33 |
+
"id": "bef6c5514bd263ce"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
34 |
},
|
35 |
{
|
36 |
+
"metadata": {},
|
37 |
"cell_type": "code",
|
|
|
|
|
|
|
|
|
|
|
38 |
"outputs": [],
|
39 |
+
"execution_count": null,
|
40 |
"source": [
|
41 |
"import os\n",
|
42 |
"\n",
|
43 |
"# Please setp your own key.\n",
|
44 |
+
"os.environ[\"OPENAI_API_KEY\"] = \"sk-xxxxxx\""
|
45 |
+
],
|
46 |
+
"id": "61d0ed53b26fa5c6"
|
47 |
},
|
48 |
{
|
49 |
+
"metadata": {},
|
50 |
"cell_type": "code",
|
|
|
|
|
|
|
|
|
|
|
51 |
"outputs": [],
|
52 |
+
"execution_count": null,
|
53 |
"source": [
|
54 |
"import base64\n",
|
55 |
+
"from langchain_core.messages import HumanMessage\n",
|
|
|
56 |
"from langchain_openai import ChatOpenAI\n",
|
57 |
"\n",
|
|
|
58 |
"vision_llm = ChatOpenAI(model=\"gpt-4o\")\n",
|
59 |
"\n",
|
60 |
+
"\n",
|
61 |
"def extract_text(img_path: str) -> str:\n",
|
62 |
" \"\"\"\n",
|
63 |
" Extract text from an image file using a multimodal model.\n",
|
|
|
70 |
" \"\"\"\n",
|
71 |
" all_text = \"\"\n",
|
72 |
" try:\n",
|
73 |
+
"\n",
|
74 |
" # Read image and encode as base64\n",
|
75 |
" with open(img_path, \"rb\") as image_file:\n",
|
76 |
" image_bytes = image_file.read()\n",
|
|
|
111 |
" print(error_msg)\n",
|
112 |
" return \"\"\n",
|
113 |
"\n",
|
114 |
+
"\n",
|
115 |
"llm = ChatOpenAI(model=\"gpt-4o\")\n",
|
116 |
"\n",
|
117 |
+
"\n",
|
118 |
"def divide(a: int, b: int) -> float:\n",
|
119 |
" \"\"\"Divide a and b.\"\"\"\n",
|
120 |
" return a / b\n",
|
121 |
"\n",
|
122 |
+
"\n",
|
123 |
"tools = [\n",
|
124 |
" divide,\n",
|
125 |
" extract_text\n",
|
126 |
"]\n",
|
127 |
"llm_with_tools = llm.bind_tools(tools, parallel_tool_calls=False)"
|
128 |
+
],
|
129 |
+
"id": "a4a8bf0d5ac25a37"
|
130 |
},
|
131 |
{
|
|
|
|
|
132 |
"metadata": {},
|
133 |
+
"cell_type": "markdown",
|
134 |
+
"source": "Let's create our LLM and prompt it with the overall desired agent behavior.",
|
135 |
+
"id": "3e7c17a2e155014e"
|
136 |
},
|
137 |
{
|
138 |
+
"metadata": {},
|
139 |
"cell_type": "code",
|
|
|
|
|
|
|
|
|
|
|
140 |
"outputs": [],
|
141 |
+
"execution_count": null,
|
142 |
"source": [
|
143 |
+
"from typing import TypedDict, Annotated, Optional\n",
|
144 |
"from langchain_core.messages import AnyMessage\n",
|
145 |
"from langgraph.graph.message import add_messages\n",
|
146 |
+
"\n",
|
147 |
+
"\n",
|
148 |
"class AgentState(TypedDict):\n",
|
149 |
" # The input document\n",
|
150 |
+
" input_file: Optional[str] # Contains file path, type (PNG)\n",
|
151 |
" messages: Annotated[list[AnyMessage], add_messages]"
|
152 |
+
],
|
153 |
+
"id": "f31250bc1f61da81"
|
154 |
},
|
155 |
{
|
156 |
+
"metadata": {},
|
157 |
"cell_type": "code",
|
|
|
|
|
|
|
|
|
|
|
158 |
"outputs": [],
|
159 |
+
"execution_count": null,
|
160 |
"source": [
|
161 |
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
162 |
"from langchain_core.utils.function_calling import convert_to_openai_tool\n",
|
163 |
"\n",
|
164 |
"\n",
|
|
|
165 |
"def assistant(state: AgentState):\n",
|
166 |
" # System message\n",
|
167 |
+
" textual_description_of_tool = \"\"\"\n",
|
168 |
"extract_text(img_path: str) -> str:\n",
|
169 |
" Extract text from an image file using a multimodal model.\n",
|
170 |
"\n",
|
|
|
176 |
"divide(a: int, b: int) -> float:\n",
|
177 |
" Divide a and b\n",
|
178 |
"\"\"\"\n",
|
179 |
+
" image = state[\"input_file\"]\n",
|
180 |
" sys_msg = SystemMessage(content=f\"You are an helpful agent that can analyse some images and run some computatio without provided tools :\\n{textual_description_of_tool} \\n You have access to some otpional images. Currently the loaded images is : {image}\")\n",
|
181 |
"\n",
|
182 |
+
" return {\"messages\": [llm_with_tools.invoke([sys_msg] + state[\"messages\"])], \"input_file\": state[\"input_file\"]}"
|
183 |
+
],
|
184 |
+
"id": "3c4a736f9e55afa9"
|
185 |
},
|
186 |
{
|
|
|
|
|
187 |
"metadata": {},
|
188 |
+
"cell_type": "markdown",
|
189 |
"source": [
|
190 |
+
"We define a `tools` node with our list of tools.\n",
|
191 |
"\n",
|
192 |
+
"The `assistant` node is just our model with bound tools.\n",
|
193 |
"\n",
|
194 |
+
"We create a graph with `assistant` and `tools` nodes.\n",
|
195 |
"\n",
|
196 |
+
"We add `tools_condition` edge, which routes to `End` or to `tools` based on whether the `assistant` calls a tool.\n",
|
197 |
"\n",
|
198 |
"Now, we add one new step:\n",
|
199 |
"\n",
|
200 |
+
"We connect the `tools` node *back* to the `assistant`, forming a loop.\n",
|
201 |
"\n",
|
202 |
"* After the `assistant` node executes, `tools_condition` checks if the model's output is a tool call.\n",
|
203 |
"* If it is a tool call, the flow is directed to the `tools` node.\n",
|
204 |
"* The `tools` node connects back to `assistant`.\n",
|
205 |
"* This loop continues as long as the model decides to call tools.\n",
|
206 |
"* If the model response is not a tool call, the flow is directed to END, terminating the process."
|
207 |
+
],
|
208 |
+
"id": "6f1efedd943d8b1d"
|
209 |
},
|
210 |
{
|
211 |
+
"metadata": {},
|
212 |
"cell_type": "code",
|
213 |
+
"outputs": [],
|
214 |
+
"execution_count": null,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
215 |
"source": [
|
216 |
"from langgraph.graph import START, StateGraph\n",
|
217 |
+
"from langgraph.prebuilt import ToolNode, tools_condition\n",
|
|
|
218 |
"from IPython.display import Image, display\n",
|
219 |
"\n",
|
220 |
"# Graph\n",
|
|
|
237 |
"\n",
|
238 |
"# Show\n",
|
239 |
"display(Image(react_graph.get_graph(xray=True).draw_mermaid_png()))"
|
240 |
+
],
|
241 |
+
"id": "e013061de784638a"
|
242 |
},
|
243 |
{
|
244 |
+
"metadata": {},
|
245 |
"cell_type": "code",
|
|
|
|
|
|
|
|
|
|
|
246 |
"outputs": [],
|
247 |
+
"execution_count": null,
|
248 |
"source": [
|
249 |
"messages = [HumanMessage(content=\"Divide 6790 by 5\")]\n",
|
250 |
"\n",
|
251 |
+
"messages = react_graph.invoke({\"messages\": messages, \"input_file\": None})"
|
252 |
+
],
|
253 |
+
"id": "d3b0ba5be1a54aad"
|
254 |
},
|
255 |
{
|
256 |
+
"metadata": {},
|
257 |
"cell_type": "code",
|
258 |
+
"outputs": [],
|
259 |
+
"execution_count": null,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
260 |
"source": [
|
261 |
"for m in messages['messages']:\n",
|
262 |
" m.pretty_print()"
|
263 |
+
],
|
264 |
+
"id": "55eb0f1afd096731"
|
265 |
},
|
266 |
{
|
|
|
|
|
267 |
"metadata": {},
|
268 |
+
"cell_type": "markdown",
|
269 |
"source": [
|
270 |
"## Training program\n",
|
271 |
"MR Wayne left a note with his training program for the week. I came up with a recipe for dinner leaft in a note.\n",
|
|
|
273 |
"you can find the document [HERE](https://huggingface.co/datasets/agents-course/course-images/blob/main/en/unit2/LangGraph/Batman_training_and_meals.png), so download it and upload it in the local folder.\n",
|
274 |
"\n",
|
275 |
""
|
276 |
+
],
|
277 |
+
"id": "e0062c1b99cb4779"
|
278 |
},
|
279 |
{
|
280 |
+
"metadata": {},
|
281 |
"cell_type": "code",
|
|
|
|
|
|
|
|
|
|
|
282 |
"outputs": [],
|
283 |
+
"execution_count": null,
|
284 |
"source": [
|
285 |
"messages = [HumanMessage(content=\"According the note provided by MR wayne in the provided images. What's the list of items I should buy for the dinner menu ?\")]\n",
|
286 |
"\n",
|
287 |
+
"messages = react_graph.invoke({\"messages\": messages, \"input_file\": \"Batman_training_and_meals.png\"})"
|
288 |
+
],
|
289 |
+
"id": "2e166ebba82cfd2a"
|
290 |
},
|
291 |
{
|
292 |
+
"metadata": {},
|
293 |
"cell_type": "code",
|
294 |
+
"outputs": [],
|
295 |
+
"execution_count": null,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
296 |
"source": [
|
297 |
"for m in messages['messages']:\n",
|
298 |
" m.pretty_print()"
|
299 |
+
],
|
300 |
+
"id": "5bfd67af70b7dcf3"
|
301 |
},
|
302 |
{
|
|
|
|
|
|
|
303 |
"metadata": {},
|
304 |
+
"cell_type": "code",
|
305 |
"outputs": [],
|
306 |
+
"execution_count": null,
|
307 |
+
"source": "",
|
308 |
+
"id": "8cd664ab5ee5450e"
|
309 |
}
|
310 |
],
|
311 |
"metadata": {
|
unit2/langgraph/mail_sorting.ipynb
CHANGED
@@ -1 +1,443 @@
|
|
1 |
-
{"cells":[{"cell_type":"markdown","metadata":{},"source":["# Alfred the Mail Sorting Butler: A LangGraph Example\n","\n","In this notebook, **we're going to build a complete email processing workflow using LangGraph**.\n","\n","This notebook is part of the <a href=\"https://www.hf.co/learn/agents-course\">Hugging Face Agents Course</a>, a free course from beginner to expert, where you learn to build Agents.\n","\n","\n","\n","## What You'll Learn\n","\n","In this notebook, you'll learn how to:\n","1. Set up a LangGraph workflow\n","2. Define state and nodes for email processing\n","3. Create conditional branching in a graph\n","4. Connect an LLM for classification and content generation\n","5. Visualize the workflow graph\n","6. Execute the workflow with example data"]},{"cell_type":"code","execution_count":6,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["\u001b[33m WARNING: The script isympy is installed in '/Library/Frameworks/Python.framework/Versions/3.11/bin' which is not on PATH.\n"," Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.\u001b[0m\u001b[33m\n","\u001b[0m\u001b[33m WARNING: The script huggingface-cli is installed in '/Library/Frameworks/Python.framework/Versions/3.11/bin' which is not on PATH.\n"," Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.\u001b[0m\u001b[33m\n","\u001b[0m\u001b[33m WARNING: The scripts torchfrtrace and torchrun are installed in '/Library/Frameworks/Python.framework/Versions/3.11/bin' which is not on PATH.\n"," Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.\u001b[0m\u001b[33m\n","\u001b[0m\u001b[33m WARNING: The script transformers-cli is installed in '/Library/Frameworks/Python.framework/Versions/3.11/bin' which is not on PATH.\n"," Consider adding this directory to PATH or, if you prefer to suppress this 
warning, use --no-warn-script-location.\u001b[0m\u001b[33m\n","\u001b[0m\n","\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.2.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m25.0.1\u001b[0m\n","\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpython3 -m pip install --upgrade pip\u001b[0m\n","Note: you may need to restart the kernel to use updated packages.\n"]}],"source":["# Install the required packages\n","%pip install -q langgraph langchain_openai langchain_huggingface"]},{"cell_type":"markdown","metadata":{},"source":["## Setting Up Our Environment\n","\n","First, let's import all the necessary libraries. LangGraph provides the graph structure, while LangChain offers convenient interfaces for working with LLMs."]},{"cell_type":"code","execution_count":41,"metadata":{},"outputs":[],"source":["import os\n","from typing import TypedDict, List, Dict, Any, Optional\n","from langgraph.graph import StateGraph, END\n","from langchain_openai import ChatOpenAI\n","from langchain_core.messages import HumanMessage, AIMessage\n","\n","# Set your OpenAI API key here\n","os.environ[\"OPENAI_API_KEY\"] = \"sk-xxxxx\" # Replace with your actual API key\n","\n","# Initialize our LLM\n","model = ChatOpenAI(model=\"gpt-4o\", temperature=0)"]},{"cell_type":"markdown","metadata":{},"source":["## Step 1: Define Our State\n","\n","In LangGraph, **State** is the central concept. 
It represents all the information that flows through our workflow.\n","\n","For Alfred's email processing system, we need to track:\n","- The email being processed\n","- Whether it's spam or not\n","- The draft response (for legitimate emails)\n","- Conversation history with the LLM"]},{"cell_type":"code","execution_count":42,"metadata":{},"outputs":[],"source":["class EmailState(TypedDict):\n"," email: Dict[str, Any] \n"," is_spam: Optional[bool] \n"," spam_reason: Optional[str] \n"," email_category: Optional[str] \n"," draft_response: Optional[str] \n"," messages: List[Dict[str, Any]] "]},{"cell_type":"markdown","metadata":{},"source":["## Step 2: Define Our Nodes"]},{"cell_type":"code","execution_count":43,"metadata":{},"outputs":[{"data":{"text/plain":["<langgraph.graph.state.StateGraph at 0x123332fd0>"]},"execution_count":43,"metadata":{},"output_type":"execute_result"}],"source":["import os\n","from typing import TypedDict, List, Dict, Any, Optional\n","from langgraph.graph import StateGraph, START, END\n","from langchain_openai import ChatOpenAI\n","from langchain_core.messages import HumanMessage, AIMessage\n","from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint\n","\n","\n","\n","# Initialize LLM\n","model = ChatOpenAI( model=\"gpt-4o\",temperature=0)\n","\n","class EmailState(TypedDict):\n"," email: Dict[str, Any]\n"," is_spam: Optional[bool]\n"," draft_response: Optional[str]\n"," messages: List[Dict[str, Any]]\n","\n","# Define nodes\n","def read_email(state: EmailState):\n"," email = state[\"email\"]\n"," print(f\"Alfred is processing an email from {email['sender']} with subject: {email['subject']}\")\n"," return {}\n","\n","def classify_email(state: EmailState):\n"," email = state[\"email\"]\n"," \n"," prompt = f\"\"\"\n","As Alfred the butler of Mr wayne and it's SECRET identity Batman, analyze this email and determine if it is spam or legitimate and should be brought to Mr wayne's attention.\n","\n","Email:\n","From: 
{email['sender']}\n","Subject: {email['subject']}\n","Body: {email['body']}\n","\n","First, determine if this email is spam.\n","answer with SPAM or HAM if it's legitimate. Only reurn the answer\n","Answer :\n"," \"\"\"\n"," messages = [HumanMessage(content=prompt)]\n"," response = model.invoke(messages)\n"," \n"," response_text = response.content.lower()\n"," print(response_text)\n"," is_spam = \"spam\" in response_text and \"ham\" not in response_text\n"," \n"," if not is_spam:\n"," new_messages = state.get(\"messages\", []) + [\n"," {\"role\": \"user\", \"content\": prompt},\n"," {\"role\": \"assistant\", \"content\": response.content}\n"," ]\n"," else :\n"," new_messages = state.get(\"messages\", [])\n"," \n"," return {\n"," \"is_spam\": is_spam,\n"," \"messages\": new_messages\n"," }\n","\n","def handle_spam(state: EmailState):\n"," print(f\"Alfred has marked the email as spam.\")\n"," print(\"The email has been moved to the spam folder.\")\n"," return {}\n","\n","def drafting_response(state: EmailState):\n"," email = state[\"email\"]\n"," \n"," prompt = f\"\"\"\n","As Alfred the butler, draft a polite preliminary response to this email.\n","\n","Email:\n","From: {email['sender']}\n","Subject: {email['subject']}\n","Body: {email['body']}\n","\n","Draft a brief, professional response that Mr. 
Wayne can review and personalize before sending.\n"," \"\"\"\n"," \n"," messages = [HumanMessage(content=prompt)]\n"," response = model.invoke(messages)\n"," \n"," new_messages = state.get(\"messages\", []) + [\n"," {\"role\": \"user\", \"content\": prompt},\n"," {\"role\": \"assistant\", \"content\": response.content}\n"," ]\n"," \n"," return {\n"," \"draft_response\": response.content,\n"," \"messages\": new_messages\n"," }\n","\n","def notify_mr_wayne(state: EmailState):\n"," email = state[\"email\"]\n"," \n"," print(\"\\n\" + \"=\"*50)\n"," print(f\"Sir, you've received an email from {email['sender']}.\")\n"," print(f\"Subject: {email['subject']}\")\n"," print(\"\\nI've prepared a draft response for your review:\")\n"," print(\"-\"*50)\n"," print(state[\"draft_response\"])\n"," print(\"=\"*50 + \"\\n\")\n"," \n"," return {}\n","\n","# Define routing logic\n","def route_email(state: EmailState) -> str:\n"," if state[\"is_spam\"]:\n"," return \"spam\"\n"," else:\n"," return \"legitimate\"\n","\n","# Create the graph\n","email_graph = StateGraph(EmailState)\n","\n","# Add nodes\n","email_graph.add_node(\"read_email\", read_email) # the read_email node executes the read_mail function\n","email_graph.add_node(\"classify_email\", classify_email) # the classify_email node will execute the classify_email function\n","email_graph.add_node(\"handle_spam\", handle_spam) #same logic \n","email_graph.add_node(\"drafting_response\", drafting_response) #same logic\n","email_graph.add_node(\"notify_mr_wayne\", notify_mr_wayne) # same logic\n"]},{"cell_type":"markdown","metadata":{},"source":["## Step 3: Define Our Routing Logic"]},{"cell_type":"code","execution_count":44,"metadata":{},"outputs":[{"data":{"text/plain":["<langgraph.graph.state.StateGraph at 0x123332fd0>"]},"execution_count":44,"metadata":{},"output_type":"execute_result"}],"source":["# Add edges\n","email_graph.add_edge(START, \"read_email\") # After starting we go to the \"read_email\" 
node\n","\n","email_graph.add_edge(\"read_email\", \"classify_email\") # after_reading we classify\n","\n","# Add conditional edges\n","email_graph.add_conditional_edges(\n"," \"classify_email\", # after classify, we run the \"route_email\" function\"\n"," route_email,\n"," {\n"," \"spam\": \"handle_spam\", # if it return \"Spam\", we go the \"handle_span\" node\n"," \"legitimate\": \"drafting_response\" # and if it's legitimate, we go to the \"drafting response\" node\n"," }\n",")\n","\n","# Add final edges\n","email_graph.add_edge(\"handle_spam\", END) # after handling spam we always end\n","email_graph.add_edge(\"drafting_response\", \"notify_mr_wayne\")\n","email_graph.add_edge(\"notify_mr_wayne\", END) # after notifyinf Me wayne, we can end too\n"]},{"cell_type":"markdown","metadata":{},"source":["## Step 4: Create the StateGraph and Define Edges"]},{"cell_type":"code","execution_count":45,"metadata":{},"outputs":[],"source":["# Compile the graph\n","compiled_graph = email_graph.compile()"]},{"cell_type":"code","execution_count":46,"metadata":{},"outputs":[{"data":{"image/png":"iVBORw0KGgoAAAANSUhEUgAAAVsAAAIrCAIAAADOZ63mAAAAAXNSR0IArs4c6QAAIABJREFUeJzs3XdcE/f/B/BPdkgCYe+9XYCCSq1VcFFwb9Fadx2odSJ127q3OFGsOEFRcH/Vuqq21boZRQTZskeAhISQS35/nL9ALdvAZbyfDx8+krvL5X0heeVzn9x9jiSTyRAAACCEECITXQAAQIlAIgAAakEiAABqQSIAAGpBIgAAakEiAABqUYkuAPxLdZW0JK9aUI4JKiSYRCapUYHfhhlaZBqDzNKhcHRoRlZ0ossBXwQSQSkIKrCUV5Vp8XxBOcbmUthcKluHytGjSaUY0aU1S2G2SFCB0RjkrHcC+84c+y4cu84soosCrUGCI5SIJcVkf1wtKc0XG5jT7TuzzR20iK7oi4iqpOnx/I9potwPwl5DDRzdOURXBFoGEoFIiX9V/H6psNdQQ4++ukTXomDlxTV/Xi+RVEsHTjZlsqC7SmVAIhDmwYVClja1p78+0YW0oZJccczBnIBpZhaOqt320RyQCMS4fTLf0pnV6SsdogtpDzEHPvqMMdI3hU5HFQCJQICYAx9du2t37KkRcYCL2f/Rw0fXvgub6EJAE2AHr709iilydOdoVBwghEYtsHhypaiipIboQkATIBHaVfKLSgaT4vYNl+hCCPBdiM3980VEVwGaAInQrh5GF3brr0d0FcQgU0kWjsynN0uILgQ0BhKh/Ty/U+reV5fGIBFdCGG6D9J/+zuvphq6rpQXJEI7kWLoY6rQO8CA6EII5jPW+PWDMqKrAA2CRGgnafF8JovSzk+6YsWKa9euteKBAwYMyM3NbYOKkKUzK+Gv8rZYM1AISIR2kp4gsOvc3r+9JSUlteJR+fn5PB6vDcpB
CCG2DoXDpRZmVbfR+sEXguMR2kn03pwR8yxo9DbpRLh8+fK5c+c+fvzIZDK7deu2bNkyExMTLy8vfC6Hw3n48CGGYceOHbt161ZhYSGXy+3bt++PP/6opaWFtwimT5/+9OnT58+fb9++ffHixfgD+/btu2vXLoVXG/eovEYs9RygoT2syk4G2l5VpSR8dVobrfzVq1eenp4xMTHZ2dnx8fEzZ86cOnWqTCYrKCjw9PSMiori8XgymezUqVM9e/a8fft2ZmbmX3/99e233+7YsQNfg5+f3+jRo/ft2/f27VuhUHjnzh1PT8+kpCQ+n98WBae8rvzfiby2WDP4cnA2dHsQlEvYOm3VifDhwwcGgzF06FAqlWppabl169a8vDyEEJfLRQixWCz8hr+//1dffeXo6IgQsra2HjRo0B9//IGvgUQiMZnMhQsX4nfZbDZCSEdHB7+hcGwuVVAhaYs1gy8HidAeBBUYm9tWL7WXlxeJRJo5c+bw4cN79uxpbm5uYFDPLxq6uro3btzYuHFjYWGhRCKpqqpisWqHMHBzc2uj8v6LrUOBRFBa0LPYHmQyRGO0VRvB1tb2xIkTlpaW+/fvHzZs2NSpUxMSEv672I4dO8LDw8eNG3fs2LFz586NHDmy7lwOp/0GMqBQSVQavPGUFPxh2gNLm1JRIm679Ts5OW3cuPG3334LCwujUCiLFi0Si//1dBiGXblyZcqUKQEBARYWFoaGhnw+v+3qaRyfJ2mjHlbw5SAR2kObtpMTEhLi4uIQQhQKxdPTc+7cuTwer6Tk08HC+G9JUqkUwzC8QwEhJBAIHj161PjPTG33I5SgAmPrwO6qkoJEaA9sLlXXkI7a5iP2559/Llmy5N69ezk5OcnJyVFRUWZmZqampgwGg8FgvHr1Kjk5mUQiubi4XL9+PScnJyUlZdGiRV9//XVFRUVGRoZE8nlU6ejoIISePHmSlpbWFgVXC6WGloy2WDP4cpAI7YSuRU5LELTFmqdPnz5y5Mi9e/eOGTMmKChIJpOFhoaSSCSE0NSpU+/evTtv3jyhULh27VoMw8aNG/fTTz9NmDAhKCjI1NT0+++/Lyws/GyFHTp06NWr1549e7Zv394WBSe/qDC3gyGVlBQcodRO/nlakZ8h6jfBmOhCCFYjlh1fkzZnmwPRhYD6QRuhndh14vDhJzeEspOrOn+licNDqAro4GknWtpkrgEt/kl5l971fx4kEsmAAQPqnSUWi+n0+gcptLOzO3HihEIrrRUREREREVHvLA6H09CvFZ07dz5w4EBD6/zjavGw2eaKqxEoGOw1tJ/GG8wymQw/1vC/+Hw+i8Uik+tp0NFoNCMjI0VX+kllZWVlZWW9s6qrqxmM+nsH6XS6oaFhvbMSn1YUZIr6jdf0XSdlBonQrl4/4FGoJM0cVQ0hdO1o3sBJJkw27KsqL/jbtKuuvrqZSYLMpCqiCyHAlSO57n25EAdKDv487W3oD+b3zxeUF2tWL+O9yEJrF5a1C1wMUtnBXgMBZFIUuT3Ld7yxmR2T6Fraw4PzhdauLAe4BqQqgDYCAUhkNDHE+o+rxckv6u+3UxtSDMXszzG0YEAcqApoIxDpz2sl2e+reg01tHJWw2P4/r5VmvKG7zvWSNUveK1RIBEIVpRT/ce1Yh09mpkd064LRw0uo1yYVZ39vurv26We/fW6++mT4CxHlQKJoBRyUoTJLyrTEvimNkwdfRpLh8LSprJ0KJhEBf46ZAqpoqSmqgJDJPTueQWHS3X04Lh/o0uhQRioHkgE5ZKXJirOra6qxKoqJGQKqYqPKXDlfD4/IyOjc+fOClwnQojDpSKEWDoUbT2ahaNW240fB9oBJIIGiY+P37VrV0MHJgMAvzUAAP4FEgEAUAsSQYNQKBRLS0uiqwBKDRJBg2AYlpOTQ3QVQKlBImgQEonURhdlAWoDEkGDyGQygaBNxnoEagMSQYOQSCR9fX2iqwBKDRJBg8hkstLSUqKr
AEoNEkGDUCgUa2troqsASg0SQYNgGJaVlUV0FUCpQSIAAGpBImgQMpncnteABqoIEkGDSKVSAi8JDVQCJIIGIZPJ8stDA1AvSAQNIpVKy8vLia4CKDVIBABALUgEDUImk83N4ZqLoDGQCBpEKpXm5uYSXQVQapAIAIBakAgahEKhWFlZEV0FUGqQCBoEw7Ds7GyiqwBKDRIBAFALEkGDwLmPoEmQCBoEzn0ETYJEAADUgkTQIDA6O2gSJIIGgdHZQZMgEQAAtSARNAhcrwE0CRJBg8D1GkCTIBE0CIVCgXMfQeMgETQIhmFw7iNoHCQCAKAWJIIGIZPJenp6RFcBlBokggaRSqVlZWVEVwGUGiSCBoEznUCTIBE0CJzpBJoEiaBBKBSKjY0N0VUApQaJoEEwDMvMzCS6CqDUIBE0CJlMNjIyIroKoNRIMpmM6BpA25owYYJQKJTJZGKxmM/nGxgYyGQykUh0584doksDSgfaCOrP398/Ly8vNze3uLhYJBJ9/PgxNzcXLhIN6gWJoP7GjRv33w7F/v37E1QOUGqQCOpPS0tr6NChFApFPsXa2nrs2LGEFgWUFCSCRhg7dqyFhYX87oABA4yNjQmtCCgpSASNoKWlNWrUKLyZYG1tPWbMGKIrAkoKEkFTyJsJ/fv3hwYCaAiV6ALUU1UlVvxRLK7GiC7kX4b4znz8+HEv91Gpb/lE11KLTCJx9KgGpnQKjUR0LQCOR1C0aqH0XmRhXrrQ2pUtEkqJLkcFMBiUknwRkiJnL45nfzhZm2CQCIok5GMxBz72GmZiaMEguhbV8/f/itk6ZO8AfaIL0WjQj6BIZ7dmDfreAuKgdXr4G1bxpc/vwAgORIJEUJg3D3ideukx2ZRmLAvq193PMD1RIKqCvS3CQCIoTF6miKMLPbUKUFYgJroEzQWJoDCSaqmOPp3oKlSegRmzoqyG6Co0FySCwlTxMakUumm/lFiEIdhpIA4kAgCgFiQCAKAWJAIAoBYkAgCgFiQCAKAWJAIAoBYkAgCgFiQCAKAWJAIAoBYkAgCgFiQCAKAWJIK6mTZj3L7QbQQWkJaW6tvfKz7+DUJo3frgpcvmElgMaClIBKBghkbGi34MMTe3JLoQ0BpwPj9QMB1tneHDYPR3VQWJQJj09A/TZ47f9Mvuo+H7tZhahw+dkkgkZ84ev//gTkFBnpGRydgxk+QfrbKy0sNhe1+9+ruyssLIyGTUiPGjRk3AZ8XHv9m3f1tmZrqpqfnMGUHNfHYer+zQkT1v374sL+fZ2zvNmjm/q4cXQujK1YsnIo6sW7v1wMGdubk55uaWP634+cOH96fPHi8rK+nc2eOnFRt0dfUQQu+S/wkPP5CSmiwWV9va2M+YEeTl2RPfa5gxa0Lo3vAuXTza7MUDbQUSgTA0Gg0hdPLU0fHjJrs4d0QIHQnbd+Nm7KKFIZ06u798+ezAwZ1UKnVwwAiE0PadP2dnZaxZtVlf3yA+4c2u3ZuMTUx7f+3D5/NXrVni6OB85NDpGknNsWP7S0qKm3xqqVS6ImQBX8BfEbzeQN/wytXokJ8WHj54yt7ekUqlCgT869dj9u45hhAKmj913frlXbp0DT8aWVlZMWv2xAvRZ36YtaC6unpFyIKOHbvs3HGIRqVduxGzZu3SUxExRkZwJQjVBolAHBIJIeTh4eX/7TCEEJ/Pv3I1etLEaX5+QxBClhZWKSnvzkVG4IkQNG8pmUw2N7NACFlZ2Vy5Ev3ixdPeX/s8ffaksrJi4YJgW1t7hFDIig3jJgQ0+cwvXj57n/Ju964jeLtgftCyFy+fxcRGLVu6GiEkkUjGj/9em6ONEOrZ4+uLl84dPBDBZDKZTGZXD6/U1GSEEIVC2bMrzMDAkMvVRQhNnzo3JiYqIfGtr8/AdnntQFuBRCBYx45d8BsfPryXSCRent7yWe7unjduXq6qqmKxWFpMrXNREW/evCgv50ml0srKCgsLK4RQZmYak8nE4wAh
ZGRk3Jxv6aSkBBqN5uHuid8lk8luXbriH3WcleWna0mz2WwdHS6+m4AQYrHYBYX5CCEqlVojqQndvz31w3s+vxIf47+iolxxLwwgBiQCwdhsDn6jqkqAEFq8dDaJ9OnSRvjHrLSshE6nB4fMxzBsftAyaytbCoWyeu3ST48SVjEYzLor1NJiNfmkVVWCmpoaP/9e8ikYhunrG8jv4ns0ODq9nsEjc3Kyli6b09Wj+8qffjE0MJJKpc1pmwDlB4mgLPBoWLVyo72dY93pxkYmSUkJaWmp+/Ycc3Prik8s55WZmZojhJgMpkDwr0u28fmVzXkuOp1+LOxc3Ylkcgt+ir7/4A6GYatXbWIwGAihgoL85j8WKDNIBGVhb+9Eo9HKykqt+9riU3i8MhKJRKfTq8XVCCEdHS4+PTExLi8/18WlI0LI2spWIpFkZKThOw5paamlpSVNPperayexWIxhmJ2dAz4lPz9PvmvQHDU1YgaDiccBQui3uzdbvsVAGcERSsqCw+EMGTIq4mTY/Qd3cvM+vn7zYlnwvK3b1yOEHB2c6XR6TGxUSUnx8xdPQ/dv7+7lnZ2TWVZW6u3dm8Vihe7fnvQuMT7+zd7QrXp6TV8lzbNbDydHl81b1rx58zIvP/fuvVs/zJ545Wp086vt4Nq5vJz3v1tXS0qKL1+JfpecqKur9+HDez5fia4xC1oB2ghKZN6cxdoc7aPHQktKivX1DXp91WfG9CCEkK6uXvDydeHhB+78dsPZucOK4PVFxYW/bPxpybI5J45f+HnDzgMHdy78cYaJidmsmfMvXjrX5LU8KRTKtq37D4ftXbchWCQSmpqaT548c+yYSc0vtVevPuPHTQ47Gnro8O6ePb4OCd5w8dLZyKiTZDJ52FA4PEmFwZVgFeb8ruwe/sZw0ccv9CS2wL4zy8VLm+hCNBTsNQAAasFeg3o6FxkRGRVR7yxra7uD+0+0e0VANUAiqKehQ0f7+g6qdxaNSqt3OgCQCGpLm6ONH4YMQItAPwIAoBYkAgCgFiQCAKAWJAIAoBYkAgCgFiQCAKAWJAIAoBYkAgCgFiQCAKAWJILC6BrT4TTSL8dgUWgMeFsSBl56hWGyyCUfRURXofJy3gv0TesZ2RG0D0gEhbHrxOYVVhNdhWoTlEu4hjRdIzgXizCQCApj7crSYpOf3276AiqgIffP5fUdbUR0FRoNxlBSsD+vl/B5mLG1lpEFkwR52wwkMqmyrIZfWvPn9cLvV9vq6MP5uESCRFC8jERByhu+WCQtyRUTXcu/YBhWXS1isdhEF/IvWtoUKo1kZqfV01///y9VAQgDiaBB4uPjd+3aFRFR/9hKAEA/AgDgXyARAAC1IBE0CIVCsba2JroKoNQgETQIhmFZWVlEVwGUGiSCBiGTyebm5kRXAZQaJIIGkUqlubm5RFcBlBokggahUChWVlZEVwGUGiSCBsEwLDs7m+gqgFKDRNAg0I8AmgSJoEGgHwE0CRIBAFALEkGDUCgU2GsAjYNE0CAYhsFeA2gcJAIAoBYkggYhkUh0OgxhCBoDiaBBZDKZWKxcg7gAZQOJoEFIJBKbrVwDKAFlA4mgQWQymUAgILoKoNQgEQAAtSARNAiZTDYygrHPQWMgETSIVCotKioiugqg1CARAAC1IBE0CBzFDJoEiaBB4Chm0CRIBABALUgEDQKjs4MmQSJoEBidHTQJEgEAUAsSQYPAOIugSZAIGgTGWQRNgkTQIHDuI2gSJIIGgXMfQZMgEQAAtSARNAiFQrG0tCS6CqDUIBE0CIZhOTk5RFcBlBokggaBYxZBkyARNAgcswiaBImgQSgUioWFBdFVAKUGiaBBMAz7+PEj0VUApQaJoEEoFIqVlRXRVQClRpLJZETXANrWlClTioqK8Mu3CAQCfX19mUxWU1Nz9+5doksDSgfaCOrP29u7qKioqKiovLxcIpEUFhYWFRXB5d5AvSAR1N/IkSM/21mQyWQ9e/YkriKg
vCAR1J+pqWm/fv1IJJJ8iomJyXfffUdoUUBJQSJohNGjR8uPTZLJZN27d3dwcCC6KKCMIBE0gpmZma+vL95MMDU1/f7774muCCgpSARNMXbsWGtra2gggMZRiS5AhZWXSJDq/HbLpOj37eUvEd4bO2JKeXEN0eW0gBaHSmeSmrEgUAA4HqHFJGLZ75eKUt9UWjiyS/KqiS5H/cmkMhKF5NFH170vl+ha1B8kQssIBdKI9ekDvjM3MGPQGLDP1U4qS2sS/+Jpscm9hxsQXYuag0RomQOLU6esdyS6Cg31+n6pDMP6joEL3rch+JZrgcexxf0CzYiuQnN17acvEsryM0REF6LOIBFaIOMfAdcQDv4lEplCKsyBvps2BInQXDIMsbSpOgY0ogvRaEYWDGGFhOgq1BkkQrORUH6GkOgiNJ24WioSSomuQp1BIgAAakEiAABqQSIAAGpBIgAAakEiAABqQSIAAGpBIgAAakEiAABqQSIAAGpBIgAAakEiAABqQSIQICb2fP+BPRS+2uEj+586HY7fPhcZMWLUgGHDfRX+LF9OXmcbvQ7gS8A4i+pj3pzFdvaOCKGamppfTxz+1m/oyBHjiS6qHvI6gRKCRFAffn5D8BtVVQIMw7y8vB0cnIguqh7yOoESgkRoW0lJCYfD9r5/n6Sjw+3n6zd92tzPLrhYVlZ6OGzvq1d/V1ZWGBmZjBoxftSoCfisuLjX4b8eTE9PxTDMwcF55vQgd/dujUwfPrL/6FGBHTt2WR4chBDa8HPIZhrN1bUTg87Ysf2g/BnXrF1WUlp86EBEI2XzeGWHjux5+/ZleTnP3t5p1sz5XT28EEKZmelTp4/dvu1AZGTE+5QkNpsza+YCc3PL/fu3Z2VnmJlZLF2yuoNrp8a3C6/z+8kz2+YlB18E+hHaUF5+7rLgeeZmlrt3Hlkwf/mt29cOH9nz2TLbd/78T2LcmlWbw49GTgycevDw7id/PEQICYXClasX2drYHwg9cejASQd7p5CVCysqKxqaLl+hh7vnqYhLCKHg5Wujz/9vsP+Il6/+Li4uwucKhcLnL/761m9oI2VLpdIVIQsSE+NWBK8PO3zG1aVjyE8L09JSEUIUKhUh9OuJw4t+DLkSe9+tS9c9ezdHRBz55eddsZfu6mhz9x/Y0fh2ASUHbYQ2dONGLJ3OWL5sDYVCQQgJq6ri4l9/tkzQvKVkMtnczAIhZGVlc+VK9IsXT3t/7VNYmC8QCAYOCLCxsUMIzQ9a5tN3IJ1GLyjIq3e6fIVUKlVHh4sQ0tJicbm6ffsOOHBo5737t8aPm4wQ+uvpY5lM1s/Xr5GyX7x89j7l3e5dR/B2wfygZS9ePouJjVq2dDW+gK/PQGtrW4SQT9+Bd+/dCggYYWhohBDq06e/PPIa2q42eJmBIkEitKH375OcnVzxOEAIDRo0eNCgwZ8to8XUOhcV8ebNi/JynlQqrayssLCwQghZWlpbWdls2rJ62NAxXl7eTo4uHh6ejUxvCJPJ7Ofrd+e3G3giPHp075vevhwOp5GHJCUl0Gg0D/dPqyWTyW5duqamJssXsLayxW+w2Oy6d9kstlgsFovFdDq9oe0CSg4SoQ1VVlYYG5s2soBEIgkOmY9h2PygZdZWthQKZfXapfgsCoUSujc8MurkjRuxx8IPmJiYTp86d9CgwQ1Nb+RZAgJGXL12KTX1vaWl9bO///h5w87Gy66qEtTU1Pj595JPwTBMX7/2QglU2r8Gm6QzGHXvymSyRrYLKDlIhDbE1dWrqhI0skBSUkJaWuq+Pcfc3LriU8p5ZWam5vhtXV29uXMWzZ2zKCMj7UL0mS3b1tnY2rs4d2hoekPP4uLcwcnR5eHvvzk5uerocD27NXEIAJvNodPpx8LO1Z1IJregy6nx7QLKDHoW25CTo0vSu4Tq6k+jid+5c2PhoplSae3AodXiaoQQvtuPEEpMjMvLz8WvqZOb9/HJk09dcba29ksWrySTyRnp
Hxqa3ngl/v7DHzz87eHD3wYNHNzkZ9vVtZNYLMYwzNraFv9HpzMMDY2bv+GNbBdQcpAIbWjI4FESiWTT5tUJCW+fPHkYdizUxtqu7gfS0cGZTqfHxEaVlBQ/f/E0dP/27l7e2TmZZWWlhQX56zYEX4g+k5WVkZ2defpMOJlM7tixS0PTG69kwAD/kpKiJ3889Gv0VwacZ7ceTo4um7esefPmZV5+7t17t36YPfHK1ejmb3gj29X8lQBCwF5DGzIxMd22Zf+Ro/uWLp+ro8P18Rk4a8b8ugvo6uoFL18XHn7gzm83nJ07rAheX1Rc+MvGn5Ysm3Pi+IUVy9dduHjmRMQRCoViY2P/y4adVlY2VlY29U5vvBJtjraHh1dVlcCyGd17FApl29b9h8P2rtsQLBIJTU3NJ0+eOXbMpOZveOPb1fz1gPYH131sLpkUHVqW+v06lTz8lscrm/jdsODl63z6DiC6li+S9IwnrKzpOxou/dhWoI2g5sorynM/Zh84tMvGxr7PN/2ILgcoO0gENXf79rVj4Qfc3botX7ZW3oURH/9m5epFDT3kzOkr3P/vFASaBhJBzY0b+924sd99NtHZucPRf/+4WJc2R7vt6wJKChJBEzEYDDg6ANQLfn0EANSCRAAA1IJEAADUgkQAANSCRAAA1IJEAADUgkQAANSCRAAA1IJEAADUgmMWW8DMjkV0CZqOxqCgOkPOAIWDNkJzkchIKJDwCsVEF6LRirKFbC58jbUhSIQWsO/C5hVBIhAJw2TG1kyiq1BnkAgt8NVggz+vFgorMaIL0VBPrxXqGdGMLOjNWBa0Eoyh1DJYDTq2+kOfUaa6xnRtfVozHgG+FCaRleZXJz3lWThoefjAwA1tCxKhNf64WvIhjq+tTyvMFLZ6JTKpTCqTyq/vopZkMiSVYl+4jTQmRVuP6tFX16lrYxeeAQoBidB6NWIZau2LJ5FI5s6du3XrVgMDg2YsrsIuXbqUn58fFBTU6jXQGCSFVgQaA4nQ3l69elVTU+Pl5aXerYO6hEKhlpbWmTNnvvvu89GcgLKBnsV2FR8ff/jw4a5du2pOHCCEtLS0EEL29vbffvst0bWAJkAboZ08e/asZ8+eGRkZtra2RNdCGJFIxGQyX7582a1bNxIJ9gWUEbQR2sOZM2eio6MRQpocB/iFqhFCZmZm3bt3LyoqIrocUA9oI7StuLg4Nzc3vIFAdC3K5d27d8bGxlpaWvg+BVAS0EZoQ4cOHXr58iVCCOLgv1xdXVks1sCBA3NycoiuBdSCRGgTiYmJCKHu3btPmzaN6FqUF5PJfPLkCY/Hk0gklZWVRJcDECRCm1ixYkVSUhKeCETXogI6d+5MJpOHDh36999/E10LgERQKJFIxOPxBg4cOGbMGKJrUSVkMvnhw4fZ2dkIIbEYziUjEvQsKsz69et//PFHPT09ogtRbWFhYSYmJiNGjCC6EA0FbQTF2Lhxo6enJ8TBl5s9e3Z8fHxycjLRhWgoaCN8qdjY2JEjR1ZVVbFYMMKSwvB4vNLS0rS0tAEDBhBdi2aBNsIX2blzJ4ZhCCGIA8XS1dW1t7e/e/fuX3/9RXQtmgXaCK2Umprq6OiI/090LeoMP+47KSmpQ4cORNeiEaCN0Bq//vrrgwcPEEIQB20NP+57//79N2/eJLoWjQCJ0BokEmnWrFlEV6FBDh06hJ8ZJRKJiK5FzcFeQwvk5eU9ffp05MiRRBeiuXbs2NG9e3cfHx+iC1Fb0EZoLj6fP2vWrCFDhhBdiEZbvnz5tWvXoKXQdqCN0Cx5eXkSicTKyoroQgBCCNXU1Lx8+bJr164MBoPoWtQNtBGatnnzZgzDIA6UB41G69Spk6+vL5wfpXCQCE3IyMhwcXGxtLQkuhDwL9ra2n/++WdeXh6PxyO6FrUCidCYV69eGRoajh49muhCQP2cnZ0xDPvpp5+ILkR9QCI0yM/Pz9XVlcOBawQoNQMDA19f37t37xJdiJqA
nsX6paWl6ejoGBoaEl2IJpLJZC09J1ooFCKEysrKzM3N26wuBSOTyTSa0l0WDC6zW4/r16/Dr4zEKi8vb8WjxGJxWVkZmayaaPCVAAAgAElEQVQaLV8qlaqvr090FZ+DRPhc9+7dnz17RnQVoDX09PTEYjGdDpeKbT3VSNN2w+fznz9/ripfMuC/8DioqqoiuhBVBW/9WufPn6dSodGkDkgkEozO1jqQCJ8EBgZ27doVv8QIUHVaWlrQ0GsdeNUQfqXms2fPOjs7E10IqN+mTZtaetABlUqVSCT4bxDp6ekBAQH4kPl1NTRdk0EioFevXqWmpsJXivqhUqlUKrWqqsrQ0DAoKMjMzAw/CHXq1Kn4AnWnK0RgYGB+fr6i1kYITf8YXLt27erVq66urkQXAtoEjUZjsVja2tqDBw/Gf+pLTU2Vz607/csVFha27kdTpaLRHWk1NTVfffXV0KFDiS4EtACPxwsPD4+Pj6+oqLC1tZ06daq7uzs+KzEx8fDhw9nZ2WZmZjNnzoyKirKzswsKCnr//v2iRYt27Njx+vXrc+fOIYQCAgJ++OEHd3f3oKCgHTt2dOrUacuWLQihjh07xsbGlpeXu7m5LVmyJDo6+uHDhzU1NT4+PnPmzMFHbXnw4EFMTMzHjx/pdLqrq+vs2bPNzMzevn2L79dMnz7d29t77dq1EokkKirq0aNHhYWFhoaGI0eOHDx4MNEvXtM0uo3w6tUrGE9dtUil0rVr1yYlJS1evHjfvn3Ozs7r1q1LT09HCFVXV//yyy8sFmv37t3z5s2LiIjIz8/HP8P4oYESiWTMmDHDhw83MjKKjIz09/evu2YKhZKQkFBeXh4eHr579+5Xr14tWbLE3Nz85MmTISEh165dwy/hmZycvGPHDi8vr3379m3YsKG6unrjxo0IoU6dOoWEhCCEQkNDly1bhhA6fvx4TEzMuHHjDh06NHLkyLCwsFu3bhH3yjWX5iZCcHAwn8+nUChEFwJa4PXr16mpqQsXLvTw8LC2tp49e7axsfHVq1cRQn///XdFRUVQUJCDg4Obm9vcuXNLS0vrPpZKpTKZTDqdTiKRuFzuf8dWwDBs4sSJVCrVzs7O1taWTqcHBARQKJSuXbtyudy0tDSEkKWl5b59+yZNmmRlZeXi4jJ8+PD09PSysjIqlYqPx83hcFgslkAguHHjxqhRowYMGGBubj548OD+/ftHR0e376vVGhq615CTkzNlypROnToRXQhomeTkZBqN5ubmht8lk8mdOnXCP6s5OTlsNtvGxgaf1alTJy6X+9nDy8rKGjmRx8TERH5ACovF0tHRkc9isVj4UU9sNjs/Pz8iIiI3N7e6uloikeAHtn3W2ExLS5NIJN26dZNPcXNzu337tlAo1NLSUsQr0VY0NBHMzMxgyANVVFVVVVNTU/cacBiG4Z/GioqKz66aoa2t/dnDuVxuTU1NQyv/7Lyjz+7iUfL7779v27ZtwoQJc+bMYbPZiYmJeAfEf+tECIWEhOC7LfKHl5WVQSIonXnz5k2ZMqVnz55EFwJajM1m0+n0/fv3152I/3LMYDCqq6vrTv/vCEtkMvkLz3q4deuWm5vb999/j9/97Bnr1omPComPLi+n/GfTalwivH79uk+fPhAHKsrZ2VksFmMYJv+kFRQU4HsHZmZmFRUVeXl5+PEFiYmJDf0WKJPJMAxrXRdSTU2NgYGB/O7Dhw/l3//ylSOE7OzsaDQaj8eTD8bH4/FIJJLyn4WlcT2LXbt2nTBhAtFVgFby8PBwcHDYuXNnXFxcfn7+gwcPFixYcOPGDYRQjx49GAxGWFhYdnZ2YmJieHh4vQcasNnssrKyZ8+eFRQUtKIAFxeXV69evXv3rqCg4MCBA/hTpKSkiEQifHCd58+fZ2Zmstlsf3//s2fP/v7773l5eW/fvl21atWePXsU8Rq0Lc1qIzx+/JhEIvXu3ZvoQkArUSiUn3/++fjx45s3bxaJRCYmJoGB
gfgVNPT09EJCQo4dOzZ//nxbW9vZs2fv27fvv9/JPj4+9+7d27p169ixY1vxThg/fnxeXt7KlStZLJa/v39gYGBJSUloaCiZTO7Tp4+Xl1d4eDh+dMPMmTPZbPaJEydKS0v19PR69uw5ZcoUxb0SbUWDxlCqrq729fX9888/iS4ENEEmkxUVFbXigRUVFQwGA/9ZUSwWT5gwYdq0aQ0dgSaRSIg91RVGTCFYZWUlXDtQjQkEghkzZnh4eEycOBEhFBMTQyKRvv7664aWl0gk8qY+kNOgNgKMrqMqWt1GePfuXUREREpKCplMtre3nzZtWuNnrAiFQiaTKf+BsJ0pZxtBUxIhLCyMRCL98MMPRBcCmtbqRFAtypkImvJbw8uXL+W/IQMgB+OvfUZT2ghAhbRnG4HP59NoNEKuH6mcbQSN6Fl8/vy5o6MjnOaoQuqeU9CmSCRSdnZ2x44d2+fpPnvq9n/SJql/GyEvL2/WrFnXr18nuhAAVID6J8Lz588xDPP29ia6EKCkTp8+7eTkBO8QnPr3LHbv3h3+2KAR8hEWgPq3EXg83qVLl2bMmEF0IUB5yWQygUAAhyrh1LyN8L///a+srIzoKoBSI5FIEAdyap4Ijo6O8qG4AWjItm3bnj59SnQVSkHNE6F79+7KP0YFIJyOjk5CQgLRVSgFde5HSElJuXXr1oIFC4guBCg7Pp8vEAhMTEyILoR46nyE0oMHD+DUJtAcHA4HuhJw6rzXMHDgwEmTJhFdBVAB2dnZcNoLTp3bCHZ2dkSXAFSDtrb2x48fia5CKahtGyE3N3fx4sVEVwFUg66u7oULF4iuQimobSLEx8cr+cD4QKnUHWFZk6ntbw08Ho9Cofz3Gh4A1Gvq1KnHjh377KotGkht+xF0dXWJLgGokqysLAzDIBHUdq9hzpw5ubm5RFcBVMb9+/eZTCbRVRBPbRPhxYsX5ubmRFcBVIZQKCS6BKWgnv0IEomkvLwc+opAkzw9PeW3SSSSTCajUCizZ8/W2PNl1bONQKVSIQ5Aczg4OOBZgI9xRiKRrK2tNfk6gOqZCLdv3963bx/RVQAVMHHixLrdBxQKZdiwYfiVnTWTeiZCenq6Jv9RQfONGDHC0tJSftfKymrUqFGEVkQw9UyE7777bvLkyURXAVTDhAkT8DPi8AaChp/ypJ6JwOFwCBmBH6iikSNH4qfAWFpajhkzhuhyCKaeiTB37tz3798TXQVQGePHj9fS0ho6dCiLxSK6FoKp56+PQ4YMOXbsmJmZGdGFqInnd8oykgQ0GqkgU0R0LW1FIpFQKFSlvKiKAhhba9WIpdauLG//Jq4ipZ6JwOPx4ChmhZDJ0OlNmZ176XGN6HqmDDX9vGgAEiorqK4oqXlxp3jaelsKtcG/pHomAlCUUz9n9hphYmIDh/eqCQFPcuVw5uytDg0toIb9CDweD/qHFOLvW6Vd+upDHKgTti6190jTx5eLG1pADROBz+dLJBKiq1AHaQkCPRMYqFLdGJgxPrzlNzRXDRPBzMzs9OnTRFehDmgMsoEp/Iirbthcqq4RXVQlrXeuGiYCDJSiKHlpQgR9ieqoOFfUUAeiGibCq1evgoODia4CAJWkholQWVkJ/QgAtI4ajqrWq1cvLy8voqsAQCWpYSLQaDQYLQ+A1lHDvYbbt2/v3r2b6CoAUElqmAiVlZXV1dVEVwGASlKuvYby8vIvP6ra19fXx8eHx+N9eT1wcgTQNMqVCGKxWFHnWYjFYoWsBwCNooZ7DSKRqKqqiugqAFBJapgIMpkMTugEoHWUa69BIeDKPAC0mhomAkldx8EBoO2p4V6DSCSCK3Ypg/Jynm9/r4e/323pA5+/eDpx0rCBft7J75PqTo+JPd9/YA+F1gg+p4aJIJPJpNL6z/QEKuHM2ePa2joHD0RYW9nGXr6wdft6fHpXD69FP4YQXZ2aU+q9BolEEhER8fjxYx6Px+Vye/fuPW3aNBqNFhsbGxUVtWLF
iqNHjxYUFOjq6k6aNGnAgAEIIQzDLl68+PDhw5KSEm1tbW9v7+nTp2tpaSGEtmzZghDq2LFjbGxseXm5m5vbkiVLoqOjHz58WFNT4+PjM2fOHNjjUAaVlRXubt2cnVwRQu/rNBPs7Bzs7BocDgwohFInQnR09L1795YvX25mZpadnR0aGkqn06dOnUqhUAQCQUxMzObNmzkczvnz5/fs2ePi4mJlZXX58uXo6OilS5c6OjoWFBTs2bOHQqHMmTMHHzfh7du3FhYW4eHh2dnZCxcuXLJkyejRo0+ePBkXF7dq1aru3bvDKVJf6Oq1S2fP/crjlTk5uc6cHiSfHnv5wqnTx5YtWb1z98ZBAwfPnbPoXfI/4eEHUlKTxeJqWxv7GTOCvDx7SiSSgX7eCKH09A+Xr0Q7ObqkpCYjhG7fvn407Gx8/JuDh3bd++1vhNDI0QMnT5pRUJh//8FtobCqS5euy5asNjAwRAgVFxft2rPp9evnHI72mNETBQL+o8f3T5642EjZ6ekfps8cv+mX3UfD92sxtQ4fOiWRSM6cPX7/wZ2CgjwjI5OxYyYNH/ZpqL64uNfhvx5MT0/FMMzBwXnm9CB3924IoSHD+k4MnJaVlfH02RORSOjl5b186RouVxc/Oub4r4cePLxTVlZqYGA4oL//1CmzqVRqZmb61Oljd+86cikmMj7+DZlM9vUZGDRvKYVCQQjduHn54qVzeXkfGQymu1u3+UHLjI1NEEI8XtmhI3vevn1ZXs6zt3eaNXN+Vw+FvW+Veq8hIyPD1ta2W7duZmZmPXr02LJlC94QQAhJpdLAwEB9fX06nT5hwgQGg/Hw4UP8gMUdO3b06NHDwsKiW7duffr0ef36tXyFGIZNnDiRSqXa2dnZ2trS6fSAgAAKhdK1a1cul5uWlkbctqqDuLjXe/Zu6dtnQPjRyO8mzTh8ZI98Fo1GE4mEMbFRK4LXDx8+trq6ekXIAhqdvnPHocMHT3Xs5LZm7dKiokIqlXo55q61tW2A//DLMXd3bD/o7OTaz3fQ5Zi79naOdZ+LSqVGnj9pa2sfefbar+EXUlLenT4Tjs/auXtjSsq7X37etW3L/rdxr+4/uEMmN/E+x0+NO3nq6Phxk5cvW4sQOhK27/yF05MCpx0PPz92zKQDB3feuHkZv6j8ytWLbG3sD4SeOHTgpIO9U8jKhRWVFQghCoUadf5UVw+vmIt3jh45m5Lybv/Bnfj69+7b+r9bV+fMXhRx4uKM6UGxl8+HHQ1FCFGoVITQwUO7AsdPuRJ7b/WqTbGXLzx6fB9/MXfu2jh6VODx8PNbNu8rr+Bt+CUEf+evCFmQmBi3Inh92OEzri4dQ35amJaWqqg/olK3EXr27Llz586tW7d+/fXXHh4eVlZWdefiV/XF/5zm5ua5ubkIIR0dnVu3bh06dKisrEwikQiFQnyXAWdiYkKlftpkFoulo6Mjn8ViseC4pi9057cb+voGs39YSKFQrKxs+PzKTZtX47NIJJJIJBozeqJ3z6/x/cE9u8IMDAzxr9DpU+fGxEQlJL719RnI5eqSyWQ6nY7PolCptP+//Rkbazv/b4chhIyNTXp075Wc/A9CqLS05O+//1y4ILi7lzdCaPXKTRMCBxsaGTdROomEEPLw8MJXyOfzr1yNnjRxmp/fEISQpYVVSsq7c5ERgwNGFBbmCwSCgQMCbGzsEELzg5b59B1Ip30ajdLJ0QV/iLW17dAho0+fCRcKhWJx9Z3fbsyZ/WM/30EIIQtzy6ys9IuXzv0wawH+qL59BnTq5IYQ8uzWw9zMIjn5H1+fgekZHxgMxrd+Q6lUqoW55bo1W/ML8hBCL14+e5/ybveuI3i7YH7Qshcvn8XERi1bulohf0SlToR+/fqxWKzr16/v2rULwzBvb+958+bp6enhc+sed8BkMgUCAULoyJEj9+/fDwoK6tixI4PBiI6O/v333+WLfXaW9Gd34bimL5SZle7s3AFv8SKE
OnTo/NkCHTt2wW9QqdQaSU3o/u2pH97z+ZX4K19RUd6ip7O3d5Lf1tbWwb+oP37MlslknTu549PZbLanZ8/MrPTmrFBe3ocP7yUSiZent3yWu7vnjZuXq6qqLC2traxsNm1ZPWzoGC8vbydHFw8PT/liTk6u8tu2NvZisbi4uLCouBDDsI4dushnubh0FIlEOTlZNDodIeRQZ0M4HG0+vxLvRiWRSAsXzQzwH+7p2dPM1Fxf3wAhlJSUQKPRPNw/PSmZTHbr0jU1NblFL10jlDoREELe3t7e3t5CofD58+dHjx7dt2/f+vWfep7rfv9XVVUZGxtjGHbnzp3AwMD+/fvLpxNXu8apqhIY6BvK72oxtT5bgM3+dJHVnJyspcvmdPXovvKnXwwNjKRS6bgJAS19us8u7Yn3CZeX8xBCWnUu1qajw23mCuXlVVUJEEKLl86W9zTjmVVaVmJpYRW6Nzwy6uSNG7HHwg+YmJhOnzp30KDBnzZZq/Z5mVpaCKFKfiW+Nhar9mLl+GJCYRWeCPR/bwj+XNbWtgdCT0SeP3n02P7K3Zs6dOg8P2hZxw6dq6oENTU1fv695MtjGIaHhUIodSL89ddfdnZ2pqamWlpaffr0yczMvH//vnxufHx8jx498GjIycn55ptvpFIphmFMJhMPi6qqqmfPnsHPB+2GydQSCGqH/ca/6+p1/8EdDMNWr9qEf6oLCvIVVQP+6aoW1V6NrrKyoqUrwaNh1cqNn3VeGBuZIIR0dfXmzlk0d86ijIy0C9FntmxbZ2Nr7+LcQR4lOPy2jrZOdbWo3lnyAGqIg4PT6pUbMQyLj39z/MShlasWXYi6yWZz6HT6sbBzdZdssqOk+ZS6Z/HKlSvbtm2Lj4/Py8t7+/btkydPunT51PSiUCjR0dGJiYk5OTkHDx5ECPn4+NBoNAcHh4cPH+bm5qanp69fv97Ly4vP52dnZ8PIi+3AytLmQ1qK/GCQFy+fNbRkTY2YwWDKv+R/u3uzkdW2aG/OwsIKIfQuORG/KxAIXjZcRkPs7Z1oNFpZWam1tS3+T0eHy+Xq0un03LyPT548xBeztbVfsnglmUzOSP+AT4mLeyVfSXLyP0wm08jIxN7eiUKhJCS+lc9KTIzjcDh4qQ1JSkpITIzD3+oeHp7Tp80tL+eVlpa4unYSi8UYhslro9MZhoZNdZQ0m1InwooVK8zMzDZv3jx79uw9e/a4ubnNnj1bPnfatGlhYWHz5s1LTExcvXo1ft3XRYsWyWSypUuXbt26ddiwYVOmTDEyMlq0aFFxcYMXsQGK0r//t2VlpQcP705LS330+P6dO9cbWrKDa+fyct7/bl0tKSm+fCX6XXKirq7ehw/v+fzPryyizdFOTU1OSU3GdweaZGFu6ezkevbsr4mJcVlZGVu2rdVreYuaw+EMGTIq4mTY/Qd3cvM+vn7zYlnwPPxAqcKC/HUbgi9En8nKysjOzjx9JpxMJss7IIpLiiJOhn3MzXn69MnVaxf7+foxGAyuDtf/22Fnz5148uRhQUH+7dvXr1yNHj0qUN7JXa9nf/+5as2S3x/d+5ibk5KaHBMTZWpiZmJi6tmth5Ojy+Yta968eZmXn3v33q0fZk+8cjW6pdvYEKXea9DT02tonHWZTNaxY8fQ0NDPptvb23828ddff8VvfLaqbdu21b17/PhxBVWtubp7eQfNWxJ1/tS1a5ecnFyXLl39w+xJ9X7D9+rVZ/y4yWFHQw8d3t2zx9chwRsuXjobGXWSTCZ/dlTiyJETtmxdu/DHGRvW72hmGatXbdqx65fFS2cbGhhNmjTdQN/w3bvElm7LvDmLtTnaR4+FlpQU6+sb9Pqqz4zpQQghDw/PFcvXXbh45kTEEQqFYmNj/8uGnVZWNvijBgeMqORXzguaIhZXf+X9zYL5y/HpCxcEs1jsvaFbebwyYyOT7ybNmBg4tfECvps0XSKpOXJkb3FJEZvN6dzZfeuWUBKJRKFQtm3d
fzhs77oNwSKR0NTUfPLkmWPHTGrpBjZEua4EW1RU1Jx6rl69evTo0evX6/8KEolEMpms7o+OrWZsrLDGmCo6sDh1ynrHZiyoXEQiUY2kRpvz6So+S5bO0dHhrl+3ranHfanhI/uPHhX4/eSZbf1EX+78jrRJP9losSn/naXUbYTWgfMaNNzKVYtKy0qWLl6lp6f/19PHr9+82LJpL9FFqQyVTIRhw4YNGzasobkwPoKGW71q06HDu9esW1ZdLTI3twwJXu/t3ftcZERkVES9y1tb2x3cf6Ldy1RSKpkIjYOfGzWcvr7B6lWbPps4elTg0KGj612eTFJM//qV2HsKWQ+x1DARFNiPANQGg8H47IgmUC+l/vWxdaAfAYBWU642gqGhYTOWaoJQKJTJZKw6x7ECAJpJuRJBIV0AkAUAtJoa7jXExsaeOXOG6CoAUEnK1UZQiPLy8srKBs+xAQA0Qg0TYciQIdCzCEDrqGEiKKR7EgDNpIb9CDdv3rxw4QLRVQCgktSwjVBYWAj9CF9OJkOGFnA8uHrSM2GgBs4oVMNEgH4EhSCRkFiEVZbWaOvTmrE4UBlikbQkt1qLU8+Jj+qZCNCPoCjWrqwKSAS1U15SY9uJ3dBcNexHgOMRFOWrAINHFxU2AiJQEo8v5ffw029orhomQnl5eVlZGdFVqAMGixwYbBO9O4NXVEN0LUAB+GWSywcyh8ww0zVqsN2nXGMoKYRIJIJREhSooqTmz+sl6YkC+y7a5cViostpKxiGya80oX64hvSMhEorF1YPPwNDC3ojS6phIoC2IBHLivPEUkxtu2yDgoJ2796trmdMk8lkAzMajdH0PoEa9ixeuXJFIBBMnDiR6ELUCpVOMrVRz08LrpifYmJDZ7E0fVgNNUyEsrIyOB4BgNZRw0QICAjAMIzoKoCK0dXVhfH41DMRNHxIddA6PB4P+tTU89fHW7duxcTEEF0FUDGdOnUiugSloIZthPz8fOhHAC2VmNji6z6pJTVMhMGDB0M/AmgpR0fVu3pVW1DDRDAyMiK6BKB6UlNTiS5BKahhP0JMTMypU6eIrgIAlaSGbYSKigroRwAtpaenB78+qmcijBkzBsZHAC1VVlYGvz6qZyJwOByiSwCqp0OHDkSXoBTUsB/h5s2bFy9eJLoKoGKSkpKILkEpqGEbAcZZBKDV1DARBg4cKJFIiK4CqJguXboQXYJSUMNEsLCwILoEoHri4+OJLkEpqGE/ApzXAECrqWEbAc5rAK0ARzHj1DARRo8eDec1gJaCo5hxapgI2traRJcAgKpSw36EK1eunDt3jugqgIqBDmmcGrYRYJxF0AofP34kugSloIaJAMcjANBqapgI0PwDrcDhcODcR/XsR7hz587ly5eJrgKoGD6fD+c+qmcbITc3F/oRQEvB6Ow4NUyEkSNHwvgIoKVgdHacGiYCl8slugQAVJUa9iNcvXo1MjKS6CqAioHrNeDUsI1QWloK/QigpeB6DTg1TAQ/Pz84HgGA1lHDRDAzMyO6BKB64NxHHPQjAIDg3Ec5NWwjQD8CaAUXFxeiS1AKapgIw4YNg+MRQEslJycTXYJSUMNE0NfXJ7oEoHrodDocs6ie/QgwziJoBbFYDMcsqmcbAcZZBKDV1DARxo8fD2EPQOuoYSJoaWkRXQJQPXZ2dkSXoBTUsB/h5s2bFy5cILoKoGLS09OJLkEpkNSmgT106NC8vDypVEomf4o5qVRqamp68+ZNoksDysvT01P+E4NMJiORSDKZbMSIEWvWrCG6NGKoTxth9OjRVCpVHgcIITKZPGDAAEKLAsrO2tpafhuPBnNz8xkzZhBaFJHUKhGsrKzqTjE3N580aRJxFQEVEBAQUPeuTCbz8fExNzcnriKCqU8iaGtrDx48mEr91Fcqk8n69u1rYmJCdF1AqU2YMKHuF4mFhcXEiRMJrYhg6pMIeDPB0tISvw0NBNAc2tra/v7++G2ZTNanTx8NP3dWrRIB
/+vinUO+vr6mpqZEVwRUQGBgIN5MsLS0/O6774guh2BqlQgIobFjx1pZWZmbmwcGBhJdC1AN2traI0aMIJFIvXv3hm+RJn59lErR6/tlBVmiqkqVudpySUkxhkmNjY2JLqS5OFyqgSndva8uXUsFAvrVfV5BlqhaKJWI1ecEU5lUmvMxx8zMXN4PpR7YOlQjK0Y3X10ypbkncTWWCMUfq8/vzvbw0eca0pkciuLqBP8iFspK80SJf/GG/mBubs8kupwGVZTUnNmS5eGjr61PY+lQ1eZIFjVWLZCWFYrjHpWMW2xlaMFozkMaTIT8zOonV4r9psAV09rP3TO5XgP1rJyV8ShsXlHN7dMFflMsKFQ4ZVj13D758ZvhhiY2TYdC/c1UmRQ9jC7sN0GjO13b34DvzH+/VCTFlPG7915UYd8xphAHKqp/oPmDC4XNGUio/kTISRXSmWQaQwV2a9WMriE9LUFAdBWfK8qpFgulbK5a7WNrFCqdxGBTspOrmlyy/s98WYHY2IbVBoWBJhjbMMsLa4iu4nMleWJzB3g/qDYTGy1egbjJxepPfZEAk6nMbwtqRSZFVUKle+lFVZhEooz7MqD5pJhMWNX0HxH2CwAAtSARAAC1IBEAALUgEQAAtSARAAC1IBEAALUgEQAAtSARAAC1IBEAALUgEQAAtSARAAC1IBEAALUUlghjx/sf//WQotb2X9NmjNsXug0hFBN7vv/AHm33REBJrFsfvHTZXPz28xdPJ04aNtDPO/l9EtF1qTk44x0okfUbVnh79/7WbyhCaMiQUZKaTyeGnzl7XFtbZ/367VaWNgSXqO4gEYASef8+ydu7N367u5e3fHplZYW7WzdnJ1fiStMUikwEMpl88tSxK1ej+fzKrl27hwSv19PTRwiVlZUeDtv76tXflZUVRkYmo0aMHzVqAkIoMzN96vSxu3cduRQTGR//hkwm+/oMDJq3lEKhIITi49/s278tMzPd1NR85oyghp703v3b0dFnMrPStbRY/Xz9Zs4IYjKbGL2tLtQAAByHSURBVLw0Lu51+K8H09NTMQxzcHCeOT3I3b0bQmjIsL4TA6dlZWU8ffZEJBJ6eXkvX7qGy9VFCL1L/ic8/EBKarJYXG1rYz9jRpCXZ0/5JmzfdiAyMuJ9ShKbzZk1c4G5ueX+/duzsjPMzCyWLlndwbWTAl9kVTFy9MDJk2YUFObff3BbKKzq0qXrsiWrDQwMEUJisfj4r4cePLxTVlZqYGA4oL//1CmzqVSqb38vhNC27RsOHtp17crDdeuD+fzKbVv3D/TzRgilp3+4fCX6W7+hj5/cv3jhtvyvfOlS5NHw/Rejb2tztBsqZsPPIQihzp09oi+e4fHKPDy8flqx4VxkxL37t8Ri8YD+3y6Yv5xEIsVevnDq9LFlS1bv3L1x0MDBc+csqndtWVkZU6aN2bfnmJtbV/wduHHTqkU/hgwfNkY+99DBk85OrqdOH7t371ZRcaGODvfrXn1n//CjlpbWrycOx8RG1bsJu3dvQgj16NHrXGRESUmRlaXNjwtXdOzYBSEkkUjOnD1+/8GdgoI8IyOTsWMm4U+ncIrsWXzw8Lfy8rItm/etXrXpn3/iIk6G4dO37/z5n8S4Nas2hx+NnBg49eDh3U/+eIgQolCpCKGDh3YFjp9yJfbe6lWbYi9fePT4PkKIz+evWrNER5t75NDpVSs3Xr16saSk+L/P+OTJw42bVnl69jx2NDJ4+bpHj+/t2rOp8SKFQuHK1YtsbewPhJ44dOCkg71TyMqFFZUVCCEKhRp1/lRXD6+Yi3eOHjmbkvJu/8GdCKHq6uoVIQtodPrOHYcOHzzVsZPbmrVLi4oK5Zvw64nDi34MuRJ7361L1z17N0dEHPnl512xl+7qaHP3H9ihwFdYhVCp1MjzJ21t7SPPXvs1/EJKyrvTZ8LxWXv3bf3fratzZi+KOHFxxvSg2Mvnw46GIoQuRN1ECC2Yv/zM6St1
13M55q61tW2A//DLMXcnTZwmEAj+/OuRfIHfH9/r/bVPI3GA/5ni4l+Xl5edOXX50IGTL148nTd/qoWF1fnIG2vXbIm9fOHv538hhGg0mkgkjImNWhG8fvjwsQ2tzdra1tjYJCHxLX43Lu6VsbFJfPxr/O7buFfaHG0X5w4XL507Fxkxffq848eigpev++PP38N/PYgQ8vcf3tAmUKjU+IQ3SUkJR4+cjbn4G5eru23HBnyZI2H7zl84PSlw2vHw82PHTDpwcOeNm5db+8dpjCITgc3mLFwQ7OLcoc83/by9v0lKSsCnB81bun37QXf3blZWNgH+wx0dnF+8eCp/VN8+Azp1ckMIeXbrYW5mkZz8D0Lo6bMnlZUVCxcEOzg4ubp0DFmxobKy4r/PeC4qwt2926yZ8y0trLx7fj1r5oK7d/9XWFjQSJGFhfkCgWDggAAbGztbW/v5Qcu2bNpHp9HxuU6OLn5+Q8hksrW17dAhox8/vi8UCikUyp5dYSHB650cXWxt7adPnSsSieRvCISQr89Aa2tbCoXi03dgVVVVQMAIQ0MjOp3ep0//Dx/eK+4FVjE21nb+3w6jUqnGxiY9uvfC/7Ll5bw7v934fvLMfr6DLMwtBw7wHzVywvUbMTU1NTo6XIQQi8Xi6nDrrofL1SWTyXQ6ncvVtbS09uzW47e7N/FZJSXFCQlvv/12WJPFSCSS7yfPolKp9vaO9naOdDp92NDRFArFy7Mnl6uL/5lIJJJIJBozeqJ3z6/NzRobhbyrR/f4hDf47TdvXw4OGBlXJxG6detBJpMH9PcPO3ymn+8gS0vr7l7evj6D8Le9mal5I5sgEgnnzV2ipaXFZDIH9PfPysoQiUR8Pv/K1ejx4yb7+Q2xtLAaPmyM36Ah5yIjWvVnaYIiE6FTRzf5bT1dfUHVpxFEtZhal2IiZ8yaMGbct6PGDEpLT62oKJcv6WDvJL/N4Wjz+ZUIoczMNCaTaWtrj083MjI2Mvr8iixSqfT9+yQvz9q9TQ93T4RQWlpKI0VaWlpbWdls2rL6XGTE+5R3FArFw8NT3n5zqrOnamtjLxaLi4sLqVRqjaQmdP/2KdPGjB7rN3nKSIRQ3U2wtrLFb7DY7Lp32Sy2WCwWi5se3E4t2df5y2pr6+ANsQ9pKRiGdezQRT7LxaWjSCTKyclq5moDAkY8f/5XWVkpQujR4/uGhkae3Zr+7cnMtPbqLCw2W/43Qghx2ByBgC+/i7fSG+fZrUdiwluZTFZWVvrxY/bwYWPKy3l5+bkIoYSEN56ePfEge/b3H/PmTx03IWDUmEHXrl+Sf6s1sgkW5lbyd6O2tg7eh/Lhw3uJRFL3re7u7pmbm1NV1fRIqi2lyH4ELa3aCw2QSCR8HG+JRBIcMh/DsPlBy6ytbCkUyuq1S+s+is741xjy+PUjqoRVDMa/ugO0tD4f+VMkEmEYFnEy7NTpY3Wnl5TWs38hR6FQQveGR0advHEj9lj4ARMT0+lT5w4aNPi/z8LU0kIIVfIrc3Kyli6b09Wj+8qffjE0MJJKpeMm/OsS41Qarckt0kCMf78O+PuhqkqAEGKx2PLp+GsuFDb3zf1Nb18OR/v+/dujRwc+enRv0MDBZHLTX2w0Or2Ru3X/Rmw2p8m1devWo5JfmZGRlpmV7mDvxOXqurh0jI97jRAqKMjHE2H/gR2/3b25+MefOnV2Z9AZkVEn7z+43eQmfPbmwWvDX7TFS2f//6fqU8GlZSUsloJHxG3z3xqSkhLS0lLl3TAIoXJemZmpeeOPYjKYdWMbIYS3Hf61DJNJpVJHjZwwOGBE3em6evqNr1xXV2/unEVz5yzKyEi7EH1my7Z1Nrb2Ls4d5O9XHH5bR1vn/oM7GIatXrUJf4sXFOQ3b9NBPfDP239f5+Z8DnE0Gm1Af/8Hv//Wr59fXPzrpUtWtVmxDTIwMLSxsUtIfPvhw/suXboihLp09ohPeCOTySzMLc3NLDAMu/m/K5O/
mzlw4Kcvj7rv55ZuAv7irFq50d7Ose50YyMThW9amx+zWC2uRgjp/P+eYWJiXF5+bpNfm9ZWthKJJCMjDb+blpZaWlry2TJkMtnJybWgIM/a2hb/Z2ZmQaFSdbR1Gllzbt7HJ08e4rdtbe2XLF5JJpMz0j/gU+LiXsmXTE7+h8lkGhmZ1NSIGQym/BtPvgcIWsHe3olCodTthUlMjONwOBYWVvjd5jSpBgeMSEyMu3jpXMeOXSwtrduy3gZ5evZMSHz7Nu4V/kNVl84ecfGv4/9/l0EqlWIYJn/b412JdTetRZtgb+9Eo9HKykrlb3UdHS6Xq0v/d0tHIdo8ERwdnOl0ekxsVElJ8fMXT0P3b+/u5Z2dk4nvRDXE27s3i8UK3b896V1ifPybvaFb9er75p8w/vtHj++fi4zIzs5MSU3evGXNwh9nCASNXQGlsCB/3YbgC9FnsrIysrMzT58JJ5PJ8l3H4pKiiJNhH3Nznj59cvXaxX6+fgwGo4Nr5/Jy3v9uXS0pKb58JfpdcqKurt6HD+/5fH4jTwTqxdXh+n877Oy5E0+ePCwoyL99+/qVq9GjRwVSqVQGg8FgMN7GvUpJTZZIJI2sxM7OoUOHzucvnMaPZSJEN4/ur18/z8xM79LZAyHUqbN7Tk7Wi5dP8USg0WhOji6371z/mJvz4UPKytWLevb8urKyIisrA9+0Fm0Ch8MZMmRUxMmw+w/u5OZ9fP3mxbLgeVu3r2+L7WrzvQZdXb3g5evCww/c+e2Gs3OHFcHri4oLf9n405Jlc375eVdDj+JydX/esPPAwZ0Lf5xhYmI2a+b8i5fO/ffbo883/Vb+9EtkVMSJiCNsNqdzZ/c9u8LYbHYDa0UIIQ8PzxXL1124eOZExBEKhWJjY//Lhp1WVp+OhBscMKKSXzkvaIpYXP2V9zcL5i9HCPXq1Wf8uMlhR0MPHd7ds8fXIcEbLl46Gxl1kkwmjxkzSUGvkwZZuCCYxWLvDd3K45UZG5l8N2nGxMCp+KzACVOjzp/866/HZ0438dNan2/6paen9u0zoF1Kroe7u2dpaYmVlY2urh5CSJujbWtrn57+wcPDC19g+bK1O3b+PH3GOFNT8+nT5nZw7ZyY8HZu0Pfhx6LwveYWbcK8OYu1OdpHj4WWlBTr6xv0+qrPjOkNHqTzJeq/Euzft0qrRcjDt4kdcjUzfGT/0aMCv588k8Aa/vmLJxZKvhlpSGAN//Xmd15JvqTHt8pSlUwmC1owzdnJddGPIUTX0krtvwlvfy+lUpF3QBMfajiKGagSkUiUm5sTExuVlZW+Yd12ostpDSXfBDVMhPj4NytX13/8KULozOkrnx0AA1RIRmbavKApNjZ2m37ZU/cQlf9r796DmjrzPoA/ITlJyD0QAlEEwUsrFlotdRdb4W1rl2pBrZdRASkio3SLXezra7et4+u7YzvO4lSmbF/psAUWsDhjtbpaVurqTr1wEa1L0cUXNGAFgRhuud/P+0d2gCLXmpPnJPl9/nAGTMJXA1/O85znPCd59X+M95Tf7/mfF19MmO4X+qqytPLY2EuAwsIiPi8ome4LDhnvn0ATXjhqsFqtI5cPjRIQEDh0UpeGYNTwy4y5yN1JKBT9gjl5g8Ew3hIJFovlvNrFs/juqIEgCOcVNcB3uPwd5/F4Ll/84xFgDyUAwDBoBADAMGgEAMAwaAQAwDBoBADAMGgEAMAwaAQAwDBoBADAsLEbgcFEDKbbswCE/JgMJot2SyoZDMRkwS8Pz8ZkTWWvqXEagSdk6QcmukAdUEQ3YOXyaVfGAjFL22fGnQI8EV2/lSec/Ftr7EaQKdgmvZ2CVGASRp1dNsP1G+M8ocAZXPh+8HRGvS1QMcmtTMZthOBwrh8TPbgz0WZEwOV67huNWmvY07RbTi8JYknl7P+7PsYG+cAjdLQYGCSpiBy9revjxh1YJGcpmq8NtN2CjcPcpKNFf/Ni75q3J7pNAEbLU+Q97XooBU/U
flvXXNefvH2S7Y6dxr4aesi5v3T391gFUpa/gJjgYeBJWM0OdadREkQkZc1g0Hv+7kKlqrfLzOGzhFK2w+6ju857EJPeNqi2BISwV2SETPEpkzQCQmhQbVN3mvQaj5lorK+vN5lMCQnT3iQDF56IFTSDIw7yjM7V9NrUD836QZuX3YciPz//nXfeIQjPeBemSCAiZDM5Itk0Nj2Y/KFiGUssm+pe+nTww70uk1Ybs8zz9rTwCKJAlijQC7fVaP3wwoJf7/HNPRFGovdBKgDAvaARAADDvLARWCyWl40GgRsIhcIprenzdl74X0AQBLy1YLr4fD6dt+R1Gy/8yWEymX19E91CDoBRHA5He3s757H7MvsgL2wEkUhksVhwpwCexGAwKBQK3ClowQsbISAgoLOzE3cK4Em6u7v9/f1xp6AFL2yE0NBQaAQwLR0dHaGhobhT0IIXNkJISIhcLp/4duMAjKRSqaKjo3GnoAUvbATnDXkaGhpwpwAe4+rVq3PnzsWdgha8sxGWLFnS1NSEOwXwGEqlcvHixbhT0IJ3NkJ8fHx1dTXuFMAz1NXVhYWFwRUNTt7ZCLNnzxYKhbdu3cIdBHiAs2fPJicn405BF97ZCAihtWvXXrlyBXcKQHd2u/3hw4evv/467iB04bWNsGrVqhMnTsDiRTCxI0eOLFu2DHcKGpl8xxTPdfr06cbGxn379uEOAmhKr9evWLHi0qVLuIPQiNceIyCEVq9erdFoYLUSGM9XX321e/du3CnoxZsbASGUk5Pz7rvv4k4B6Ki2traxsXHVqlW4g9CLN48anCorKzs7O+FXARglNja2oaEBroAexcuPERBCmzdvNplM169fxx0E0MjBgwcLCgqgDh7n/Y2AENq7d+9nn312+/Zt3EEALezbty86OjouLg53EDry/lHDkPXr1x88eBCWr/u4Tz755Kmnnlq3bh3uIDTlE8cITl9//XVRUdHly5dxBwHYvP/++wkJCVAHE/ChYwSnrKysxMTEDRs24A4C3C0jIyM9Pf2VV17BHYTWfK4RnLNKJEl+8MEHuIMAN+nq6tqwYcORI0dgE4RJ+WIjOEcQTU1NO3fulMlkuLMAap08ebKqqqqgoAD2TZsK5v79+3FnwCAqKkoul+/YscPf3z8qKgp3HEAJu93+3nvvGQyGQ4cOwS08psiHZhZHWbhwYXV1dWtr686dO61WK+44wMUuXrwYFxe3bt26jz76CHcWT+Kjo4aRampqSkpKEhMT169fjzsLcAGdTpeXl2cwGPLy8nBn8Ty+e4wwZOnSpUVFRXfv3k1NTW1ubsYdBzyR8vLypKSk+Ph4qINfBo4Rht25c+fAgQNRUVEffvgh7ixg2n788cf9+/fHx8fn5ubizuLBoBFGO3HixNmzZ1999dW0tDTcWcCUdHR05Ofnczic7du3h4eH447j2aARxpafn3/u3Lnc3FzYb4vOTCZTfn5+bW1tbm7uyy+/jDuON4BGGJdarT58+PD9+/dzc3NjY2NxxwGjHTt2rKCgYNeuXTAl7ELQCJNobm4uLy9Xq9XZ2dmwpT9NlJaWFhYW5uTkwMjO5aARpuTGjRuFhYUEQWRnZ8fExOCO47sqKioKCws3btyYnZ0Ni46oAI0wDXV1dV988YVEIklLS3v++edxx/EtZWVlDQ0NERER2dnZcLcV6kAjTFt9ff2XX35ps9kyMzNfeukl3HG8nM1mKy4uLi4u3rx5c2ZmplAoxJ3Iy0Ej/EKNjY0lJSUsFmv58uVwPoIKfX19x48fLy4uzszM3LZtG4vFwp3IJ0AjPJHW1tbS0tIffvjhrbfe2rRpE+44XuLu3bvl5eU1NTVZWVkbN27EHce3QCO4gEqlKisrO3XqVEpKSlpamkgkwp3IU127du3ixYs3b97csmVLUlIS7ji+CBrBZaxWa1lZWUVFRUJCQnp6emRk5KgHvPbaa+fPn8eUji72798/5gX4VVVV5eXlEolk69atS5Ys
wZAMIASNQIkzZ85cuXJFr9enpqaO3PA3NjY2PDy8tLTUZ6fHysvLS0pK7Hb7999/7/yMzWarqKj45ptvYmJitmzZMn/+fNwZfR00AlXq6uoqKip6enpSU1PXrFmTmJjY29tLkuTixYuLiopwp8Pg6tWrH3/8sUqlQghdv369s7Pz6NGjJ0+eTE1N3bJli0QiwR0QIGgEyimVyqNHj164cEGr1TrvF0IQRGJioq9tXdXV1ZWTk3P//n3nh2w2WyaTpaamwsQh3UAjuMPKlSudvxudRCJRenp6RkYG1lBulZKScufOHT+/f+/HQZLkjRs3cIcCY4AdU9yhp6dn5Icajeb48eNDY2mvt2fPnnv37g3VAUKIwWCsXr0aaygwNjhGoFxiYqJarWYwGCRJOv90/kgEBgZWV1eP+RSTwdHdZtJrbAatjSSRSe9we+opYfv7cf39eEKmUEooIrljPubAgQNVVVUmk8k5aBq61SKDwWhoaHBvXjA5WAdGuaioKCaTyefzeTwel8vlcDhcLpfP5z9+xsFiIm/VDLTe1Pc/skiDeYjB8COYBJew2+h6w1IS2a1Wu9XIZjMfPeiMWCiYv0gQEc0f+ZC9e/dGR0fr9XqtVmu1Wi0Wi06nMxgMDgdNa87HwTECXVz9a29zg1Yk5wtkfJ6EgzvOtDlsDo3KYDeb9f2m+Ddls6PgYiSPBI2AX8sN/XcVXSHzpbLZ3nAGzqyzPlL2SoNYb2QG484Cpg0aAbOaM70/tVpDFgThDuJixkGzsuFhyvvhUjnsYuBJoBFwqjvX39Hm8I5Dg8eRDtRW37HxP2fyxTBd5TGgEbD5e6Wqv88vKFKKOwi1lPUdq3coAhVs3EHAlMB6BDwaLw30qZHX1wFCKPJXoZV//Al3CjBV0AgYdLebWhrN8rmBuIO4yby40DN/7sadAkwJNAIG/ziu5st86PJHjpDQDJB3GrS4g4DJQSO4m7JJ5yD9PHHFwZOQzwm88lc17hRgctAI7tZUo5VF0ne8kFew+eQZ199DleAypTNEzfUal78ycC1oBLcaeGRVd1o4fF88G8cWcJobdLhTgElAI7iVskknCPTR5b1CmX+X0uCw484BJuSLv6ww6mq3iIKpmlO0221//77kn03n+we6JOLg+KWbly5ZhxDqUbXlFWzK3vq/l2uPtf3U6Mfwe/aZ5atW7GIymQgh5f1/fnP2kErVFiCdsWL52xRlcwqOFN1v1kc8w5/CYwEe0Ahu1aU0hD9P1RqEs9UF9ddPvZm8JyIspuXetdPffsr0Y/0qdjWTyUIInf7b4XXJe7aG5bXea/iiNCci/LnnopcbTbrSo/+lCJn3u7dL7Xbrt999rtVSOP9nszH6VdYI6r4AeGIwanArk95OcJhUvLLRpKup/zrhpbQXFr0hC5y1dMm62EVvXLxcNvSAZxe+MjssBiE0b84LgdKZHZ3NCKHmlqsGo+bNpN0zQubNmhm1ae1/G4wUTv6xOExtv4261wdPDhrBfQxaO5vLRNTsdfCwq8XusM2fM7yv+ZyIxb19HWazwfmhImTe0F9xuUKjSescUBAEN0T+743kJWK5WCSnJB9CCCGCy9INQiPQGowa3IfBQAzKGtj5k19Y/FvEGKocEiGk1fU6PyBYP1sBQSLS+Sw28bO9jzgcaic+GQy67v4CEIJGcCt/AdNsoGqqncvlI4RSNvxBETxn5OfF4uDBwZ7xnsUmuCbTz84IGo0Uriy0mm1SOSWDJuAq0AhuxeUzbWY7i4KpBEXIPCaT0On65M+86vyMTt+PEINgTXTRoTwo3O6wdauUzoFDV8/doWMKKtgsdoEEvuVoDd4et5o5h2c12ahoBH+uIO6FN6v/UcTnS2bNjOof6D79t8MSsXxb2qcTPOvp+S9y2LxTZw+t/M07dru16vwRgSDA5dmGsFhIGgSXRdMaNIJbBYez7/1L7y+m5KKG5Nd/588VfvvdnzRatVAQGPXUshWvTbK+QMCXZKT8
8VTVp5//ebtUoli5/LeXao85JyCo0HNvMHyHjKIXBy4BO6a41cAj68k/PYz8dSjuIBjoeo2WAc3anBm4g4CJwNlHt5IEEQEKjsXgi2fgzDrzgiUC3CnAJGDU4G4xS4W11b0znxl3n+L8I2+p+zoe/7zDYUck6ccc+y37YNdJPk/sqpAXL/1l5OqmkRiIQY4zrPh97gkBf+w9I21me3+HZkE2rFekOxg1YFCZ90A8K5A3zmzCwKDK4RjjIMJqNZMIsYmxnyURh4y8jdoTMhq1ziVMjzMYtTz/sS/NEIuCnddKPK6r+dGieN7TsSJXJQQUgUbAoEtpqqselM72lTk2i8Fm6e9PygrBHQRMDuYRMFBEciOiOGolhWf+aaXlyoM3tkEdeAZoBDyeS5CIJaS6bQB3EMq11Xds2j0L1i57Chg14FRb1d/9wCEN8847uCASKa91bMydyYd1ip4DjhFwilsplSuQquUR7iCuZ9Jabp1vW5OtgDrwLHCMgF/rTV11WfeMBQEBs1x2+hAjs97a294nlTFXZMCdYD0PNAItkCS6elrdclMnDBLwZbzxTkzSmcNOalQGh8WsfWRYtkYWGQ1bp3kkaAQaMekdt2sHW2/qNH1WcTCP4efnRzAJDuFw0PQ9Ih2k3WqzW2wE20/Vrp29kD9/kXDOs9AFHgwagY6MOntXu0k/YNNrbIhEBh1NNzDm+jM5PD++iCUMYM2c6487DnABaAQAwDA41wAAGAaNAAAYBo0AABgGjQAAGAaNAAAYBo0AABj2/xL4dskDlaFcAAAAAElFTkSuQmCC","text/plain":["<IPython.core.display.Image object>"]},"metadata":{},"output_type":"display_data"}],"source":["\n","from IPython.display import Image, display\n","\n","display(Image(compiled_graph.get_graph().draw_mermaid_png()))"]},{"cell_type":"code","execution_count":47,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["\n","Processing legitimate email...\n","Alfred is processing an email from Joker with subject: Found you Batman ! \n","ham\n","\n","==================================================\n","Sir, you've received an email from Joker.\n","Subject: Found you Batman ! \n","\n","I've prepared a draft response for your review:\n","--------------------------------------------------\n","Subject: Re: Found you Batman!\n","\n","Dear Mr. Joker,\n","\n","Thank you for reaching out. Your message has been received and noted. Mr. 
Wayne is currently unavailable, but rest assured, your concerns will be addressed in due course.\n","\n","Kind regards,\n","\n","Alfred Pennyworth \n","Personal Assistant to Bruce Wayne\n","==================================================\n","\n","\n","Processing spam email...\n","Alfred is processing an email from Crypto bro with subject: The best investment of 2025\n","spam\n","Alfred has marked the email as spam.\n","The email has been moved to the spam folder.\n"]}],"source":[" # Example emails for testing\n","legitimate_email = {\n"," \"sender\": \"Joker\",\n"," \"subject\": \"Found you Batman ! \",\n"," \"body\": \"Mr. Wayne,I found your secret identity ! I know you're batman ! Ther's no denying it, I have proof of that and I'm coming to find you soon. I'll get my revenge. JOKER\"\n","}\n","\n","spam_email = {\n"," \"sender\": \"Crypto bro\",\n"," \"subject\": \"The best investment of 2025\",\n"," \"body\": \"Mr Wayne, I just launched an ALT coin and want you to buy some !\"\n","}\n","# Process legitimate email\n","print(\"\\nProcessing legitimate email...\")\n","legitimate_result = compiled_graph.invoke({\n"," \"email\": legitimate_email,\n"," \"is_spam\": None,\n"," \"draft_response\": None,\n"," \"messages\": []\n","})\n","\n","# Process spam email\n","print(\"\\nProcessing spam email...\")\n","spam_result = compiled_graph.invoke({\n"," \"email\": spam_email,\n"," \"is_spam\": None,\n"," \"draft_response\": None,\n"," \"messages\": []\n","}) "]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":[]}],"metadata":{"kernelspec":{"display_name":"Python 3","language":"python","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.11.6"}},"nbformat":4,"nbformat_minor":2}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"metadata": {},
|
5 |
+
"cell_type": "markdown",
|
6 |
+
"source": [
|
7 |
+
"# Alfred the Mail Sorting Butler: A LangGraph Example\n",
|
8 |
+
"\n",
|
9 |
+
"In this notebook, **we're going to build a complete email processing workflow using LangGraph**.\n",
|
10 |
+
"\n",
|
11 |
+
"This notebook is part of the <a href=\"https://www.hf.co/learn/agents-course\">Hugging Face Agents Course</a>, a free course from beginner to expert, where you learn to build Agents.\n",
|
12 |
+
"\n",
|
13 |
+
"\n",
|
14 |
+
"\n",
|
15 |
+
"## What You'll Learn\n",
|
16 |
+
"\n",
|
17 |
+
"In this notebook, you'll learn how to:\n",
|
18 |
+
"1. Set up a LangGraph workflow\n",
|
19 |
+
"2. Define state and nodes for email processing\n",
|
20 |
+
"3. Create conditional branching in a graph\n",
|
21 |
+
"4. Connect an LLM for classification and content generation\n",
|
22 |
+
"5. Visualize the workflow graph\n",
|
23 |
+
"6. Execute the workflow with example data"
|
24 |
+
]
|
25 |
+
},
|
26 |
+
{
|
27 |
+
"metadata": {},
|
28 |
+
"cell_type": "code",
|
29 |
+
"outputs": [],
|
30 |
+
"execution_count": null,
|
31 |
+
"source": [
|
32 |
+
"# Install the required packages\n",
|
33 |
+
"%pip install -q langgraph langchain_openai langchain_huggingface"
|
34 |
+
]
|
35 |
+
},
|
36 |
+
{
|
37 |
+
"metadata": {},
|
38 |
+
"cell_type": "markdown",
|
39 |
+
"source": [
|
40 |
+
"## Setting Up Our Environment\n",
|
41 |
+
"\n",
|
42 |
+
"First, let's import all the necessary libraries. LangGraph provides the graph structure, while LangChain offers convenient interfaces for working with LLMs."
|
43 |
+
]
|
44 |
+
},
|
45 |
+
{
|
46 |
+
"metadata": {},
|
47 |
+
"cell_type": "code",
|
48 |
+
"outputs": [],
|
49 |
+
"execution_count": null,
|
50 |
+
"source": [
|
51 |
+
"import os\n",
|
52 |
+
"from typing import TypedDict, List, Dict, Any, Optional\n",
|
53 |
+
"from langgraph.graph import StateGraph, START, END\n",
|
54 |
+
"from langchain_openai import ChatOpenAI\n",
|
55 |
+
"from langchain_core.messages import HumanMessage\n",
|
56 |
+
"\n",
|
57 |
+
"# Set your OpenAI API key here\n",
|
58 |
+
"os.environ[\"OPENAI_API_KEY\"] = \"sk-xxxxx\" # Replace with your actual API key\n",
|
59 |
+
"\n",
|
60 |
+
"# Initialize our LLM\n",
|
61 |
+
"model = ChatOpenAI(model=\"gpt-4o\", temperature=0)"
|
62 |
+
]
|
63 |
+
},
|
64 |
+
{
|
65 |
+
"metadata": {},
|
66 |
+
"cell_type": "markdown",
|
67 |
+
"source": [
|
68 |
+
"## Step 1: Define Our State\n",
|
69 |
+
"\n",
|
70 |
+
"In LangGraph, **State** is the central concept. It represents all the information that flows through our workflow.\n",
|
71 |
+
"\n",
|
72 |
+
"For Alfred's email processing system, we need to track:\n",
|
73 |
+
"- The email being processed\n",
|
74 |
+
"- Whether it's spam or not\n",
|
75 |
+
"- The draft response (for legitimate emails)\n",
|
76 |
+
"- Conversation history with the LLM"
|
77 |
+
]
|
78 |
+
},
|
79 |
+
{
|
80 |
+
"metadata": {},
|
81 |
+
"cell_type": "code",
|
82 |
+
"outputs": [],
|
83 |
+
"execution_count": null,
|
84 |
+
"source": [
|
85 |
+
"class EmailState(TypedDict):\n",
|
86 |
+
" email: Dict[str, Any]\n",
|
87 |
+
" is_spam: Optional[bool]\n",
|
88 |
+
" spam_reason: Optional[str]\n",
|
89 |
+
" email_category: Optional[str]\n",
|
90 |
+
" email_draft: Optional[str]\n",
|
91 |
+
" messages: List[Dict[str, Any]]"
|
92 |
+
]
|
93 |
+
},
|
94 |
+
{
|
95 |
+
"metadata": {},
|
96 |
+
"cell_type": "markdown",
|
97 |
+
"source": "## Step 2: Define Our Nodes"
|
98 |
+
},
|
99 |
+
{
|
100 |
+
"metadata": {},
|
101 |
+
"cell_type": "code",
|
102 |
+
"outputs": [],
|
103 |
+
"execution_count": null,
|
104 |
+
"source": [
|
105 |
+
"def read_email(state: EmailState):\n",
|
106 |
+
" email = state[\"email\"]\n",
|
107 |
+
" print(f\"Alfred is processing an email from {email['sender']} with subject: {email['subject']}\")\n",
|
108 |
+
" return {}\n",
|
109 |
+
"\n",
|
110 |
+
"\n",
|
111 |
+
"def classify_email(state: EmailState):\n",
|
112 |
+
" email = state[\"email\"]\n",
|
113 |
+
"\n",
|
114 |
+
" prompt = f\"\"\"\n",
|
115 |
+
"As Alfred, the butler of Mr. Wayne (whose SECRET identity is Batman), analyze this email and determine if it is spam or legitimate, and whether it should be brought to Mr. Wayne's attention.\n",
|
116 |
+
"\n",
|
117 |
+
"Email:\n",
|
118 |
+
"From: {email['sender']}\n",
|
119 |
+
"Subject: {email['subject']}\n",
|
120 |
+
"Body: {email['body']}\n",
|
121 |
+
"\n",
|
122 |
+
"First, determine if this email is spam.\n",
|
123 |
+
"Answer with SPAM if it is spam, or HAM if it is legitimate. Only return the answer.\n",
|
124 |
+
"Answer :\n",
|
125 |
+
" \"\"\"\n",
|
126 |
+
" messages = [HumanMessage(content=prompt)]\n",
|
127 |
+
" response = model.invoke(messages)\n",
|
128 |
+
"\n",
|
129 |
+
" response_text = response.content.lower()\n",
|
130 |
+
" print(response_text)\n",
|
131 |
+
" is_spam = \"spam\" in response_text and \"ham\" not in response_text\n",
|
132 |
+
"\n",
|
133 |
+
" if not is_spam:\n",
|
134 |
+
" new_messages = state.get(\"messages\", []) + [\n",
|
135 |
+
" {\"role\": \"user\", \"content\": prompt},\n",
|
136 |
+
" {\"role\": \"assistant\", \"content\": response.content}\n",
|
137 |
+
" ]\n",
|
138 |
+
" else:\n",
|
139 |
+
" new_messages = state.get(\"messages\", [])\n",
|
140 |
+
"\n",
|
141 |
+
" return {\n",
|
142 |
+
" \"is_spam\": is_spam,\n",
|
143 |
+
" \"messages\": new_messages\n",
|
144 |
+
" }\n",
|
145 |
+
"\n",
|
146 |
+
"\n",
|
147 |
+
"def handle_spam(state: EmailState):\n",
|
148 |
+
" print(f\"Alfred has marked the email as spam.\")\n",
|
149 |
+
" print(\"The email has been moved to the spam folder.\")\n",
|
150 |
+
" return {}\n",
|
151 |
+
"\n",
|
152 |
+
"\n",
|
153 |
+
"def drafting_response(state: EmailState):\n",
|
154 |
+
" email = state[\"email\"]\n",
|
155 |
+
"\n",
|
156 |
+
" prompt = f\"\"\"\n",
|
157 |
+
"As Alfred the butler, draft a polite preliminary response to this email.\n",
|
158 |
+
"\n",
|
159 |
+
"Email:\n",
|
160 |
+
"From: {email['sender']}\n",
|
161 |
+
"Subject: {email['subject']}\n",
|
162 |
+
"Body: {email['body']}\n",
|
163 |
+
"\n",
|
164 |
+
"Draft a brief, professional response that Mr. Wayne can review and personalize before sending.\n",
|
165 |
+
" \"\"\"\n",
|
166 |
+
"\n",
|
167 |
+
" messages = [HumanMessage(content=prompt)]\n",
|
168 |
+
" response = model.invoke(messages)\n",
|
169 |
+
"\n",
|
170 |
+
" new_messages = state.get(\"messages\", []) + [\n",
|
171 |
+
" {\"role\": \"user\", \"content\": prompt},\n",
|
172 |
+
" {\"role\": \"assistant\", \"content\": response.content}\n",
|
173 |
+
" ]\n",
|
174 |
+
"\n",
|
175 |
+
" return {\n",
|
176 |
+
" \"email_draft\": response.content,\n",
|
177 |
+
" \"messages\": new_messages\n",
|
178 |
+
" }\n",
|
179 |
+
"\n",
|
180 |
+
"\n",
|
181 |
+
"def notify_mr_wayne(state: EmailState):\n",
|
182 |
+
" email = state[\"email\"]\n",
|
183 |
+
"\n",
|
184 |
+
" print(\"\\n\" + \"=\" * 50)\n",
|
185 |
+
" print(f\"Sir, you've received an email from {email['sender']}.\")\n",
|
186 |
+
" print(f\"Subject: {email['subject']}\")\n",
|
187 |
+
" print(\"\\nI've prepared a draft response for your review:\")\n",
|
188 |
+
" print(\"-\" * 50)\n",
|
189 |
+
" print(state[\"email_draft\"])\n",
|
190 |
+
" print(\"=\" * 50 + \"\\n\")\n",
|
191 |
+
"\n",
|
192 |
+
" return {}\n",
|
193 |
+
"\n",
|
194 |
+
"\n",
|
195 |
+
"# Define routing logic\n",
|
196 |
+
"def route_email(state: EmailState) -> str:\n",
|
197 |
+
" if state[\"is_spam\"]:\n",
|
198 |
+
" return \"spam\"\n",
|
199 |
+
" else:\n",
|
200 |
+
" return \"legitimate\"\n",
|
201 |
+
"\n",
|
202 |
+
"\n",
|
203 |
+
"# Create the graph\n",
|
204 |
+
"email_graph = StateGraph(EmailState)\n",
|
205 |
+
"\n",
|
206 |
+
"# Add nodes\n",
|
207 |
+
"email_graph.add_node(\"read_email\", read_email) # the read_email node executes the read_email function\n",
|
208 |
+
"email_graph.add_node(\"classify_email\", classify_email) # the classify_email node will execute the classify_email function\n",
|
209 |
+
"email_graph.add_node(\"handle_spam\", handle_spam) #same logic\n",
|
210 |
+
"email_graph.add_node(\"drafting_response\", drafting_response) #same logic\n",
|
211 |
+
"email_graph.add_node(\"notify_mr_wayne\", notify_mr_wayne) # same logic\n"
|
212 |
+
]
|
213 |
+
},
|
214 |
+
{
|
215 |
+
"metadata": {},
|
216 |
+
"cell_type": "markdown",
|
217 |
+
"source": "## Step 3: Add Our Edges and Routing"
|
218 |
+
},
|
219 |
+
{
|
220 |
+
"metadata": {},
|
221 |
+
"cell_type": "code",
|
222 |
+
"outputs": [],
|
223 |
+
"execution_count": null,
|
224 |
+
"source": [
|
225 |
+
"# Add edges\n",
|
226 |
+
"email_graph.add_edge(START, \"read_email\") # After starting we go to the \"read_email\" node\n",
|
227 |
+
"\n",
|
228 |
+
"email_graph.add_edge(\"read_email\", \"classify_email\") # after_reading we classify\n",
|
229 |
+
"\n",
|
230 |
+
"# Add conditional edges\n",
|
231 |
+
"email_graph.add_conditional_edges(\n",
|
232 |
+
" \"classify_email\", # after classifying, we run the \"route_email\" function\n",
|
233 |
+
" route_email,\n",
|
234 |
+
" {\n",
|
235 |
+
" \"spam\": \"handle_spam\", # if it returns \"spam\", we go to the \"handle_spam\" node\n",
|
236 |
+
" \"legitimate\": \"drafting_response\" # and if it's legitimate, we go to the \"drafting_response\" node\n",
|
237 |
+
" }\n",
|
238 |
+
")\n",
|
239 |
+
"\n",
|
240 |
+
"# Add final edges\n",
|
241 |
+
"email_graph.add_edge(\"handle_spam\", END) # after handling spam we always end\n",
|
242 |
+
"email_graph.add_edge(\"drafting_response\", \"notify_mr_wayne\")\n",
|
243 |
+
"email_graph.add_edge(\"notify_mr_wayne\", END) # after notifying Mr. Wayne, we can end too\n"
|
244 |
+
]
|
245 |
+
},
|
246 |
+
{
|
247 |
+
"metadata": {},
|
248 |
+
"cell_type": "markdown",
|
249 |
+
"source": "## Step 4: Compile and Visualize the Graph"
|
250 |
+
},
|
251 |
+
{
|
252 |
+
"metadata": {},
|
253 |
+
"cell_type": "code",
|
254 |
+
"outputs": [],
|
255 |
+
"execution_count": null,
|
256 |
+
"source": [
|
257 |
+
"# Compile the graph\n",
|
258 |
+
"compiled_graph = email_graph.compile()"
|
259 |
+
]
|
260 |
+
},
|
261 |
+
{
|
262 |
+
"metadata": {},
|
263 |
+
"cell_type": "code",
|
264 |
+
"outputs": [],
|
265 |
+
"execution_count": null,
|
266 |
+
"source": [
|
267 |
+
"from IPython.display import Image, display\n",
|
268 |
+
"\n",
|
269 |
+
"display(Image(compiled_graph.get_graph().draw_mermaid_png()))"
|
270 |
+
]
|
271 |
+
},
|
272 |
+
{
|
273 |
+
"metadata": {},
|
274 |
+
"cell_type": "code",
|
275 |
+
"outputs": [],
|
276 |
+
"execution_count": null,
|
277 |
+
"source": [
|
278 |
+
" # Example emails for testing\n",
|
279 |
+
"legitimate_email = {\n",
|
280 |
+
" \"sender\": \"Joker\",\n",
|
281 |
+
" \"subject\": \"Found you Batman ! \",\n",
|
282 |
+
" \"body\": \"Mr. Wayne, I found your secret identity ! I know you're Batman ! There's no denying it, I have proof of that and I'm coming to find you soon. I'll get my revenge. JOKER\"\n",
|
283 |
+
"}\n",
|
284 |
+
"\n",
|
285 |
+
"spam_email = {\n",
|
286 |
+
" \"sender\": \"Crypto bro\",\n",
|
287 |
+
" \"subject\": \"The best investment of 2025\",\n",
|
288 |
+
" \"body\": \"Mr Wayne, I just launched an ALT coin and want you to buy some !\"\n",
|
289 |
+
"}\n",
|
290 |
+
"# Process legitimate email\n",
|
291 |
+
"print(\"\\nProcessing legitimate email...\")\n",
|
292 |
+
"legitimate_result = compiled_graph.invoke({\n",
|
293 |
+
" \"email\": legitimate_email,\n",
|
294 |
+
" \"is_spam\": None,\n",
|
295 |
+
" \"spam_reason\": None,\n",
|
296 |
+
" \"email_category\": None,\n",
|
297 |
+
" \"email_draft\": None,\n",
|
298 |
+
" \"messages\": []\n",
|
299 |
+
"})\n",
|
300 |
+
"\n",
|
301 |
+
"# Process spam email\n",
|
302 |
+
"print(\"\\nProcessing spam email...\")\n",
|
303 |
+
"spam_result = compiled_graph.invoke({\n",
|
304 |
+
" \"email\": spam_email,\n",
|
305 |
+
" \"is_spam\": None,\n",
|
306 |
+
" \"spam_reason\": None,\n",
|
307 |
+
" \"email_category\": None,\n",
|
308 |
+
" \"email_draft\": None,\n",
|
309 |
+
" \"messages\": []\n",
|
310 |
+
"})"
|
311 |
+
]
|
312 |
+
},
|
313 |
+
{
|
314 |
+
"metadata": {},
|
315 |
+
"cell_type": "markdown",
|
316 |
+
"source": [
|
317 |
+
"## Step 5: Inspecting Our Mail Sorting Agent with Langfuse π‘\n",
|
318 |
+
"\n",
|
319 |
+
"As Alfred fine-tunes the Mail Sorting Agent, he's growing weary of debugging its runs. Agents, by nature, are unpredictable and difficult to inspect. But since he aims to build the ultimate Spam Detection Agent and deploy it in production, he needs robust traceability for future monitoring and analysis.\n",
|
320 |
+
"\n",
|
321 |
+
"To do this, Alfred can use an observability tool such as [Langfuse](https://langfuse.com/) to trace and monitor the inner steps of the agent.\n",
|
322 |
+
"\n",
|
323 |
+
"First, we need to install the necessary dependencies:"
|
324 |
+
]
|
325 |
+
},
|
326 |
+
{
|
327 |
+
"metadata": {},
|
328 |
+
"cell_type": "code",
|
329 |
+
"outputs": [],
|
330 |
+
"execution_count": null,
|
331 |
+
"source": "%pip install -q langfuse"
|
332 |
+
},
|
333 |
+
{
|
334 |
+
"metadata": {},
|
335 |
+
"cell_type": "markdown",
|
336 |
+
"source": "Next, we set the Langfuse API keys and host address as environment variables. You can get your Langfuse credentials by signing up for [Langfuse Cloud](https://cloud.langfuse.com) or [self-hosting Langfuse](https://langfuse.com/self-hosting)."
|
337 |
+
},
|
338 |
+
{
|
339 |
+
"metadata": {},
|
340 |
+
"cell_type": "code",
|
341 |
+
"outputs": [],
|
342 |
+
"execution_count": null,
|
343 |
+
"source": [
|
344 |
+
"import os\n",
|
345 |
+
"\n",
|
346 |
+
"# Get keys for your project from the project settings page: https://cloud.langfuse.com\n",
|
347 |
+
"os.environ[\"LANGFUSE_PUBLIC_KEY\"] = \"pk-lf-...\"\n",
|
348 |
+
"os.environ[\"LANGFUSE_SECRET_KEY\"] = \"sk-lf-...\"\n",
|
349 |
+
"os.environ[\"LANGFUSE_HOST\"] = \"https://cloud.langfuse.com\" # πͺπΊ EU region\n",
|
350 |
+
"# os.environ[\"LANGFUSE_HOST\"] = \"https://us.cloud.langfuse.com\" # πΊπΈ US region"
|
351 |
+
]
|
352 |
+
},
|
353 |
+
{
|
354 |
+
"metadata": {},
|
355 |
+
"cell_type": "markdown",
|
356 |
+
"source": "Now, we configure the [Langfuse `callback_handler`](https://langfuse.com/docs/integrations/langchain/tracing#add-langfuse-to-your-langchain-application)."
|
357 |
+
},
|
358 |
+
{
|
359 |
+
"metadata": {},
|
360 |
+
"cell_type": "code",
|
361 |
+
"outputs": [],
|
362 |
+
"execution_count": null,
|
363 |
+
"source": [
|
364 |
+
"from langfuse.callback import CallbackHandler\n",
|
365 |
+
"\n",
|
366 |
+
"# Initialize Langfuse CallbackHandler for LangGraph/Langchain (tracing)\n",
|
367 |
+
"langfuse_handler = CallbackHandler()"
|
368 |
+
]
|
369 |
+
},
|
370 |
+
{
|
371 |
+
"metadata": {},
|
372 |
+
"cell_type": "markdown",
|
373 |
+
"source": "We then add `config={\"callbacks\": [langfuse_handler]}` to the invocation of the agents and run them again."
|
374 |
+
},
|
375 |
+
{
|
376 |
+
"metadata": {},
|
377 |
+
"cell_type": "code",
|
378 |
+
"outputs": [],
|
379 |
+
"execution_count": null,
|
380 |
+
"source": [
|
381 |
+
"# Process legitimate email\n",
|
382 |
+
"print(\"\\nProcessing legitimate email...\")\n",
|
383 |
+
"legitimate_result = compiled_graph.invoke(\n",
|
384 |
+
" input={\n",
|
385 |
+
" \"email\": legitimate_email,\n",
|
386 |
+
" \"is_spam\": None,\n",
|
387 |
+
" \"email_draft\": None,\n",
|
388 |
+
" \"messages\": []\n",
|
389 |
+
" },\n",
|
390 |
+
" config={\"callbacks\": [langfuse_handler]}\n",
|
391 |
+
")\n",
|
392 |
+
"\n",
|
393 |
+
"# Process spam email\n",
|
394 |
+
"print(\"\\nProcessing spam email...\")\n",
|
395 |
+
"spam_result = compiled_graph.invoke(\n",
|
396 |
+
" input={\n",
|
397 |
+
" \"email\": spam_email,\n",
|
398 |
+
" \"is_spam\": None,\n",
|
399 |
+
" \"email_draft\": None,\n",
|
400 |
+
" \"messages\": []\n",
|
401 |
+
" },\n",
|
402 |
+
" config={\"callbacks\": [langfuse_handler]}\n",
|
403 |
+
")"
|
404 |
+
]
|
405 |
+
},
|
406 |
+
{
|
407 |
+
"metadata": {},
|
408 |
+
"cell_type": "markdown",
|
409 |
+
"source": [
|
410 |
+
"Alfred is now connected π! The runs from LangGraph are being logged in Langfuse, giving him full visibility into the agent's behavior. With this setup, he's ready to revisit previous runs and refine his Mail Sorting Agent even further.\n",
|
411 |
+
"\n",
|
412 |
+
"\n",
|
413 |
+
"\n",
|
414 |
+
"_[Public link to the trace with the legit email](https://cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/f5d6d72e-20af-4357-b232-af44c3728a7b?timestamp=2025-03-17T10%3A13%3A28.413Z&observation=6997ba69-043f-4f77-9445-700a033afba1)_\n",
|
415 |
+
"\n",
|
416 |
+
"\n",
|
417 |
+
"\n",
|
418 |
+
"_[Public link to the trace with the spam email](https://langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/6e498053-fee4-41fd-b1ab-d534aca15f82?timestamp=2025-03-17T10%3A13%3A30.884Z&observation=84770fc8-4276-4720-914f-bf52738d44ba)_\n"
|
419 |
+
]
|
420 |
+
}
|
421 |
+
],
|
422 |
+
"metadata": {
|
423 |
+
"kernelspec": {
|
424 |
+
"display_name": "Python 3",
|
425 |
+
"language": "python",
|
426 |
+
"name": "python3"
|
427 |
+
},
|
428 |
+
"language_info": {
|
429 |
+
"codemirror_mode": {
|
430 |
+
"name": "ipython",
|
431 |
+
"version": 3
|
432 |
+
},
|
433 |
+
"file_extension": ".py",
|
434 |
+
"mimetype": "text/x-python",
|
435 |
+
"name": "python",
|
436 |
+
"nbconvert_exporter": "python",
|
437 |
+
"pygments_lexer": "ipython3",
|
438 |
+
"version": "3.13.2"
|
439 |
+
}
|
440 |
+
},
|
441 |
+
"nbformat": 4,
|
442 |
+
"nbformat_minor": 2
|
443 |
+
}
|
unit2/llama-index/agents.ipynb
CHANGED
@@ -167,7 +167,7 @@
|
|
167 |
},
|
168 |
{
|
169 |
"cell_type": "code",
|
170 |
-
"execution_count":
|
171 |
"metadata": {},
|
172 |
"outputs": [],
|
173 |
"source": [
|
|
|
167 |
},
|
168 |
{
|
169 |
"cell_type": "code",
|
170 |
+
"execution_count": 46,
|
171 |
"metadata": {},
|
172 |
"outputs": [],
|
173 |
"source": [
|
unit2/llama-index/components.ipynb
CHANGED
@@ -118,7 +118,7 @@
|
|
118 |
},
|
119 |
{
|
120 |
"cell_type": "code",
|
121 |
-
"execution_count":
|
122 |
"metadata": {},
|
123 |
"outputs": [
|
124 |
{
|
@@ -143,6 +143,7 @@
|
|
143 |
],
|
144 |
"source": [
|
145 |
"from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n",
|
|
|
146 |
"from llama_index.core.node_parser import SentenceSplitter\n",
|
147 |
"from llama_index.core.ingestion import IngestionPipeline\n",
|
148 |
"\n",
|
@@ -175,7 +176,7 @@
|
|
175 |
},
|
176 |
{
|
177 |
"cell_type": "code",
|
178 |
-
"execution_count":
|
179 |
"metadata": {},
|
180 |
"outputs": [
|
181 |
{
|
@@ -218,13 +219,14 @@
|
|
218 |
},
|
219 |
{
|
220 |
"cell_type": "code",
|
221 |
-
"execution_count":
|
222 |
"metadata": {},
|
223 |
"outputs": [],
|
224 |
"source": [
|
225 |
"from llama_index.core import VectorStoreIndex\n",
|
226 |
"from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n",
|
227 |
"\n",
|
|
|
228 |
"embed_model = HuggingFaceEmbedding(model_name=\"BAAI/bge-small-en-v1.5\")\n",
|
229 |
"index = VectorStoreIndex.from_vector_store(\n",
|
230 |
" vector_store=vector_store, embed_model=embed_model\n",
|
|
|
118 |
},
|
119 |
{
|
120 |
"cell_type": "code",
|
121 |
+
"execution_count": 16,
|
122 |
"metadata": {},
|
123 |
"outputs": [
|
124 |
{
|
|
|
143 |
],
|
144 |
"source": [
|
145 |
"from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n",
|
146 |
+
"\n",
|
147 |
"from llama_index.core.node_parser import SentenceSplitter\n",
|
148 |
"from llama_index.core.ingestion import IngestionPipeline\n",
|
149 |
"\n",
|
|
|
176 |
},
|
177 |
{
|
178 |
"cell_type": "code",
|
179 |
+
"execution_count": 18,
|
180 |
"metadata": {},
|
181 |
"outputs": [
|
182 |
{
|
|
|
219 |
},
|
220 |
{
|
221 |
"cell_type": "code",
|
222 |
+
"execution_count": 19,
|
223 |
"metadata": {},
|
224 |
"outputs": [],
|
225 |
"source": [
|
226 |
"from llama_index.core import VectorStoreIndex\n",
|
227 |
"from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n",
|
228 |
"\n",
|
229 |
+
"\n",
|
230 |
"embed_model = HuggingFaceEmbedding(model_name=\"BAAI/bge-small-en-v1.5\")\n",
|
231 |
"index = VectorStoreIndex.from_vector_store(\n",
|
232 |
" vector_store=vector_store, embed_model=embed_model\n",
|
unit2/llama-index/tools.ipynb
CHANGED
@@ -86,7 +86,7 @@
|
|
86 |
},
|
87 |
{
|
88 |
"cell_type": "code",
|
89 |
-
"execution_count":
|
90 |
"metadata": {},
|
91 |
"outputs": [
|
92 |
{
|
@@ -139,21 +139,21 @@
|
|
139 |
},
|
140 |
{
|
141 |
"cell_type": "code",
|
142 |
-
"execution_count":
|
143 |
"metadata": {},
|
144 |
"outputs": [
|
145 |
{
|
146 |
"data": {
|
147 |
"text/plain": [
|
148 |
-
"[<llama_index.core.tools.function_tool.FunctionTool at
|
149 |
-
" <llama_index.core.tools.function_tool.FunctionTool at
|
150 |
-
" <llama_index.core.tools.function_tool.FunctionTool at
|
151 |
-
" <llama_index.core.tools.function_tool.FunctionTool at
|
152 |
-
" <llama_index.core.tools.function_tool.FunctionTool at
|
153 |
-
" <llama_index.core.tools.function_tool.FunctionTool at
|
154 |
]
|
155 |
},
|
156 |
-
"execution_count":
|
157 |
"metadata": {},
|
158 |
"output_type": "execute_result"
|
159 |
}
|
@@ -175,39 +175,84 @@
|
|
175 |
},
|
176 |
{
|
177 |
"cell_type": "code",
|
178 |
-
"execution_count":
|
179 |
"metadata": {},
|
180 |
"outputs": [
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
181 |
{
|
182 |
"data": {
|
183 |
"text/plain": [
|
184 |
-
"[
|
185 |
-
" \"load_data() -> List[llama_index.core.schema.Document]\\nLoad emails from the user's account.\"),\n",
|
186 |
-
" ('search_messages',\n",
|
187 |
-
" \"search_messages(query: str, max_results: Optional[int] = None)\\nSearches email messages given a query string and the maximum number\\n of results requested by the user\\n Returns: List of relevant message objects up to the maximum number of results.\\n\\n Args:\\n query[str]: The user's query\\n max_results (Optional[int]): The maximum number of search results\\n to return.\\n \"),\n",
|
188 |
-
" ('create_draft',\n",
|
189 |
-
" \"create_draft(to: Optional[List[str]] = None, subject: Optional[str] = None, message: Optional[str] = None) -> str\\nCreate and insert a draft email.\\n Print the returned draft's message and id.\\n Returns: Draft object, including draft id and message meta data.\\n\\n Args:\\n to (Optional[str]): The email addresses to send the message to\\n subject (Optional[str]): The subject for the event\\n message (Optional[str]): The message for the event\\n \"),\n",
|
190 |
-
" ('update_draft',\n",
|
191 |
-
" \"update_draft(to: Optional[List[str]] = None, subject: Optional[str] = None, message: Optional[str] = None, draft_id: str = None) -> str\\nUpdate a draft email.\\n Print the returned draft's message and id.\\n This function is required to be passed a draft_id that is obtained when creating messages\\n Returns: Draft object, including draft id and message meta data.\\n\\n Args:\\n to (Optional[str]): The email addresses to send the message to\\n subject (Optional[str]): The subject for the event\\n message (Optional[str]): The message for the event\\n draft_id (str): the id of the draft to be updated\\n \"),\n",
|
192 |
-
" ('get_draft',\n",
|
193 |
-
" \"get_draft(draft_id: str = None) -> str\\nGet a draft email.\\n Print the returned draft's message and id.\\n Returns: Draft object, including draft id and message meta data.\\n\\n Args:\\n draft_id (str): the id of the draft to be updated\\n \"),\n",
|
194 |
-
" ('send_draft',\n",
|
195 |
-
" \"send_draft(draft_id: str = None) -> str\\nSends a draft email.\\n Print the returned draft's message and id.\\n Returns: Draft object, including draft id and message meta data.\\n\\n Args:\\n draft_id (str): the id of the draft to be updated\\n \")]"
|
196 |
]
|
197 |
},
|
198 |
-
"execution_count":
|
199 |
"metadata": {},
|
200 |
"output_type": "execute_result"
|
201 |
}
|
202 |
],
|
203 |
"source": [
|
204 |
-
"[(tool.metadata.name, tool.metadata.description) for tool in tool_spec_list]"
|
205 |
]
|
206 |
}
|
207 |
],
|
208 |
"metadata": {
|
209 |
"kernelspec": {
|
210 |
-
"display_name": "
|
211 |
"language": "python",
|
212 |
"name": "python3"
|
213 |
},
|
@@ -221,9 +266,9 @@
|
|
221 |
"name": "python",
|
222 |
"nbconvert_exporter": "python",
|
223 |
"pygments_lexer": "ipython3",
|
224 |
-
"version": "3.
|
225 |
}
|
226 |
},
|
227 |
"nbformat": 4,
|
228 |
-
"nbformat_minor":
|
229 |
}
|
|
|
86 |
},
|
87 |
{
|
88 |
"cell_type": "code",
|
89 |
+
"execution_count": 8,
|
90 |
"metadata": {},
|
91 |
"outputs": [
|
92 |
{
|
|
|
139 |
},
|
140 |
{
|
141 |
"cell_type": "code",
|
142 |
+
"execution_count": 1,
|
143 |
"metadata": {},
|
144 |
"outputs": [
|
145 |
{
|
146 |
"data": {
|
147 |
"text/plain": [
|
148 |
+
"[<llama_index.core.tools.function_tool.FunctionTool at 0x7f0d50623d90>,\n",
|
149 |
+
" <llama_index.core.tools.function_tool.FunctionTool at 0x7f0d1c055210>,\n",
|
150 |
+
" <llama_index.core.tools.function_tool.FunctionTool at 0x7f0d1c055780>,\n",
|
151 |
+
" <llama_index.core.tools.function_tool.FunctionTool at 0x7f0d1c0556f0>,\n",
|
152 |
+
" <llama_index.core.tools.function_tool.FunctionTool at 0x7f0d1c0559f0>,\n",
|
153 |
+
" <llama_index.core.tools.function_tool.FunctionTool at 0x7f0d1c055b40>]"
|
154 |
]
|
155 |
},
|
156 |
+
"execution_count": 1,
|
157 |
"metadata": {},
|
158 |
"output_type": "execute_result"
|
159 |
}
|
|
|
175 |
},
|
176 |
{
|
177 |
"cell_type": "code",
|
178 |
+
"execution_count": 2,
|
179 |
"metadata": {},
|
180 |
"outputs": [
|
181 |
+
{
|
182 |
+
"name": "stdout",
|
183 |
+
"output_type": "stream",
|
184 |
+
"text": [
|
185 |
+
"load_data load_data() -> List[llama_index.core.schema.Document]\n",
|
186 |
+
"Load emails from the user's account.\n",
|
187 |
+
"search_messages search_messages(query: str, max_results: Optional[int] = None)\n",
|
188 |
+
"Searches email messages given a query string and the maximum number\n",
|
189 |
+
" of results requested by the user\n",
|
190 |
+
" Returns: List of relevant message objects up to the maximum number of results.\n",
|
191 |
+
"\n",
|
192 |
+
" Args:\n",
|
193 |
+
" query[str]: The user's query\n",
|
194 |
+
" max_results (Optional[int]): The maximum number of search results\n",
|
195 |
+
" to return.\n",
|
196 |
+
" \n",
|
197 |
+
"create_draft create_draft(to: Optional[List[str]] = None, subject: Optional[str] = None, message: Optional[str] = None) -> str\n",
|
198 |
+
"Create and insert a draft email.\n",
|
199 |
+
" Print the returned draft's message and id.\n",
|
200 |
+
" Returns: Draft object, including draft id and message meta data.\n",
|
201 |
+
"\n",
|
202 |
+
" Args:\n",
|
203 |
+
" to (Optional[str]): The email addresses to send the message to\n",
|
204 |
+
" subject (Optional[str]): The subject for the event\n",
|
205 |
+
" message (Optional[str]): The message for the event\n",
|
206 |
+
" \n",
|
207 |
+
"update_draft update_draft(to: Optional[List[str]] = None, subject: Optional[str] = None, message: Optional[str] = None, draft_id: str = None) -> str\n",
|
208 |
+
"Update a draft email.\n",
|
209 |
+
" Print the returned draft's message and id.\n",
|
210 |
+
" This function is required to be passed a draft_id that is obtained when creating messages\n",
|
211 |
+
" Returns: Draft object, including draft id and message meta data.\n",
|
212 |
+
"\n",
|
213 |
+
" Args:\n",
|
214 |
+
" to (Optional[str]): The email addresses to send the message to\n",
|
215 |
+
" subject (Optional[str]): The subject for the event\n",
|
216 |
+
" message (Optional[str]): The message for the event\n",
|
217 |
+
" draft_id (str): the id of the draft to be updated\n",
|
218 |
+
" \n",
|
219 |
+
"get_draft get_draft(draft_id: str = None) -> str\n",
|
220 |
+
"Get a draft email.\n",
|
221 |
+
" Print the returned draft's message and id.\n",
|
222 |
+
" Returns: Draft object, including draft id and message meta data.\n",
|
223 |
+
"\n",
|
224 |
+
" Args:\n",
|
225 |
+
" draft_id (str): the id of the draft to be updated\n",
|
226 |
+
" \n",
|
227 |
+
"send_draft send_draft(draft_id: str = None) -> str\n",
|
228 |
+
"Sends a draft email.\n",
|
229 |
+
" Print the returned draft's message and id.\n",
|
230 |
+
" Returns: Draft object, including draft id and message meta data.\n",
|
231 |
+
"\n",
|
232 |
+
" Args:\n",
|
233 |
+
" draft_id (str): the id of the draft to be updated\n",
|
234 |
+
" \n"
|
235 |
+
]
|
236 |
+
},
|
237 |
{
|
238 |
"data": {
|
239 |
"text/plain": [
|
240 |
+
"[None, None, None, None, None, None]"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
241 |
]
|
242 |
},
|
243 |
+
"execution_count": 2,
|
244 |
"metadata": {},
|
245 |
"output_type": "execute_result"
|
246 |
}
|
247 |
],
|
248 |
"source": [
|
249 |
+
"[print(tool.metadata.name, tool.metadata.description) for tool in tool_spec_list]"
|
250 |
]
|
251 |
}
|
252 |
],
|
253 |
"metadata": {
|
254 |
"kernelspec": {
|
255 |
+
"display_name": "Python 3 (ipykernel)",
|
256 |
"language": "python",
|
257 |
"name": "python3"
|
258 |
},
|
|
|
266 |
"name": "python",
|
267 |
"nbconvert_exporter": "python",
|
268 |
"pygments_lexer": "ipython3",
|
269 |
+
"version": "3.10.12"
|
270 |
}
|
271 |
},
|
272 |
"nbformat": 4,
|
273 |
+
"nbformat_minor": 4
|
274 |
}
|
unit2/llama-index/workflows.ipynb
CHANGED
@@ -153,6 +153,9 @@
|
|
153 |
"name": "stdout",
|
154 |
"output_type": "stream",
|
155 |
"text": [
|
|
|
|
|
|
|
156 |
"Good thing happened\n"
|
157 |
]
|
158 |
},
|
@@ -182,7 +185,7 @@
|
|
182 |
"\n",
|
183 |
"class MultiStepWorkflow(Workflow):\n",
|
184 |
" @step\n",
|
185 |
-
" async def step_one(self, ev: StartEvent) -> ProcessingEvent | LoopEvent:\n",
|
186 |
" if random.randint(0, 1) == 0:\n",
|
187 |
" print(\"Bad thing happened\")\n",
|
188 |
" return LoopEvent(loop_output=\"Back to step one.\")\n",
|
@@ -191,7 +194,7 @@
|
|
191 |
" return ProcessingEvent(intermediate_result=\"First step complete.\")\n",
|
192 |
"\n",
|
193 |
" @step\n",
|
194 |
-
" async def step_two(self, ev: ProcessingEvent
|
195 |
" # Use the intermediate result\n",
|
196 |
" final_result = f\"Finished processing: {ev.intermediate_result}\"\n",
|
197 |
" return StopEvent(result=final_result)\n",
|
@@ -321,9 +324,7 @@
|
|
321 |
{
|
322 |
"data": {
|
323 |
"text/plain": [
|
324 |
-
"AgentOutput(response=ChatMessage(role=<MessageRole.ASSISTANT: 'assistant'>, additional_kwargs={}, blocks=[TextBlock(block_type='text', text='5 and 3 add up to 8.')]), tool_calls=[ToolCallResult(tool_name='handoff', tool_kwargs={'to_agent': 'add_agent', 'reason': 'The user wants to add two numbers, and the add_agent is better suited for this task.'}, tool_id='831895e7-3502-4642-92ea-8626e21ed83b', tool_output=ToolOutput(content='Agent add_agent is now handling the request due to the following reason: The user wants to add two numbers, and the add_agent is better suited for this task..\
|
325 |
-
"Please continue with the current request.', tool_name='handoff', raw_input={'args': (), 'kwargs': {'to_agent': 'add_agent', 'reason': 'The user wants to add two numbers, and the add_agent is better suited for this task.'}}, raw_output='Agent add_agent is now handling the request due to the following reason: The user wants to add two numbers, and the add_agent is better suited for this task..\n",
|
326 |
-
"Please continue with the current request.', is_error=False), return_direct=True), ToolCallResult(tool_name='add', tool_kwargs={'a': 5, 'b': 3}, tool_id='c29dc3f7-eaa7-4ba7-b49b-90908f860cc5', tool_output=ToolOutput(content='8', tool_name='add', raw_input={'args': (), 'kwargs': {'a': 5, 'b': 3}}, raw_output=8, is_error=False), return_direct=False)], raw=ChatCompletionStreamOutput(choices=[ChatCompletionStreamOutputChoice(delta=ChatCompletionStreamOutputDelta(role='assistant', content='.', tool_call_id=None, tool_calls=None), index=0, finish_reason=None, logprobs=None)], created=1744553546, id='', model='Qwen/Qwen2.5-Coder-32B-Instruct', system_fingerprint='3.2.1-sha-4d28897', usage=None, object='chat.completion.chunk'), current_agent_name='add_agent')"
|
327 |
]
|
328 |
},
|
329 |
"execution_count": 33,
|
|
|
153 |
"name": "stdout",
|
154 |
"output_type": "stream",
|
155 |
"text": [
|
156 |
+
"Bad thing happened\n",
|
157 |
+
"Bad thing happened\n",
|
158 |
+
"Bad thing happened\n",
|
159 |
"Good thing happened\n"
|
160 |
]
|
161 |
},
|
|
|
185 |
"\n",
|
186 |
"class MultiStepWorkflow(Workflow):\n",
|
187 |
" @step\n",
|
188 |
+
" async def step_one(self, ev: StartEvent | LoopEvent) -> ProcessingEvent | LoopEvent:\n",
|
189 |
" if random.randint(0, 1) == 0:\n",
|
190 |
" print(\"Bad thing happened\")\n",
|
191 |
" return LoopEvent(loop_output=\"Back to step one.\")\n",
|
|
|
194 |
" return ProcessingEvent(intermediate_result=\"First step complete.\")\n",
|
195 |
"\n",
|
196 |
" @step\n",
|
197 |
+
" async def step_two(self, ev: ProcessingEvent) -> StopEvent:\n",
|
198 |
" # Use the intermediate result\n",
|
199 |
" final_result = f\"Finished processing: {ev.intermediate_result}\"\n",
|
200 |
" return StopEvent(result=final_result)\n",
|
|
|
324 |
{
|
325 |
"data": {
|
326 |
"text/plain": [
|
327 |
+
"AgentOutput(response=ChatMessage(role=<MessageRole.ASSISTANT: 'assistant'>, additional_kwargs={}, blocks=[TextBlock(block_type='text', text='5 and 3 add up to 8.')]), tool_calls=[ToolCallResult(tool_name='handoff', tool_kwargs={'to_agent': 'add_agent', 'reason': 'The user wants to add two numbers, and the add_agent is better suited for this task.'}, tool_id='831895e7-3502-4642-92ea-8626e21ed83b', tool_output=ToolOutput(content='Agent add_agent is now handling the request due to the following reason: The user wants to add two numbers, and the add_agent is better suited for this task..\nPlease continue with the current request.', tool_name='handoff', raw_input={'args': (), 'kwargs': {'to_agent': 'add_agent', 'reason': 'The user wants to add two numbers, and the add_agent is better suited for this task.'}}, raw_output='Agent add_agent is now handling the request due to the following reason: The user wants to add two numbers, and the add_agent is better suited for this task..\nPlease continue with the current request.', is_error=False), return_direct=True), ToolCallResult(tool_name='add', tool_kwargs={'a': 5, 'b': 3}, tool_id='c29dc3f7-eaa7-4ba7-b49b-90908f860cc5', tool_output=ToolOutput(content='8', tool_name='add', raw_input={'args': (), 'kwargs': {'a': 5, 'b': 3}}, raw_output=8, is_error=False), return_direct=False)], raw=ChatCompletionStreamOutput(choices=[ChatCompletionStreamOutputChoice(delta=ChatCompletionStreamOutputDelta(role='assistant', content='.', tool_call_id=None, tool_calls=None), index=0, finish_reason=None, logprobs=None)], created=1744553546, id='', model='Qwen/Qwen2.5-Coder-32B-Instruct', system_fingerprint='3.2.1-sha-4d28897', usage=None, object='chat.completion.chunk'), current_agent_name='add_agent')"
|
|
|
|
|
328 |
]
|
329 |
},
|
330 |
"execution_count": 33,
|
unit2/smolagents/code_agents.ipynb
CHANGED
The diff for this file is too large to render.
See raw diff
|
|
unit2/smolagents/multiagent_notebook.ipynb
CHANGED
The diff for this file is too large to render.
See raw diff
|
|
unit2/smolagents/retrieval_agents.ipynb
CHANGED
@@ -93,7 +93,7 @@
|
|
93 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
94 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"font-weight: bold\">Search for luxury superhero-themed party ideas, including decorations, entertainment, and catering.</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
95 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
96 |
-
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β°β
|
97 |
"</pre>\n"
|
98 |
],
|
99 |
"text/plain": [
|
@@ -101,7 +101,7 @@
|
|
101 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
102 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[1mSearch for luxury superhero-themed party ideas, including decorations, entertainment, and catering.\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
103 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
104 |
-
"\u001b[38;2;212;183;2mβ°β\u001b[0m\u001b[38;2;212;183;2m
|
105 |
]
|
106 |
},
|
107 |
"metadata": {},
|
@@ -1733,13 +1733,13 @@
|
|
1733 |
}
|
1734 |
],
|
1735 |
"source": [
|
1736 |
-
"from smolagents import CodeAgent, DuckDuckGoSearchTool,
|
1737 |
"\n",
|
1738 |
"# Initialize the search tool\n",
|
1739 |
"search_tool = DuckDuckGoSearchTool()\n",
|
1740 |
"\n",
|
1741 |
"# Initialize the model\n",
|
1742 |
-
"model =
|
1743 |
"\n",
|
1744 |
"agent = CodeAgent(\n",
|
1745 |
" model = model,\n",
|
@@ -1812,7 +1812,7 @@
|
|
1812 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
1813 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"font-weight: bold\">Find ideas for a luxury superhero-themed party, including entertainment, catering, and decoration options.</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
1814 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
1815 |
-
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β°β
|
1816 |
"</pre>\n"
|
1817 |
],
|
1818 |
"text/plain": [
|
@@ -1820,7 +1820,7 @@
|
|
1820 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
1821 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[1mFind ideas for a luxury superhero-themed party, including entertainment, catering, and decoration options.\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
1822 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
1823 |
-
"\u001b[38;2;212;183;2mβ°β\u001b[0m\u001b[38;2;212;183;2m
|
1824 |
]
|
1825 |
},
|
1826 |
"metadata": {},
|
@@ -2783,7 +2783,7 @@
|
|
2783 |
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
|
2784 |
"from smolagents import Tool\n",
|
2785 |
"from langchain_community.retrievers import BM25Retriever\n",
|
2786 |
-
"from smolagents import CodeAgent,
|
2787 |
"\n",
|
2788 |
"class PartyPlanningRetrieverTool(Tool):\n",
|
2789 |
" name = \"party_planning_retriever\"\n",
|
@@ -2843,7 +2843,7 @@
|
|
2843 |
"party_planning_retriever = PartyPlanningRetrieverTool(docs_processed)\n",
|
2844 |
"\n",
|
2845 |
"# Initialize the agent\n",
|
2846 |
-
"agent = CodeAgent(tools=[party_planning_retriever], model=
|
2847 |
"\n",
|
2848 |
"# Example usage\n",
|
2849 |
"response = agent.run(\n",
|
|
|
93 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
94 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"font-weight: bold\">Search for luxury superhero-themed party ideas, including decorations, entertainment, and catering.</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
95 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
96 |
+
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β°β InferenceClientModel - Qwen/Qwen2.5-Coder-32B-Instruct βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ―</span>\n",
|
97 |
"</pre>\n"
|
98 |
],
|
99 |
"text/plain": [
|
|
|
101 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
102 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[1mSearch for luxury superhero-themed party ideas, including decorations, entertainment, and catering.\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
103 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
104 |
+
"\u001b[38;2;212;183;2mβ°β\u001b[0m\u001b[38;2;212;183;2m InferenceClientModel - Qwen/Qwen2.5-Coder-32B-Instruct \u001b[0m\u001b[38;2;212;183;2mβββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\u001b[0m\u001b[38;2;212;183;2mββ―\u001b[0m\n"
|
105 |
]
|
106 |
},
|
107 |
"metadata": {},
|
|
|
1733 |
}
|
1734 |
],
|
1735 |
"source": [
|
1736 |
+
"from smolagents import CodeAgent, DuckDuckGoSearchTool, InferenceClientModel\n",
|
1737 |
"\n",
|
1738 |
"# Initialize the search tool\n",
|
1739 |
"search_tool = DuckDuckGoSearchTool()\n",
|
1740 |
"\n",
|
1741 |
"# Initialize the model\n",
|
1742 |
+
"model = InferenceClientModel()\n",
|
1743 |
"\n",
|
1744 |
"agent = CodeAgent(\n",
|
1745 |
" model = model,\n",
|
|
|
1812 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
1813 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"font-weight: bold\">Find ideas for a luxury superhero-themed party, including entertainment, catering, and decoration options.</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
1814 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
1815 |
+
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β°β InferenceClientModel - Qwen/Qwen2.5-Coder-32B-Instruct οΏ½οΏ½ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ―</span>\n",
|
1816 |
"</pre>\n"
|
1817 |
],
|
1818 |
"text/plain": [
|
|
|
1820 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
1821 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[1mFind ideas for a luxury superhero-themed party, including entertainment, catering, and decoration options.\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
1822 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
1823 |
+
"\u001b[38;2;212;183;2mβ°β\u001b[0m\u001b[38;2;212;183;2m InferenceClientModel - Qwen/Qwen2.5-Coder-32B-Instruct \u001b[0m\u001b[38;2;212;183;2mβββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\u001b[0m\u001b[38;2;212;183;2mββ―\u001b[0m\n"
|
1824 |
]
|
1825 |
},
|
1826 |
"metadata": {},
|
|
|
2783 |
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
|
2784 |
"from smolagents import Tool\n",
|
2785 |
"from langchain_community.retrievers import BM25Retriever\n",
|
2786 |
+
"from smolagents import CodeAgent, InferenceClientModel\n",
|
2787 |
"\n",
|
2788 |
"class PartyPlanningRetrieverTool(Tool):\n",
|
2789 |
" name = \"party_planning_retriever\"\n",
|
|
|
2843 |
"party_planning_retriever = PartyPlanningRetrieverTool(docs_processed)\n",
|
2844 |
"\n",
|
2845 |
"# Initialize the agent\n",
|
2846 |
+
"agent = CodeAgent(tools=[party_planning_retriever], model=InferenceClientModel())\n",
|
2847 |
"\n",
|
2848 |
"# Example usage\n",
|
2849 |
"response = agent.run(\n",
|
unit2/smolagents/tool_calling_agents.ipynb
CHANGED
@@ -6,7 +6,7 @@
|
|
6 |
"id": "Pi9CF0391ARI"
|
7 |
},
|
8 |
"source": [
|
9 |
-
"#
|
10 |
"\n",
|
11 |
"This notebook is part of the [Hugging Face Agents Course](https://www.hf.co/learn/agents-course), a free Course from beginner to expert, where you learn to build Agents.\n",
|
12 |
"\n",
|
@@ -37,12 +37,12 @@
|
|
37 |
},
|
38 |
{
|
39 |
"cell_type": "markdown",
|
40 |
-
"source": [
|
41 |
-
"Let's also login to the Hugging Face Hub to have access to the Inference API."
|
42 |
-
],
|
43 |
"metadata": {
|
44 |
"id": "cH-4W1GhYL4T"
|
45 |
-
}
|
|
|
|
|
|
|
46 |
},
|
47 |
{
|
48 |
"cell_type": "code",
|
@@ -87,7 +87,7 @@
|
|
87 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
88 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"font-weight: bold\">Search for the best music recommendations for a party at the Wayne's mansion.</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
89 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
90 |
-
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β°β
|
91 |
"</pre>\n"
|
92 |
],
|
93 |
"text/plain": [
|
@@ -95,7 +95,7 @@
|
|
95 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
96 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[1mSearch for the best music recommendations for a party at the Wayne's mansion.\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
97 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
98 |
-
"\u001b[38;2;212;183;2mβ°β\u001b[0m\u001b[38;2;212;183;2m
|
99 |
]
|
100 |
},
|
101 |
"metadata": {},
|
@@ -550,15 +550,18 @@
|
|
550 |
}
|
551 |
],
|
552 |
"source": [
|
553 |
-
"from smolagents import ToolCallingAgent, DuckDuckGoSearchTool,
|
554 |
"\n",
|
555 |
-
"agent = ToolCallingAgent(tools=[DuckDuckGoSearchTool()], model=
|
556 |
"\n",
|
557 |
"agent.run(\"Search for the best music recommendations for a party at the Wayne's mansion.\")"
|
558 |
]
|
559 |
},
|
560 |
{
|
561 |
"cell_type": "markdown",
|
|
|
|
|
|
|
562 |
"source": [
|
563 |
"\n",
|
564 |
"When you examine the agent's trace, instead of seeing `Executing parsed code:`, you'll see something like:\n",
|
@@ -573,10 +576,7 @@
|
|
573 |
"The agent generates a structured tool call that the system processes to produce the output, rather than directly executing code like a `CodeAgent`.\n",
|
574 |
"\n",
|
575 |
"Now that we understand both agent types, we can choose the right one for our needs. Let's continue exploring `smolagents` to make Alfred's party a success! π"
|
576 |
-
]
|
577 |
-
"metadata": {
|
578 |
-
"id": "Cl19VWGRYXrr"
|
579 |
-
}
|
580 |
}
|
581 |
],
|
582 |
"metadata": {
|
@@ -593,4 +593,4 @@
|
|
593 |
},
|
594 |
"nbformat": 4,
|
595 |
"nbformat_minor": 0
|
596 |
-
}
|
|
|
6 |
"id": "Pi9CF0391ARI"
|
7 |
},
|
8 |
"source": [
|
9 |
+
"# Writing actions as code snippets or JSON blobs\n",
|
10 |
"\n",
|
11 |
"This notebook is part of the [Hugging Face Agents Course](https://www.hf.co/learn/agents-course), a free Course from beginner to expert, where you learn to build Agents.\n",
|
12 |
"\n",
|
|
|
37 |
},
|
38 |
{
|
39 |
"cell_type": "markdown",
|
|
|
|
|
|
|
40 |
"metadata": {
|
41 |
"id": "cH-4W1GhYL4T"
|
42 |
+
},
|
43 |
+
"source": [
|
44 |
+
"Let's also login to the Hugging Face Hub to have access to the Inference API."
|
45 |
+
]
|
46 |
},
|
47 |
{
|
48 |
"cell_type": "code",
|
|
|
87 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
88 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"font-weight: bold\">Search for the best music recommendations for a party at the Wayne's mansion.</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
89 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
90 |
+
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β°β InferenceClientModel - Qwen/Qwen2.5-Coder-32B-Instruct βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ―</span>\n",
|
91 |
"</pre>\n"
|
92 |
],
|
93 |
"text/plain": [
|
|
|
95 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
96 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[1mSearch for the best music recommendations for a party at the Wayne's mansion.\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
97 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
98 |
+
"\u001b[38;2;212;183;2mβ°β\u001b[0m\u001b[38;2;212;183;2m InferenceClientModel - Qwen/Qwen2.5-Coder-32B-Instruct \u001b[0m\u001b[38;2;212;183;2mβββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\u001b[0m\u001b[38;2;212;183;2mββ―\u001b[0m\n"
|
99 |
]
|
100 |
},
|
101 |
"metadata": {},
|
|
|
550 |
}
|
551 |
],
|
552 |
"source": [
|
553 |
+
"from smolagents import ToolCallingAgent, DuckDuckGoSearchTool, InferenceClientModel\n",
|
554 |
"\n",
|
555 |
+
"agent = ToolCallingAgent(tools=[DuckDuckGoSearchTool()], model=InferenceClientModel())\n",
|
556 |
"\n",
|
557 |
"agent.run(\"Search for the best music recommendations for a party at the Wayne's mansion.\")"
|
558 |
]
|
559 |
},
|
560 |
{
|
561 |
"cell_type": "markdown",
|
562 |
+
"metadata": {
|
563 |
+
"id": "Cl19VWGRYXrr"
|
564 |
+
},
|
565 |
"source": [
|
566 |
"\n",
|
567 |
"When you examine the agent's trace, instead of seeing `Executing parsed code:`, you'll see something like:\n",
|
|
|
576 |
"The agent generates a structured tool call that the system processes to produce the output, rather than directly executing code like a `CodeAgent`.\n",
|
577 |
"\n",
|
578 |
"Now that we understand both agent types, we can choose the right one for our needs. Let's continue exploring `smolagents` to make Alfred's party a success! π"
|
579 |
+
]
|
|
|
|
|
|
|
580 |
}
|
581 |
],
|
582 |
"metadata": {
|
|
|
593 |
},
|
594 |
"nbformat": 4,
|
595 |
"nbformat_minor": 0
|
596 |
+
}
|
unit2/smolagents/tools.ipynb
CHANGED
@@ -91,7 +91,7 @@
|
|
91 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
92 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"font-weight: bold\">Can you give me the name of the highest-rated catering service in Gotham City?</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
93 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
94 |
-
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β°β
|
95 |
"</pre>\n"
|
96 |
],
|
97 |
"text/plain": [
|
@@ -99,7 +99,7 @@
|
|
99 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
100 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[1mCan you give me the name of the highest-rated catering service in Gotham City?\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
101 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
102 |
-
"\u001b[38;2;212;183;2mβ°β\u001b[0m\u001b[38;2;212;183;2m
|
103 |
]
|
104 |
},
|
105 |
"metadata": {},
|
@@ -246,7 +246,7 @@
|
|
246 |
}
|
247 |
],
|
248 |
"source": [
|
249 |
-
"from smolagents import CodeAgent,
|
250 |
"\n",
|
251 |
"# Let's pretend we have a function that fetches the highest-rated catering services.\n",
|
252 |
"@tool\n",
|
@@ -270,7 +270,7 @@
|
|
270 |
" return best_service\n",
|
271 |
"\n",
|
272 |
"\n",
|
273 |
-
"agent = CodeAgent(tools=[catering_service_tool], model=
|
274 |
"\n",
|
275 |
"# Run the agent to find the best catering service\n",
|
276 |
"result = agent.run(\n",
|
@@ -314,7 +314,7 @@
|
|
314 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
315 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"font-weight: bold\">What would be a good superhero party idea for a 'villain masquerade' theme?</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
316 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
317 |
-
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β°β
|
318 |
"</pre>\n"
|
319 |
],
|
320 |
"text/plain": [
|
@@ -322,7 +322,7 @@
|
|
322 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
323 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[1mWhat would be a good superhero party idea for a 'villain masquerade' theme?\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
324 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
325 |
-
"\u001b[38;2;212;183;2mβ°β\u001b[0m\u001b[38;2;212;183;2m
|
326 |
]
|
327 |
},
|
328 |
"metadata": {},
|
@@ -395,7 +395,7 @@
|
|
395 |
}
|
396 |
],
|
397 |
"source": [
|
398 |
-
"from smolagents import Tool, CodeAgent,
|
399 |
"\n",
|
400 |
"class SuperheroPartyThemeTool(Tool):\n",
|
401 |
" name = \"superhero_party_theme_generator\"\n",
|
@@ -423,7 +423,7 @@
|
|
423 |
"\n",
|
424 |
"# Instantiate the tool\n",
|
425 |
"party_theme_tool = SuperheroPartyThemeTool()\n",
|
426 |
-
"agent = CodeAgent(tools=[party_theme_tool], model=
|
427 |
"\n",
|
428 |
"# Run the agent to generate a party theme idea\n",
|
429 |
"result = agent.run(\n",
|
@@ -514,7 +514,7 @@
|
|
514 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
515 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"font-weight: bold\">Generate an image of a luxurious superhero-themed party at Wayne Manor with made-up superheros.</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
516 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
517 |
-
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β°β
|
518 |
"</pre>\n"
|
519 |
],
|
520 |
"text/plain": [
|
@@ -522,7 +522,7 @@
|
|
522 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
523 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[1mGenerate an image of a luxurious superhero-themed party at Wayne Manor with made-up superheros.\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
524 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
525 |
-
"\u001b[38;2;212;183;2mβ°β\u001b[0m\u001b[38;2;212;183;2m
|
526 |
]
|
527 |
},
|
528 |
"metadata": {},
|
@@ -604,7 +604,7 @@
|
|
604 |
}
|
605 |
],
|
606 |
"source": [
|
607 |
-
"from smolagents import load_tool, CodeAgent,
|
608 |
"\n",
|
609 |
"image_generation_tool = load_tool(\n",
|
610 |
" \"m-ric/text-to-image\",\n",
|
@@ -613,7 +613,7 @@
|
|
613 |
"\n",
|
614 |
"agent = CodeAgent(\n",
|
615 |
" tools=[image_generation_tool],\n",
|
616 |
-
" model=
|
617 |
")\n",
|
618 |
"\n",
|
619 |
"agent.run(\"Generate an image of a luxurious superhero-themed party at Wayne Manor with made-up superheros.\")"
|
@@ -679,7 +679,7 @@
|
|
679 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"font-weight: bold\">python code:</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
680 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"font-weight: bold\">{'user_prompt': 'A grand superhero-themed party at Wayne Manor, with Alfred overseeing a luxurious gala'}.</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
681 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
682 |
-
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β°β
|
683 |
"</pre>\n"
|
684 |
],
|
685 |
"text/plain": [
|
@@ -690,7 +690,7 @@
|
|
690 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[1mpython code:\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
691 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[1m{'user_prompt': 'A grand superhero-themed party at Wayne Manor, with Alfred overseeing a luxurious gala'}.\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
692 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
693 |
-
"\u001b[38;2;212;183;2mβ°β\u001b[0m\u001b[38;2;212;183;2m
|
694 |
]
|
695 |
},
|
696 |
"metadata": {},
|
@@ -799,7 +799,7 @@
|
|
799 |
}
|
800 |
],
|
801 |
"source": [
|
802 |
-
"from smolagents import CodeAgent,
|
803 |
"\n",
|
804 |
"image_generation_tool = Tool.from_space(\n",
|
805 |
" \"black-forest-labs/FLUX.1-schnell\",\n",
|
@@ -807,7 +807,7 @@
|
|
807 |
" description=\"Generate an image from a prompt\"\n",
|
808 |
")\n",
|
809 |
"\n",
|
810 |
-
"model =
|
811 |
"\n",
|
812 |
"agent = CodeAgent(tools=[image_generation_tool], model=model)\n",
|
813 |
"\n",
|
@@ -913,7 +913,7 @@
|
|
913 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"font-weight: bold\">Search for luxury entertainment ideas for a superhero-themed event, such as live performances and interactive </span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
914 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"font-weight: bold\">experiences.</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
915 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
916 |
-
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β°β
|
917 |
"</pre>\n"
|
918 |
],
|
919 |
"text/plain": [
|
@@ -922,7 +922,7 @@
|
|
922 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[1mSearch for luxury entertainment ideas for a superhero-themed event, such as live performances and interactive \u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
923 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[1mexperiences.\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
924 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
925 |
-
"\u001b[38;2;212;183;2mβ°β\u001b[0m\u001b[38;2;212;183;2m
|
926 |
]
|
927 |
},
|
928 |
"metadata": {},
|
@@ -1208,7 +1208,7 @@
|
|
1208 |
],
|
1209 |
"source": [
|
1210 |
"from langchain.agents import load_tools\n",
|
1211 |
-
"from smolagents import CodeAgent,
|
1212 |
"\n",
|
1213 |
"search_tool = Tool.from_langchain(load_tools([\"serpapi\"])[0])\n",
|
1214 |
"\n",
|
|
|
91 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
92 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"font-weight: bold\">Can you give me the name of the highest-rated catering service in Gotham City?</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
93 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
94 |
+
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β°β InferenceClientModel - Qwen/Qwen2.5-Coder-32B-Instruct βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ―</span>\n",
|
95 |
"</pre>\n"
|
96 |
],
|
97 |
"text/plain": [
|
|
|
99 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
100 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[1mCan you give me the name of the highest-rated catering service in Gotham City?\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
101 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
102 |
+
"\u001b[38;2;212;183;2mβ°β\u001b[0m\u001b[38;2;212;183;2m InferenceClientModel - Qwen/Qwen2.5-Coder-32B-Instruct \u001b[0m\u001b[38;2;212;183;2mβββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\u001b[0m\u001b[38;2;212;183;2mββ―\u001b[0m\n"
|
103 |
]
|
104 |
},
|
105 |
"metadata": {},
|
|
|
246 |
}
|
247 |
],
|
248 |
"source": [
|
249 |
+
"from smolagents import CodeAgent, InferenceClientModel, tool\n",
|
250 |
"\n",
|
251 |
"# Let's pretend we have a function that fetches the highest-rated catering services.\n",
|
252 |
"@tool\n",
|
|
|
270 |
" return best_service\n",
|
271 |
"\n",
|
272 |
"\n",
|
273 |
+
"agent = CodeAgent(tools=[catering_service_tool], model=InferenceClientModel())\n",
|
274 |
"\n",
|
275 |
"# Run the agent to find the best catering service\n",
|
276 |
"result = agent.run(\n",
|
|
|
314 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
315 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"font-weight: bold\">What would be a good superhero party idea for a 'villain masquerade' theme?</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
316 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
317 |
+
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β°β InferenceClientModel - Qwen/Qwen2.5-Coder-32B-Instruct βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ―</span>\n",
|
318 |
"</pre>\n"
|
319 |
],
|
320 |
"text/plain": [
|
|
|
322 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
323 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[1mWhat would be a good superhero party idea for a 'villain masquerade' theme?\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
324 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
325 |
+
"\u001b[38;2;212;183;2mβ°β\u001b[0m\u001b[38;2;212;183;2m InferenceClientModel - Qwen/Qwen2.5-Coder-32B-Instruct \u001b[0m\u001b[38;2;212;183;2mβββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\u001b[0m\u001b[38;2;212;183;2mββ―\u001b[0m\n"
|
326 |
]
|
327 |
},
|
328 |
"metadata": {},
|
|
|
395 |
}
|
396 |
],
|
397 |
"source": [
|
398 |
+
"from smolagents import Tool, CodeAgent, InferenceClientModel\n",
|
399 |
"\n",
|
400 |
"class SuperheroPartyThemeTool(Tool):\n",
|
401 |
" name = \"superhero_party_theme_generator\"\n",
|
|
|
423 |
"\n",
|
424 |
"# Instantiate the tool\n",
|
425 |
"party_theme_tool = SuperheroPartyThemeTool()\n",
|
426 |
+
"agent = CodeAgent(tools=[party_theme_tool], model=InferenceClientModel())\n",
|
427 |
"\n",
|
428 |
"# Run the agent to generate a party theme idea\n",
|
429 |
"result = agent.run(\n",
|
|
|
514 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
515 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"font-weight: bold\">Generate an image of a luxurious superhero-themed party at Wayne Manor with made-up superheros.</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
516 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
517 |
+
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β°β InferenceClientModel - Qwen/Qwen2.5-Coder-32B-Instruct βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ―</span>\n",
|
518 |
"</pre>\n"
|
519 |
],
|
520 |
"text/plain": [
|
|
|
522 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
523 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[1mGenerate an image of a luxurious superhero-themed party at Wayne Manor with made-up superheros.\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
524 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
525 |
+
"\u001b[38;2;212;183;2mβ°β\u001b[0m\u001b[38;2;212;183;2m InferenceClientModel - Qwen/Qwen2.5-Coder-32B-Instruct \u001b[0m\u001b[38;2;212;183;2mβββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\u001b[0m\u001b[38;2;212;183;2mββ―\u001b[0m\n"
|
526 |
]
|
527 |
},
|
528 |
"metadata": {},
|
|
|
604 |
}
|
605 |
],
|
606 |
"source": [
|
607 |
+
"from smolagents import load_tool, CodeAgent, InferenceClientModel\n",
|
608 |
"\n",
|
609 |
"image_generation_tool = load_tool(\n",
|
610 |
" \"m-ric/text-to-image\",\n",
|
|
|
613 |
"\n",
|
614 |
"agent = CodeAgent(\n",
|
615 |
" tools=[image_generation_tool],\n",
|
616 |
+
" model=InferenceClientModel()\n",
|
617 |
")\n",
|
618 |
"\n",
|
619 |
"agent.run(\"Generate an image of a luxurious superhero-themed party at Wayne Manor with made-up superheros.\")"
|
|
|
679 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"font-weight: bold\">python code:</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
680 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"font-weight: bold\">{'user_prompt': 'A grand superhero-themed party at Wayne Manor, with Alfred overseeing a luxurious gala'}.</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
681 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
682 |
+
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β°β InferenceClientModel - Qwen/Qwen2.5-Coder-32B-Instruct βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ―</span>\n",
|
683 |
"</pre>\n"
|
684 |
],
|
685 |
"text/plain": [
|
|
|
690 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[1mpython code:\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
691 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[1m{'user_prompt': 'A grand superhero-themed party at Wayne Manor, with Alfred overseeing a luxurious gala'}.\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
692 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
693 |
+
"\u001b[38;2;212;183;2mβ°β\u001b[0m\u001b[38;2;212;183;2m InferenceClientModel - Qwen/Qwen2.5-Coder-32B-Instruct \u001b[0m\u001b[38;2;212;183;2mβββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\u001b[0m\u001b[38;2;212;183;2mββ―\u001b[0m\n"
|
694 |
]
|
695 |
},
|
696 |
"metadata": {},
|
|
|
799 |
}
|
800 |
],
|
801 |
"source": [
|
802 |
+
"from smolagents import CodeAgent, InferenceClientModel, Tool\n",
|
803 |
"\n",
|
804 |
"image_generation_tool = Tool.from_space(\n",
|
805 |
" \"black-forest-labs/FLUX.1-schnell\",\n",
|
|
|
807 |
" description=\"Generate an image from a prompt\"\n",
|
808 |
")\n",
|
809 |
"\n",
|
810 |
+
"model = InferenceClientModel(\"Qwen/Qwen2.5-Coder-32B-Instruct\")\n",
|
811 |
"\n",
|
812 |
"agent = CodeAgent(tools=[image_generation_tool], model=model)\n",
|
813 |
"\n",
|
|
|
913 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"font-weight: bold\">Search for luxury entertainment ideas for a superhero-themed event, such as live performances and interactive </span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
914 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"font-weight: bold\">experiences.</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
915 |
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span> <span style=\"color: #d4b702; text-decoration-color: #d4b702\">β</span>\n",
|
916 |
+
"<span style=\"color: #d4b702; text-decoration-color: #d4b702\">β°β InferenceClientModel - Qwen/Qwen2.5-Coder-32B-Instruct βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ―</span>\n",
|
917 |
"</pre>\n"
|
918 |
],
|
919 |
"text/plain": [
|
|
|
922 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[1mSearch for luxury entertainment ideas for a superhero-themed event, such as live performances and interactive \u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
923 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[1mexperiences.\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
924 |
"\u001b[38;2;212;183;2mβ\u001b[0m \u001b[38;2;212;183;2mβ\u001b[0m\n",
|
925 |
+
"\u001b[38;2;212;183;2mβ°β\u001b[0m\u001b[38;2;212;183;2m InferenceClientModel - Qwen/Qwen2.5-Coder-32B-Instruct \u001b[0m\u001b[38;2;212;183;2mβββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\u001b[0m\u001b[38;2;212;183;2mββ―\u001b[0m\n"
|
926 |
]
|
927 |
},
|
928 |
"metadata": {},
|
|
|
1208 |
],
|
1209 |
"source": [
|
1210 |
"from langchain.agents import load_tools\n",
|
1211 |
+
"from smolagents import CodeAgent, InferenceClientModel, Tool\n",
|
1212 |
"\n",
|
1213 |
"search_tool = Tool.from_langchain(load_tools([\"serpapi\"])[0])\n",
|
1214 |
"\n",
|
unit2/smolagents/vision_agents.ipynb
CHANGED
@@ -38,12 +38,12 @@
|
|
38 |
},
|
39 |
{
|
40 |
"cell_type": "markdown",
|
41 |
-
"source": [
|
42 |
-
"Let's also login to the Hugging Face Hub to have access to the Inference API."
|
43 |
-
],
|
44 |
"metadata": {
|
45 |
"id": "WJGFjRbZbL50"
|
46 |
-
}
|
|
|
|
|
|
|
47 |
},
|
48 |
{
|
49 |
"cell_type": "code",
|
@@ -94,19 +94,22 @@
|
|
94 |
"\n",
|
95 |
"images = []\n",
|
96 |
"for url in image_urls:\n",
|
97 |
-
"
|
|
|
|
|
|
|
98 |
" image = Image.open(BytesIO(response.content)).convert(\"RGB\")\n",
|
99 |
" images.append(image)"
|
100 |
]
|
101 |
},
|
102 |
{
|
103 |
"cell_type": "markdown",
|
104 |
-
"source": [
|
105 |
-
"Now that we have the images, the agent will tell us wether the guests is actually a superhero (Wonder Woman) or a villian (The Joker)."
|
106 |
-
],
|
107 |
"metadata": {
|
108 |
"id": "vUBQjETkbRU6"
|
109 |
-
}
|
|
|
|
|
|
|
110 |
},
|
111 |
{
|
112 |
"cell_type": "code",
|
@@ -499,12 +502,12 @@
|
|
499 |
},
|
500 |
{
|
501 |
"cell_type": "markdown",
|
502 |
-
"source": [
|
503 |
-
"In this case, the output reveals that the person is impersonating someone else, so we can prevent The Joker from entering the party!"
|
504 |
-
],
|
505 |
"metadata": {
|
506 |
"id": "NrV-yK5zbT9r"
|
507 |
-
}
|
|
|
|
|
|
|
508 |
},
|
509 |
{
|
510 |
"cell_type": "markdown",
|
@@ -532,4 +535,4 @@
|
|
532 |
},
|
533 |
"nbformat": 4,
|
534 |
"nbformat_minor": 0
|
535 |
-
}
|
|
|
38 |
},
|
39 |
{
|
40 |
"cell_type": "markdown",
|
|
|
|
|
|
|
41 |
"metadata": {
|
42 |
"id": "WJGFjRbZbL50"
|
43 |
+
},
|
44 |
+
"source": [
|
45 |
+
"Let's also log in to the Hugging Face Hub to have access to the Inference API."
|
46 |
+
]
|
47 |
},
|
48 |
{
|
49 |
"cell_type": "code",
|
|
|
94 |
"\n",
|
95 |
"images = []\n",
|
96 |
"for url in image_urls:\n",
|
97 |
+
" headers = {\n",
|
98 |
+
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36\" \n",
|
99 |
+
" }\n",
|
100 |
+
" response = requests.get(url,headers=headers)\n",
|
101 |
" image = Image.open(BytesIO(response.content)).convert(\"RGB\")\n",
|
102 |
" images.append(image)"
|
103 |
]
|
104 |
},
|
105 |
{
|
106 |
"cell_type": "markdown",
|
|
|
|
|
|
|
107 |
"metadata": {
|
108 |
"id": "vUBQjETkbRU6"
|
109 |
+
},
|
110 |
+
"source": [
|
111 |
+
"Now that we have the images, the agent will tell us whether the guest is actually a superhero (Wonder Woman) or a villain (The Joker)."
|
112 |
+
]
|
113 |
},
|
114 |
{
|
115 |
"cell_type": "code",
|
|
|
502 |
},
|
503 |
{
|
504 |
"cell_type": "markdown",
|
|
|
|
|
|
|
505 |
"metadata": {
|
506 |
"id": "NrV-yK5zbT9r"
|
507 |
+
},
|
508 |
+
"source": [
|
509 |
+
"In this case, the output reveals that the person is impersonating someone else, so we can prevent The Joker from entering the party!"
|
510 |
+
]
|
511 |
},
|
512 |
{
|
513 |
"cell_type": "markdown",
|
|
|
535 |
},
|
536 |
"nbformat": 4,
|
537 |
"nbformat_minor": 0
|
538 |
+
}
|
unit2/smolagents/vision_web_browser.py
CHANGED
@@ -34,7 +34,7 @@ def parse_arguments():
|
|
34 |
"--model-type",
|
35 |
type=str,
|
36 |
default="LiteLLMModel",
|
37 |
-
help="The model type to use (e.g., OpenAIServerModel, LiteLLMModel, TransformersModel,
|
38 |
)
|
39 |
parser.add_argument(
|
40 |
"--model-id",
|
|
|
34 |
"--model-type",
|
35 |
type=str,
|
36 |
default="LiteLLMModel",
|
37 |
+
help="The model type to use (e.g., OpenAIServerModel, LiteLLMModel, TransformersModel, InferenceClientModel)",
|
38 |
)
|
39 |
parser.add_argument(
|
40 |
"--model-id",
|