{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cc072a08",
   "metadata": {},
   "outputs": [],
   "source": [
    "# FIX: bare `pip install` is not valid Python in a code cell; %pip targets the kernel's environment\n",
    "%pip install --quiet -U langchain langchain_community tiktoken langchain-nomic \"nomic[local]\" langchain-ollama scikit-learn langgraph tavily-python bs4"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "aa9e528c",
   "metadata": {},
   "outputs": [],
   "source": [
    "%pip install langchain-nomic\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ca507554",
   "metadata": {},
   "outputs": [],
   "source": [
    "### LLM configuration - Tongyi Qianwen (Qwen) chat model\n",
    "from langchain_community.chat_models import ChatTongyi\n",
    "\n",
    "# Model used for free-form answer generation.\n",
    "llm = ChatTongyi(\n",
    "    model_name=\"qwen-turbo\",\n",
    "    temperature=0.7,\n",
    "    streaming=True\n",
    ")\n",
    "# Same model, intended to return JSON-only responses for the router/graders.\n",
    "# NOTE(review): `format=\"json\"` is a ChatOllama-style parameter carried over\n",
    "# from the original tutorial; confirm ChatTongyi actually honors it (it may\n",
    "# be silently ignored, in which case json.loads on responses can fail).\n",
    "llm_json_mode = ChatTongyi(\n",
    "    model_name=\"qwen-turbo\",\n",
    "    temperature=0.7,\n",
    "    streaming=True,\n",
    "    format=\"json\"\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "49f25504",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Enable LangSmith tracing for this notebook's runs.\n",
    "# NOTE(review): tracing also requires LANGCHAIN_API_KEY in the environment;\n",
    "# without it runs will not be recorded.\n",
    "import os\n",
    "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
    "os.environ[\"LANGCHAIN_PROJECT\"] = \"langgraph-rag-demo\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "3f2d7188",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "USER_AGENT environment variable not set, consider setting it to identify your requests.\n",
      "Embedding texts: 100%|██████████| 47/47 [01:53<00:00,  2.42s/inputs]\n"
     ]
    }
   ],
   "source": [
    "# Vector store - document loading, splitting and embedding\n",
    "from langchain.text_splitter import RecursiveCharacterTextSplitter  # token-aware splitter\n",
    "from langchain_community.document_loaders import WebBaseLoader  # web page loader\n",
    "from langchain_community.vectorstores import SKLearnVectorStore  # sklearn-backed vector store\n",
    "from langchain_nomic.embeddings import NomicEmbeddings  # Nomic embedding model\n",
    "\n",
    "# Source articles on agents, prompt engineering and adversarial attacks.\n",
    "urls = [\n",
    "    \"https://lilianweng.github.io/posts/2023-06-23-agent/\",\n",
    "    \"https://lilianweng.github.io/posts/2023-03-15-prompt-engineering/\",\n",
    "    \"https://lilianweng.github.io/posts/2023-10-25-adv-attack-llm/\",\n",
    "]\n",
    "\n",
    "# Load each URL, then flatten the per-URL document lists into one list.\n",
    "docs = [WebBaseLoader(url).load() for url in urls]\n",
    "docs_list = [item for sublist in docs for item in sublist]\n",
    "\n",
    "# Split documents into overlapping chunks, sized by tiktoken token count.\n",
    "text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(\n",
    "    chunk_size=1000,  # max tokens per chunk\n",
    "    chunk_overlap=200  # overlap between adjacent chunks preserves context\n",
    ")\n",
    "doc_splits = text_splitter.split_documents(docs_list)\n",
    "\n",
    "# Embed the chunks and index them (local Nomic inference, no API calls).\n",
    "vectorstore = SKLearnVectorStore.from_documents(\n",
    "    documents=doc_splits,\n",
    "    embedding=NomicEmbeddings(model=\"nomic-embed-text-v1.5\", inference_mode=\"local\"),\n",
    ")\n",
    "\n",
    "# Build a retriever returning the top-3 most similar chunks.\n",
    "# FIX: top-k must be passed via search_kwargs; as_retriever(k=3) does not\n",
    "# configure the search and the intended k was ignored.\n",
    "retriever = vectorstore.as_retriever(search_kwargs={\"k\": 3})\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "279fa588",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'datasource': 'websearch'} {'datasource': 'websearch'} {'datasource': 'vectorstore'}\n"
     ]
    }
   ],
   "source": [
    "### Router module\n",
    "import json  # for parsing the model's JSON responses\n",
    "from langchain_core.messages import HumanMessage, SystemMessage\n",
    "\n",
    "# System instructions that decide whether a question is served by the\n",
    "# vectorstore or by web search.\n",
    "router_instructions = \"\"\"You are an expert at routing a user question to a vectorstore or web search.\n",
    "\n",
    "The vectorstore contains documents related to agents, prompt engineering, and adversarial attacks.\n",
    "\n",
    "Use the vectorstore for questions on these topics. For all else, and especially for current events, use web-search.\n",
    "\n",
    "Return JSON with single key, datasource, that is 'websearch' or 'vectorstore' depending on the question.\"\"\"\n",
    "\n",
    "# Router smoke tests.\n",
    "# Test 1: sports / current-events question - expected route: web search.\n",
    "test_web_search = llm_json_mode.invoke(\n",
    "    [SystemMessage(content=router_instructions)]\n",
    "    + [\n",
    "        HumanMessage(\n",
    "            content=\"Who is favored to win the NFC Championship game in the 2024 season?\"\n",
    "        )\n",
    "    ]\n",
    ")\n",
    "\n",
    "# Test 2: question about a model released \"today\" - expected route: web search.\n",
    "test_web_search_2 = llm_json_mode.invoke(\n",
    "    [SystemMessage(content=router_instructions)]\n",
    "    + [HumanMessage(content=\"What are the models released today for llama3.2?\")]\n",
    ")\n",
    "\n",
    "# Test 3: agent-memory question - expected route: vectorstore.\n",
    "test_vector_store = llm_json_mode.invoke(\n",
    "    [SystemMessage(content=router_instructions)]\n",
    "    + [HumanMessage(content=\"What are the types of agent memory?\")]\n",
    ")\n",
    "\n",
    "# Show the router's decision for each test.\n",
    "# NOTE(review): json.loads raises if the model wraps its JSON in extra text;\n",
    "# this assumes the JSON-mode model returns bare JSON.\n",
    "print(\n",
    "    json.loads(test_web_search.content),\n",
    "    json.loads(test_web_search_2.content),\n",
    "    json.loads(test_vector_store.content),\n",
    ")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "24de6201",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Embedding texts: 100%|██████████| 1/1 [00:00<00:00, 21.43inputs/s]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'binary_score': 'yes'}"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "### Retrieval grader module\n",
    "\n",
    "# System instructions: grade a retrieved document's relevance to a question.\n",
    "doc_grader_instructions = \"\"\"You are a grader assessing relevance of a retrieved document to a user question.\n",
    "\n",
    "If the document contains keyword(s) or semantic meaning related to the question, grade it as relevant.\"\"\"\n",
    "\n",
    "# Grading prompt template with {document} and {question} placeholders.\n",
    "# FIX: \"This carefully\" -> \"Think carefully\" (typo in the original prompt).\n",
    "doc_grader_prompt = \"\"\"Here is the retrieved document: \\n\\n {document} \\n\\n Here is the user question: \\n\\n {question}. \n",
    "\n",
    "Think carefully and objectively assess whether the document contains at least some information that is relevant to the question.\n",
    "\n",
    "Return JSON with single key, binary_score, that is 'yes' or 'no' score to indicate whether the document contains at least some information that is relevant to the question.\"\"\"\n",
    "\n",
    "# Smoke test of the grader.\n",
    "question = \"What is Chain of thought prompting?\"\n",
    "docs = retriever.invoke(question)  # retrieve candidate documents\n",
    "doc_txt = docs[1].page_content  # grade the second retrieved document\n",
    "doc_grader_prompt_formatted = doc_grader_prompt.format(\n",
    "    document=doc_txt, question=question\n",
    ")\n",
    "result = llm_json_mode.invoke(\n",
    "    [SystemMessage(content=doc_grader_instructions)]\n",
    "    + [HumanMessage(content=doc_grader_prompt_formatted)]\n",
    ")\n",
    "# Parse and display the JSON verdict ({'binary_score': 'yes'|'no'}).\n",
    "json.loads(result.content)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "d97be755",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Embedding texts: 100%|██████████| 1/1 [00:00<00:00, 24.79inputs/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Chain of Thought (CoT) prompting generates a sequence of short sentences to describe reasoning steps, leading to a final answer. It is particularly effective for complex reasoning tasks and can be implemented through few-shot examples or zero-shot instructions like \"Let's think step by step.\" CoT helps improve accuracy by breaking down problems into logical steps.\n"
     ]
    }
   ],
   "source": [
    "### Generation module\n",
    "\n",
    "# RAG prompt: answer the question using only the supplied context.\n",
    "# FIX: \"this questions\" -> \"this question\" (typo in the original prompt).\n",
    "rag_prompt = \"\"\"You are an assistant for question-answering tasks. \n",
    "\n",
    "Here is the context to use to answer the question:\n",
    "\n",
    "{context} \n",
    "\n",
    "Think carefully about the above context. \n",
    "\n",
    "Now, review the user question:\n",
    "\n",
    "{question}\n",
    "\n",
    "Provide an answer to this question using only the above context. \n",
    "\n",
    "Use three sentences maximum and keep the answer concise.\n",
    "\n",
    "Answer:\"\"\"\n",
    "\n",
    "\n",
    "def format_docs(docs):\n",
    "    \"\"\"Join the page_content of each document, separated by blank lines.\"\"\"\n",
    "    return \"\\n\\n\".join(doc.page_content for doc in docs)\n",
    "\n",
    "\n",
    "# Smoke test: retrieve, format the context, and generate a grounded answer.\n",
    "docs = retriever.invoke(question)\n",
    "docs_txt = format_docs(docs)\n",
    "rag_prompt_formatted = rag_prompt.format(context=docs_txt, question=question)\n",
    "generation = llm.invoke([HumanMessage(content=rag_prompt_formatted)])\n",
    "print(generation.content)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "d2870ee1",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'binary_score': 'yes',\n",
       " 'explanation': \"The student's answer is grounded in the facts provided. The description of Chain-of-Thought (CoT) prompting as generating a sequence of short sentences to describe reasoning steps, leading to a final answer, matches the definition given in the facts. The statement that it is particularly effective for complex reasoning tasks aligns with the fact that 'the benefit of CoT is more pronounced for complicated reasoning tasks.' The mention of few-shot examples and zero-shot instructions like 'Let's think step by step' corresponds to the facts describing two main types of CoT prompting: Few-shot CoT and Zero-shot CoT. Finally, the claim that CoT helps improve accuracy by breaking down problems into logical steps is consistent with the overall purpose of CoT as described in the facts.\"}"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "### Hallucination grader module\n",
    "\n",
    "# System instructions: check that the generation is grounded in the facts.\n",
    "hallucination_grader_instructions = \"\"\"\n",
    "\n",
    "You are a teacher grading a quiz. \n",
    "\n",
    "You will be given FACTS and a STUDENT ANSWER. \n",
    "\n",
    "Here is the grade criteria to follow:\n",
    "\n",
    "(1) Ensure the STUDENT ANSWER is grounded in the FACTS. \n",
    "\n",
    "(2) Ensure the STUDENT ANSWER does not contain \"hallucinated\" information outside the scope of the FACTS.\n",
    "\n",
    "Score:\n",
    "\n",
    "A score of yes means that the student's answer meets all of the criteria. This is the highest (best) score. \n",
    "\n",
    "A score of no means that the student's answer does not meet all of the criteria. This is the lowest possible score you can give.\n",
    "\n",
    "Explain your reasoning in a step-by-step manner to ensure your reasoning and conclusion are correct. \n",
    "\n",
    "Avoid simply stating the correct answer at the outset.\"\"\"\n",
    "\n",
    "# Grading prompt template with {documents} and {generation} placeholders.\n",
    "# FIX: \"two two keys\" -> \"two keys\" (duplicated word in the original prompt).\n",
    "hallucination_grader_prompt = \"\"\"FACTS: \\n\\n {documents} \\n\\n STUDENT ANSWER: {generation}. \n",
    "\n",
    "Return JSON with two keys, binary_score is 'yes' or 'no' score to indicate whether the STUDENT ANSWER is grounded in the FACTS. And a key, explanation, that contains an explanation of the score.\"\"\"\n",
    "\n",
    "# Smoke test using the documents and generation from the cells above.\n",
    "hallucination_grader_prompt_formatted = hallucination_grader_prompt.format(\n",
    "    documents=docs_txt, generation=generation.content\n",
    ")\n",
    "result = llm_json_mode.invoke(\n",
    "    [SystemMessage(content=hallucination_grader_instructions)]\n",
    "    + [HumanMessage(content=hallucination_grader_prompt_formatted)]\n",
    ")\n",
    "# Parse and display the verdict: {'binary_score': ..., 'explanation': ...}\n",
    "json.loads(result.content)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "e6a696f7",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'binary_score': 'yes',\n",
       " 'explanation': \"The student's answer directly addresses the question by listing the vision models released as part of Llama 3.2, specifically naming 'Llama 3.2 11B Vision Instruct' and 'Llama 3.2 90B Vision Instruct.' This information is exactly what the question asks for. While the answer includes additional context about the models being part of Meta's first foray into multimodal AI and their competition with other models, this extra information does not detract from the accuracy or relevance of the answer. Therefore, the student's response meets all the criteria for a 'yes' score.\"}"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "### Answer grader module\n",
    "\n",
    "# System instructions: check that the answer addresses the question.\n",
    "answer_grader_instructions = \"\"\"You are a teacher grading a quiz. \n",
    "\n",
    "You will be given a QUESTION and a STUDENT ANSWER. \n",
    "\n",
    "Here is the grade criteria to follow:\n",
    "\n",
    "(1) The STUDENT ANSWER helps to answer the QUESTION\n",
    "\n",
    "Score:\n",
    "\n",
    "A score of yes means that the student's answer meets all of the criteria. This is the highest (best) score. \n",
    "\n",
    "The student can receive a score of yes if the answer contains extra information that is not explicitly asked for in the question.\n",
    "\n",
    "A score of no means that the student's answer does not meet all of the criteria. This is the lowest possible score you can give.\n",
    "\n",
    "Explain your reasoning in a step-by-step manner to ensure your reasoning and conclusion are correct. \n",
    "\n",
    "Avoid simply stating the correct answer at the outset.\"\"\"\n",
    "\n",
    "# Grading prompt template with {question} and {generation} placeholders.\n",
    "# FIX: \"two two keys\" -> \"two keys\" (duplicated word in the original prompt).\n",
    "answer_grader_prompt = \"\"\"QUESTION: \\n\\n {question} \\n\\n STUDENT ANSWER: {generation}. \n",
    "\n",
    "Return JSON with two keys, binary_score is 'yes' or 'no' score to indicate whether the STUDENT ANSWER meets the criteria. And a key, explanation, that contains an explanation of the score.\"\"\"\n",
    "\n",
    "# Test data: a question and a hand-written candidate answer to grade.\n",
    "question = \"What are the vision models released today as part of Llama 3.2?\"\n",
    "answer = \"The Llama 3.2 models released today include two vision models: Llama 3.2 11B Vision Instruct and Llama 3.2 90B Vision Instruct, which are available on Azure AI Model Catalog via managed compute. These models are part of Meta's first foray into multimodal AI and rival closed models like Anthropic's Claude 3 Haiku and OpenAI's GPT-4o mini in visual reasoning. They replace the older text-only Llama 3.1 models.\"\n",
    "\n",
    "# Smoke test of the answer grader.\n",
    "answer_grader_prompt_formatted = answer_grader_prompt.format(\n",
    "    question=question, generation=answer\n",
    ")\n",
    "result = llm_json_mode.invoke(\n",
    "    [SystemMessage(content=answer_grader_instructions)]\n",
    "    + [HumanMessage(content=answer_grader_prompt_formatted)]\n",
    ")\n",
    "# Parse and display the verdict: {'binary_score': ..., 'explanation': ...}\n",
    "json.loads(result.content)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "0ad40f71",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_804\\2725807216.py:5: LangChainDeprecationWarning: The class `TavilySearchResults` was deprecated in LangChain 0.3.25 and will be removed in 1.0. An updated version of the class exists in the :class:`~langchain-tavily package and should be used instead. To use it run `pip install -U :class:`~langchain-tavily` and import as `from :class:`~langchain_tavily import TavilySearch``.\n",
      "  web_search_tool = TavilySearchResults(k=3)\n"
     ]
    }
   ],
   "source": [
    "# Web search tool\n",
    "\n",
    "# NOTE(review): TavilySearchResults is deprecated in langchain-community (see\n",
    "# the warning in this cell's output); the replacement is TavilySearch in the\n",
    "# langchain-tavily package.\n",
    "from langchain_community.tools.tavily_search import TavilySearchResults\n",
    "\n",
    "# FIX: the constructor parameter is max_results, not k; k=3 was ignored.\n",
    "web_search_tool = TavilySearchResults(max_results=3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cd4d8292",
   "metadata": {},
   "outputs": [],
   "source": [
    "# State definition\n",
    "# The graph `state` schema holds the keys we pass to (and optionally\n",
    "# modify in) every node of the graph.\n",
    "\n",
    "import operator  # operator.add is the reducer for loop_step\n",
    "from typing_extensions import TypedDict\n",
    "from typing import List, Annotated\n",
    "\n",
    "\n",
    "class GraphState(TypedDict):\n",
    "    \"\"\"\n",
    "    Graph state is a dictionary that contains information we want to propagate to, and modify in, each graph node.\n",
    "    \"\"\"\n",
    "\n",
    "    question: str  # user question\n",
    "    generation: str  # LLM generation (answer)\n",
    "    web_search: str  # binary decision (\"Yes\"/\"No\") whether to run web search\n",
    "    max_retries: int  # max number of answer-generation retries\n",
    "    answers: int  # number of answers generated\n",
    "    loop_step: Annotated[int, operator.add]  # iteration counter, accumulated via operator.add\n",
    "    documents: List[str]  # retrieved documents\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fd57da41",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 节点定义\n",
    "# 我们图中的每个节点都只是一个函数，它会\n",
    "# (1) 将 `state` (状态) 作为输入\n",
    "# (2) 修改 `state` (状态)\n",
    "# (3) 将修改后的 `state` (状态) 写入状态模式 (字典) 中\n",
    "\n",
    "# 导入必要的模块\n",
    "from langchain.schema import Document  # 文档类，用于创建文档对象\n",
    "from langgraph.graph import END  # 图的结束节点\n",
    "\n",
    "\n",
    "### Nodes - 节点函数定义\n",
    "def retrieve(state):\n",
    "    \"\"\"\n",
    "    Retrieve documents from vectorstore\n",
    "\n",
    "    Args:\n",
    "        state (dict): The current graph state\n",
    "\n",
    "    Returns:\n",
    "        state (dict): New key added to state, documents, that contains retrieved documents\n",
    "    \"\"\"\n",
    "    print(\"---RETRIEVE---\")  # trace the current step\n",
    "    question = state[\"question\"]\n",
    "\n",
    "    # Write retrieved documents to the documents key in state\n",
    "    documents = retriever.invoke(question)  # top-k chunks for the question\n",
    "    return {\"documents\": documents}\n",
    "\n",
    "\n",
    "def generate(state):\n",
    "    \"\"\"\n",
    "    Generate answer using RAG on retrieved documents\n",
    "\n",
    "    Args:\n",
    "        state (dict): The current graph state\n",
    "\n",
    "    Returns:\n",
    "        state (dict): New key added to state, generation, that contains LLM generation\n",
    "    \"\"\"\n",
    "    print(\"---GENERATE---\")  # trace the current step\n",
    "    question = state[\"question\"]\n",
    "    documents = state[\"documents\"]\n",
    "    loop_step = state.get(\"loop_step\", 0)  # current iteration, default 0\n",
    "\n",
    "    # RAG generation: format the context, fill the prompt, call the LLM\n",
    "    docs_txt = format_docs(documents)\n",
    "    rag_prompt_formatted = rag_prompt.format(context=docs_txt, question=question)\n",
    "    generation = llm.invoke([HumanMessage(content=rag_prompt_formatted)])\n",
    "    return {\"generation\": generation, \"loop_step\": loop_step + 1}  # increment loop counter\n",
    "\n",
    "\n",
    "def grade_documents(state):\n",
    "    \"\"\"\n",
    "    Determines whether the retrieved documents are relevant to the question\n",
    "    If any document is not relevant, we will set a flag to run web search\n",
    "\n",
    "    Args:\n",
    "        state (dict): The current graph state\n",
    "\n",
    "    Returns:\n",
    "        state (dict): Filtered out irrelevant documents and updated web_search state\n",
    "    \"\"\"\n",
    "\n",
    "    print(\"---CHECK DOCUMENT RELEVANCE TO QUESTION---\")  # trace the current step\n",
    "    question = state[\"question\"]\n",
    "    documents = state[\"documents\"]\n",
    "\n",
    "    # Score each document\n",
    "    filtered_docs = []  # documents judged relevant\n",
    "    web_search = \"No\"  # flips to \"Yes\" if any document is irrelevant\n",
    "    for d in documents:\n",
    "        # Fill the grading prompt with this document and the question\n",
    "        doc_grader_prompt_formatted = doc_grader_prompt.format(\n",
    "            document=d.page_content, question=question\n",
    "        )\n",
    "        # Ask the JSON-mode LLM for a relevance verdict\n",
    "        result = llm_json_mode.invoke(\n",
    "            [SystemMessage(content=doc_grader_instructions)]\n",
    "            + [HumanMessage(content=doc_grader_prompt_formatted)]\n",
    "        )\n",
    "        grade = json.loads(result.content)[\"binary_score\"]\n",
    "        # Document is relevant: keep it\n",
    "        if grade.lower() == \"yes\":\n",
    "            print(\"---GRADE: DOCUMENT RELEVANT---\")\n",
    "            filtered_docs.append(d)\n",
    "        # Document is not relevant: drop it and flag web search\n",
    "        else:\n",
    "            print(\"---GRADE: DOCUMENT NOT RELEVANT---\")\n",
    "            # We do not include the document in filtered_docs;\n",
    "            # we set a flag indicating that web search should run\n",
    "            web_search = \"Yes\"\n",
    "            continue\n",
    "    return {\"documents\": filtered_docs, \"web_search\": web_search}\n",
    "\n",
    "\n",
    "def web_search(state):\n",
    "    \"\"\"\n",
    "    Web search based on the question\n",
    "\n",
    "    Args:\n",
    "        state (dict): The current graph state\n",
    "\n",
    "    Returns:\n",
    "        state (dict): Appended web results to documents\n",
    "    \"\"\"\n",
    "\n",
    "    print(\"---WEB SEARCH---\")  # trace the current step\n",
    "    question = state[\"question\"]\n",
    "    documents = state.get(\"documents\", [])  # existing documents, default empty\n",
    "\n",
    "    # Run the web search and merge all result snippets into one Document\n",
    "    docs = web_search_tool.invoke({\"query\": question})\n",
    "    web_results = \"\\n\".join([d[\"content\"] for d in docs])\n",
    "    web_results = Document(page_content=web_results)\n",
    "    documents.append(web_results)  # NOTE: mutates the state's list in place\n",
    "    return {\"documents\": documents}\n",
    "\n",
    "\n",
    "### Edges - 边函数定义（用于节点间路由）\n",
    "\n",
    "\n",
    "def route_question(state):\n",
    "    \"\"\"\n",
    "    Route question to web search or RAG\n",
    "\n",
    "    Args:\n",
    "        state (dict): The current graph state\n",
    "\n",
    "    Returns:\n",
    "        str: Next node to call (\"websearch\" or \"vectorstore\")\n",
    "    \"\"\"\n",
    "\n",
    "    print(\"---ROUTE QUESTION---\")\n",
    "    # Ask the JSON-mode LLM which datasource to use\n",
    "    route_question = llm_json_mode.invoke(\n",
    "        [SystemMessage(content=router_instructions)]\n",
    "        + [HumanMessage(content=state[\"question\"])]\n",
    "    )\n",
    "    source = json.loads(route_question.content)[\"datasource\"]\n",
    "    if source == \"websearch\":\n",
    "        print(\"---ROUTE QUESTION TO WEB SEARCH---\")\n",
    "        return \"websearch\"\n",
    "    elif source == \"vectorstore\":\n",
    "        print(\"---ROUTE QUESTION TO RAG---\")\n",
    "        return \"vectorstore\"\n",
    "    else:\n",
    "        # FIX: previously fell through and returned None, which breaks the\n",
    "        # conditional edge; fall back to web search (the router's catch-all).\n",
    "        print(f\"---UNEXPECTED DATASOURCE '{source}', DEFAULTING TO WEB SEARCH---\")\n",
    "        return \"websearch\"\n",
    "\n",
    "\n",
    "def decide_to_generate(state):\n",
    "    \"\"\"\n",
    "    Determines whether to generate an answer, or add web search\n",
    "\n",
    "    Args:\n",
    "        state (dict): The current graph state\n",
    "\n",
    "    Returns:\n",
    "        str: Binary decision for next node to call\n",
    "    \"\"\"\n",
    "\n",
    "    print(\"---ASSESS GRADED DOCUMENTS---\")  # trace the current step\n",
    "    question = state[\"question\"]\n",
    "    web_search = state[\"web_search\"]\n",
    "    filtered_documents = state[\"documents\"]\n",
    "\n",
    "    if web_search == \"Yes\":\n",
    "        # At least one document was graded irrelevant: supplement with web search\n",
    "        print(\n",
    "            \"---DECISION: NOT ALL DOCUMENTS ARE RELEVANT TO QUESTION, INCLUDE WEB SEARCH---\"\n",
    "        )\n",
    "        return \"websearch\"\n",
    "    else:\n",
    "        # All documents are relevant: generate the answer\n",
    "        print(\"---DECISION: GENERATE---\")\n",
    "        return \"generate\"\n",
    "\n",
    "\n",
    "def grade_generation_v_documents_and_question(state):\n",
    "    \"\"\"\n",
    "    Determines whether the generation is grounded in the document and answers question\n",
    "\n",
    "    Args:\n",
    "        state (dict): The current graph state\n",
    "\n",
    "    Returns:\n",
    "        str: Decision for next node to call\n",
    "    \"\"\"\n",
    "\n",
    "    print(\"---CHECK HALLUCINATIONS---\")  # trace the current step\n",
    "    question = state[\"question\"]\n",
    "    documents = state[\"documents\"]\n",
    "    generation = state[\"generation\"]\n",
    "    max_retries = state.get(\"max_retries\", 3)  # default retry budget\n",
    "\n",
    "    # Grade the generation against the documents (hallucination check)\n",
    "    hallucination_grader_prompt_formatted = hallucination_grader_prompt.format(\n",
    "        documents=format_docs(documents), generation=generation.content\n",
    "    )\n",
    "    result = llm_json_mode.invoke(\n",
    "        [SystemMessage(content=hallucination_grader_instructions)]\n",
    "        + [HumanMessage(content=hallucination_grader_prompt_formatted)]\n",
    "    )\n",
    "    # FIX: normalize with .lower() for consistency with grade_documents, so\n",
    "    # capitalized verdicts (\"Yes\") from the model are accepted too.\n",
    "    grade = json.loads(result.content)[\"binary_score\"].lower()\n",
    "\n",
    "    # Check for hallucinations\n",
    "    if grade == \"yes\":\n",
    "        print(\"---DECISION: GENERATION IS GROUNDED IN DOCUMENTS---\")\n",
    "        # Check whether the generation actually answers the question\n",
    "        print(\"---GRADE GENERATION vs QUESTION---\")\n",
    "        answer_grader_prompt_formatted = answer_grader_prompt.format(\n",
    "            question=question, generation=generation.content\n",
    "        )\n",
    "        result = llm_json_mode.invoke(\n",
    "            [SystemMessage(content=answer_grader_instructions)]\n",
    "            + [HumanMessage(content=answer_grader_prompt_formatted)]\n",
    "        )\n",
    "        grade = json.loads(result.content)[\"binary_score\"].lower()  # FIX: normalize case\n",
    "        if grade == \"yes\":\n",
    "            print(\"---DECISION: GENERATION ADDRESSES QUESTION---\")\n",
    "            return \"useful\"\n",
    "        elif state[\"loop_step\"] <= max_retries:\n",
    "            print(\"---DECISION: GENERATION DOES NOT ADDRESS QUESTION---\")\n",
    "            return \"not useful\"\n",
    "        else:\n",
    "            print(\"---DECISION: MAX RETRIES REACHED---\")\n",
    "            return \"max retries\"\n",
    "    elif state[\"loop_step\"] <= max_retries:\n",
    "        print(\"---DECISION: GENERATION IS NOT GROUNDED IN DOCUMENTS, RE-TRY---\")\n",
    "        return \"not supported\"\n",
    "    else:\n",
    "        print(\"---DECISION: MAX RETRIES REACHED---\")\n",
    "        return \"max retries\"\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "aea450c9",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 每个边在图中的节点之间进行路由。\n",
    "# 边函数负责决定工作流的下一步执行哪个节点\n",
    "\n",
    "# 导入必要的模块\n",
    "from langchain.schema import Document  # 文档类，用于创建文档对象\n",
    "from langgraph.graph import END  # 图的结束节点\n",
    "\n",
    "\n",
    "### Nodes - 节点函数定义\n",
    "# NOTE(review): this cell re-defines the node functions from the previous\n",
    "# cell with identical code; the later definitions silently shadow the\n",
    "# earlier ones. Consider deleting the duplicate cell.\n",
    "def retrieve(state):\n",
    "    \"\"\"\n",
    "    Retrieve documents from vectorstore\n",
    "\n",
    "    Args:\n",
    "        state (dict): The current graph state\n",
    "\n",
    "    Returns:\n",
    "        state (dict): New key added to state, documents, that contains retrieved documents\n",
    "    \"\"\"\n",
    "    print(\"---RETRIEVE---\")  # trace the current step\n",
    "    question = state[\"question\"]\n",
    "\n",
    "    # Write retrieved documents to the documents key in state\n",
    "    documents = retriever.invoke(question)\n",
    "    return {\"documents\": documents}\n",
    "\n",
    "\n",
    "# NOTE(review): duplicate of generate() from the previous cell.\n",
    "def generate(state):\n",
    "    \"\"\"\n",
    "    Generate answer using RAG on retrieved documents\n",
    "\n",
    "    Args:\n",
    "        state (dict): The current graph state\n",
    "\n",
    "    Returns:\n",
    "        state (dict): New key added to state, generation, that contains LLM generation\n",
    "    \"\"\"\n",
    "    print(\"---GENERATE---\")  # trace the current step\n",
    "    question = state[\"question\"]\n",
    "    documents = state[\"documents\"]\n",
    "    loop_step = state.get(\"loop_step\", 0)  # current iteration, default 0\n",
    "\n",
    "    # RAG generation: format the context, fill the prompt, call the LLM\n",
    "    docs_txt = format_docs(documents)\n",
    "    rag_prompt_formatted = rag_prompt.format(context=docs_txt, question=question)\n",
    "    generation = llm.invoke([HumanMessage(content=rag_prompt_formatted)])\n",
    "    return {\"generation\": generation, \"loop_step\": loop_step + 1}  # increment loop counter\n",
    "\n",
    "\n",
    "# NOTE(review): duplicate of grade_documents() from the previous cell.\n",
    "def grade_documents(state):\n",
    "    \"\"\"\n",
    "    Determines whether the retrieved documents are relevant to the question\n",
    "    If any document is not relevant, we will set a flag to run web search\n",
    "\n",
    "    Args:\n",
    "        state (dict): The current graph state\n",
    "\n",
    "    Returns:\n",
    "        state (dict): Filtered out irrelevant documents and updated web_search state\n",
    "    \"\"\"\n",
    "\n",
    "    print(\"---CHECK DOCUMENT RELEVANCE TO QUESTION---\")  # trace the current step\n",
    "    question = state[\"question\"]\n",
    "    documents = state[\"documents\"]\n",
    "\n",
    "    # Score each document\n",
    "    filtered_docs = []  # documents judged relevant\n",
    "    web_search = \"No\"  # flips to \"Yes\" if any document is irrelevant\n",
    "    for d in documents:\n",
    "        # Fill the grading prompt with this document and the question\n",
    "        doc_grader_prompt_formatted = doc_grader_prompt.format(\n",
    "            document=d.page_content, question=question\n",
    "        )\n",
    "        # Ask the JSON-mode LLM for a relevance verdict\n",
    "        result = llm_json_mode.invoke(\n",
    "            [SystemMessage(content=doc_grader_instructions)]\n",
    "            + [HumanMessage(content=doc_grader_prompt_formatted)]\n",
    "        )\n",
    "        grade = json.loads(result.content)[\"binary_score\"]\n",
    "        # Document is relevant: keep it\n",
    "        if grade.lower() == \"yes\":\n",
    "            print(\"---GRADE: DOCUMENT RELEVANT---\")\n",
    "            filtered_docs.append(d)\n",
    "        # Document is not relevant: drop it and flag web search\n",
    "        else:\n",
    "            print(\"---GRADE: DOCUMENT NOT RELEVANT---\")\n",
    "            # We do not include the document in filtered_docs;\n",
    "            # we set a flag indicating that web search should run\n",
    "            web_search = \"Yes\"\n",
    "            continue\n",
    "    return {\"documents\": filtered_docs, \"web_search\": web_search}\n",
    "\n",
    "\n",
    "def web_search(state):\n",
    "    \"\"\"\n",
    "    Run a web search based on the question and append the results.\n",
    "\n",
    "    Args:\n",
    "        state (dict): The current graph state; must contain \"question\".\n",
    "\n",
    "    Returns:\n",
    "        dict: State update with the web results appended to \"documents\".\n",
    "    \"\"\"\n",
    "\n",
    "    print(\"---WEB SEARCH---\")  # Mark the web-search step\n",
    "    question = state[\"question\"]  # User question from graph state\n",
    "    documents = state.get(\"documents\", [])  # Existing documents; may be absent\n",
    "\n",
    "    # Web search via the search tool\n",
    "    docs = web_search_tool.invoke({\"query\": question})\n",
    "    web_results = \"\\n\".join([d[\"content\"] for d in docs])  # Merge result snippets\n",
    "    web_results = Document(page_content=web_results)  # Wrap as a single Document\n",
    "    # Build a new list instead of mutating the list held in the graph state,\n",
    "    # so earlier state snapshots are not modified in place via aliasing.\n",
    "    return {\"documents\": documents + [web_results]}\n",
    "\n",
    "\n",
    "### Edges - edge functions (routing logic between nodes)\n",
    "\n",
    "\n",
    "def route_question(state):\n",
    "    \"\"\"\n",
    "    Route the question to web search or RAG over the vectorstore.\n",
    "\n",
    "    Args:\n",
    "        state (dict): The current graph state; must contain \"question\".\n",
    "\n",
    "    Returns:\n",
    "        str: Name of the next node to call, \"websearch\" or \"vectorstore\".\n",
    "    \"\"\"\n",
    "\n",
    "    print(\"---ROUTE QUESTION---\")  # Mark the routing step\n",
    "    # Ask the JSON-mode router LLM to choose the datasource\n",
    "    route_question = llm_json_mode.invoke(\n",
    "        [SystemMessage(content=router_instructions)]\n",
    "        + [HumanMessage(content=state[\"question\"])]\n",
    "    )\n",
    "    source = json.loads(route_question.content)[\"datasource\"]  # Parse the routing decision\n",
    "    if source == \"websearch\":\n",
    "        print(\"---ROUTE QUESTION TO WEB SEARCH---\")\n",
    "        return \"websearch\"\n",
    "    elif source == \"vectorstore\":\n",
    "        print(\"---ROUTE QUESTION TO RAG---\")\n",
    "        return \"vectorstore\"\n",
    "    else:\n",
    "        # Defensive fallback: previously an unexpected datasource fell through\n",
    "        # and returned None, which would break the conditional edge. Default to RAG.\n",
    "        print(\"---ROUTE QUESTION: UNEXPECTED DATASOURCE, DEFAULTING TO RAG---\")\n",
    "        return \"vectorstore\"\n",
    "\n",
    "\n",
    "def decide_to_generate(state):\n",
    "    \"\"\"\n",
    "    Decide whether to generate an answer or add a web search.\n",
    "\n",
    "    Args:\n",
    "        state (dict): The current graph state; must contain \"web_search\".\n",
    "\n",
    "    Returns:\n",
    "        str: Binary routing decision, \"websearch\" or \"generate\".\n",
    "    \"\"\"\n",
    "\n",
    "    print(\"---ASSESS GRADED DOCUMENTS---\")  # Mark the assessment step\n",
    "    # NOTE: only the web_search flag is needed here; the previous version also\n",
    "    # read \"question\" and \"documents\" without using them.\n",
    "    web_search = state[\"web_search\"]  # Flag set by grade_documents\n",
    "\n",
    "    if web_search == \"Yes\":\n",
    "        # At least one document was judged irrelevant: supplement with web search\n",
    "        print(\n",
    "            \"---DECISION: NOT ALL DOCUMENTS ARE RELEVANT TO QUESTION, INCLUDE WEB SEARCH---\"\n",
    "        )\n",
    "        return \"websearch\"\n",
    "    else:\n",
    "        # All retained documents are relevant, so generate the answer\n",
    "        print(\"---DECISION: GENERATE---\")\n",
    "        return \"generate\"\n",
    "\n",
    "\n",
    "def grade_generation_v_documents_and_question(state):\n",
    "    \"\"\"\n",
    "    Determine whether the generation is grounded in the documents and\n",
    "    answers the question.\n",
    "\n",
    "    Args:\n",
    "        state (dict): The current graph state; must contain \"question\",\n",
    "            \"documents\", \"generation\" and \"loop_step\".\n",
    "\n",
    "    Returns:\n",
    "        str: Decision for the next node: \"useful\", \"not useful\",\n",
    "        \"not supported\" or \"max retries\".\n",
    "    \"\"\"\n",
    "\n",
    "    print(\"---CHECK HALLUCINATIONS---\")  # Mark the hallucination check\n",
    "    question = state[\"question\"]  # User question from graph state\n",
    "    documents = state[\"documents\"]  # Documents the answer should be grounded in\n",
    "    generation = state[\"generation\"]  # LLM answer produced by generate()\n",
    "    max_retries = state.get(\"max_retries\", 3)  # Retry budget, defaults to 3\n",
    "\n",
    "    # Ask the grader whether the generation is grounded in the documents\n",
    "    hallucination_grader_prompt_formatted = hallucination_grader_prompt.format(\n",
    "        documents=format_docs(documents), generation=generation.content\n",
    "    )\n",
    "    result = llm_json_mode.invoke(\n",
    "        [SystemMessage(content=hallucination_grader_instructions)]\n",
    "        + [HumanMessage(content=hallucination_grader_prompt_formatted)]\n",
    "    )\n",
    "    # Normalize case for robustness, matching grade_documents: the model may\n",
    "    # answer \"Yes\" instead of \"yes\"\n",
    "    grade = json.loads(result.content)[\"binary_score\"].lower()\n",
    "\n",
    "    # Check hallucination verdict\n",
    "    if grade == \"yes\":\n",
    "        print(\"---DECISION: GENERATION IS GROUNDED IN DOCUMENTS---\")\n",
    "        # Grounded: now check whether the answer actually addresses the question\n",
    "        print(\"---GRADE GENERATION vs QUESTION---\")\n",
    "        answer_grader_prompt_formatted = answer_grader_prompt.format(\n",
    "            question=question, generation=generation.content\n",
    "        )\n",
    "        result = llm_json_mode.invoke(\n",
    "            [SystemMessage(content=answer_grader_instructions)]\n",
    "            + [HumanMessage(content=answer_grader_prompt_formatted)]\n",
    "        )\n",
    "        grade = json.loads(result.content)[\"binary_score\"].lower()\n",
    "        if grade == \"yes\":\n",
    "            print(\"---DECISION: GENERATION ADDRESSES QUESTION---\")\n",
    "            return \"useful\"  # Done; end the flow\n",
    "        elif state[\"loop_step\"] <= max_retries:\n",
    "            print(\"---DECISION: GENERATION DOES NOT ADDRESS QUESTION---\")\n",
    "            return \"not useful\"  # Fall back to web search\n",
    "        else:\n",
    "            print(\"---DECISION: MAX RETRIES REACHED---\")\n",
    "            return \"max retries\"  # Give up; end the flow\n",
    "    elif state[\"loop_step\"] <= max_retries:\n",
    "        print(\"---DECISION: GENERATION IS NOT GROUNDED IN DOCUMENTS, RE-TRY---\")\n",
    "        return \"not supported\"  # Regenerate the answer\n",
    "    else:\n",
    "        print(\"---DECISION: MAX RETRIES REACHED---\")\n",
    "        return \"max retries\"  # Give up; end the flow\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "28a1536a",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAaAAAAI5CAIAAABo1XkYAAAAAXNSR0IArs4c6QAAIABJREFUeJzs3WdYE9nbBvCTEFoooffepKkg4GIBFLFXFOsiNsS6dnEt2F3Xta+Kyq4NG3ZERcCyrgooooIUUVF67yFASEjyfhjf/FmMiJhkkuH5ffAibeYhxJMz95w5h8Tj8RAAABARGe8CAABAVKCBAwAQFjRwAADCggYOAEBY0MABAAgLGjgAAGFR8C6ACLhcVJbHbKC3NNI5nBYuiykFI2/kFckysiQlVYqSKkXXVB7vcgAQCRKMg+s0bgvKTKrLSW/If99oaKkoryijpCqjpi3X3MTBu7Rvk1OQqSlnNdJbSGRSbmaDuaOSuYNSNxcVvOsCQJiggeukF/eqs17UG1krWjgqm9pR8S7nh3DYvE8ZDTnpDbmZDf1Gazn0UcW7IgCEAxq475ab3hB7rrSnl5r7cE28axGy5iZuwq3Kklzm0Ol6mvpyeJcDwI+CBu77vLhXXVPOHuinIytPwrsWUaFXt9z+u9jVR92mFxyxAukGDdx3ePmghs3iEq/jJlDs2VI7N1UTW+k++gZdHDRwHfUgopyqItNnZJdo3TAxZ0r1TBWcBqjhXQgAnQTj4Dok5d9aOQVyl2rdEELDZujlvWvMf9eIdyEAdBI0cN9WlN1UW87yGKeFdyE4GDvPIC2+rqFOCga+APAlaOC+7fGNCsd+XfcwzaaXypObFXhXAUBnQAP3De9f1WvoyWkZdN0xE9ZOyrUV7IqiZrwLAeC7QQP3DR9eM/qP1sa7Cpx5jNNOT6DjXQUA3w0auPZUFDYzaluU1GTEudNff/315s2bnXjh4MGDi4qKRFARMrRUyEqmt7DhhDuQMtDAtedTeoN5dyUx7zQzM7MTryopKampqRFBOZ9ZOCjlpDeIbvsAiAKMg2vPnRMlfUdqquuJJICLj48PDw/PyMjQ0tLq2bPnL7/8oqWl5erqij2qrKz86NEjBoNx7ty5xMTEjx8/amlpeXl5LViwQEFBASEUHBwsIyOjr68fHh4+b96848ePYy/08vLau3ev0Kv9+KahKLvJc3xXPJUMpBf04NqT/75RRVNWFFvOyspaunSpm5vb1atXg4OD379/v3nzZqzVQwiFhIQ8evQIIRQREXH69Onp06cfOHBg6dKl9+7dCwsLw7YgKyubnZ2dnZ29b98+Pz+/AwcOIIRu3rwpitYNIaSiRinNaxLFlgEQHZgP7qtYTC6ZTKLIiuSa05SUFAUFhdmzZ5PJZD09PXt7++zs7C+f5u/vP2jQIHNzc+xmampqQkLCkiVLEEIkEqm4uPjs2bNYh07UlGgyDfQWMewIACGCBu6rGus5SqqiOr3g5OTEZDKXLVv2008/eXp6Ghsb8w9OW5OVlU1MTNy0adP79+9bWloQQhoaGvxHzc3NxdO6IYSoqpQGOgz3BVIGDlG/isdF8oqiauBsbW3//PNPbW3tQ4cO+fr6Lly4MDU19cunHTp0KCwszNfXNzIyMjk5edasWa0flZcX30y8JBJSUCQjCGyBVIEG7quoKjK1FSzRbb9v374hISG3bt3avHlzXV3dsmXLsD4aH4/Hu3bt2uTJk319ffX09BBC9fX1oqunfY31HBKZhAg7RxQgJmjgvkqeSmY3c7miOSx7+fJlQkICQkhbW3vUqFErV66sr68vKSlp/Rw2m93U1KSjo4PdZLFYjx8/Fkk1HdBIbxHdATsAIgINXHvM7JVEFDylpqYGBwdfv369pqYmPT09IiJCW1tbX19fXl5eR0fn2bNnycnJZDLZzMwsKiqqsLCwtrZ269atTk5OdDq9oUHAeDQzMzOE0L1799LT00VRcCODq2+uKIotAyA60MC1R0VD9uMbkRwV+vv7+/r67tmzZ/DgwUFBQUpKSmFhYRQKBSE0e/bsFy9erFy5sqmp6bffflNQUPDz8xs3blzv3r0X
L16soKDg4+NTXFzcZoNGRkajR48+duzYoUOHRFFwdkq9liEsvgWkDAz0bU9RdlNSbLXvIkO8C8HfiY05U1ebUFXgKBVIE+jBtcfQSpFERnANZmUxy9iGCq0bkDowDu4bzOyVEu9UtTPb5ZgxY+h0ATNtcDgcMplMIgk+7xgZGammJpI55lJSUpYtWybwIRaLJSsrK7AkCwuLkydPfm2bCbcre3p23RnxgPSCQ9RvO7kpZ8qqrx6dlZaWcrnc792mgYGBMEoT7MuEDsNgMJSVlQU+RKFQ+Kdr24DjdCC9oIH7tg+vGZXFzV1tQQa++xfKe3jQdIzhDAOQPpDBfZu1szK7mfvmSR3eheDg0dUKXVN5aN2AlIIGrkM8x2tnv2FkpzLwLkSsnsdUk0ioez8a3oUA0ElwiPodYsPLLByVrHsJjrEIJim2Wlae7AyLogJpBj247zA0QPdTOuPFPRFOnCshYs+Wcjg8aN2AtIMe3Hd79bA2Lb627ygta2cCduXePKl7EVftOV6bkL8d6GqggeuM+pqWhFuVzUyumZ2SuaOSirrUDyesKmHlZjS8ia+17K7cZ5SWrBxMGwKIABq4Dvn48aOlpWWbOyuLWJnP63IyGuQUyPrmigpKZCVVioq6bAv7u4fFiZ+MDJlezWqs53BaeB/fMGTlyeYOSj3605RoUt9YA8AHDdw3FBUVzZs3b9asWRMmTPjac6pKWOX5zQw6u5HOIZFRQ52kzHybkJDg7u5OJgtIWlXUKVwuT0mVoqwmq2+uoKoB7RogIGjgBHv58uXDhw9Xr16dn58vLy+vq6uLd0WdwWazQ0NDly5dinchAOADGrj/YLPZjY2NNBpt0aJFU6dO7d+/P94VCcfbt2/t7OzwrgIAcYNhIv9z6dIlT09PNpuNEDpy5AhhWjeE0IEDB3JycvCuAgBxgwYO3bhxIyoqCptRIzExUUuLgGsbHz9+/O3bt3hXAYC4dd0GDptyIyoqKjMzE+usubm54V2UCI0YMQIhdPr0abwLAUB8umIG19zcvHjxYgMDgy1btvB4vK9N2UZIhw8fdnNz++mnn/AuBABx6EINXHl5+aVLl7ClRT98+ODs7Ix3RfjIyMiwtraWk5PDuxAARK5LHKKWlZUhhHbt2kWj0ZSVlZWVlbts64YQcnBwkJGRWbRoEd6FACByBO/BvXz5cv369Tt27HBxccG7FsmSlJRUVVU1fPhwvAsBQISI2cAlJSVlZ2dPmzbt5cuXZmZmmppddDLe9tXW1vJ4PHV1dbwLAUBUCHWI2tTUhF03eubMmR49eiCEXFxcoHX7GjU1NRqN9tNPP7FYLLxrAUAkiNOD271794MHD2JiYthstqysLN7lSA0OhxMbGztkyBBs2WkAiETqe3DXrl3LzMxECPXq1SsmJgYhBK3bd5GRkRkxYkRlZeXr16/xrgUAIZPWBq6yshIhtH379vfv35ubmyOEBg0ahHdRUkxPTy80NLS8vBzvQgAQJuk7RC0tLV27du3gwYOnTZuGdy1Ek5aWZmJiQqPBKjOAIKSmgcvPz3/06FFAQMC7d+9YLFb37t3xroiYysrKoqKi5s6di3chAAiBFByiNjY2crncZcuWqampIYS6desGrZvo6OrqcrlcmHoEEINE9+BiYmL27Nlz6dIlDQ2NLnXFKO5KS0uZTKaZmRnehQDwQySxB5eQkPD48WOEEJlMvnr1qqamJrRuYqanp6eurh4UFIR3IQD8EAnqwTU1NSkqKsbExERHR69Zs8bQ0BDvirq6V69eKSkpdevWDe9CAOgkiWjg2Gz25s2bmUzm3r17GxoalJSU8K4IfNbc3Jyamurq6ipw5RoAJByen1oOh3P58mU2m81gMLy8vPbu3YsQgtZNosjLy7u4uLi7u3M4krJUGAAdh08DV19fjxCaNWtWXl4ehUJRV1cfMmQILpWAb5KRkUlKSsrNzWUwGHjXAsD3EfchalZW1u+//75w4cLevXuLc7/g
xyUkJFAoFPjDASkipgYuNzf37du3w4cP/+eff3R0dBwcHMSw064JW/lQRBtPTk52dnaWkZER0faFSFFREWYtBuJo4HJyclavXh0cHAxf/mLQ3NyMJQAiwuVyuVyu5E89oqysrKCggHcVAGcibOBOnTp18+bNyMhIODEqTqJu4LBOIovFkvC/KTRwQCQnGR4/fpyXl4edgDt//jycGCUeWVlZEkkiBhgB0D6hNXDNzc0IoX379kVGRmpoaCCEpk2bBk0bUVGpVIQQTAUMJJwQvodramp27dplY2Mze/bs+vp6FRUVIdUGOkMMh6h8HA6nvr4emwRB0sAhKvihHlxzc3N0dDR2DsHHx2f27NkIIWjdiC0qKmrPnj38mzIyMkpKSj/yHTllypSSkhIhVQdAW51p4NhsNofD8fb2rqmpweYK9/HxEUFtQOJ8+PChzT1YHtfY2NiJZq6srKy2tlZ41QHQ1vcdoiYkJBw5cuTw4cOqqqpSMRiqC2pziLpy5UoFBYUdO3bw79m4cSOdTj9w4EBLS8uZM2eSkpLKy8sdHBzGjBnDH8fD4XCuX7+OnSOytbX19/d3dHRcvXp1Wloa9oTDhw9bWVklJiaeO3euoKBAVVXV2Nh4+fLlOjo62FTyZDJZV1f3ypUrGzZs6NevX2Rk5L1794qKioyNjV1cXAICAtLT09esWYNtrU+fPps2bUIIXbhw4d69e1VVVdra2j169Pjll1+wa2AnTZo0bdq0p0+fpqenX7lyRUVFJS4uLjo6Ojc318zMzMvLa9y4cW2mnIFDVIAQktm8efM3n5SdnZ2fn6+np/fkyZOpU6caGRnBpdcSi8PhtM7+mUzm7du3fX19sbV4mEzmwYMHJ0yYYGtre/jw4Tt37vj7+y9dupRMJu/du9fIyMjU1BQhdOLEifv37wcHB/fp06e6uvr06dP9+vXz8/NLTk52cnIKDQ3V0NB49erV5s2bJ06cuHLlSicnp8ePH2dkZHh5eZFIpISEhJycHDabHRQUZG9vHxsbGxER8fPPPy9atEhRUfHq1atcLtfHx8fGxuaff/45derUqFGjEELh4eF37txZsGDBwoULdXV1L1++TKFQ7O3tEUI3btzIz8+3t7efMWOGkZHRv//+u2fPHnd395CQEHNz81OnTpWVlbm5ubV+H+Tk5CR/sB4QtW+3U/fu3duwYQMWrk2bNg0mz5Eu/fv353K5T58+xW4mJiZyuVwPD4/m5ub79+9PmjRp5MiRqqqqQ4cOHTBgwIULFxBCdDr92rVrEydOdHFx6dOnz9KlS11cXKqrq9tsOTw8vF+/fr6+vjQazd7ePigoKCkpKSMjg8VikUiksrKyDRs2uLu7q6mppaWlWVtbDx48WE1Nbfjw4fv372/TGCGEGAzGlStXpk6d2rdvX2VlZU9PzzFjxly8eJHNZiOESCSSiorKggULevXqRaFQYmJiHB0dFy9erK6u7uTkNH369Fu3bmGBCQCtfbWBO3ToUHBwMELI2dk5IiLC0tJSvIUB4dDU1OzRo0dCQgJ2MyEhwdnZWUND48OHDywWy8XFhf/MHj165OTk0Ol0bBgj/5uMQqGEhIT07NmzzZZzcnJaf9vZ2Nhg1+QxmUyEkLGxMf8I0d7e/vXr1/v27YuLi6PT6QYGBl9+nAoLC9lstq2tLf8ea2vrhoaG4uLi1tvHLqXIzMx0dXXlP9PJyYnL5aanpwvjDQOEIrgPT6fTZWVlt27dihDS0tISe1VAmDw9PY8dO8ZkMrF5QRYuXIgQamhowBK6Nk+uqanBZg2Rl5dvZ5sNDQ3Nzc2tn6OoqIgtoKGqqtrm5b6+vlQqNTExcd++fRQKxdPTc86cOZqamq03iPUQv9xgU1MTdpO/3C2LxWKz2adPnz59+nTrLcD5CvAlwQ2cqqrq/PnzxV4MEAlPT8/Q0NDnz5/Lyspix6dYzw4htHTpUgMDg9ZP1tbWrqurw5qqdraJtURYZw2DPR8b481m
s1ufvCKTycOHDx8+fHheXl5KSsq5c+caGhq2bNnSeoPYmPCvbbA1BQUFRUVFHx+f/v37t75fX1//+98bQHCCG7i4uDgejzd06FCx1wOET1VV1dnZOTk5mclkuru7YxchGBgYYI0U/9izpqaGx+NRqVRLS0sKhZKWloYdMPJ4vI0bN3p6eg4ePJi/TQqFYm1t/fbtW/49mZmZCCFsEW4ul9u6gHv37llbW5uZmZmampqamjIYjLt377Yp0sLCQkZGJjMzk3/Y++7dO2VlZYEHEBYWFgwGg185m80uLS3V1tYW3nsGCEJwBpebm4sFMYAYPDw80tLSXr9+7enpid1DpVL9/f3Pnz+fnp7OYrGePHmybt26I0eOYJ0pb2/v27dvx8bGpqamHj169PXr11hjZ2BgkJWVlZKSUlNTM2bMmISEhMjIyPr6+tTU1LCwMCcnJysrq9aHk5hHjx5t27bt2bNndDo9KSkpPj4eOzdqZGSEXbyclZWloqLi7e0dERHx7Nmz+vr6+/fvR0VFjR8/XuD5+lmzZiUmJsbGxmLR286dO9esWQPXjYEvCR4Hl5ubixCCVeOkkcBLtRobG/38/OTk5K5evdp68MTLly9v3ryZkpKipKRkZ2e3bNkyLEFrbm4+fPjww4cPORyOhYXFjBkzfvrpJ4RQenr6wYMHi4qKtm/f7uzsfOnSpdu3b1dWVuro6PTq1WvWrFk0Gg0htGvXrvLycmwOeoRQeXn5sWPHsBMd6urqw4cPnzBhAnZMunfv3n/++cfe3v6PP/5oaGgICwv7999/W1pa9PX1vb29J06ciFX7888/+/j4zJo1i195YWHhpUuXnj9/zmQy7ezs5syZwz8LgYFxcEBSFp0BQiTOa1G/pr6+XllZGd/FHqGBA189RI2Li4uNjRV7MYAgsKllAMCd4JMM2CEqAJ0Dcy4ACSG4gYM1rsCPaH8MHQBiI7iBg9ML4EdIQgYHAGRwQCQggwMSAjI4IHyQwQEJAePgCKjNhQRdFkzqBWAcHBC+LVu2rF27FtZdBriDDA4I34MHD1paWvCuAgDI4IAIbNq0CbpvQBJABgcAICzBh6hmZmbQuoFO27JlC8ztASQBZHBA+CCDAxICMjggfJDBAQkBGRwAgLAggwPCBxkckBCQwQHhgwwOSAjI4IDwQQYHJARkcAAAwoIMDggfZHBAQkAGB4QPMjggISCDA8IHGRyQEJDBAQAIC+aDA0IzbNgwrONWU1NDo9HIZDKHw9HT0ztx4gTepYEuSvAhalxcHI/HGzp0qNjrAVJMRkamuLgY+7mpqQkhRKVS16xZg3ddoOsSfJIhNzc3Ly9P7MUA6ebs7NzmgMDKysrT0xO/ikBXB+uiAqGZMmVKSkpKaWkpdpNKpQYEBOBdFOjSYF1UIDSOjo7du3fnN3C2trYDBgzAuyjQpcE4OCBMAQEB+vr6CCEajTZ9+nS8ywFdHYyDA8JkZ2fn6OhYUlJiY2Pj4eGBdzmgq4MMjviYDdyKomZmg5guLfBx9y/NRiM8x394XS+ePcorUrQM5agqMuLZHZAiMA6O4OLOluVlNehbUMkkvEsRGRlZcuGHBn1zxSH+uhRZ4v6e4PsJbuBgHBwBcDm864eL7H5SN7FTwrsWcSjLY76IrRi/yFCeCgvag89gHBxh3Qgtchqg1UVaN4SQrqmCl5/epX0FeBcCJAhkcMT0Ka1BTVte10wB70LESkVD1txRJSOR7tBHFe9agESAcXDEVFHULKfYFUN3RRWZ8kKmA4IGDiAYB0dYzEaOqqYs3lXgQFVTrrkJzpuBz2AcHDG1sHhcTlf8f87l8JobOXhXASSF4AZu2LBhMHwEACDtBDdwJiYmYq8EAACETHAGFxMTExMTI/ZiAABAmAT34PLz88VeCQAACBlkcAAAwoIMDgBAWJDBAQAICzI4AABhQQYHACAsyOAAAIQFGRwAgLAEN3D5+fkQw4HOGes7KPzs33hXAQCCDA50xo3I
y1nvMtau2SLw0cmTptvbdRd7UQAIABkc+G7v3mW28+i0qTPFWAsA7YEMDnw21nfQtWsXly6fO3CQK72ejhCKib21cPHM4SP7L1w88+q1C1inftmKoNi423FxdwYOcn3/Ieva9YgJE4c+jX80aHDvQ0f2tDlEzch4E7xm8ZixA6fPGB96dH9DQwNC6O8TR0aO9mSz2fxdR1wKHzzUvbGx8Ws7BaBzIIMDn8nKyt6OvmFl1W33H0eoitT7D2J2/bHFxtr2wrmowDmLrl67cDh0L0LowL4wOzvHIUNG/vMg2cbaVk5OrrGxISrq6tpft/qOndR6g4VFBauCFzKbmYcPndq2Zc+nTx+WrwhqaWkZOGBIY2NjUlIC/5lPnv7Tx92DSv3qTgHoHMEN3LBhw2BJra6GRCKpqtJ+WbTK1eUnCoUSHR3Zo4fzsqW/qqtr9HJ2mzVjfmTk5Zqa6i9fxWQyp0yZ4TNomJHRf5KN+/fvylJkt23ZY2JiZmZmsWplyIfsd0/jH1laWhsYGD15+g/2tKqqyszMNG/voQghgTutq6sV49sACEVwA2diYmJqair2YgDOutnYYz9wudz0jFQ31z78h5yd3bhc7pu01wJfaNvN4cs7MzJSbW0daDQ17Kaenr6BgRG2hcE+w588fcjhcBBCj588VFRU7N9vwNd2mtVu5AdAOwSfZMACuGHDhom9HoAnOTk57AcWi8Vms0+cDD1xMrT1E77swbV5YWsMRn3Wu8yBg1z/s4XqKoSQz6DhZ8L/evX6hZur+9On/3h4eFMoFCaTKXCndOjBgc6Ca1GBAAoKClQqdcjgkZ6eg1rfb6Bv1PGNaGhqde/uNGvm/NZ30lTVEEJGRiaWltbx8Y9sbOxSUl/+vvPPdnZqYgxrvIFOgnFwQDBLS5t6Rr2z0+f+F5vNLikp0tHR/Y4tWFjH3bvTs0cvMvlzEpKb+4mf0w0cMOT27eumphaqqrRezm7t7FRDQ1OovxnoQiCDA4LNnbM4Pv5R9N2bXC43LS1l67a1K1bNZ7FYCCFDQ+O3b9NfvX7xtSNWjJ/fz1wu93DoXiaTWVCQdzzsz9mBkz/lZGOPDhgwuLSsJCYmauDAITIyMu3stPWAEgC+C4yDA4J17+4Uduz8mzevfScMXhW8sKGBsX3bPnl5eYTQ6JHjSSTS6uBFHz99aGcLqiqqJ/6+pKigOG+Bf8DMCSmpL1evCrGxtsUeNTQw6mZj9/5D1qCB/ztfL3CnAgM+ADqCJPBQNCwsDCEUFBSER0lACB5ElGvoK1g5dbkF3ouyG9+9qB073wDvQoBEgAwOAEBYcC0qAICwIIMDABAWjIMDABAWZHAAAMKCDA4AQFiQwQEACAsyOAAAYUEGBwAgLMjgAACEBRkcAICwIIMDABAWZHAAAMKCDI6YqKoyZBIJ7ypwQEIkVU1ZvKsAkgIyOGJSUaOUFzLxrgIH5YVNVBUZvKsAkgLWRSUmU1slRm1XnAiXXsUys6PiXQWQFIInvMzPz+fxeDBruVTLelH/IaVhwCQ9vAsRn/ib5TpGcr281fAuBEgKwQ0cIIaPbxqSYqutnFS1DBVk5QX31gmghc2tLGIWZTea2VF7etLwLgdIEMENHKyLShjVpey0+Nq6Kja9SnxHrHW1dao0mthOcqjpyCmpynRzVTUwVxDTLoGUgHFwBKehJ+s1QVvMO/X09IyJiaFSIQsDOINxcAAAwoJxcAAAwoJxcAAAwoIMDgBAWJDBAQAICzI4AABhQQYHACAsyOAAAIQFGRwAgLAggwMAEBZkcAAAwoIMDgBAWJDBAQAICzI4AABhQQYHACAsyOAAAIQFGRwAgLAggwMAEBZkcAAAwoIMDgBAWJDBAQAICzI4AABhQQYHACAswT24qqoqOEQFnWZnZ4d3CQCgrzZwHh4e0MCBTnv79i3eJQCAIIMDABAZZHAAAMKCcXAAAMKCcXAAAMKCDA4AQFiQwQEACAsyOAAAYUEGBwAgLMjg
AACEBRkcAICwIIMDABAWZHAAAMKCDA4AQFiQwQEACAsyOAAAYZEEZm35+fk8Hs/U1BSPkoC0Gjx4MIVCIZFI5eXlmpqaZDIZIWRgYHDixAm8SwNdFGRwQGiqq6tJJBL2c1VVFUKISqX6+vriXRfouiCDA0Lj6urK4XBa32Nubj5q1Cj8KgJdneAGLj8/H2I48L0CAgLU1dX5N5WVlSdOnIhrRaCrg3FwQGj69etnY2OTnJyM3TQxMYHuG8CX4B6ciYkJnGEAnTBz5kwajYZ13yZPnox3OaCrgwwOCJO7u7uNjQ2PxzMwMBg5ciTe5YCuDsbB4aO6nIW4eBchGhNGzyjKqfMbM7O6lIV3LSJBQiR1PVm8qwAdAuPgxIrdzP33euX7V3RTO+XacmL+/yc8NR25nHSGtbNK31GaKuqCuwhAQghu4IAoNDdyT23JGTzdSMtAjixDwrsc0Hk8HqopZT24WDzhF0OaFvTmJJfgBg4L4IYNG4ZHSYR1eEX2jI1WCFo2Arm8N2fKKhMlVRm8CwGCwTg4MXl6s2rgZH1o3QhmwET9Z3eq8K4CfBWMgxOTvKwGU3sVvKsAQkbTkvuUwRiEdPAuBAgG16KKBQ/Jy5Np2nJ41wGETJ5K1jZQaGRwqMpwlCqJYBycWJBQWSET+sSEVFnCJEHyIKlgHBwAgLAggwMAEBZkcAAAwhKcwd29ezc6OlrsxQAAgDAJ7sEVFBSIvRIAABAywQ3ciBEjIIMDAEg7wQ2ckZGR2CsBAAAhgwwOAEBYkMEBAAgLMjgAAGFBBgcAICzI4LqWWXMmHTj4u7C29s+jewMHudbW1ghrgwAIl+AGrqCgoLCwUOzFACAOOTkfp0yD9Qy7BMjgQJfz7n0m3iUAMYEMTnJF3bp2+fJZej3d3b3/nFkLp0wbtWH9jkHeQ69dj7hw8dTyZWs3bQ4eN27SL4tWJSY+efhP7Ju013R6nZ2t4/Tpgc5OrthGcnM//b5rU15+jpOTa4B/YOvtZ2S8OROneuV6AAAgAElEQVQelpWVQVNT7+PuMSMgSElJ6ZtVHTt+MO7eHaoiddCgYUZG/1mWKD7+3zPhYXn5OTSampVVt6W/rNHV1cMeSkx8cvDQroqKcitLm3HjJg0fNgYhtHb9MoTQzh0HsOfExt7+/Y/Nd249plKp48b7zJwxr7Aw/9r1i2pq6n3cPRYvWvXb7yHx8f8aG5v6T5s9ZMjnNQljYm9F3bqWk5Ntbm7lPXDIhPFTSSQSQmjL1l9JJJLPoOG//7G5qanR3r77/KCldnaOp04fCz/7N0Jo4CDXhQuW+02Ydu36xdjY2wWFeaYm5q6u7rNnLZCRgcndCAIyOAn1Nitj/4GdXl4+Z89cH+Dps3X7WoQQmUxGCMnJyTU2NkRFXV3761bfsZOYTOaOnRuam5t/XbPltx0HTEzM1m9YXl1dhRBis9lr1v6ira17+uTVeXOXRFwKr6qqxLZfWFSwKnghs5l5+NCpbVv2fPr0YfmKoJaWlvaruhl19WbUlaVL1oSGhuvrG4af/Yv/UPLL5xs3rx4yZOTliOhNIb+XlZUc+PNz2JeY+CRk06o5sxf9vvPP/v0H/rF76/0H35htUFZWNuLSGRMTs9i7CYFzFt2NiVq+ImiQ97B7sc8GDhi8e++2ekY9Quj+g5hdf2yxsba9cC4qcM6iq9cuHA7di22BQqFkZL65dz/62NGzd+88lZeT37lrE0Jo1sz5UyYH6Orq/fMgeaLfz9evR5w7f9JvwrSIC7dHj55wJzoy4lL4j/3pgASBDE5CxcXd1tDQnDVzPo2m1revp5urO/8hEonEZDKnTJnhM2iYkZGJgoLC32ERK1esd3ZydXZynT9vWVNTU1p6CkLo8ZOH5eVlixau1NXVMzOzWPJLMINRj23k/v27shTZbVv2mJiYmZlZrFoZ8iH73dP4R+1Xdf1GhJenj5fnIFUV1WFDR/dyduM/dPLUUU8Pb78J02g0NQeHHgsX
rHj27GnWu0yE0KnTxzw9vAf7DHdzdZ/uP2fypOmNjQ3ffAesrWzHjJ4gJyc3wGswQsjBocfAAYMpFMrAAUNaWlry83IQQtHRkT16OC9b+qu6ukYvZ7dZM+ZHRl6uqanGttDU2Lh61UYDfUMKhTLIe1hBQV5jY2ObvaS+edWtm/3QoaPU1NRHjfQ9cvj0T737fc8fCkg0wQ3ciBEjhg8fLvZiwP98ysm2s3OkUD5nCJ4eg9o8wbabA//nxsaGQ4d3+00aNnCQ6/CR/RFC2JnNoqICBQUFPT197Gmamlo6OrrYzxkZqba2DjSaGnZTT0/fwMDoTdrrdkri8XhFRQVmZhb8e2xs7P5X8KcPtrb/K6mbjT1CKCsrg8vlfvzvQ/PnLR0zesI33wETEzPsB+zA2czMErupqEhFCNXX07lcbnpGqptrH/5LnJ3duFwu/7cwNjGjUqnYz8rKKtir2uzF0bHny5fP/9i9NSb2Vh29ztDAyMrK5pu1AWkBGZyEYjDqdXT0+Df5LRGfnNznFR7KykqXLg/s5dw7ZP1v9vbdSSTS4KGfu3t0eh3WHPDJyyvwt5/1LnPgINfWj9ZUt7dAVENDA4fDab1BBQXF/98ao7m5mb9xhBDWsjQ2NjCZTC6X2/qhDiL9dyJw7PC8NRaLxWazT5wMPXEy9D+/xf/34L58yZf8JkyjUpXiE/7d9ccWCoUyYMDgeXOXaGlpf2+1QDIJbuDu3r3L4/FGjBgh9nrAZ/LyCi1sNv9mVXXl15756N97LBbr1zVbFBUV+X03jKoqranpPwdl/GNDDU2t7t2dZs2c3/pRmmrbZrQ1JSUlGRmZ5mYm/x7+xhUUFBBCTGYT/6GGxgaEkKaGlry8PJlMbmhgfPNX5nA533xOawoKClQqdcjgkZ6e/+neGuh/x9czmUweNdJ31Ejf3NxPr14lnQ4Pa2hg/LZ9/3dVAiQWXIsqoQwNjT98yOLfjP96Okan16moqGKtG0Lo38cP+A/p6eozmcxPn7ItLKwQQtnZ7ysrK7CHLC2s4+7d6dmjF7+bk5v7yciovZmcSSSSrq5+RsYbNPHzPc+eP8V+oFAo3WzsMjLe8J+M/WxhaS0jI9Otmz2WCWL++vswi8VatHCFnKxcbd3/muOCgryOvjv/z9LSpp5Rzz9lzGazS0qK+IfhHREbe9vGxs7c3NLMzMLMzKKeUX8n+sb3lgEkFmRwEqpfX6+8vJwLF0/zeLwXyc/S0lK+9kwLC+uqqsqoW9daWlqeJyW8epVEo6mVl5cihPr29ZKTk9uzbzuTyaysrNi6fa2qKg17lZ/fz1wu93DoXiaTWVCQdzzsz9mBkz/lZLdf1cABgx8/efjPo3sIoYsRZzIz0/gP+Y6b/DT+0bVrF+n19NcpyaFH9/VydrO26oYQGjva78WLxEuXz75OSb4ZdfVixBlzc0uEkJ2dY1ZWxqdP2dhJ2G+e4vjS3DmL4+MfRd+9yeVy09JStm5bu2LVfBaL1f6rjIxMqqoqnz59VFCQ9+BhzMbNqxMSHtfR6549e/rk6UNHh57fWwaQWJDBSShPD2/fcZPOhIddvnLO3r57YODiRYtnysrKfvnMQd5D8/I+hZ/9a/+BnW6u7muCN0dcCr9w8XR9PX3F8nW/7TgQFvbnqDFeCgoKQXOX3H9wF3uVqorqib8vRUScmbfAPz8/19bWYfWqEBtr2/ar8v95Tm1tzaHDu7duW9u9u9PCBSt2/LYBGxM+ZMjIisryS1fOHg7dq6ur5+riPjdwMfaqoUNH0evrzoSHNTQ0aGpqBc39ZcTwsQihcWMn5efnBs3/mcPheA8c4j9t9u9/bP6uEebduzuFHTt//sKp42F/MplNDvY9tm/bJy8v3/6r3H/q393RKWTTqhkBQStXbDh8ZM/6kBUIIQ0NzVEjfSf6+Xe8ACDhSAI/T5DBCd2RVdn+6606kHp/1tLSkpv7iX9G721WxsJFM/46fgHO8UmaS3s+/fyrqaIS
jA2WRJDBSai09JQVK+ePGztx8qSA6urKPw/94eDQw9LSGu+6AJAmcC2qhHJ2cl25Yv3dmKjZgZOUlVVcXdznz19GEv0S6mvXL0v/St43YsS4BfOXiboAAIQIMjjJhQ1fEPNOV63YwGILDump/x1SB4Dkg3Fw4D80NbXwLgEAoYEMDgBAWJDBAQAICzI4AABhwXxwAADCggwOAEBYkMEBAAgLMjgAAGFBBgcAICzI4AAAhAUZnJgYmFFFfh0pwIO2oQIJwd9WQkEGJyasZk51abOmwTemKgPSpZHeUlXarKDU4WmwgHhBBicmZvZKdZXsDjwRSJOacpZldxW8qwBfBeuiislPwzVePqioKvnGbNpAinBaePfPF3tNgOkJJJfgGX0LCwt5PJ6xsTEeJREWj4dObc5x8dFS15VX15XDuxzQebUVLHol+/H10sBtFrLyEMBJLsENHBCd5zHVn9IY8ooyJblNHXi6cPB4PDFMliksEl6tvhm1idFi5qDUb7Qm3rWAb4A1GXDCRVxx7erOnTslJSWBgYHi2uGPio+Pv3r16v79krs4acfX1gD4gnFwOCF/Jf4UqoiIiClTpnh69qfRaKLfm9B4ePTr0+cnDoctcBUxADoOxsER1rZt28zMzBBC0tW6YSgUyosXLzQ0NCwtLfGuBUgxyOAIKDk52dXVNT8/38SkvZXqJd+8efOCgoJcXFzwLgRIKxgHRyhcLnfWrFn19fUIIWlv3RBCx48ft7Cw4HLFFlcCooFxcMRRVlbGYDBWrFgxcOBAvGsRGhUVlbi4OLyrANJKcAM3YsSI4cOHi70Y0ElsNnv+/PlMJlNVVbV79+54lyNMFArFxMQkICAA70KAVIIMjgiio6N1dHRcXV3xLkRUGhsbm5qaNDVh3Bn4PpDBSTEWixUcHIz1uAncuiGEqFRqbW1tbm4u3oUAKQMZnBQLCQmZMGEC3lWIiaWl5dGjRx8+fIh3IUCawLWo0ofH4128eHHatGl4F4KDT58+GRoaysvDrFOgQwT34IyMjKB1k0w8Hq93797Ozs54F4IPCwuLlJQUGDgCOggyOGmSnp7O4/FevHhhZ2eHdy24MTY2HjduHN5VAOkAGZx0qKysdHd319bWJnf567wNDAzOnj2bnZ2NdyFACkAGJwW4XG5aWpqjo6OMjAzetUiKqqoqFoulr6+PdyFAokEGJ9E+fvzo5eVFIpF69uwJrVtrmpqaZ86cuXr1Kt6FAIkG88FJtDNnzvj5+SkpKeFdiIRKT083MjJSU1PDuxAgoSCDk0SpqakbNmxACM2YMQNat3Y4OjrW1dVxOBy8CwESCuaDk0QnTpzYuXMn3lVIB21t7YEDBz5+/BjvQoAkgmtRJUh6enpxcfGQIUPwLkTK1NXVZWRk9O3bF+9CgMSBcXCSoqCgYM+ePR4eHngXIn1oNJqzs3NtbS3ehQCJA2sy4O/9+/d6enqysrKnT5/GuxZppaioePbsWYRQUFAQ3rUACQLj4HAWHx9/5MiRc+fOwQjeH5eUlGRgYGBkZIR3IUBSQAaHm7q6OhqNlpCQAOGRENXX11OpVBgzCDCCew3379+/f/++2IvpQkpLS3fv3o0QgtZNuFRUVJYsWdLc3Ix3IUAiCM7gPn36JPZKupaKioqWlha8qyAmCoUCxyUAAxkcAICw4FpUfLDZ7JqaGryrIKaUlBS4tgFgYBwcPt68efPrr7/iXQUxQQYH+GAcHD7k5OTU1dXxroKYnJ2dYcwNwEAGBwAgLMjg8AEZnOhABgf4IIPDB2RwogMZHOCDDA4fkMGJDmRwgA8yOAAAYUEGhw/I4EQHMjjABxkcPiCDEx3I4AAfZHD4gAxOdCCDA3yQwQEACAsyOHxABic6kMEBPsjg8AEZnOhABgf4IIPDB2RwogMZHOCDDA4QhLOzM4lEwn4mkUg8Ho9MJi9atGjWrFl4lwZwAxkcPiCDEzobGxusUSOTySQSiUwmm5mZTZ06Fe+6AJ4g
g8MHZHBC5+/vT6VS+TcpFMrYsWMVFBRwLQrgTHADV1BQUFhYKPZiuhDI4IRu9OjRpqam/JtGRkYTJ07EtSKAP8jgAHHcvn37t99+Y7FYFApl8eLF/v7+eFcEcAYZHD4ggxOFUaNGWVpaIoQMDQ3Hjx+PdzkAf5DB4QMyOBGZOnWqgoLC2LFjW+dxoMuCcXD4IGoGV5Td9PpRbWVRM6MOr1Vfbab0Pk1PQYdTsnHZvZqOnJKqTE8PNYvuSrgUAFqDDA4IzYfXDalParv3V9fQk1dQksG7HHywmNyq4uYPr+lGVgo9PWl4l9PVCW7ggKix2WwGg0GkTtybJ3W5WU0DJ+nhXYikSLxVrqIh02eEJt6FdGmQweGDYBkco7YlL6sRWrfW+ozWqatsKS+Aq2LxBBkcPgiWwZXkMMkyJLyrkDiy8jIlOU06xvJ4F9J1QQYHhODVwxoOh2zbGyKn/8jJYDCqmH1Ha+FdSNcluAdnZGQk9kq6FoJlcM1NXMhyv8RhcxvruXhX0aVBBocPgmVwAEgmyODwQbAMDgDJJLiBGzFiBAwfEanu3bv//vvveFcBAMFBBocPgmVwAEgmyODwARkcAGIAGRw+IIMDQAwEN3AjR46EDE6kIIMDQAwEN3CGhoZir6RrgQwOADEQnMHduXPn9u3b4q6lK4EMDgAxENyDKyoqEnslXQtkcACIAWRw+IAMDgAxgAwOH5DBASAGkMHhAzI4AMRAcANXVFRUXFws9mK6EMjgJNOWrb9G372JdxVAaCCDwwdkcJLp3btMN7c+eFcBhAbWZMAHwTK4xDtVPB65u8d3/Do1NdU7f9+YkfnGxNhs7NiJhYX5T57+c+bUVYRQdXVV6NF96RmpTCbTza1PgH+gsbEpQign5+PswMmhR85cuHDqafwjbW2dgQOGBM39RUZGpp1XXbseceHiqeXL1m7aHDxu3KRfFq3KyfkYdevqq9cvSkuLzUwtRowYN3aMH0Jo4CBXrDZlZeVbNx8hhGJib0XdupaTk21ubuU9cMiE8VNJpO+YuDg7hV5VxPSZpvP97ygQDsjg8AEZ3B97tuYX5O7+I3T7tn3Pn8c/fx5PJpMRQhwOZ/nKeSmpL5cvW3fy70vqahoLF80oKi5ECMnKyiKE9u7bPmjQsLiYxPVrt1++cu6fR/faf5WcnFxjY0NU1NW1v271HTsJIXQkdO+LF4lLl6z5feefI0aMO/jnrmfP4xFCMdHxCKHVq0Kw1u3+g5hdf2yxsba9cC4qcM6iq9cuHA7di/fbBr4PZHD46OIZXF1d7bNnTydNnG5v56ipqbVyxYbS0s+ft7S0lPz83HVrt/3Uu6+GhuaC+ctUaWrXrl3gv9bL02eAl4+srGzPnr0M9A3fv3/b/qtIJBKTyZwyZYbPoGFGRiYIoZCQnbt3h/ZydnN2ch07xq+bjV3Si4Qvi4yOjuzRw3nZ0l/V1TV6ObvNmjE/MvJybW2NGN8n8KMEZ3CjRo3icmGqZRHq4hncx08fEEKOjj2xm8rKyr169c4vyEUIpaWnyMrK9nJ2wx4ikUhOPV1S37ziv9bGxo7/s7KyCoNR35FX2XZz+N/uebzr1yOeJ8UXFORhd+jrtx0XxeVy0zNSA6bP5d/j7OyG3dm/3wBhvhdAlAQ3cAYGBmKvpGshWAb3verr6QghJSVl/j2qqp8XrGEw6tlsNj8Ow6ip/e+Nwo5k2/jmq+Tk5LAfuFzur+uWstmsuYGLnZxcVZRVflk658sNslgsNpt94mToiZOhre+vqan+/l8X4EZwA3fnzh0ejzdq1Cix19NVvHnzJiws7Pjx43gXgg95eQWEEJvF4t9TU/u54dDU1FJUVNyxfX/r58uQZdrfYMdf9f5DVlZWxp7doS69emP3MBj12lptzwMoKChQqdQhg0d6eg5qfb+hASw1J03gWlR8dPEM7vNZ0dyPZmYWCCEGg/HqVZKurj5CyNLSpqmpSUdHz9Dg87TSxSVFarRvvFcd
f1VdXS1CiN+i5eZ+ys39ZG5mKXCb9Yx6Z6fPvUI2m11SUqStDadEpYngkwwjR44cMWKE2IvpQrp4BmdoYGRqan4mPKyouJDBYBw4uJOfgrn06t27d989e7aVlZXW1dVG3rwyf8H0mJio9jfY8VeZmVpQKJRLl8/S6+n5+bmHDu92c3UvLStBCMnLy2tr6yQnP3udktzS0jJ3zuL4+EfRd29yudy0tJSt29auWDWf1arXCSSf4AbO0NAQlmUQKTabXVPTpc/HBa/aSCaTpwf4Ll8RZGNj5+jQU5Yiiz20c8cBLy+frdvXjhvvc/1GhI/P8PHjp3xzgx18la6u3vp12zPfpo0d571uw/LAOYvGjPF7+zZ9xiw/hNDP02a/ev0iZOPKJmZT9+5OYcfOv3nz2nfC4FXBCxsaGNu37ZOXh2XqpYnggb6QwYnay5cviZTBdWKgb11dLZPJ1NXVw26uXb+MIkPZtnWPyGrEAQz0xR1kcPjo4hkcdtVnaWnxggXLe3R3jrp17eXL521OEQDw4wT34IqKing8Hhylgg7qTA+OXrd7z9b8/NyKijJTE/Pp/oH9+nmJskYcQA8OdzAfHD66+Dg4hBBNlbZ9K1z5BEQLrkXFB1yLCoAYQAaHD8jgABADmA8OH118HBwA4gEZHD4ggwNADCCDwwdkcACIAWRw+IAMDgAxgHFw4Eddu3btzSPm0KEjvmscXFeQnULP/1Bt0LOiurq6oqKirKyspqamsrKysrKSyWTCQZIYQAaHD2nP4DgcTnR0dM+ePU1MTCoqKtzcvPGuSEKlpqTsO7ufyWS2tLS07kzAfzHxgAwOH9KbwRUWFiKE1q1b9+rVK01NTYTQ/PnzsR/Al2zt7JSVlTkcDolEIv8/Eol069YtvEvrEmBNBnxIYwb37Nmzfv365efnI4R27dq1adMmJSUl7CE5ebKswncsN9VFUGTJBkaay5cv19LSan0/tnoOEAMYB4cPaRkH19TUdOzYMSaTuXbtWi0trYcPHwqcL4iqKpOXxcSjQIlWXdqsrCrT38enrq7uyJEjdDodIcTj8SZPnowQKi8vz8zMHDAAVngQIZgPDh8SPh9cQUHBxYsXsR90dXWXLFmCELKysvrabGhaBgrwjfglDpurZSSPEJowYUJAQADW4eXxeMuXL8eW2rl9+/aaNWsQQpL8YZBqkMHhQzIzuJqamsbGRoTQkiVLOBwOQsjGxmbatGn8Q9Gv0TaSU6bJvHpYJa5KpUBWUh27mWNqS8Vuzpw5c+LEiRQKhUb7vLwOlUrds2fPli1bEEKvX7+ePHlyRkYGriUTEIyDw4dEZXBcLpdMJu/evTsuLu7GjRsIIezf7+IxTuvpzarn0ZU9PNUVlb+xRgyxNTdy3iXTGbWsoQF6re9fvHhxVVXVpk2bWt+poKCAEPL29jY1NcX6cadOndLT0xs+fLjYCycgGAfXpX38+DEsLGzw4ME+Pj6ZmZn29vY/sjUWi5UYU/LxFZvHI8kqkHk8Ho/H46/XJzYcDkdGRnwtLP8MKXaTREYNNeweHuq9h3byCyw7O/vMmTOTJ092dHRMT093dHQUar1di+AGDogavuPgXrx4UVNTM2TIkNu3bysqKg4aNKgDL/qGwMDA6upqhBCPh3hsCoct29TURCKRRo0aNW3aNGFU3VGzZ88ODQ3FekZiwGazf//99+nTp5uZmSGE5KkyquqCD4w6YdWqVTk5OdeuXWtpaaFQhLbZrgPWZMAHLmsy5OTkmJubJyYmhoeHL1682MHBoQMv6ignJ6c2/wN5PJ61tXVERIQQ99IR69ev37hxo5hXhykqKjI0NExMTOzTp49wt1xaWqqnp1dUVLR58+bZs2cLffvEBuPg8CHODI7H4zU1NY0fP/706dMIIRcXl6NHjwq3dUMIpaSktPmypNFoixYtEu5eOmLHjh3iX/sKuzLhwoULV69eFe6W9fT0sO0vXLgwNzcX64A/e/ZMuHshKsjgiOz2
7dtXr17966+/2Gx2ZWWliYmJSHc3bNiwyspK7Gcej+ft7b17926R7lGgp0+f9unTR5wxXGvJycmurq7v37+3sbER0S4KCgp27drl4eExefLksrIyXV1dEe2IACCDw4foMjgej3fnzp1u3bpZW1ufPHmyd+/e4kmpQ0JC6HT6y5cvmUwmQsjAwCA0NBSX70hPT8+YmBgqlSr+XfOdPHmyoKCgzQlT4WIymQoKCn/++eeLFy8OHDgAV8sJBOPg8CGKcXB5eXkIoe3btycnJ2Pf6rNnzxZD6xYbG+vm5ta3b9+DBw8+ffqUx+NRKJShQ4fidQTg4eGBV/eNb/bs2b169UII8bu0QoedRVmyZMm6deuam5sRQlu2bHn8+LGIdielIIPDh3AzuFevXvXv3z87OxvrSW3evFlVVVVYG29HQ0PD8uXLHz9+nJSUxB+3pa+vb2xsjEv6hsElg/vS6NGjsf9KIu3HIYTs7OwMDAwQQkOGDLl79y5CqKqqKjMzU6Q7lRaQwUmr5ubm0NDQysrKHTt25Obm6uvri/l/9bVr1w4dOrRt2zYPDw9x7veb8M3gvhQdHU2lUsV5zWl9ff2iRYvs7OzWrl3b3NwsCc09bngAD83NzRUVFZ14YVFRUXh4OI/HKygoOHfuHJ1OF0F131BSUjJnzpzffvtN/LvuCA8Pj4aGBryr+A8Oh8Pj8datWyfOnZaUlGCB7OLFiz99+iTOXUsOyODwkZaWtn79+o4/v7a2tq6uDiEUHBzMZrMRQkZGRj///LOKioooyxTg9OnTgYGBixcvXrt2rZh33UGSkMG1gV3n4OXlJc43DRtfMmLEiGnTphUUFCCErl+//vLlS7EVIAkgg8OHvLx8R057YQHCkSNH/Pz8sHbt3Llzs2fPFkuNbWVnZ0+ZMoXBYNy+fdvJyQmXGjpCQjK4Lw0ZMuS3337DhsuJc799+vTx9PRECJmamoaFhb179w4hVFZWJs4a8AIZnITKz88/evRo7969fX193717161bN3zrOXToUHx8/I4dOywtLfGt5JskLYP7UmpqamBg4IsXL3DZO5vNlpWVnTlzpqKi4tGjR3GpQWxgHBw+WCwWnU5vM9ErdglXUVHRmDFj7t+/z+PxBg8ejFOB//P69euQkJCJEyfOmDED71o6RBLGwXVQWlqag4MD/0J9MXvz5k2PHj3y8vLCwsL8/f3t7OxwKUOkIIPDR5sM7uPHj9gHLiwszNjYGCHk4+MjCa3bzp07Q0NDT5w4IS2tm2RmcF+jp6fn7u6O19Fijx49sONWT0/PR48eIYTS09M/ffqESzEiAhkcPrAMjsvlcjgcPz8/7EjBzs7u+PHjzs7OeFeHEEKPHz/28vKysbH566+/pOtiIInN4L6kra2dlJRUUVGBderxKmPo0KELFizAFov49ddfsQVxsCtSpB1kcPiIi4u7cOHCn3/+qaSkVFhYaGpqindF/8PlckNCQpqamrZt2/bNuXwlkORncAL17dv38OHD2PUP+KqqqtLU1Ny2bVttbe2mTZvEM2hcRGBNBvHhcDi3b99+8+YNQqi4uDgwMFBVVVVGRkaiWrfo6Gh3d3cvL699+/ZJY+uGLWmIXbokXRISErBrUVpaWvCtBDu/HxISMmbMGGyS4SNHjmRlZeFbVedABicOWMR28ODB5ORk7Juje/fuZ8+exbuu/6DT6UuWLHn27FlSUtKQIUPwLqfzpCiDa2PSpEkIoW3btkVHR+NdC8IG7mHfviYmJnv27EEI1dXVVVVJ1cobAof/Hj9+/Pjx42IfdUxAGRkZ/fv3j4qKanN/Wlra2rVrcSpKgEuXLg0cODA+Ph7vQgCPx+Nt3LiRx+O1tLTgXUhb1dXVQ4cODQ0NxbuQjoIMTvhaWlqOHDmSn5+/d+/ewsJCTU1NRUVFvIv6qqKiopCQEDs7u9WrV+Ndi3BIaQb3pe8kHNcAACAASURBVH///be2tnbs2LF4F9LW27dv7ezsrl+/
npGRERQUJNHnoPBuYYmjpKTkxIkTPB6vqqoqPDy8tra2nSd3+lpU4fr777/HjBmTmpqKdyHCJIHXonba1q1bk5OT8a5CMC6XGxkZGR0dzePx7t+/X1BQgHdFAkAG96Pq6uqwOb82bNiAZdsaGhrTp0/nL38p0Pdeiyp07969mzhxIovFunnzJjYeijCkN4P7UkhICLaWTWRkJN61tEUikcaOHYtNk0UikRYvXoxlzdiKuhIC1kXtJB6PRyKRTp06de7cufDwcITQ33//3fGXd/BaVBE5ePDgixcv/vjjD3Nzc7xqEJ0dO3bgXYIwYZ+T9PT0ioqKuXPn4l2OYN7e3t7e3tiq4WPHjnVxccEWtMafwH5dYWGhZHY4JUFRUdHatWvPnz/P4/E+fPiAdznfJzk5eejQodiES0T15MkTCYznfxz2YXv+/DnehXwbdqFhfn7+wYMHi4uLcaxEJNeicrlcoW8Td2/evMnPzx81atTz58+bmpqw+Qs7fRXh165FFant27cXFBTs2LFDpPvF/a8/bty4Cxcu4HstquguL42Kirp7965UXCTP4XDOnz+PdQg+fvyop6cn/pGVIlkXVXTz0IsftuBuS0sLg8GgUqlt1mnvdEsh5nVRHz16tGHDhlWrVo0bN06kO2pubq6vrxfpLr6JTqerqKiQSCQca1BWVhbdytMvXrxwc3MrLCyUonEOqampS5cu3bhxo7e3tzj3Cxlce2pqashkMo1Gk5GRUVNTE+KWxZbBtbS0rF+/nsPh3L9/X2yLveNLqi8t6gg3NzeEUGNjY2Bg4JEjR6TiwtuePXs+evQIm3dz27ZtsrKyS5YsEUMvWyTj4KS6B8disRobG1VUVGRkZDgcTvvn48R8jPm9bt26tWPHjh07dgwaNEg8e5SEHhyLxWrT0RY/kfbg+FJSUurq6ry8vES9I+FqaWmJjIzs0aOHjY3N5cuXvb29Rff/CK5F/YzJZGLTOXA4HCUlJaxdE91oAxaLJdKvgZqamkWLFr169erZs2dia90kBLZOBd5ViIOTkxPWugUGBmIz2ksFCoXi5+eHLYzNZDJnzpyJrdAmin119XFw2IXNjY2NbDabQqEghBQVFWVlZUW9X5GOg7t48eKkSZMCAgJEvWCdZMK9+yZ+v/zyS2hoKN5VdEZAQADW1DQ3N7u5uQn9Au2uOx9cS0tLZWUlm83Oycn57bffpk6devny5Xaev2PHDiGuGCKiDK6goGDGjBnFxcX37t376aefhL59qaCqqtrpMww5OTnDhg1LT08XdlGi1bNnT+zDefTo0ZKSErzL6QwNDY2kpCQrKytsMrFjx44JJesQ3MCNHDlyxIgRP771H5GbmxsQECD0zTY0NGCdeTKZjF0l+ujRo/T09A0bNgwcOFDou/saR0dHbP0RIQoLC1uyZElwcPDKlSuFu2Xp8s2ZI6OiorC5Mb5Eo9GmTZumra0tmtJEbvTo0UFBQVJ6hE4ikfr06YNdi0KhULBFrJOTk39k8ivJzeDev38vrE1xudzGxsbPA/9IJGypPTKZjH3PNzQ06Orquru7i/OaYeFmcJmZmePHj0cI3bhxw8HBQViblVLfzOA+fPjwtYc0NDQCAgIk+urxdhkZGd26dYvH4yUlJRUWFuJdTicpKioGBgZik0cVFxd7e3vn5uZ2blPiGAcXFRV18eLFP/74Y/v27Xl5eebm5r6+vvwZxxITE8+dO1dQUKCqqmppablo0SIdHZ3w8HD+0mpBQUHY/16+jRs3IoS2bt2K3bx3797evXuvX79OpVILCgrCw8PT0tJ4PJ6tre2ECRN69OiBddmuXr2alJRUXl7u4OAwZsyY3r17I4RWrlyZkZGBbWfmzJmZmZlf2/KOHTsYDMbOnTtbVyIJ4+D27duXkpKyfft2ExOTH9/aD/ryLOrkyZP9/f2LiooiIyPV1NR69+49f/783bt3JyYmGhkZTZkyxcfHB/uauXbt2suXL/Py8jQ0NNzd
3QMCAhQUFIqLi+fNmxcYGIhNqtHY2Dhr1iwvL6+FCxe23ktOTs6CBQu2bt164MABFRWVY8eOcTicM2fOfPkXX716dVpaGvaqw4cPR0REkMlkXV3dK1eubNiwwdDQcMGCBXv27HF0dMSOlaKjo3Nzc83MzLy8vMaNG4d9Kbb+mNnZ2fn5+WHP5xPPWdR21NfXT58+ff/+/cS4Gq+uro5Go02dOrVPnz5Llizp+AvFkcHJysoyGIzQ0NBly5bdvXvXw8Nj//795eXlCKFXr15t27bNx8fn7Nmz69atKy8vP3z4MBY9Tpw4UUdHJyYmpk3r1g4WixUcHCwjI7N9+/bNmzdzudytW7cymUwajXbmzJkbN26MGTPmzJkzHh4e27dvf/LkCUJo7969o0aNMjU1jYmJmTJlirB+5W8SSgaHzUypp6cXHh4uCa2bQBQK5cqVK8bGxlFRUTNnzoyLiwsODh44cODt27c9PT0PHDjAYDAQQjdv3rx8+fKECRO2bNkyZ86cx48fnz9/HiFkYGDw888/nz59ura2Flt2WklJac6cOW32gp0XunDhgp+f34oVK0gkUmhoqMC/+O7du21tbX18fGJiYqysrCgUSm5ubk5OzubNm9s0Uv/888++ffusrKxOnTo1c+bMGzduHDt2rM3HbOfOnRQKZfPmzZK2goGKikpkZCR2Di0/Px/vcn4UNnXF8ePHsS5FWVnZxYsXO3LiVUwZHJvN/vnnn+3s7Egkko+PD4/HwyYeCA8P79evn6+vL41Gs7e3DwoKSkpK6vTBaX5+fk1NzdChQ62srKytrbds2RISEsLhcJqbm+/fvz9p0qSRI0eqqqoOHTp0wIABYl58t40fz+C2bt165syZiIiIadOmCa8ukbCysho5cqScnBy2/LC9vb2npyeFQvHy8mppacH++40fPz40NNTT07Nnz579+vXz8vJKTk7GXo591f3999/5+fl37twJDg7+cmgr1rHq1avX+PHjzc3NO/4XJ5FIZWVlGzZscHd3bzOWOyYmxtHRcfHixerq6k5OTtOnT79161ZNTU1hYWFNTc24ceOsrKwsLCzWrVuHfcxE+RZ2krW1NUJo1apVDx8+xLsWIVBVVcU+7erq6sXFxZs3b8aOYdt5yVczOKyHJUT8pYuVlZURQtj3dk5OTusljbGhMdjK252gp6dHo9FCQ0MjIiLevXtHJpN79uyppKT04cMHFovl4uLCf2aPHj1ycnLodPoP/1qd1NLSUlpa2rnXslgsf39/JyenI0eOaGhoCLs04cMWQkQIYSPX+WtQYPOAYp8EWVnZly9fLlmyZNSoUcOGDbt27RrWZcNGI65cufL+/ftbt24dP368ra3t13aE/X9ubGz8rr+4sbHxl4eTXC43MzPT1dWVf4+TkxOXy01PTzc0NFRTU9u7d29ERERGRgb/Y/Zjb5IIXb58Gfd1HoRLTk5u5cqVu3fvRggVFhaOGzfua/+bBF+qhQ3AmzdvnhAvlvzyzH1DQ0Nzc3Prb2PsE49NuvK9qqqqjI2N9+zZExMTc+PGjdOnT+vr6/v7+w8aNAjryn55brGmpgavy3o2bdrk5eWlp6fXidcOHz78ypUrUtG0CSTwQvSTJ0/GxMQEBga6uLjo6OicOnUqLi6O/6iNjY2Li8vLly/bH/uCjYAjk8lYo9nBv7jAS51YLBabzT59+vTp06db319bWysvL7979+4vP2Yd++3xMWTIkEePHmEzRBBM7969N23a9LXTSl9t4Pr27SsvL19dXY2dWhJFZdgHq3V4gTVt37s7bPoKKpXKZDKNjY3nzp07ffr0lJSUuLi43bt3m5qaYmnX0qVLDQwMWr/wm6MBRDQxRkVFhZ6eXucWdpk7d+6ePXukt3UTiMfj3blzx9fXF5s98ctx7enp6enp6e7u7keOHDl8+HD7V5ioqqpiSU0n/uJ8CgoKioqKPj4+/fv3b32/vr4+1un78mOGjeGSWKtXr37x4gXe
VYhEO0sJtzepi4uLi4aGRk1NDXasK3QUCsXa2vrt27f8e7CTmN887yMnJ9e6l4edDldUVKyoqIiNjcU+ne7u7uvXr6dQKB8+fDAwMMAa057/z8TExNjY+MtrfQVuWei0tbV/+eWXTrxw586dw4YNk5CVoYWIzWYzmUz+KWkWi/Xs2TP+o83Nzfv27Zs6derKlSvLy8uvXLnS/tZYLFbH/+LtsLCwYDAY/C3Y29traGhoa2sXFBQI/Jh19rcXE0J23zBXrlzhJ7ZtfHvWKktLS1dX1wcPHoigMDRmzJiEhITIyMj6+vrU1NSwsDAnJyfsm9DQ0LC6ujohIeHLVqZbt27v37/PycnBzsMmJCRg99fU1Ozfv/+vv/4qKioqLCy8dOlSS0uLvb09lUr19/c/f/58eno6i8V68uTJunXrjhw58mU9X9uyEFVUVLQ56umgS5cuUSiUCRMmCL0k3MnJyRkbG8fFxRUXF9fV1e3fv9/BwaG+vh77sjl58iSZTPbz81NRUZkzZ865c+faH6xPp9MVFRXb+YsbGBhkZWWlpKRgi35+zaxZsxITE2NjY7HobefOnWvWrMEm8hP4MRP2uyJkWGJFSJmZmV/7SHz1ELW1UaNGYaPDw8LCgoKChFiZj49PVVXV1atXjx07pqOj06tXr1mzZmEPubm5OTg4bN261d/f39/fv/Wr/q+9Ow9o4lobBn6ykJAESNiRRRAQRFbZRK3lKm5gtUqLUEqxWqX1rUutLahtXahWcG/1al16vZ9VVBRw11ZtKVp3vCAg4sKqbMqekIQsfH+Mby4vJpElyUyG5/dXyEzOHEEezjxzznOmTZtWVVW1cOFCmUwWGhoaExOzZcsW7PHcvHnzMjIyMjIysGdqqampWEo7KirK2dk5PT09Ly+Pw+F4eHgsWbLk9f6oalmDUlJSpk+f3ttP3blzJzs7Wy/KHPbN8uXL9+zZk5CQwGQyExISfH197969Gx0dvXz58lOnTm3evBlbKRwREXHhwoUtW7aoWoqgyMSp+YlHREQ8fvx45cqV69atU9MlLy+vnTt3Hjt27JdffhGJRB4eHmvWrGEymZ6enosXL/71119f/29GZGTNwWE/a1NTU6WHelfRd+/evRQK5Y2F4XEslyQSiQwMDHS250hvJ/qKRKKamprezr1saGiIjY3FbosIjgjlkogA94m+rwsKCiJrDk6N3hVWTkhIwObH3bhxQ2td6hdDQ0Mi76jEYDCwTZJ6JTo6Wn0hANBVf5YukhhZh2/9zcF1Y2dnhxB68uRJt0VLxMHn83HfFkCpwsLCuXPn9rbQxfz58zdt2qR+E0LQVVtbm56uNteqgZmD6+PWGB999BE2K73Ps1W1h06na6l4Xj9dunSpt7vHk/WxqVbpRQlv3cvOzsa7C9oSFRXVdUp2V/3dVev8+fNPnjzptvwV95Llbyw1rilaLVmenp5eUVHR25iIL8jBYSAHRxD93dwsIiKCx+M9f/5cIpFoqEsaQMA03OXLl+vq6np+/p07d/7880/9im4EATk4pQZmDq5H00TUi4+Pl0gkpaWl9+/fj4qKIsI9gkwmy8jIwOpJEUFubu7x48exukA90dDQ8O233+rFY9Nu6HQ67j/906dPz5w5E5tWghcC/okldw5O1XhZM/8JDAwM3N3dT548efXq1bFjx2IVJfHFYDBu3rw5ceJEvDvySmpqas9Pjo6OxuZY6R0ajYb7T7+8vJzNZuMeZ4kG5sFpQE1NzaBBg27evBkSEqLBZgeUhISEzz77zN/fH++OAFKBHJwGYEuRs7KyMjMzNdtyH9y9e5cID3mTkpK6rrdVb8OGDZMnT4bo1h/Z2dnErM6GL7IO3zQ8D64nUlNTsUJA+G7wY2BgoL2t+XooNzeXTqd7eHj05OT09HSyrjbVpVWrVsFzhteROwenKtTQtFQpBKtxuG/fvqdPn/r4+GjjEm9kY2NjaGjI4/F6VUNCs2xtbXtYKezu3bsZGRmE
nT6tR8rKykJDQ/F9yEBA2dnZfVhFoxesrKxcXFyUJn81nIN7HbY+XyqVDsD/cA0NDdXV1d7e3j05U19WmwI9BTk4rcCqj5w5c6ZrgVZdWr16dUNDAy6XXrJkCbYZyhvBalMNghycUpCD06KZM2f+9ddfGtypq+c8PT1/+eUX3V+3srJy7ty5ajYQUEhISNi4cSOsNtUUyMEpNTBzcFq/Re2q5X/15K5NgxoaGvq/R5+WpKSkuLq6vv/++3h3hDxWrVr1zTffwDy4bkg8D+7BgwempqbYFI5udBrgsF0OPvnkkyVLlvj5+ensou3t7RQKBdvRRjdyc3Nzc3PfWBxUH1ebAj0FOTidXI9KPXDgAFYySH3BaA2Sy+WK3Ux0IyUl5Y2LKO7evfvHH39AdNM4yMEpRdbhm/ocnK5HcF3FxcV9/vnno0aN0sG10tLSnJycRo8erYNryeVyuVyu/qlxY2NjTEwMXg9eyO3tt9++ePEijnODgI6tXbvW399/2rRprx/S9Qiuq0OHDmH72+tAbGysbqIbQqiiouKNfzZmzZoFj0215B//+AcB17rjDurB4WbdunWTJk0KDg7W6lWys7Pd3d2VZiI16Pz587du3Vq7dq2ac2C1KdA9yMHhZsWKFX3bSa9XOBxOcnKytq9SUVHx5ZdfqjkhJSVl0qRJEN20B3JwSkEODn85OTnOzs729vZaav/q1as+Pj44zjhLT08vLy9PTEzEqwMDAeTgBhqC5uBeFxAQsHDhwl5Vvu2VsWPHYtEtMjIyJCTkhx9+6H+bkyZNUjww3bZtm5qC3dhjU4hu2gY5OKUGZg6OWAGOw+GcPHlSJBK1tLR02xkrPj5eI5cYM2bMyJEjKysrJRKJRmbGcTicpqYmf3//sWPHCoVCVeUeGxsbV65c+fPPP/f/ikC95ORkmOX7OhJPSBo+fLiq3DqxAhzG0dGRxWKFhISUl5dj7wQGBpaWlp4+fbo/zUZERAQGBorFYixB09nZ2cOFoj1BpVKFQuGJEyfGjRun9AR4bKozkINTamDm4IgY4LCC47dv3y4qKkIIYSGjvb39119/7XOD7777bn19fbc3+5+mkclkUqlU8SWVSm1rawsKCoqLi+t62qeffpqamsrj8fp5OdATsBZVqYG5FpWgAQ4zderUGTNmYFktKpVaXV2dlpbWt6ZOnTo1atQoBoPR9c3+BzgKpftTGmNj46lTpx46dEjxDraqISAgoJ/XAj0EOTilIAdHRFVVVYrXYrE4PT1dJBL1rakdO3bMmDHDyMgI+5JGo3E4nH52TyqVKoKmXC4fNGjQokWLutYQTU9Pp1KpsJZelyAHpxTk4Ahn4sSJ2KpVhZqamv7cqCYmJi5YsADbrZlGo1GpGvjnK3ro7u6+ffv2yMhIxaHc3NwrV67AY1MdgxycUpCDIxwWi2Vubs5iseT/SyKRnDt37uXLl31uMzo6+vvvv7e2tqZSqf1/itrZ2SkSidhs9vTp048cOeLi4qI41NTUtHz58j179vTzEqC3IAen1MDMwRFiom/eX83VpUK5HLU2SLodkkqxPL5MIpFIpRKpVNbZKe//ciu5vPNFfb21jXU/20EI1dXVGxkZcTjd03lNTU1cLo9Kpaj4XF+YWjEoFGTnwvJ+C6pjdhceHo49Fm9qauJyuVQqVS6X29jY7N+/H++uEQLUg8OBRNyZtrFiWDCPY0LnWTHlMvyjLZFRaZSmOrGgVVpW0Br95WAqZNK7mDZtWrc/42w2Ozk5may/1b01MNei4rwRzKENFVM+tjcyHXD70fQZ18IAIWRha3h0S2Vs4mC8u0Mg3t7e1dXVXZO2rq6uEN0USPytOH78+JAhQ5Q+SMUzB3flaP2Y6dYQ3frA2tHQc7TZ36fx2UyHmD744IOuNyksFmv27Nm49ohYBmYODs8AV3y7dZCL7sqIk4ytM6v4TivevSAQb2/vrjvwurm5hYaG4tojYoF5cDr1srpjiJcRXlcnAUMO
zdKW2dYM8yH+KzY21srKCiHE5XK7LSYBMA9Op6QSOb9Z2oMTgUotjRK5VN6DEwcKLy8vbJ9GNzc3VYuCByxy5+BUzYOD/BfATacMtfOlghZZu0Am7dBMpJ76j09eVtAjQqOe5PM10iDdgMriUNkmdLYxnabPvy7kzsEZGhoqPaTPPzGgn1oapBUPBI/y+B1iJGjuYLDoHB5DItbUUNQsfMxSfjW6U62yMF+vMFg0QZO4QygzNKIxGJShfkZOnhxTK43VodEZEs+Di4qKMjU1VXoIAhzQndYGSc7JhpYGKd2QwTHjWpjrxyMmc6dXLwRNokeF7YU36ox41LEzLCxsGW/4JJF8/fXXZJ0HN3z4cFWHIMABHcnOePkkn2/pbGbn3d8aB3jhmBpyTA0RQm0vhWf219oOMZz8kRXeneopsg7fiDsPDgwQUknngbXlLa0M11EOXGt9jW5dGVuwhgTZieWsn5eX6suzMnLn4Ig4Dw4MBCKBfO/KUjuvQVwbMoS2rkys2EPHOBzdUtXSoAcxDubBAaBh/GbZsW3Ph493YrDJmQyh0amuowef3lv74lkH3n15A5gHB4CGHdpQ7uCr3Z22icDBb9Dxn6qkHYQuFREWFoZ3F7RFX+vBAb12Zl+to78tla7JglGE5TbKIWu38jQQQaSkpODdBW2BHBzQteLbrXw+Ypno00SK/qAb0jppBncuNeHdEZWuXLmCdxe0BXJwQNeunWqwdDbDuxc6ZeVidutCAyLqfery5cvx7oK2QA4O6NT9ay1mDlw6Y8AV5LTztLh5gaCDOMjBkVlp6ZNxYYH37/8H74688uxZ5biwwDt3b+LdEa0o+LuVbUrcVQoZZzZu2vGBNlo2NmcX3mjWRsv9Bzk4ADRA0CJtb5MOnOxbV3Qmjc6g1VcRccsbyMEBoAFlRe3kWK7QNxxzo/IH7Xj3QomBmYPTm+mXaUf+fffuza1bfsa+nD3n/ebmplNZr/4ofb9upaBdkPLDj42NDbt2by0syheJREFBo+Lj5jk4OCoaEXeId+3e9lfO5c7OzvHjJs+ftxDbAv3mrb+PHTv4sKTIzMzCy8s3Yd4ic3MLhJCa1jKzjt28ebW4uJDBZPr6+H/yyed2tvYIoYzMo2lHDiz9YsXqNYkzZsxa9PlXrW2te/b8eP7CKS6XFxgwcv68RdbWNooubdm6/uy5LHNzi7fHjl+8iAw7qNZWiOiGWhy+3bl39sadrJq6J4OsXf28J4wdFYPtw7B6w+TJYQmC9ubf/9jPZLDch4a8G/6liYkFQkgsbj98YtWT0ruDrF1HBUX24CJ9Z2hkUF2mmUpNmkXuHJzer0V1dXUvfliIbejb1NRYV1eDZbKwowWFeYEBI2Uy2dJln+bl5y79YuW/9h8z5Zn9z+ezn1c/UzTy046Nbm4ey5PWfhg791j6r+cvnEIIPXr8cMXKJSNGBP37XycWL0p8+vRR6sY1CCE1rRUU5O3YucnT0zc5efPypLVNTY3rf/gWuwSDwWhvF5w+fWLF8uSZ786SSqXLVyx+2fBi65afFy38uv5F3fKVi6XSVyt7Dvz7Zx8f/61bfp4VFZd1Mv2PP3/H41urYfwWqQFTW48X7uX/dizre3tb95VfZoVPXJBz/eip89uwQzSaQfa1QxQKNXnF74mL08sq8n/7cx92KP3k+pcNVZ9+vHP2B6m19aUPH/2tpe5hd6ntrURcuTUwc3B6M4JzGzpMJBKVlj0Z6uqel5/r7DzUiGOUf/+evf3g2tqaFy/qA/xHFhTkVVaWb9m8239EEEJowWdf/H39r4yMNMXIKMA/eELYFITQCL/A334/++efv097J7KwIM/Q0DDuw7lUKtXa2maY+/DSsidYFFPV2vDh3gd+Sbe3H0yn0xFCUolk5bdLW1pbuCZcCoUiEoliYmZjn7r2d3ZxceH/O3Bi8GAnhJCDg2P68UONja82ixnhFzhxQjj2IjPraEHBf8aPm4Trt1kD2ltlplrb
SOh27ilnxxGR0xIRQsZGZpPDEtKz1oWFfmxsZIYQsjCznxA6ByHEYhm7u4Y8e/4QIdTS+iK/8HL0zO8cHbwQQu9MXvjg4VUtdQ8hRGfS2/lEDHBXrlwh6yBOTT04vRnB8Ximtrb2BQV52HjNy9PXw8OrqOg+Quj+/Xvm5hZDhrgUFOYZGBhgkQUhRKFQ/HwD8u/fUzQSFDhK8Xq4h3d1zTOEkJe3n0gkWvHNF8dPHH72vIrL5Y3wC8Suoqo1Go1WXf1sxcol70wPHRcWuPLbpQih5qZGRePD3D2xF0+fPmaz2Vh0w8L0tyvXWVm92nDa28tP8RGuCY8c+7EzDGk0A62M4ORyeVnlfbehIxXvDHUO7OyUl5XnYV/a23koDrFYJiIxHyHU2PQcIWRtNURxyKHLaRpHo1NZRkQsh5meno53F7SFDDk4hJD/iKCiovzImdH5+blzPv6MyTT88adUhND9gv+MGBGEEOLz2yQSybiw/3MrzuP9N7RzOP/d5obNZre0NGNBJ2XDTzk5V/bu27Fr97YA/+CPZ3/q5eWrprW///7r21XLPoyd82nCEheXoXdzbyUmLex6GoPxKgklEPCZTOXFlBFCNLo+ff97SC6XS0QSBkvzMU4q7ZDJJBcv/3zx8s9d328TKP60KFkWJmhvQQgxGWzFOwyGFqewSERSqZiIOwHdu3evB2fpJTU5OH36BQsIGLlnz48tLc2lpU/8RwRjw6iWluaCwrzYmI8RQubmFiwWa/26bV0/Reuy/7tIJFS8FrQLuFwe9npk8OiRwaPnfPxZbu6tjMwjK7/5IjPjkprWzp7P8vb2m/fJ59ibfL7K6thsNkcobJfL5VSq3gyW+4nDpWvpN5zBMGQy2AF+ET6e47u+b25mjeIDvQAAEm5JREFUp64/bC5CqEMiUrwjEgu00T2MVCxjmxBxhjO568GRYU+GEX6BtXU1V/74zcVlKJvNRgi5uw+/fPlCZWV5YGAIQsjFxU0oFFpZ2WAPNBFC1TXPedz/juAePX4YEvIW9rqk5IGdrQNCKC8vV9whHhk82sLCcvLkd2xsbL/4MqG2rkZNa62tLTbW/x0SX736h6o+D3MfLhKJSh4VewzzRAhVVpZv3f7Dos+/ZjKZWvs+4czCllldpa31SraD3ISiNlfnAOxLqVTS0PScx7VW8xFTni1CqLzyPnZnKpVKHj+9zeEoT9n0n0wmt7RTOWbHEYkr+pIhB4cQ4nJ5bkOHZWSkeXn6Yu94efpmZh11dnbFZnUE+AcHB4/evPn7urralpbmk6eOf7bgo4sXTyta+OPP327dvo4QunT5QnFx4bhxkxBChUX5a9Ymnjmb2dzc9KC4MDPrqIWFpY31IDWtubq43bl78z95d6VS6fETh7HGa+uUPMcJDAyxs3PYu/enq9f+vHP35vYfU17U1zk6Dnn9TNKwdWK2vdDWPImIiQsKi/+6lXtaLpeXVeQdSv9mz4HPpVJ1tdh4XCunwb6//bG3/kWFRCI+fPw7RNFigZO2FwIbRyL+9YJ6cHpgxIig6prn3t4jsC89PX2qa56P8AtSnLBh/fbQ0AnJ61bMiJyQmXV0woTwyMgYhJBEKkEIzfvk8737fhoXFrhv/46Y6PjwKdMRQrOi4qZGzNz5z80z35u49MsENpuzbete7PGoqtbmzv2fkcGjv/3uy0lTRtXV1S5PWjvMffjyFYsvX7nYrcN0On3zxl3yTvmq1V8nJi00ZLE2/PAjnYypNwUHdza/USSXaWUQN8TRb+mCg2XleWtSp+z59yKhiD/nw00GBm8IKB+8t3qwvef23fHfrBvHZpkE+09HndoaY7bWC5wJuekEiSv6qlmLSunU2k9avdoK0V8ZLyM+scfl6uSQtbPi3U9tuRaEe2Z3Ka1eKGWZWLJ7cC6ptDeLpW0t0xOIWOOTxNsGrl271t/ff9q0aa8f0rMRHNALfqHcxgqCFtXQqsaqJt+xXLx7oRxZo5v6tahkvlcC
eLG0Y1raMVpqBao2mrl+O+P8pV1KD0kkYlW3nDGRq7w8QjXVybKKvF8OLVN6SCrtoNEMKMpSdTEzV3kNV94HQZOIweh09CDouPXrr78m64NU2BcV6Npb0y3O/7tOVYAL8IvwGqY8TAjFAhZT+adYbBMN9nCIo1/iomNKD4k7hEwVc+VYLGNVDfLr20JnWGiugxpG7hwcGebBAT3CtaD7jDEuvP3C2s3y9aNMBktVBDFBuosR2FJ8jXhR1uTkwbRxIuIEEQxZh2/kmQcH9IvnKJMX1R0Nlc3mg3l490W7mqrbOCzpyMnEHb6RPgdHhnlwQO/84z0LByfayzKCFrnViMbnbSbG0og5Nj04F08wDw4AzQuezLO06ax/8hLvjmhFQ0UjmyEOm0XosRuG3Dk4VfPg4BYVaF1opEXRzdaim7Vsc2MjCyJOgu0DQaOwrbbF1ZcdOEFbq740C3JwAGiLZ4iJnQv7+tmXFfeazRxMOWYsKk0/N4TuRG0NwpbnzUZcaliMpaWd3mw9MTBzcBDggI7wLOkRc2xePBPfv9ZaklNvbG7IMeNQaBQ6k27ApCMKUfcTRUgqkkk6ZJ0yeXtTe1Ntu4uP8bgoczsX4m4bphTMgwNA6yztmWExlmExls8eC2srRC+rxYJGqdSA2vJC3YJ5HPGsGBKxnMOlmdsyrMfwHD1s8e5RH5E7Bwfz4ACx2A9l2Q/Vs0GQXiPr8I24OThDDhHrAuoRlhENp1IJQP8MzBwcbtNEjE0NXlaLenAiUKm+SmRiTrhSIoCYYB6cTrGNaSamBh1COV4d0HetDRL7oewBUwgd9Be5c3Cq5sHh9vtBoSDvMdwb5+rx6oC+u3nuxYhQki+BAhpE7hycqn1RcSt4iSn4u7WypP3t94i+zIVoLh+u9h7DdfUlyaRZAPrjwYMHpqamSu9ScQ5wCKGiG60P77ZJxHIbJ5ZQQMT91oiDZUSrKRUyWVTvMdyhI4x68AkAXiHxPDg18A9wCCGZpLOhtqPlpURLhfxJg0qj8KwMLGyZ2tw1BZBTUFDQnTt38O6FVhB9HhzNgGLlwLRyIOJeRACQA4mHb2rmwRFiBAcAAH2mJgcHswwAGBBgHhwAgLRIPA/u6NGjqtKLhMjBAQC0jcQ5uJKSEg5H+ZQpyMEBAPTbw4cPeTyejY2S6bRwiwrAgEDiHNywYcOURjcIcAAMFJCDAwCQFuTgAABA/0AODoCBDnJwAADSghwcAIC0IAcHAAD6B3JwAAx0kIMDAJAW5OAAAKQFOTgAANA/kIMDYKCDHBwAgLQgBwcAIC3IwQEAgP6BHBwAA93AzMHBLSoAGkPk+6EbN24QuXsIIUpft/s9evSoi4tLUFDQ64cgwAGgGU1NTTKZDO9eqHTs2LGGhga8e6GOhYVF3z6oJgcHAQ6AAYHJJO3G6tHR0TweT+khCHAADAitra0mJiZ490Irhg0bpuoQPGQAYEAQi8V4d0FbYB4cAAMdWYdv6nNwMIIDYEAgTg6uubl5ypQpOTk5mmowOjpa6SNUCHAAEE5MTExNTY3Gm21tbdV4m+Xl5fHx8RpvtrdgLSoA+qGurq65uVkbLWsjB/fo0SONt9kHkIMDQNdOnz595MiRjRs3rlu3rqKiYsiQITNnzpw0aRJ2tKqqaufOnY8fP6bT6YMHD/7oo498fX3z8/OTkpIQQnPmzBk1atTq1au7Nsjn8w8ePHjnzp2mpiY3N7fx48dPmTIFIbRq1SqEUHJyMnbapUuXtmzZkpmZyWaz16xZY2Bg4ODgcOLECblc7uTktHTpUhcXF4RQZGRkdHT048ePr127xmazvby8EhMTjYyMsEbS0tIuXbrU0NBgaWnp4+OzaNEiKpVaVla2YMGC5OTk7du383i8kJCQtLQ0hNCUKVMSEhIiIyMbGxv37t374MEDsVgcEBAQGxtrb2+PNZidnX3w4MG2traQkJD33ntPs99qyMEBoGsGBgZ8Pn/Xrl1ffPHFhQsXxo4d
u23btvr6emxK8NKlS62srP75z39u27bN1NQ0JSWlvb3d19cXi1MHDhzoFt0QQlu3bi0uLl64cOG+ffuGDRu2Y8eOBw8eqO8DnU7Pz89HCJ06dWrfvn1mZmZr167FZiPT6fSsrKzw8PALFy6sX7++qqpq9+7d2KcOHjx45syZ+fPnp6WlzZ49OycnJzMzE/sXYbHv/fffX7JkSXx8fFRUlJWV1cWLFyMjI2UyWVJS0v379xctWrR7924ej7dkyZLq6mqEUFlZWWpq6oQJE/71r39NmDBBcSFNgRwcADiQSCQffvihh4cHhUKZMGFCZ2fn06dPEUJZWVkMBmPJkiWDBg2ys7NbunSpUCg8e/as+tYKCgreeuutgIAAS0vLuXPnbt++3dzc/I196OjoiI2NpVAoHA4nPj6+vr6+qKgIO+Ts7BwQEEChUDw8PN55552cnByJRMLn848fP/7BBx+MHj3ayMjo7bffnj59+pEjRyQSCbaUyt/fPzIy0t3dvduFioqKqqqqEhMTg4KCzMzM5s+fb2JicvLkSYTQ2bNnraysYmNjjY2NfX19w8PD+/FNVQLWogKAD0UgwO7++Hw+NqJxdXWl01/99rHZbDs7u8ePH6tvytPTMzMzs7W11dvbOyAgYOjQoT3pgJOTE3YhqVRqa2uLEKqsrPTx8UEIYfeqGFtbW4lEUlNT097eLpFIuk6dHTp0qEAgqK6uxtpRdd2ioiIDAwM/Pz/sSwqF4uPjU1BQgBCqrq52dHRUnOnm5taTnvfcyZMn7ezsYC0qALqmdAF5Y2MjFmsUDA0NhUKh+qaWLVt27ty57OzsjIwMDoczffr0Dz/8UBElVVHMDuFyuVhnBAJBt0NYB7BDTU1N3Q6xWCyEkFAoNDY2RggxGAylF+Lz+RKJBEsLKmArqFpbW+3s7LpdS4MuXbo0e/ZspYcgwAGga2w2u9szTaFQ2DUEKGVsbBwTExMdHV1UVHT9+vUjR44YGRm9nrCXy+Vdv1SEMxqN1t7ejt04S6XSrocQQiKRCAs9WLYe+xKDfcrMzEwikajpnpmZmaGh4dq1a7u+SaPRsDnGXf+9bwzlvTV79mxfX1+lhyAHB4Cuubm5lZSUKOJFW1tbVVWVk5OTmo+0traeOnVKJBJRKBQvL6+EhARfX98nT55gQyosBmGePXvW9YNlZWUtLS3Ya+x8b29vrG4S9vwB8/TpUzqdbmtr6+zsTKPRuj6+KCkpMTIyemOpD2dnZ5FIZGlp6fu/rKysnJ2dEUJWVlYlJSWKyHvr1q3efLfeLDg4WNU0ZghwAOhaRESEQCD46aef6uvrKyoqNm3axGQysZs7bF5FTk7Ow4cPu36ETqcfPnx4/fr1RUVFjY2Nly9ffvLkiaenJ5bme/ToUVlZGULo3r17169f7/pBExOTXbt2tbW1tbW1HT582MrKysvLC3se2tDQcPjwYZlMVlVVdf78+dDQUCaTaWxsPH78+KNHj968ebOtre3y5cunT5+OjIykUpXECjs7u8bGxuvXrz979mzEiBGBgYHbt2+vr69vaWk5c+bM4sWLL126hBB6++23m5ubd+/e3dnZmZ+ff+bMGQ1+MysqKjZu3KjqKNyiAqBrdnZ2K1euTEtLi4+P53K57u7umzdvZrPZWLJ/4sSJv/76a25ubtffWzab/d133+3evXvZsmXYo4P58+djs+qmTZtWVVW1cOFCmUwWGhoaExOzZcsWxQednJycnJzi4uLEYrGNjc3q1aux20aEUHh4eGlp6dSpUxFCvr6+CxYswN7/7LPPqFRqSkqKVCodNGhQdHR0VFSU0n9IUFCQp6dncnJyXFxcXFxccnLyuXPnNmzYUFxcbG9vP27cuHfffRchFBAQMG/evHPnzoWHh1tZWSUmJn711Veaqr5ZVFTU9V67G9iTAQDNIGDBy3Xr1vH5/JSUlNcPzZo1a8aMGbGxsVjarqWlxcjICBvZ4aVvBS+fP39Oo9FgmggAQDkq
lWpqaopFZ6FQiD021RfqH85ADg4AgBRPPCkUSmNjI9596YWkpKS6ujpVR+EWFQDNIOAtat90dnZSKJSOjo7Ozk5dFlnqwy2qTCYbPXq0mseyMIIDAPwf2HxgAwMDsVhM8DrAUqkUW/CvCozgANAM0ozgupLL5VQqVSAQsFgspTNFNKjPu2qpAQ8ZANAMOp3e5509CU4mk927dy8kJESrV8GCaa8+kpmZKZVKZ82apeoECHAAaAa2VJOUeDwetiYhJyentLT0448/xrtHr9y7d++tt95ScwLcogIAemHnzp22traRkZF4dwRhBZDNzMzUTN+DAAcA6B2xWMxkMlesWDFz5szg4GC8u6MOPEUFAPQONndk4cKFx48f71aVRJfq6+vnzZun/hwIcACAvrCzs9u0aRNWbmTNmjW6n1Dy9OnTNy66gFtUAEB/nT17VigUqlqTryUikUgmk6nabgYDAQ4AoDFz5swJDw9XM29Dx+AWFQCgMfv376+trcWKzWn7WsuWLbt//776cyDAAQA0hkajLV68GAtw8fHxapbB919BQYFi31VV4BYVAKAVDx48qKysnDJlSl1dnbW1tcbb70llJwhwAADtSkpKsrS0/Oqrr3R/abhFBQBoV2pqKrYHa7cNcfrj9OnTP/zwwxtPgwAHANC6mTNnYpXmQkNDS0pK+t9gWVmZg4PDG0+DW1QAgO4IBIK8vLwxY8bk5+er2sxUg2AEBwDQHQ6HM2bMGITQ1atXFft49UFHR0dPToMRHAAAH/fu3fP393/69CmXy+1VtUuxWBwWFnbt2rU3ngkjOAAAPvz9/bFic3Fxcbdv3+75BysrK7FNr98IRnAAAPwVFhZ6eXn9/vvv2G7WmgIjOAAA/ry8vBBCfD6/JwFOJBL1sHgJjOAAAATS2NhoZmZWXFwsEAgCAwOVnpOUlDRp0qSwsLA3tgYjOAAAgZiZmSGEHB0d9+/ff/HiRaXntLW1ubq69qQ1GMEBAAiqsrJy8ODBBw8efO+999TXfVMFRnAAAIIaPHgwQsjFxWXq1KmKN2UyWX19fQ9bgBEcAEA/5Ofnl5SU2NjYnDx5cuvWrT35CIzgAAD6wcfHp7y8/MKFC9gEup6AERwAQJ+0traamJj08GQIcAAA0oJbVAAAaUGAAwCQFgQ4AABpQYADAJAWBDgAAGlBgAMAkNb/B+ILIh/EChidAAAAAElFTkSuQmCC",
      "text/plain": [
       "<IPython.core.display.Image object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# 控制流 - 构建和配置LangGraph工作流\n",
    "\n",
    "# 导入必要的模块\n",
    "from langgraph.graph import StateGraph, END  # 状态图类与终止标记（END 在下方条件边中使用，需显式导入）\n",
    "from IPython.display import Image, display  # 用于显示图像的模块\n",
    "\n",
    "# 创建状态图工作流，使用之前定义的GraphState作为状态模式\n",
    "workflow = StateGraph(GraphState)\n",
    "\n",
    "# 定义节点 - 将函数添加为图中的节点\n",
    "workflow.add_node(\"websearch\", web_search)  # 添加网络搜索节点\n",
    "workflow.add_node(\"retrieve\", retrieve)  # 添加文档检索节点\n",
    "workflow.add_node(\"grade_documents\", grade_documents)  # 添加文档评分节点\n",
    "workflow.add_node(\"generate\", generate)  # 添加答案生成节点\n",
    "\n",
    "# 构建图结构\n",
    "# 设置条件入口点 - 根据问题类型决定起始节点\n",
    "workflow.set_conditional_entry_point(\n",
    "    route_question,  # 路由函数，决定使用哪个数据源\n",
    "    {\n",
    "        \"websearch\": \"websearch\",    # 如果路由结果是\"websearch\"，跳转到网络搜索节点\n",
    "        \"vectorstore\": \"retrieve\",   # 如果路由结果是\"vectorstore\"，跳转到检索节点\n",
    "    },\n",
    ")\n",
    "\n",
    "# 添加固定边 - 定义节点间的直接连接\n",
    "workflow.add_edge(\"websearch\", \"generate\")  # 网络搜索后直接跳转到生成节点\n",
    "workflow.add_edge(\"retrieve\", \"grade_documents\")  # 检索后跳转到文档评分节点\n",
    "\n",
    "# 添加条件边 - 根据函数返回值决定下一个节点\n",
    "workflow.add_conditional_edges(\n",
    "    \"grade_documents\",  # 从文档评分节点出发\n",
    "    decide_to_generate,  # 决策函数，判断是否可以生成答案\n",
    "    {\n",
    "        \"websearch\": \"websearch\",  # 如果需要网络搜索，跳转到网络搜索节点\n",
    "        \"generate\": \"generate\",    # 如果可以生成答案，跳转到生成节点\n",
    "    },\n",
    ")\n",
    "\n",
    "# 添加生成节点的条件边 - 评估生成质量并决定下一步\n",
    "workflow.add_conditional_edges(\n",
    "    \"generate\",  # 从生成节点出发\n",
    "    grade_generation_v_documents_and_question,  # 评估函数，检查生成质量\n",
    "    {\n",
    "        \"not supported\": \"generate\",    # 如果生成内容不基于文档，重新生成\n",
    "        \"useful\": END,                  # 如果生成内容有用，结束流程\n",
    "        \"not useful\": \"websearch\",      # 如果生成内容无用，进行网络搜索\n",
    "        \"max retries\": END,             # 如果达到最大重试次数，结束流程\n",
    "    },\n",
    ")\n",
    "\n",
    "# 编译工作流图\n",
    "graph = workflow.compile()  # 将工作流编译为可执行的图\n",
    "\n",
    "# 显示工作流图的可视化表示\n",
    "display(Image(graph.get_graph().draw_mermaid_png()))  # 生成并显示Mermaid格式的流程图\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "15f927cd",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---ROUTE QUESTION---\n",
      "---ROUTE QUESTION TO RAG---\n",
      "{'question': 'What are the types of agent memory?', 'max_retries': 3, 'loop_step': 0}\n",
      "---RETRIEVE---\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Embedding texts: 100%|██████████| 1/1 [00:00<00:00, 22.62inputs/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'question': 'What are the types of agent memory?', 'max_retries': 3, 'loop_step': 0, 'documents': [Document(metadata={'id': '6263b6ea-5182-4860-9cee-b31e5e5a1e61', 'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'description': 'Building agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview\\nIn a LLM-powered autonomous agent system, LLM functions as the agent’s brain, complemented by several key components:\\n\\nPlanning\\n\\nSubgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\\nReflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\\n\\n\\nMemory\\n\\nShort-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\nLong-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n\\n\\nTool use\\n\\nThe agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.\\n\\n\\n\\n\\n\\t\\n\\tOverview of a LLM-powered autonomous agent system.\\n\\nComponent One: Planning\\nA complicated task usually involves many steps. 
An agent needs to know what they are and plan ahead.', 'language': 'en'}, page_content=\"LLM Powered Autonomous Agents | Lil'Log\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nLil'Log\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n|\\n\\n\\n\\n\\n\\n\\nPosts\\n\\n\\n\\n\\nArchive\\n\\n\\n\\n\\nSearch\\n\\n\\n\\n\\nTags\\n\\n\\n\\n\\nFAQ\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n      LLM Powered Autonomous Agents\\n    \\nDate: June 23, 2023  |  Estimated Reading Time: 31 min  |  Author: Lilian Weng\\n\\n\\n \\n\\n\\nTable of Contents\\n\\n\\n\\nAgent System Overview\\n\\nComponent One: Planning\\n\\nTask Decomposition\\n\\nSelf-Reflection\\n\\n\\nComponent Two: Memory\\n\\nTypes of Memory\\n\\nMaximum Inner Product Search (MIPS)\\n\\n\\nComponent Three: Tool Use\\n\\nCase Studies\\n\\nScientific Discovery Agent\\n\\nGenerative Agents Simulation\\n\\nProof-of-Concept Examples\\n\\n\\nChallenges\\n\\nCitation\\n\\nReferences\\n\\n\\n\\n\\n\\nBuilding agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. 
The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview#\\nIn a LLM-powered autonomous agent system, LLM functions as the agent’s brain, complemented by several key components:\\n\\nPlanning\\n\\nSubgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\\nReflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\\n\\n\\nMemory\\n\\nShort-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\nLong-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n\\n\\nTool use\\n\\nThe agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.\\n\\n\\n\\n\\n\\nOverview of a LLM-powered autonomous agent system.\"), Document(metadata={'id': '54b675c2-fc68-468b-8408-f9c36ef0006b', 'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'description': 'Building agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. 
The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview\\nIn a LLM-powered autonomous agent system, LLM functions as the agent’s brain, complemented by several key components:\\n\\nPlanning\\n\\nSubgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\\nReflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\\n\\n\\nMemory\\n\\nShort-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\nLong-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n\\n\\nTool use\\n\\nThe agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.\\n\\n\\n\\n\\n\\t\\n\\tOverview of a LLM-powered autonomous agent system.\\n\\nComponent One: Planning\\nA complicated task usually involves many steps. 
An agent needs to know what they are and plan ahead.', 'language': 'en'}, page_content='Memory\\n\\nShort-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\nLong-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n\\n\\nTool use\\n\\nThe agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.\\n\\n\\n\\n\\n\\nOverview of a LLM-powered autonomous agent system.\\n\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. 
\"Write a story outline.\" for writing a novel, or (3) with human inputs.\\nAnother quite distinct approach, LLM+P (Liu et al. 2023), involves relying on an external classical planner to do long-horizon planning. This approach utilizes the Planning Domain Definition Language (PDDL) as an intermediate interface to describe the planning problem. In this process, LLM (1) translates the problem into “Problem PDDL”, then (2) requests a classical planner to generate a PDDL plan based on an existing “Domain PDDL”, and finally (3) translates the PDDL plan back into natural language. Essentially, the planning step is outsourced to an external tool, assuming the availability of domain-specific PDDL and a suitable planner which is common in certain robotic setups but not in many other domains.\\nSelf-Reflection#\\nSelf-reflection is a vital aspect that allows autonomous agents to improve iteratively by refining past action decisions and correcting previous mistakes. It plays a crucial role in real-world tasks where trial and error are inevitable.\\nReAct (Yao et al. 2023) integrates reasoning and acting within LLM by extending the action space to be a combination of task-specific discrete actions and the language space. The former enables LLM to interact with the environment (e.g. use Wikipedia search API), while the latter prompting LLM to generate reasoning traces in natural language.\\nThe ReAct prompt template incorporates explicit steps for LLM to think, roughly formatted as:\\nThought: ...\\nAction: ...\\nObservation: ...\\n... (Repeated many times)\\n\\n\\nExamples of reasoning trajectories for knowledge-intensive tasks (e.g. HotpotQA, FEVER) and decision-making tasks (e.g. AlfWorld Env, WebShop). (Image source: Yao et al. 
2023).\\n\\nIn both experiments on knowledge-intensive tasks and decision-making tasks, ReAct works better than the Act-only baseline where Thought: … step is removed.\\nReflexion (Shinn & Labash 2023) is a framework to equip agents with dynamic memory and self-reflection capabilities to improve reasoning skills. Reflexion has a standard RL setup, in which the reward model provides a simple binary reward and the action space follows the setup in ReAct where the task-specific action space is augmented with language to enable complex reasoning steps. After each action $a_t$, the agent computes a heuristic $h_t$ and optionally may decide to reset the environment to start a new trial depending on the self-reflection results.\\n\\n\\nIllustration of the Reflexion framework. (Image source: Shinn & Labash, 2023)'), Document(metadata={'id': '5d351e9a-f8de-4970-a0ab-aa9dd3cad97a', 'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'description': 'Building agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. 
The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview\\nIn a LLM-powered autonomous agent system, LLM functions as the agent’s brain, complemented by several key components:\\n\\nPlanning\\n\\nSubgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\\nReflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\\n\\n\\nMemory\\n\\nShort-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\nLong-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n\\n\\nTool use\\n\\nThe agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.\\n\\n\\n\\n\\n\\t\\n\\tOverview of a LLM-powered autonomous agent system.\\n\\nComponent One: Planning\\nA complicated task usually involves many steps. 
An agent needs to know what they are and plan ahead.', 'language': 'en'}, page_content='Explicit / declarative memory: This is memory of facts and events, and refers to those memories that can be consciously recalled, including episodic memory (events and experiences) and semantic memory (facts and concepts).\\nImplicit / procedural memory: This type of memory is unconscious and involves skills and routines that are performed automatically, like riding a bike or typing on a keyboard.\\n\\n\\n\\n\\n\\nCategorization of human memory.\\n\\nWe can roughly consider the following mappings:\\n\\nSensory memory as learning embedding representations for raw inputs, including text, image or other modalities;\\nShort-term memory as in-context learning. It is short and finite, as it is restricted by the finite context window length of Transformer.\\nLong-term memory as the external vector store that the agent can attend to at query time, accessible via fast retrieval.\\n\\nMaximum Inner Product Search (MIPS)#\\nThe external memory can alleviate the restriction of finite attention span.  A standard practice is to save the embedding representation of information into a vector store database that can support fast maximum inner-product search (MIPS). 
To optimize the retrieval speed, the common choice is the approximate nearest neighbors (ANN)\\u200b algorithm to return approximately top k nearest neighbors to trade off a little accuracy lost for a huge speedup.\\nA couple common choices of ANN algorithms for fast MIPS:\\n\\nLSH (Locality-Sensitive Hashing): It introduces a hashing function such that similar input items are mapped to the same buckets with high probability, where the number of buckets is much smaller than the number of inputs.\\nANNOY (Approximate Nearest Neighbors Oh Yeah): The core data structure are random projection trees, a set of binary trees where each non-leaf node represents a hyperplane splitting the input space into half and each leaf stores one data point. Trees are built independently and at random, so to some extent, it mimics a hashing function. ANNOY search happens in all the trees to iteratively search through the half that is closest to the query and then aggregates the results. The idea is quite related to KD tree but a lot more scalable.\\nHNSW (Hierarchical Navigable Small World): It is inspired by the idea of small world networks where most nodes can be reached by any other nodes within a small number of steps; e.g. “six degrees of separation” feature of social networks. HNSW builds hierarchical layers of these small-world graphs, where the bottom layers contain the actual data points. The layers in the middle create shortcuts to speed up search. When performing a search, HNSW starts from a random node in the top layer and navigates towards the target. When it can’t get any closer, it moves down to the next layer, until it reaches the bottom layer. 
Each move in the upper layers can potentially cover a large distance in the data space, and each move in the lower layers refines the search quality.\\nFAISS (Facebook AI Similarity Search): It operates on the assumption that in high dimensional space, distances between nodes follow a Gaussian distribution and thus there should exist clustering of data points. FAISS applies vector quantization by partitioning the vector space into clusters and then refining the quantization within clusters. Search first looks for cluster candidates with coarse quantization and then further looks into each cluster with finer quantization.\\nScaNN (Scalable Nearest Neighbors): The main innovation in ScaNN is anisotropic vector quantization. It quantizes a data point $x_i$ to $\\\\tilde{x}_i$ such that the inner product $\\\\langle q, x_i \\\\rangle$ is as similar to the original distance of $\\\\angle q, \\\\tilde{x}_i$ as possible, instead of picking the closet quantization centroid points.\\n\\n\\n\\nComparison of MIPS algorithms, measured in recall@10. (Image source: Google Blog, 2020)\\n\\nCheck more MIPS algorithms and performance comparison in ann-benchmarks.com.\\nComponent Three: Tool Use#\\nTool use is a remarkable and distinguishing characteristic of human beings. We create, modify and utilize external objects to do things that go beyond our physical and cognitive limits. Equipping LLMs with external tools can significantly extend the model capabilities.\\n\\n\\nA picture of a sea otter using rock to crack open a seashell, while floating in the water. While some other animals can use tools, the complexity is not comparable with humans. (Image source: Animals using tools)'), Document(metadata={'id': '47ba1e93-5768-41f2-ac4c-80694a1bd8c9', 'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'description': 'Building agents with LLM (large language model) as its core controller is a cool concept. 
Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview\\nIn a LLM-powered autonomous agent system, LLM functions as the agent’s brain, complemented by several key components:\\n\\nPlanning\\n\\nSubgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\\nReflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\\n\\n\\nMemory\\n\\nShort-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\nLong-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n\\n\\nTool use\\n\\nThe agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.\\n\\n\\n\\n\\n\\t\\n\\tOverview of a LLM-powered autonomous agent system.\\n\\nComponent One: Planning\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.', 'language': 'en'}, page_content='Finite context length: The restricted context capacity limits the inclusion of historical information, detailed instructions, API call context, and responses. 
The design of the system has to work with this limited communication bandwidth, while mechanisms like self-reflection to learn from past mistakes would benefit a lot from long or infinite context windows. Although vector stores and retrieval can provide access to a larger knowledge pool, their representation power is not as powerful as full attention.\\n\\n\\nChallenges in long-term planning and task decomposition: Planning over a lengthy history and effectively exploring the solution space remain challenging. LLMs struggle to adjust plans when faced with unexpected errors, making them less robust compared to humans who learn from trial and error.\\n\\n\\nReliability of natural language interface: Current agent system relies on natural language as an interface between LLMs and external components such as memory and tools. However, the reliability of model outputs is questionable, as LLMs may make formatting errors and occasionally exhibit rebellious behavior (e.g. refuse to follow an instruction). Consequently, much of the agent demo code focuses on parsing model output.\\n\\n\\nCitation#\\nCited as:\\n\\nWeng, Lilian. (Jun 2023). “LLM-powered Autonomous Agents”. Lil’Log. https://lilianweng.github.io/posts/2023-06-23-agent/.')]}\n",
      "---CHECK DOCUMENT RELEVANCE TO QUESTION---\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---GRADE: DOCUMENT RELEVANT---\n",
      "---GRADE: DOCUMENT RELEVANT---\n",
      "---GRADE: DOCUMENT RELEVANT---\n",
      "---GRADE: DOCUMENT NOT RELEVANT---\n",
      "---ASSESS GRADED DOCUMENTS---\n",
      "---DECISION: NOT ALL DOCUMENTS ARE RELEVANT TO QUESTION, INCLUDE WEB SEARCH---\n",
      "{'question': 'What are the types of agent memory?', 'web_search': 'Yes', 'max_retries': 3, 'loop_step': 0, 'documents': [Document(metadata={'id': '6263b6ea-5182-4860-9cee-b31e5e5a1e61', 'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'description': 'Building agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview\\nIn a LLM-powered autonomous agent system, LLM functions as the agent’s brain, complemented by several key components:\\n\\nPlanning\\n\\nSubgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\\nReflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\\n\\n\\nMemory\\n\\nShort-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\nLong-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n\\n\\nTool use\\n\\nThe agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.\\n\\n\\n\\n\\n\\t\\n\\tOverview of a LLM-powered autonomous agent system.\\n\\nComponent One: Planning\\nA complicated task usually involves many steps. 
An agent needs to know what they are and plan ahead.', 'language': 'en'}, page_content=\"LLM Powered Autonomous Agents | Lil'Log\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nLil'Log\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n|\\n\\n\\n\\n\\n\\n\\nPosts\\n\\n\\n\\n\\nArchive\\n\\n\\n\\n\\nSearch\\n\\n\\n\\n\\nTags\\n\\n\\n\\n\\nFAQ\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n      LLM Powered Autonomous Agents\\n    \\nDate: June 23, 2023  |  Estimated Reading Time: 31 min  |  Author: Lilian Weng\\n\\n\\n \\n\\n\\nTable of Contents\\n\\n\\n\\nAgent System Overview\\n\\nComponent One: Planning\\n\\nTask Decomposition\\n\\nSelf-Reflection\\n\\n\\nComponent Two: Memory\\n\\nTypes of Memory\\n\\nMaximum Inner Product Search (MIPS)\\n\\n\\nComponent Three: Tool Use\\n\\nCase Studies\\n\\nScientific Discovery Agent\\n\\nGenerative Agents Simulation\\n\\nProof-of-Concept Examples\\n\\n\\nChallenges\\n\\nCitation\\n\\nReferences\\n\\n\\n\\n\\n\\nBuilding agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. 
The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview#\\nIn a LLM-powered autonomous agent system, LLM functions as the agent’s brain, complemented by several key components:\\n\\nPlanning\\n\\nSubgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\\nReflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\\n\\n\\nMemory\\n\\nShort-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\nLong-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n\\n\\nTool use\\n\\nThe agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.\\n\\n\\n\\n\\n\\nOverview of a LLM-powered autonomous agent system.\"), Document(metadata={'id': '54b675c2-fc68-468b-8408-f9c36ef0006b', 'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'description': 'Building agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. 
The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview\\nIn a LLM-powered autonomous agent system, LLM functions as the agent’s brain, complemented by several key components:\\n\\nPlanning\\n\\nSubgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\\nReflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\\n\\n\\nMemory\\n\\nShort-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\nLong-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n\\n\\nTool use\\n\\nThe agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.\\n\\n\\n\\n\\n\\t\\n\\tOverview of a LLM-powered autonomous agent system.\\n\\nComponent One: Planning\\nA complicated task usually involves many steps. 
An agent needs to know what they are and plan ahead.', 'language': 'en'}, page_content='Memory\\n\\nShort-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\nLong-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n\\n\\nTool use\\n\\nThe agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.\\n\\n\\n\\n\\n\\nOverview of a LLM-powered autonomous agent system.\\n\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. 
\"Write a story outline.\" for writing a novel, or (3) with human inputs.\\nAnother quite distinct approach, LLM+P (Liu et al. 2023), involves relying on an external classical planner to do long-horizon planning. This approach utilizes the Planning Domain Definition Language (PDDL) as an intermediate interface to describe the planning problem. In this process, LLM (1) translates the problem into “Problem PDDL”, then (2) requests a classical planner to generate a PDDL plan based on an existing “Domain PDDL”, and finally (3) translates the PDDL plan back into natural language. Essentially, the planning step is outsourced to an external tool, assuming the availability of domain-specific PDDL and a suitable planner which is common in certain robotic setups but not in many other domains.\\nSelf-Reflection#\\nSelf-reflection is a vital aspect that allows autonomous agents to improve iteratively by refining past action decisions and correcting previous mistakes. It plays a crucial role in real-world tasks where trial and error are inevitable.\\nReAct (Yao et al. 2023) integrates reasoning and acting within LLM by extending the action space to be a combination of task-specific discrete actions and the language space. The former enables LLM to interact with the environment (e.g. use Wikipedia search API), while the latter prompting LLM to generate reasoning traces in natural language.\\nThe ReAct prompt template incorporates explicit steps for LLM to think, roughly formatted as:\\nThought: ...\\nAction: ...\\nObservation: ...\\n... (Repeated many times)\\n\\n\\nExamples of reasoning trajectories for knowledge-intensive tasks (e.g. HotpotQA, FEVER) and decision-making tasks (e.g. AlfWorld Env, WebShop). (Image source: Yao et al. 
2023).\\n\\nIn both experiments on knowledge-intensive tasks and decision-making tasks, ReAct works better than the Act-only baseline where Thought: … step is removed.\\nReflexion (Shinn & Labash 2023) is a framework to equip agents with dynamic memory and self-reflection capabilities to improve reasoning skills. Reflexion has a standard RL setup, in which the reward model provides a simple binary reward and the action space follows the setup in ReAct where the task-specific action space is augmented with language to enable complex reasoning steps. After each action $a_t$, the agent computes a heuristic $h_t$ and optionally may decide to reset the environment to start a new trial depending on the self-reflection results.\\n\\n\\nIllustration of the Reflexion framework. (Image source: Shinn & Labash, 2023)'), Document(metadata={'id': '5d351e9a-f8de-4970-a0ab-aa9dd3cad97a', 'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'description': 'Building agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. 
The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview\\nIn a LLM-powered autonomous agent system, LLM functions as the agent’s brain, complemented by several key components:\\n\\nPlanning\\n\\nSubgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\\nReflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\\n\\n\\nMemory\\n\\nShort-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\nLong-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n\\n\\nTool use\\n\\nThe agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.\\n\\n\\n\\n\\n\\t\\n\\tOverview of a LLM-powered autonomous agent system.\\n\\nComponent One: Planning\\nA complicated task usually involves many steps. 
An agent needs to know what they are and plan ahead.', 'language': 'en'}, page_content='Explicit / declarative memory: This is memory of facts and events, and refers to those memories that can be consciously recalled, including episodic memory (events and experiences) and semantic memory (facts and concepts).\\nImplicit / procedural memory: This type of memory is unconscious and involves skills and routines that are performed automatically, like riding a bike or typing on a keyboard.\\n\\n\\n\\n\\n\\nCategorization of human memory.\\n\\nWe can roughly consider the following mappings:\\n\\nSensory memory as learning embedding representations for raw inputs, including text, image or other modalities;\\nShort-term memory as in-context learning. It is short and finite, as it is restricted by the finite context window length of Transformer.\\nLong-term memory as the external vector store that the agent can attend to at query time, accessible via fast retrieval.\\n\\nMaximum Inner Product Search (MIPS)#\\nThe external memory can alleviate the restriction of finite attention span.  A standard practice is to save the embedding representation of information into a vector store database that can support fast maximum inner-product search (MIPS). 
To optimize the retrieval speed, the common choice is the approximate nearest neighbors (ANN)\\u200b algorithm to return approximately top k nearest neighbors to trade off a little accuracy lost for a huge speedup.\\nA couple common choices of ANN algorithms for fast MIPS:\\n\\nLSH (Locality-Sensitive Hashing): It introduces a hashing function such that similar input items are mapped to the same buckets with high probability, where the number of buckets is much smaller than the number of inputs.\\nANNOY (Approximate Nearest Neighbors Oh Yeah): The core data structure are random projection trees, a set of binary trees where each non-leaf node represents a hyperplane splitting the input space into half and each leaf stores one data point. Trees are built independently and at random, so to some extent, it mimics a hashing function. ANNOY search happens in all the trees to iteratively search through the half that is closest to the query and then aggregates the results. The idea is quite related to KD tree but a lot more scalable.\\nHNSW (Hierarchical Navigable Small World): It is inspired by the idea of small world networks where most nodes can be reached by any other nodes within a small number of steps; e.g. “six degrees of separation” feature of social networks. HNSW builds hierarchical layers of these small-world graphs, where the bottom layers contain the actual data points. The layers in the middle create shortcuts to speed up search. When performing a search, HNSW starts from a random node in the top layer and navigates towards the target. When it can’t get any closer, it moves down to the next layer, until it reaches the bottom layer. 
Each move in the upper layers can potentially cover a large distance in the data space, and each move in the lower layers refines the search quality.\\nFAISS (Facebook AI Similarity Search): It operates on the assumption that in high dimensional space, distances between nodes follow a Gaussian distribution and thus there should exist clustering of data points. FAISS applies vector quantization by partitioning the vector space into clusters and then refining the quantization within clusters. Search first looks for cluster candidates with coarse quantization and then further looks into each cluster with finer quantization.\\nScaNN (Scalable Nearest Neighbors): The main innovation in ScaNN is anisotropic vector quantization. It quantizes a data point $x_i$ to $\\\\tilde{x}_i$ such that the inner product $\\\\langle q, x_i \\\\rangle$ is as similar to the original distance of $\\\\angle q, \\\\tilde{x}_i$ as possible, instead of picking the closet quantization centroid points.\\n\\n\\n\\nComparison of MIPS algorithms, measured in recall@10. (Image source: Google Blog, 2020)\\n\\nCheck more MIPS algorithms and performance comparison in ann-benchmarks.com.\\nComponent Three: Tool Use#\\nTool use is a remarkable and distinguishing characteristic of human beings. We create, modify and utilize external objects to do things that go beyond our physical and cognitive limits. Equipping LLMs with external tools can significantly extend the model capabilities.\\n\\n\\nA picture of a sea otter using rock to crack open a seashell, while floating in the water. While some other animals can use tools, the complexity is not comparable with humans. (Image source: Animals using tools)')]}\n",
      "---WEB SEARCH---\n",
      "{'question': 'What are the types of agent memory?', 'web_search': 'Yes', 'max_retries': 3, 'loop_step': 0, 'documents': [Document(metadata={'id': '6263b6ea-5182-4860-9cee-b31e5e5a1e61', 'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'description': 'Building agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview\\nIn a LLM-powered autonomous agent system, LLM functions as the agent’s brain, complemented by several key components:\\n\\nPlanning\\n\\nSubgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\\nReflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\\n\\n\\nMemory\\n\\nShort-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\nLong-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n\\n\\nTool use\\n\\nThe agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.\\n\\n\\n\\n\\n\\t\\n\\tOverview of a LLM-powered autonomous agent system.\\n\\nComponent One: Planning\\nA complicated task usually involves many steps. 
An agent needs to know what they are and plan ahead.', 'language': 'en'}, page_content=\"LLM Powered Autonomous Agents | Lil'Log\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nLil'Log\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n|\\n\\n\\n\\n\\n\\n\\nPosts\\n\\n\\n\\n\\nArchive\\n\\n\\n\\n\\nSearch\\n\\n\\n\\n\\nTags\\n\\n\\n\\n\\nFAQ\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n      LLM Powered Autonomous Agents\\n    \\nDate: June 23, 2023  |  Estimated Reading Time: 31 min  |  Author: Lilian Weng\\n\\n\\n \\n\\n\\nTable of Contents\\n\\n\\n\\nAgent System Overview\\n\\nComponent One: Planning\\n\\nTask Decomposition\\n\\nSelf-Reflection\\n\\n\\nComponent Two: Memory\\n\\nTypes of Memory\\n\\nMaximum Inner Product Search (MIPS)\\n\\n\\nComponent Three: Tool Use\\n\\nCase Studies\\n\\nScientific Discovery Agent\\n\\nGenerative Agents Simulation\\n\\nProof-of-Concept Examples\\n\\n\\nChallenges\\n\\nCitation\\n\\nReferences\\n\\n\\n\\n\\n\\nBuilding agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. 
The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview#\\nIn a LLM-powered autonomous agent system, LLM functions as the agent’s brain, complemented by several key components:\\n\\nPlanning\\n\\nSubgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\\nReflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\\n\\n\\nMemory\\n\\nShort-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\nLong-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n\\n\\nTool use\\n\\nThe agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.\\n\\n\\n\\n\\n\\nOverview of a LLM-powered autonomous agent system.\"), Document(metadata={'id': '54b675c2-fc68-468b-8408-f9c36ef0006b', 'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'description': 'Building agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. 
The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview\\nIn a LLM-powered autonomous agent system, LLM functions as the agent’s brain, complemented by several key components:\\n\\nPlanning\\n\\nSubgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\\nReflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\\n\\n\\nMemory\\n\\nShort-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\nLong-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n\\n\\nTool use\\n\\nThe agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.\\n\\n\\n\\n\\n\\t\\n\\tOverview of a LLM-powered autonomous agent system.\\n\\nComponent One: Planning\\nA complicated task usually involves many steps. 
An agent needs to know what they are and plan ahead.', 'language': 'en'}, page_content='Memory\\n\\nShort-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\nLong-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n\\n\\nTool use\\n\\nThe agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.\\n\\n\\n\\n\\n\\nOverview of a LLM-powered autonomous agent system.\\n\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. 
\"Write a story outline.\" for writing a novel, or (3) with human inputs.\\nAnother quite distinct approach, LLM+P (Liu et al. 2023), involves relying on an external classical planner to do long-horizon planning. This approach utilizes the Planning Domain Definition Language (PDDL) as an intermediate interface to describe the planning problem. In this process, LLM (1) translates the problem into “Problem PDDL”, then (2) requests a classical planner to generate a PDDL plan based on an existing “Domain PDDL”, and finally (3) translates the PDDL plan back into natural language. Essentially, the planning step is outsourced to an external tool, assuming the availability of domain-specific PDDL and a suitable planner which is common in certain robotic setups but not in many other domains.\\nSelf-Reflection#\\nSelf-reflection is a vital aspect that allows autonomous agents to improve iteratively by refining past action decisions and correcting previous mistakes. It plays a crucial role in real-world tasks where trial and error are inevitable.\\nReAct (Yao et al. 2023) integrates reasoning and acting within LLM by extending the action space to be a combination of task-specific discrete actions and the language space. The former enables LLM to interact with the environment (e.g. use Wikipedia search API), while the latter prompting LLM to generate reasoning traces in natural language.\\nThe ReAct prompt template incorporates explicit steps for LLM to think, roughly formatted as:\\nThought: ...\\nAction: ...\\nObservation: ...\\n... (Repeated many times)\\n\\n\\nExamples of reasoning trajectories for knowledge-intensive tasks (e.g. HotpotQA, FEVER) and decision-making tasks (e.g. AlfWorld Env, WebShop). (Image source: Yao et al. 
2023).\\n\\nIn both experiments on knowledge-intensive tasks and decision-making tasks, ReAct works better than the Act-only baseline where Thought: … step is removed.\\nReflexion (Shinn & Labash 2023) is a framework to equip agents with dynamic memory and self-reflection capabilities to improve reasoning skills. Reflexion has a standard RL setup, in which the reward model provides a simple binary reward and the action space follows the setup in ReAct where the task-specific action space is augmented with language to enable complex reasoning steps. After each action $a_t$, the agent computes a heuristic $h_t$ and optionally may decide to reset the environment to start a new trial depending on the self-reflection results.\\n\\n\\nIllustration of the Reflexion framework. (Image source: Shinn & Labash, 2023)'), Document(metadata={'id': '5d351e9a-f8de-4970-a0ab-aa9dd3cad97a', 'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'description': 'Building agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. 
The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview\\nIn a LLM-powered autonomous agent system, LLM functions as the agent’s brain, complemented by several key components:\\n\\nPlanning\\n\\nSubgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\\nReflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\\n\\n\\nMemory\\n\\nShort-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\nLong-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n\\n\\nTool use\\n\\nThe agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.\\n\\n\\n\\n\\n\\t\\n\\tOverview of a LLM-powered autonomous agent system.\\n\\nComponent One: Planning\\nA complicated task usually involves many steps. 
An agent needs to know what they are and plan ahead.', 'language': 'en'}, page_content='Explicit / declarative memory: This is memory of facts and events, and refers to those memories that can be consciously recalled, including episodic memory (events and experiences) and semantic memory (facts and concepts).\\nImplicit / procedural memory: This type of memory is unconscious and involves skills and routines that are performed automatically, like riding a bike or typing on a keyboard.\\n\\n\\n\\n\\n\\nCategorization of human memory.\\n\\nWe can roughly consider the following mappings:\\n\\nSensory memory as learning embedding representations for raw inputs, including text, image or other modalities;\\nShort-term memory as in-context learning. It is short and finite, as it is restricted by the finite context window length of Transformer.\\nLong-term memory as the external vector store that the agent can attend to at query time, accessible via fast retrieval.\\n\\nMaximum Inner Product Search (MIPS)#\\nThe external memory can alleviate the restriction of finite attention span.  A standard practice is to save the embedding representation of information into a vector store database that can support fast maximum inner-product search (MIPS). 
To optimize the retrieval speed, the common choice is the approximate nearest neighbors (ANN)\\u200b algorithm to return approximately top k nearest neighbors to trade off a little accuracy lost for a huge speedup.\\nA couple common choices of ANN algorithms for fast MIPS:\\n\\nLSH (Locality-Sensitive Hashing): It introduces a hashing function such that similar input items are mapped to the same buckets with high probability, where the number of buckets is much smaller than the number of inputs.\\nANNOY (Approximate Nearest Neighbors Oh Yeah): The core data structure are random projection trees, a set of binary trees where each non-leaf node represents a hyperplane splitting the input space into half and each leaf stores one data point. Trees are built independently and at random, so to some extent, it mimics a hashing function. ANNOY search happens in all the trees to iteratively search through the half that is closest to the query and then aggregates the results. The idea is quite related to KD tree but a lot more scalable.\\nHNSW (Hierarchical Navigable Small World): It is inspired by the idea of small world networks where most nodes can be reached by any other nodes within a small number of steps; e.g. “six degrees of separation” feature of social networks. HNSW builds hierarchical layers of these small-world graphs, where the bottom layers contain the actual data points. The layers in the middle create shortcuts to speed up search. When performing a search, HNSW starts from a random node in the top layer and navigates towards the target. When it can’t get any closer, it moves down to the next layer, until it reaches the bottom layer. 
Each move in the upper layers can potentially cover a large distance in the data space, and each move in the lower layers refines the search quality.\\nFAISS (Facebook AI Similarity Search): It operates on the assumption that in high dimensional space, distances between nodes follow a Gaussian distribution and thus there should exist clustering of data points. FAISS applies vector quantization by partitioning the vector space into clusters and then refining the quantization within clusters. Search first looks for cluster candidates with coarse quantization and then further looks into each cluster with finer quantization.\\nScaNN (Scalable Nearest Neighbors): The main innovation in ScaNN is anisotropic vector quantization. It quantizes a data point $x_i$ to $\\\\tilde{x}_i$ such that the inner product $\\\\langle q, x_i \\\\rangle$ is as similar to the original distance of $\\\\angle q, \\\\tilde{x}_i$ as possible, instead of picking the closet quantization centroid points.\\n\\n\\n\\nComparison of MIPS algorithms, measured in recall@10. (Image source: Google Blog, 2020)\\n\\nCheck more MIPS algorithms and performance comparison in ann-benchmarks.com.\\nComponent Three: Tool Use#\\nTool use is a remarkable and distinguishing characteristic of human beings. We create, modify and utilize external objects to do things that go beyond our physical and cognitive limits. Equipping LLMs with external tools can significantly extend the model capabilities.\\n\\n\\nA picture of a sea otter using rock to crack open a seashell, while floating in the water. While some other animals can use tools, the complexity is not comparable with humans. (Image source: Animals using tools)'), Document(metadata={}, page_content='### Memory component of an Agent.\\n\\nIn this article I will focus on the memory component of the Agent. Generally, we tend to use memory patterns present in humans to both model and describe agentic memory. 
Keeping that in mind, there are two types of agentic memory:\\n\\n Short-term memory, or sometimes called working memory.\\n Long-term memory, that is further split into multiple types. [...] Episodic.\\n Semantic.\\n Procedural.\\n\\n#### Episodic memory.\\n\\nThis type of memory contains past interactions and actions performed by the agent. While we already talked about this in short term memory segment, not all information might be kept in working memory as the context continues to expand. Few reasons: [...] An interesting note, identity of the agent provided in the system prompt is also considered semantic memory. This kind of information is usually retrieved at the beginning of Agent initialisation and used for alignment.\\n\\n#### Procedural memory.\\n\\nProcedural memory is defined as anything that has been codified into the agent by us. It includes:\\n> You want both - RAG to inform the LLM, memory to shape its behavior.\\n\\n## Types of Memory in Agents: A High-Level Taxonomy\\n\\nAt a foundational level, memory in AI agents comes in two forms:\\n\\n Short-term memory: Holds immediate context within a single interaction.\\n Long-term memory: Persists knowledge across sessions, tasks, and time. [...] Just like in humans, these memory types serve different cognitive functions. Short-term memory helps the agent stay coherent in the moment. Long-term memory helps it learn, personalize, and adapt.\\n\\nLet’s break this down further: [...] 
| Type | Role | Example |\\n --- \\n| Working Memory (short-term) | Maintains short-term conversational coherence | “What was the last question again?” |\\n| Factual Memory (long-term) | Retains user preferences, communication style, domain context | “You prefer markdown output and short-form answers.” |\\n| Episodic Memory (long-term) | Remembers specific past interactions or outcomes | “Last time we deployed this model, the latency increased.” |\\n# How These Work Together in Agentic AI\\n\\nIn an agentic AI system, these memory types collaborate to create a capable, goal-driven agent. Short-term memory handles immediate demands, while long-term memory — encompassing semantic, episodic, and procedural elements — builds a deeper foundation.\\n\\nSemantic memory provides the facts, episodic memory offers lessons from experience, and procedural memory ensures smooth execution. [...] Sitemap\\n\\nOpen in app\\n\\nSign in\\n\\nSign in\\n\\n# Memory Types in Agentic AI: A Breakdown\\n\\nGokcer Belgusen\\n\\n4 min readApr 6, 2025\\n\\nAgentic AI — systems designed to act autonomously, make decisions, and pursue goals — relies on various types of memory to function effectively. Drawing from cognitive science concepts, these memory types include\\n\\n semantic\\n episodic\\n short-term\\n procedural and\\n long-term memory. [...] Episodic memory is the AI’s record of specific experiences or events, tied to a time and context. Think of it as the agent’s personal history — like recalling, “Last Tuesday, I helped a user debug code and got stuck on a syntax error.” In agentic AI, episodic memory allows the system to reflect on past interactions or actions, learning from successes or mistakes. This type of memory adds a narrative layer, helping the AI adjust its behavior based on what it has directly encountered.\\nMemory: The agent is equipped in general with short term, long term, consensus, and episodic memory. 
Short term memory for immediate interactions, long-term memory for historical data and conversations, episodic memory for past interactions, and consensus memory for shared information among agents. The agent can maintain context, learn from experiences, and improve performance by recalling past interactions and adapting to new situations.\\n1. Immediate Working Memory — Information that must remain constantly accessible, similar to how you don’t need to recall how to speak or walk consciously\\n2. Searchable Episodic Memory — Information the agent must actively retrieve, comparable to how you search your mind for a specific conversation or event\\n3. Procedural Memory — Skills and learned behaviours that become automatic, like your ability to type without thinking about individual keys')]}\n",
      "---GENERATE---\n",
      "---CHECK HALLUCINATIONS---\n",
      "---DECISION: GENERATION IS GROUNDED IN DOCUMENTS---\n",
      "---GRADE GENERATION vs QUESTION---\n",
      "---DECISION: GENERATION ADDRESSES QUESTION---\n",
      "{'question': 'What are the types of agent memory?', 'generation': AIMessage(content='The types of agent memory include short-term (working) memory, long-term memory, episodic memory, semantic memory, and procedural memory. Short-term memory handles immediate context, while long-term memory retains knowledge across sessions. Episodic memory records specific past interactions, and procedural memory involves learned skills and routines.', additional_kwargs={}, response_metadata={'model_name': 'qwen-turbo', 'finish_reason': 'stop', 'request_id': '648727c1-51fb-4f24-bae9-9ebf9f5295c8', 'token_usage': {'input_tokens': 3171, 'output_tokens': 62, 'total_tokens': 3233, 'prompt_tokens_details': {'cached_tokens': 0}}}, id='run--4fa587ee-7454-49e4-9399-b00f61a3aa94-0'), 'web_search': 'Yes', 'max_retries': 3, 'loop_step': 1, 'documents': [Document(metadata={'id': '6263b6ea-5182-4860-9cee-b31e5e5a1e61', 'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'description': 'Building agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. 
The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview\\nIn a LLM-powered autonomous agent system, LLM functions as the agent’s brain, complemented by several key components:\\n\\nPlanning\\n\\nSubgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\\nReflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\\n\\n\\nMemory\\n\\nShort-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\nLong-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n\\n\\nTool use\\n\\nThe agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.\\n\\n\\n\\n\\n\\t\\n\\tOverview of a LLM-powered autonomous agent system.\\n\\nComponent One: Planning\\nA complicated task usually involves many steps. 
An agent needs to know what they are and plan ahead.', 'language': 'en'}, page_content=\"LLM Powered Autonomous Agents | Lil'Log\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nLil'Log\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n|\\n\\n\\n\\n\\n\\n\\nPosts\\n\\n\\n\\n\\nArchive\\n\\n\\n\\n\\nSearch\\n\\n\\n\\n\\nTags\\n\\n\\n\\n\\nFAQ\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n      LLM Powered Autonomous Agents\\n    \\nDate: June 23, 2023  |  Estimated Reading Time: 31 min  |  Author: Lilian Weng\\n\\n\\n \\n\\n\\nTable of Contents\\n\\n\\n\\nAgent System Overview\\n\\nComponent One: Planning\\n\\nTask Decomposition\\n\\nSelf-Reflection\\n\\n\\nComponent Two: Memory\\n\\nTypes of Memory\\n\\nMaximum Inner Product Search (MIPS)\\n\\n\\nComponent Three: Tool Use\\n\\nCase Studies\\n\\nScientific Discovery Agent\\n\\nGenerative Agents Simulation\\n\\nProof-of-Concept Examples\\n\\n\\nChallenges\\n\\nCitation\\n\\nReferences\\n\\n\\n\\n\\n\\nBuilding agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. 
The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview#\\nIn a LLM-powered autonomous agent system, LLM functions as the agent’s brain, complemented by several key components:\\n\\nPlanning\\n\\nSubgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\\nReflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\\n\\n\\nMemory\\n\\nShort-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\nLong-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n\\n\\nTool use\\n\\nThe agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.\\n\\n\\n\\n\\n\\nOverview of a LLM-powered autonomous agent system.\"), Document(metadata={'id': '54b675c2-fc68-468b-8408-f9c36ef0006b', 'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'description': 'Building agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. 
The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview\\nIn a LLM-powered autonomous agent system, LLM functions as the agent’s brain, complemented by several key components:\\n\\nPlanning\\n\\nSubgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\\nReflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\\n\\n\\nMemory\\n\\nShort-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\nLong-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n\\n\\nTool use\\n\\nThe agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.\\n\\n\\n\\n\\n\\t\\n\\tOverview of a LLM-powered autonomous agent system.\\n\\nComponent One: Planning\\nA complicated task usually involves many steps. 
An agent needs to know what they are and plan ahead.', 'language': 'en'}, page_content='Memory\\n\\nShort-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\nLong-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n\\n\\nTool use\\n\\nThe agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.\\n\\n\\n\\n\\n\\nOverview of a LLM-powered autonomous agent system.\\n\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. 
\"Write a story outline.\" for writing a novel, or (3) with human inputs.\\nAnother quite distinct approach, LLM+P (Liu et al. 2023), involves relying on an external classical planner to do long-horizon planning. This approach utilizes the Planning Domain Definition Language (PDDL) as an intermediate interface to describe the planning problem. In this process, LLM (1) translates the problem into “Problem PDDL”, then (2) requests a classical planner to generate a PDDL plan based on an existing “Domain PDDL”, and finally (3) translates the PDDL plan back into natural language. Essentially, the planning step is outsourced to an external tool, assuming the availability of domain-specific PDDL and a suitable planner which is common in certain robotic setups but not in many other domains.\\nSelf-Reflection#\\nSelf-reflection is a vital aspect that allows autonomous agents to improve iteratively by refining past action decisions and correcting previous mistakes. It plays a crucial role in real-world tasks where trial and error are inevitable.\\nReAct (Yao et al. 2023) integrates reasoning and acting within LLM by extending the action space to be a combination of task-specific discrete actions and the language space. The former enables LLM to interact with the environment (e.g. use Wikipedia search API), while the latter prompting LLM to generate reasoning traces in natural language.\\nThe ReAct prompt template incorporates explicit steps for LLM to think, roughly formatted as:\\nThought: ...\\nAction: ...\\nObservation: ...\\n... (Repeated many times)\\n\\n\\nExamples of reasoning trajectories for knowledge-intensive tasks (e.g. HotpotQA, FEVER) and decision-making tasks (e.g. AlfWorld Env, WebShop). (Image source: Yao et al. 
2023).\\n\\nIn both experiments on knowledge-intensive tasks and decision-making tasks, ReAct works better than the Act-only baseline where Thought: … step is removed.\\nReflexion (Shinn & Labash 2023) is a framework to equip agents with dynamic memory and self-reflection capabilities to improve reasoning skills. Reflexion has a standard RL setup, in which the reward model provides a simple binary reward and the action space follows the setup in ReAct where the task-specific action space is augmented with language to enable complex reasoning steps. After each action $a_t$, the agent computes a heuristic $h_t$ and optionally may decide to reset the environment to start a new trial depending on the self-reflection results.\\n\\n\\nIllustration of the Reflexion framework. (Image source: Shinn & Labash, 2023)'), Document(metadata={'id': '5d351e9a-f8de-4970-a0ab-aa9dd3cad97a', 'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'description': 'Building agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. 
The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview\\nIn a LLM-powered autonomous agent system, LLM functions as the agent’s brain, complemented by several key components:\\n\\nPlanning\\n\\nSubgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\\nReflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\\n\\n\\nMemory\\n\\nShort-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\nLong-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n\\n\\nTool use\\n\\nThe agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.\\n\\n\\n\\n\\n\\t\\n\\tOverview of a LLM-powered autonomous agent system.\\n\\nComponent One: Planning\\nA complicated task usually involves many steps. 
An agent needs to know what they are and plan ahead.', 'language': 'en'}, page_content='Explicit / declarative memory: This is memory of facts and events, and refers to those memories that can be consciously recalled, including episodic memory (events and experiences) and semantic memory (facts and concepts).\\nImplicit / procedural memory: This type of memory is unconscious and involves skills and routines that are performed automatically, like riding a bike or typing on a keyboard.\\n\\n\\n\\n\\n\\nCategorization of human memory.\\n\\nWe can roughly consider the following mappings:\\n\\nSensory memory as learning embedding representations for raw inputs, including text, image or other modalities;\\nShort-term memory as in-context learning. It is short and finite, as it is restricted by the finite context window length of Transformer.\\nLong-term memory as the external vector store that the agent can attend to at query time, accessible via fast retrieval.\\n\\nMaximum Inner Product Search (MIPS)#\\nThe external memory can alleviate the restriction of finite attention span.  A standard practice is to save the embedding representation of information into a vector store database that can support fast maximum inner-product search (MIPS). 
To optimize the retrieval speed, the common choice is the approximate nearest neighbors (ANN)\\u200b algorithm to return approximately top k nearest neighbors to trade off a little accuracy lost for a huge speedup.\\nA couple common choices of ANN algorithms for fast MIPS:\\n\\nLSH (Locality-Sensitive Hashing): It introduces a hashing function such that similar input items are mapped to the same buckets with high probability, where the number of buckets is much smaller than the number of inputs.\\nANNOY (Approximate Nearest Neighbors Oh Yeah): The core data structure are random projection trees, a set of binary trees where each non-leaf node represents a hyperplane splitting the input space into half and each leaf stores one data point. Trees are built independently and at random, so to some extent, it mimics a hashing function. ANNOY search happens in all the trees to iteratively search through the half that is closest to the query and then aggregates the results. The idea is quite related to KD tree but a lot more scalable.\\nHNSW (Hierarchical Navigable Small World): It is inspired by the idea of small world networks where most nodes can be reached by any other nodes within a small number of steps; e.g. “six degrees of separation” feature of social networks. HNSW builds hierarchical layers of these small-world graphs, where the bottom layers contain the actual data points. The layers in the middle create shortcuts to speed up search. When performing a search, HNSW starts from a random node in the top layer and navigates towards the target. When it can’t get any closer, it moves down to the next layer, until it reaches the bottom layer. 
Each move in the upper layers can potentially cover a large distance in the data space, and each move in the lower layers refines the search quality.\\nFAISS (Facebook AI Similarity Search): It operates on the assumption that in high dimensional space, distances between nodes follow a Gaussian distribution and thus there should exist clustering of data points. FAISS applies vector quantization by partitioning the vector space into clusters and then refining the quantization within clusters. Search first looks for cluster candidates with coarse quantization and then further looks into each cluster with finer quantization.\\nScaNN (Scalable Nearest Neighbors): The main innovation in ScaNN is anisotropic vector quantization. It quantizes a data point $x_i$ to $\\\\tilde{x}_i$ such that the inner product $\\\\langle q, x_i \\\\rangle$ is as similar to the original distance of $\\\\angle q, \\\\tilde{x}_i$ as possible, instead of picking the closet quantization centroid points.\\n\\n\\n\\nComparison of MIPS algorithms, measured in recall@10. (Image source: Google Blog, 2020)\\n\\nCheck more MIPS algorithms and performance comparison in ann-benchmarks.com.\\nComponent Three: Tool Use#\\nTool use is a remarkable and distinguishing characteristic of human beings. We create, modify and utilize external objects to do things that go beyond our physical and cognitive limits. Equipping LLMs with external tools can significantly extend the model capabilities.\\n\\n\\nA picture of a sea otter using rock to crack open a seashell, while floating in the water. While some other animals can use tools, the complexity is not comparable with humans. (Image source: Animals using tools)'), Document(metadata={}, page_content='### Memory component of an Agent.\\n\\nIn this article I will focus on the memory component of the Agent. Generally, we tend to use memory patterns present in humans to both model and describe agentic memory. 
Keeping that in mind, there are two types of agentic memory:\\n\\n Short-term memory, or sometimes called working memory.\\n Long-term memory, that is further split into multiple types. [...] Episodic.\\n Semantic.\\n Procedural.\\n\\n#### Episodic memory.\\n\\nThis type of memory contains past interactions and actions performed by the agent. While we already talked about this in short term memory segment, not all information might be kept in working memory as the context continues to expand. Few reasons: [...] An interesting note, identity of the agent provided in the system prompt is also considered semantic memory. This kind of information is usually retrieved at the beginning of Agent initialisation and used for alignment.\\n\\n#### Procedural memory.\\n\\nProcedural memory is defined as anything that has been codified into the agent by us. It includes:\\n> You want both - RAG to inform the LLM, memory to shape its behavior.\\n\\n## Types of Memory in Agents: A High-Level Taxonomy\\n\\nAt a foundational level, memory in AI agents comes in two forms:\\n\\n Short-term memory: Holds immediate context within a single interaction.\\n Long-term memory: Persists knowledge across sessions, tasks, and time. [...] Just like in humans, these memory types serve different cognitive functions. Short-term memory helps the agent stay coherent in the moment. Long-term memory helps it learn, personalize, and adapt.\\n\\nLet’s break this down further: [...] 
| Type | Role | Example |\\n --- \\n| Working Memory (short-term) | Maintains short-term conversational coherence | “What was the last question again?” |\\n| Factual Memory (long-term) | Retains user preferences, communication style, domain context | “You prefer markdown output and short-form answers.” |\\n| Episodic Memory (long-term) | Remembers specific past interactions or outcomes | “Last time we deployed this model, the latency increased.” |\\n# How These Work Together in Agentic AI\\n\\nIn an agentic AI system, these memory types collaborate to create a capable, goal-driven agent. Short-term memory handles immediate demands, while long-term memory — encompassing semantic, episodic, and procedural elements — builds a deeper foundation.\\n\\nSemantic memory provides the facts, episodic memory offers lessons from experience, and procedural memory ensures smooth execution. [...] Sitemap\\n\\nOpen in app\\n\\nSign in\\n\\nSign in\\n\\n# Memory Types in Agentic AI: A Breakdown\\n\\nGokcer Belgusen\\n\\n4 min readApr 6, 2025\\n\\nAgentic AI — systems designed to act autonomously, make decisions, and pursue goals — relies on various types of memory to function effectively. Drawing from cognitive science concepts, these memory types include\\n\\n semantic\\n episodic\\n short-term\\n procedural and\\n long-term memory. [...] Episodic memory is the AI’s record of specific experiences or events, tied to a time and context. Think of it as the agent’s personal history — like recalling, “Last Tuesday, I helped a user debug code and got stuck on a syntax error.” In agentic AI, episodic memory allows the system to reflect on past interactions or actions, learning from successes or mistakes. This type of memory adds a narrative layer, helping the AI adjust its behavior based on what it has directly encountered.\\nMemory: The agent is equipped in general with short term, long term, consensus, and episodic memory. 
Short term memory for immediate interactions, long-term memory for historical data and conversations, episodic memory for past interactions, and consensus memory for shared information among agents. The agent can maintain context, learn from experiences, and improve performance by recalling past interactions and adapting to new situations.\\n1. Immediate Working Memory — Information that must remain constantly accessible, similar to how you don’t need to recall how to speak or walk consciously\\n2. Searchable Episodic Memory — Information the agent must actively retrieve, comparable to how you search your mind for a specific conversation or event\\n3. Procedural Memory — Skills and learned behaviours that become automatic, like your ability to type without thinking about individual keys')]}\n"
     ]
    }
   ],
   "source": [
    "inputs = {\"question\": \"What are the types of agent memory?\", \"max_retries\": 3}\n",
    "for event in graph.stream(inputs, stream_mode=\"values\"):\n",
    "    print(event)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "fac3360e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---ROUTE QUESTION---\n",
      "---ROUTE QUESTION TO WEB SEARCH---\n",
      "{'question': 'What are the models released today for llama3.2?', 'max_retries': 3, 'loop_step': 0}\n",
      "---WEB SEARCH---\n",
      "{'question': 'What are the models released today for llama3.2?', 'max_retries': 3, 'loop_step': 0, 'documents': [Document(metadata={}, page_content='Meta has released compact versions of its lightweight Llama 3.2 1B and 3B models that are small enough to run effectively on mobile devices.\\n\\nThe Facebook owner, in an announcement yesterday (24 October), said that these new “quantised” models are 56pc smaller and use 41pc less memory when compared to the original 3.2 models released last month. [...] Silicon Republic\\n\\n# Meta releases compact versions of Llama 3.2 AI models\\n\\nby Suhasini Srinivasaragavan\\n\\n25 Oct 2024\\n\\nA colourful illustration of a mobile phone with a big AIG symbol and speech bubbles to symbolise using AI models on your phone.\\n\\nImage: © ImageFlow/Stock.adobe.com\\n\\nThe new quantised models are 56pc smaller and use 41pc less memory when compared to the full-size models released last month. [...] Meta says you can use the 1B or 3B models for on-device applications such as summarising a discussion from your phone or calling on-device tools such as calendar.\\n\\nThe new models “apply the same quality and safety requirements” as the original Llama 3.2 1B and 3B, while processing information two to three times faster, the company claimed.\\n## Meta has unveiled new versions of its popular open source AI model Llama, with small and medium-sized models capable of powering workloads on edge and mobile devices.\\n\\nLlama 3.2 models were shown at the company’s annual Meta Connect event. They\\'re capable of supporting multilingual text generation and vision applications like image recognition.\\n\\nSubscribe today for free\\n\\nThe connectivity news and insights that matter - straight to your inbox [...] 
“This is our first open source, multimodal model, and it’s going to enable a lot of interesting applications that require visual understanding,” said Mark Zuckerberg, CEO of Meta.\\n\\n## New Llamas join the flock\\n\\nLlama 3.2 follows Llama 3.1 model, the massive open source model released in late July. [...] They’re split into two segments - the small size (1B and 3B) which are designed to be lightweight and can handle only text inputs. These can fit onto edge and mobile devices, enabling them to process inputs on-device.\\n\\nThe 1B and 3B Llama 3.2 models can support up to 128K tokens (~ 96,240 words) and achieve state-of-the-art performance for use cases like summarisation, instruction following, and rewriting tasks run at the edge.\\nOllama\\n\\n34M\\n Downloads\\n\\nUpdated \\n11 months ago\\n\\n## Meta\\'s Llama 3.2 goes small with 1B and 3B models.\\n\\n## Models\\n\\nName\\n\\n63 models\\n\\nSize\\n\\nContext\\n\\nInput\\n\\nllama3.2:latest\\n\\n2.0GB · 128K context window · Text · 11 months ago\\n\\n2.0GB\\n\\n128K\\n\\nText\\n\\nllama3.2:1b\\n\\n1.3GB · 128K context window · Text · 11 months ago\\n\\n1.3GB\\n\\n128K\\n\\nText\\n\\nllama3.2:3b\\n\\n2.0GB · 128K context window · Text · 11 months ago\\n\\n2.0GB\\n\\n128K\\n\\nText\\n\\n## Readme [...] The Meta Llama 3.2 collection of multilingual large language models (LLMs) is a collection of pretrained and instruction-tuned generative models in 1B and 3B sizes (text in/text out). The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks. They outperform many of the available open source and closed chat models on common industry benchmarks.\\n\\n## Sizes\\n\\n### 3B parameters (default) [...] The 3B model outperforms the Gemma 2 2.6B and Phi 3.5-mini models on tasks such as:\\n\\n`ollama run llama3.2`\\n\\n### 1B parameters\\n\\nThe 1B model is competitive with other 1-3B parameter models. 
It’s use cases include:\\n\\n`ollama run llama3.2:1b`\\n\\n### Benchmarks\\n\\nLlama 3.2 instruction-tuned benchmarks\\n\\nLlama 3.2 instruction-tuned benchmarks\\nOn April 18, 2024, Meta released Llama 3 with two sizes: 8B and 70B parameters. The models have been pre-trained on approximately 15 trillion tokens of text gathered from “publicly available sources” with the instruct models fine-tuned on “publicly available instruction datasets, as well as over 10M human-annotated examples\". Meta AI\\'s testing showed in April 2024 that Llama 3 70B was beating Gemini \"Gemini (chatbot)\") Pro 1.5 and Claude \"Claude (language model)\") 3 Sonnet on most benchmarks. [...] | Llama 3 | April 18, 2024 | Active |  8B  70.6B | 100,000 | 8192 | 15T |\\n| Llama 3.1 | July 23, 2024 | Active |  8B  70.6B  405B | 440,000 | 128,000 |\\n| Llama 3.2 | September 25, 2024 | Active |  1B  3B  11B  90B | ? | 128,000 | 9T |\\n| Llama 3.3 | December 7, 2024 | Active |  70B | ? | 128,000 | 15T+ |\\n| Llama 4 | April 5, 2025 | Active |  109B  400B  2T |  71,000  34,000  ? |  10M  1M  ? |  40T  22T  ? | [...] Booz Allen Hamilton deployed Meta’s Llama 3.2 model aboard the International Space Station (ISS) National Labs as part of a project called Space Llama. The system runs on Hewlett Packard Enterprise’s Spaceborne Computer‑2 and leverages Booz Allen’s A2E2 (AI for Edge Environments) platform, using NVIDIA CUDA‑accelerated computing. Space Llama demonstrates how large language models can operate in disconnected, constrained environments such as space, enabling astronauts to retrieve and summarize\\n## Latest commit\\n\\n## History\\n\\n## Repository files navigation\\n\\n Meta Llama 3\\n\\nWe are unlocking the power of large language models. 
Our latest version of Llama is now accessible to individuals, creators, researchers, and businesses of all sizes so that they can experiment, innovate, and scale their ideas responsibly.\\n\\nThis release includes model weights and starting code for pre-trained and instruction-tuned Llama 3 language models — including sizes of 8B to 70B parameters. [...] The fine-tuned models were trained for dialogue applications. To get the expected features and performance for them, specific formatting defined in `ChatFormat` [...] All models support sequence length up to 8192 tokens, but we pre-allocate the cache according to `max_seq_len` and `max_batch_size` values. So set those according to your hardware.\\n\\n`max_seq_len`\\n`max_batch_size`\\n\\n### Pretrained Models\\n\\nThese models are not finetuned for chat or Q&A. They should be prompted so that the expected answer is the natural continuation of the prompt.')]}\n",
      "---GENERATE---\n",
      "---CHECK HALLUCINATIONS---\n",
      "---DECISION: GENERATION IS GROUNDED IN DOCUMENTS---\n",
      "---GRADE GENERATION vs QUESTION---\n",
      "---DECISION: GENERATION ADDRESSES QUESTION---\n",
      "{'question': 'What are the models released today for llama3.2?', 'generation': AIMessage(content='Meta has released compact versions of its Llama 3.2 1B and 3B models for mobile and edge devices. These models are 56% smaller and use 41% less memory than the original versions. They support tasks like summarization and are optimized for on-device applications.', additional_kwargs={}, response_metadata={'model_name': 'qwen-turbo', 'finish_reason': 'stop', 'request_id': '434d4902-86e3-4d02-83dc-5b2ab4501296', 'token_usage': {'input_tokens': 1763, 'output_tokens': 62, 'total_tokens': 1825, 'prompt_tokens_details': {'cached_tokens': 0}}}, id='run--52b27abb-ff3e-4815-9c0e-4a5b1f45a048-0'), 'max_retries': 3, 'loop_step': 1, 'documents': [Document(metadata={}, page_content='Meta has released compact versions of its lightweight Llama 3.2 1B and 3B models that are small enough to run effectively on mobile devices.\\n\\nThe Facebook owner, in an announcement yesterday (24 October), said that these new “quantised” models are 56pc smaller and use 41pc less memory when compared to the original 3.2 models released last month. [...] Silicon Republic\\n\\n# Meta releases compact versions of Llama 3.2 AI models\\n\\nby Suhasini Srinivasaragavan\\n\\n25 Oct 2024\\n\\nA colourful illustration of a mobile phone with a big AIG symbol and speech bubbles to symbolise using AI models on your phone.\\n\\nImage: © ImageFlow/Stock.adobe.com\\n\\nThe new quantised models are 56pc smaller and use 41pc less memory when compared to the full-size models released last month. [...] 
Meta says you can use the 1B or 3B models for on-device applications such as summarising a discussion from your phone or calling on-device tools such as calendar.\\n\\nThe new models “apply the same quality and safety requirements” as the original Llama 3.2 1B and 3B, while processing information two to three times faster, the company claimed.\\n## Meta has unveiled new versions of its popular open source AI model Llama, with small and medium-sized models capable of powering workloads on edge and mobile devices.\\n\\nLlama 3.2 models were shown at the company’s annual Meta Connect event. They\\'re capable of supporting multilingual text generation and vision applications like image recognition.\\n\\nSubscribe today for free\\n\\nThe connectivity news and insights that matter - straight to your inbox [...] “This is our first open source, multimodal model, and it’s going to enable a lot of interesting applications that require visual understanding,” said Mark Zuckerberg, CEO of Meta.\\n\\n## New Llamas join the flock\\n\\nLlama 3.2 follows Llama 3.1 model, the massive open source model released in late July. [...] They’re split into two segments - the small size (1B and 3B) which are designed to be lightweight and can handle only text inputs. 
These can fit onto edge and mobile devices, enabling them to process inputs on-device.\\n\\nThe 1B and 3B Llama 3.2 models can support up to 128K tokens (~ 96,240 words) and achieve state-of-the-art performance for use cases like summarisation, instruction following, and rewriting tasks run at the edge.\\nOllama\\n\\n34M\\n Downloads\\n\\nUpdated \\n11 months ago\\n\\n## Meta\\'s Llama 3.2 goes small with 1B and 3B models.\\n\\n## Models\\n\\nName\\n\\n63 models\\n\\nSize\\n\\nContext\\n\\nInput\\n\\nllama3.2:latest\\n\\n2.0GB · 128K context window · Text · 11 months ago\\n\\n2.0GB\\n\\n128K\\n\\nText\\n\\nllama3.2:1b\\n\\n1.3GB · 128K context window · Text · 11 months ago\\n\\n1.3GB\\n\\n128K\\n\\nText\\n\\nllama3.2:3b\\n\\n2.0GB · 128K context window · Text · 11 months ago\\n\\n2.0GB\\n\\n128K\\n\\nText\\n\\n## Readme [...] The Meta Llama 3.2 collection of multilingual large language models (LLMs) is a collection of pretrained and instruction-tuned generative models in 1B and 3B sizes (text in/text out). The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks. They outperform many of the available open source and closed chat models on common industry benchmarks.\\n\\n## Sizes\\n\\n### 3B parameters (default) [...] The 3B model outperforms the Gemma 2 2.6B and Phi 3.5-mini models on tasks such as:\\n\\n`ollama run llama3.2`\\n\\n### 1B parameters\\n\\nThe 1B model is competitive with other 1-3B parameter models. It’s use cases include:\\n\\n`ollama run llama3.2:1b`\\n\\n### Benchmarks\\n\\nLlama 3.2 instruction-tuned benchmarks\\n\\nLlama 3.2 instruction-tuned benchmarks\\nOn April 18, 2024, Meta released Llama 3 with two sizes: 8B and 70B parameters. 
The models have been pre-trained on approximately 15 trillion tokens of text gathered from “publicly available sources” with the instruct models fine-tuned on “publicly available instruction datasets, as well as over 10M human-annotated examples\". Meta AI\\'s testing showed in April 2024 that Llama 3 70B was beating Gemini \"Gemini (chatbot)\") Pro 1.5 and Claude \"Claude (language model)\") 3 Sonnet on most benchmarks. [...] | Llama 3 | April 18, 2024 | Active |  8B  70.6B | 100,000 | 8192 | 15T |\\n| Llama 3.1 | July 23, 2024 | Active |  8B  70.6B  405B | 440,000 | 128,000 |\\n| Llama 3.2 | September 25, 2024 | Active |  1B  3B  11B  90B | ? | 128,000 | 9T |\\n| Llama 3.3 | December 7, 2024 | Active |  70B | ? | 128,000 | 15T+ |\\n| Llama 4 | April 5, 2025 | Active |  109B  400B  2T |  71,000  34,000  ? |  10M  1M  ? |  40T  22T  ? | [...] Booz Allen Hamilton deployed Meta’s Llama 3.2 model aboard the International Space Station (ISS) National Labs as part of a project called Space Llama. The system runs on Hewlett Packard Enterprise’s Spaceborne Computer‑2 and leverages Booz Allen’s A2E2 (AI for Edge Environments) platform, using NVIDIA CUDA‑accelerated computing. Space Llama demonstrates how large language models can operate in disconnected, constrained environments such as space, enabling astronauts to retrieve and summarize\\n## Latest commit\\n\\n## History\\n\\n## Repository files navigation\\n\\n Meta Llama 3\\n\\nWe are unlocking the power of large language models. Our latest version of Llama is now accessible to individuals, creators, researchers, and businesses of all sizes so that they can experiment, innovate, and scale their ideas responsibly.\\n\\nThis release includes model weights and starting code for pre-trained and instruction-tuned Llama 3 language models — including sizes of 8B to 70B parameters. [...] The fine-tuned models were trained for dialogue applications. 
To get the expected features and performance for them, specific formatting defined in `ChatFormat` [...] All models support sequence length up to 8192 tokens, but we pre-allocate the cache according to `max_seq_len` and `max_batch_size` values. So set those according to your hardware.\\n\\n`max_seq_len`\\n`max_batch_size`\\n\\n### Pretrained Models\\n\\nThese models are not finetuned for chat or Q&A. They should be prompted so that the expected answer is the natural continuation of the prompt.')]}\n"
     ]
    }
   ],
   "source": [
    "# Test on current events\n",
    "inputs = {\n",
    "    \"question\": \"What are the models released today for llama3.2?\",\n",
    "    \"max_retries\": 3,\n",
    "}\n",
    "for event in graph.stream(inputs, stream_mode=\"values\"):\n",
    "    print(event)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "MLOps",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
