sabazo committed on
Commit
82bee2d
1 Parent(s): b11b693

Created using Colaboratory

examples/Langgraph_CorrectiveRAG_mistral_chroma.ipynb ADDED
@@ -0,0 +1,571 @@
+ {
+   "nbformat": 4,
+   "nbformat_minor": 0,
+   "metadata": {
+     "colab": {
+       "provenance": [],
+       "authorship_tag": "ABX9TyMp8bhKotk3mdZcc3U4qqKP",
+       "include_colab_link": true
+     },
+     "kernelspec": {
+       "name": "python3",
+       "display_name": "Python 3"
+     },
+     "language_info": {
+       "name": "python"
+     }
+   },
+   "cells": [
+     {
+       "cell_type": "markdown",
+       "metadata": {
+         "id": "view-in-github",
+         "colab_type": "text"
+       },
+       "source": [
+         "<a href=\"https://colab.research.google.com/github/almutareb/InnovationPathfinderAI/blob/main/examples/Langgraph_CorrectiveRAG_mistral_chroma.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "jLMHfRq9kAP9"
+       },
+       "outputs": [],
+       "source": [
+         "!pip install -Uq langchain-community\n",
+         "!pip install -Uq langchain\n",
+         "!pip install -Uq langchainhub\n",
+         "!pip install -Uq langgraph\n",
+         "!pip install -Uq wikipedia\n",
+         "!pip install -Uq scikit-learn\n",
+         "!pip install -Uq chromadb\n",
+         "!pip install -Uq sentence-transformers\n",
+         "!pip install -Uq gpt4all\n",
+         "!pip install -Uq google-search-results"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "source": [
+         "import os\n",
+         "from google.colab import userdata\n",
+         "os.environ[\"HUGGINGFACEHUB_API_TOKEN\"] = userdata.get('HUGGINGFACEHUB_API_TOKEN')\n",
+         "os.environ[\"GOOGLE_CSE_ID\"] = userdata.get('GOOGLE_CSE_ID')\n",
+         "os.environ[\"GOOGLE_API_KEY\"] = userdata.get('GOOGLE_API_KEY')"
+       ],
+       "metadata": {
+         "id": "kPF-3dzGuAfT"
+       },
+       "execution_count": 2,
+       "outputs": []
+     },
+     {
+       "cell_type": "markdown",
+       "source": [
+         "### LLMs"
+       ],
+       "metadata": {
+         "id": "XTtbWrue9l3E"
+       }
+     },
+     {
+       "cell_type": "code",
+       "source": [
+         "# HF libraries\n",
+         "from langchain_community.llms import HuggingFaceEndpoint\n",
+         "\n",
+         "# Load the models from the Hugging Face Hub\n",
+         "llm_mid = HuggingFaceEndpoint(repo_id=\"mistralai/Mixtral-8x7B-Instruct-v0.1\",\n",
+         "                              temperature=0.1,\n",
+         "                              max_new_tokens=1024,\n",
+         "                              repetition_penalty=1.2,\n",
+         "                              return_full_text=False\n",
+         "                              )\n",
+         "\n",
+         "llm_small = HuggingFaceEndpoint(repo_id=\"mistralai/Mistral-7B-Instruct-v0.2\",\n",
+         "                                temperature=0.1,\n",
+         "                                max_new_tokens=1024,\n",
+         "                                repetition_penalty=1.2,\n",
+         "                                return_full_text=False\n",
+         "                                )"
+       ],
+       "metadata": {
+         "id": "EDZyRq-wuIuy"
+       },
+       "execution_count": null,
+       "outputs": []
+     },
+     {
+       "cell_type": "markdown",
+       "source": [
+         "### Chroma DB"
+       ],
+       "metadata": {
+         "id": "mdMx_T8V9npk"
+       }
+     },
+     {
+       "cell_type": "code",
+       "source": [
+         "from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
+         "from langchain_community.document_loaders import WebBaseLoader\n",
+         "from langchain_community.vectorstores import Chroma\n",
+         "from langchain_community.embeddings import GPT4AllEmbeddings\n",
+         "from langchain.embeddings import HuggingFaceEmbeddings\n",
+         "\n",
+         "# Load\n",
+         "url = \"https://lilianweng.github.io/posts/2023-06-23-agent/\"\n",
+         "loader = WebBaseLoader(url)\n",
+         "docs = loader.load()\n",
+         "\n",
+         "# Split\n",
+         "text_splitter = RecursiveCharacterTextSplitter(\n",
+         "    chunk_size=500, chunk_overlap=100\n",
+         ")\n",
+         "all_splits = text_splitter.split_documents(docs)\n",
+         "\n",
+         "# Embed\n",
+         "#embedding = GPT4AllEmbeddings()\n",
+         "embedding = HuggingFaceEmbeddings(model_name=\"sentence-transformers/all-mpnet-base-v2\")\n",
+         "\n",
+         "\n",
+         "# Index\n",
+         "vectorstore = Chroma.from_documents(\n",
+         "    documents=all_splits,\n",
+         "    collection_name=\"rag-chroma\",\n",
+         "    embedding=embedding,\n",
+         ")\n",
+         "retriever = vectorstore.as_retriever()"
+       ],
+       "metadata": {
+         "id": "LkX9ehoeupSz"
+       },
+       "execution_count": null,
+       "outputs": []
+     },
+     {
+       "cell_type": "markdown",
+       "source": [
+         "### State"
+       ],
+       "metadata": {
+         "id": "0A-7_d3G9b8h"
+       }
+     },
+     {
+       "cell_type": "code",
+       "source": [
+         "from typing import Annotated, Any, Dict, TypedDict\n",
+         "from langchain_core.messages import BaseMessage\n",
+         "\n",
+         "class GraphState(TypedDict):\n",
+         "    \"\"\"\n",
+         "    Represents the state of our graph.\n",
+         "\n",
+         "    Attributes:\n",
+         "        keys: A dictionary where each key is a string.\n",
+         "    \"\"\"\n",
+         "\n",
+         "    keys: Dict[str, Any]"
+       ],
+       "metadata": {
+         "id": "fRzYhmOs7_GJ"
+       },
+       "execution_count": 9,
+       "outputs": []
+     },
+     {
+       "cell_type": "markdown",
+       "source": [
+         "### Nodes and Edges"
+       ],
+       "metadata": {
+         "id": "bPhIdcVD9pgV"
+       }
+     },
+     {
+       "cell_type": "code",
+       "source": [
+         "import json\n",
+         "import operator\n",
+         "from typing import Annotated, Sequence, TypedDict\n",
+         "\n",
+         "from langchain import hub\n",
+         "from langchain_core.output_parsers import JsonOutputParser\n",
+         "from langchain.prompts import PromptTemplate\n",
+         "from langchain.schema import Document\n",
+         "from langchain.tools import Tool\n",
+         "from langchain_community.utilities import GoogleSearchAPIWrapper\n",
+         "from langchain_community.vectorstores import Chroma\n",
+         "from langchain_core.output_parsers import StrOutputParser\n",
+         "from langchain_core.runnables import RunnablePassthrough\n",
+         "\n",
+         "### Nodes ###\n",
+         "\n",
+         "def retrieve(state):\n",
+         "    \"\"\"\n",
+         "    Retrieve documents\n",
+         "\n",
+         "    Args:\n",
+         "        state (dict): The current graph state\n",
+         "\n",
+         "    Returns:\n",
+         "        state (dict): New key added to state, documents, that contains retrieved documents\n",
+         "    \"\"\"\n",
+         "    print(\"---RETRIEVE---\")\n",
+         "    state_dict = state[\"keys\"]\n",
+         "    question = state_dict[\"question\"]\n",
+         "    local = state_dict[\"local\"]\n",
+         "    documents = retriever.get_relevant_documents(question)\n",
+         "\n",
+         "    return {\"keys\": {\"documents\": documents, \"local\": local, \"question\": question}}\n",
+         "\n",
+         "def generate(state):\n",
+         "    \"\"\"\n",
+         "    Generate answer\n",
+         "\n",
+         "    Args:\n",
+         "        state (dict): The current graph state\n",
+         "\n",
+         "    Returns:\n",
+         "        state (dict): New key added to state, generation, that contains the generated answer\n",
+         "    \"\"\"\n",
+         "    print(\"---GENERATE---\")\n",
+         "    state_dict = state[\"keys\"]\n",
+         "    question = state_dict[\"question\"]\n",
+         "    documents = state_dict[\"documents\"]\n",
+         "    local = state_dict[\"local\"]\n",
+         "\n",
+         "    # Prompt\n",
+         "    prompt = hub.pull(\"rlm/rag-prompt\")\n",
+         "\n",
+         "    # LLM\n",
+         "    llm = llm_mid\n",
+         "\n",
+         "    # Post-processing\n",
+         "    def format_docs(docs):\n",
+         "        return \"\\n\\n\".join(doc.page_content for doc in docs)\n",
+         "\n",
+         "    # Chain\n",
+         "    rag_chain = prompt | llm | StrOutputParser()\n",
+         "\n",
+         "\n",
+         "    # Run\n",
+         "    generation = rag_chain.invoke({\"context\": format_docs(documents), \"question\": question})\n",
+         "\n",
+         "    return {\n",
+         "        \"keys\": {\"documents\": documents, \"question\": question, \"generation\": generation}\n",
+         "    }\n",
+         "\n",
+         "def grade_documents(state):\n",
+         "    \"\"\"\n",
+         "    Determines whether the retrieved documents are relevant to the question.\n",
+         "\n",
+         "    Args:\n",
+         "        state (dict): The current graph state\n",
+         "\n",
+         "    Returns:\n",
+         "        state (dict): Updates documents key with relevant documents\n",
+         "    \"\"\"\n",
+         "\n",
+         "    print(\"---CHECK RELEVANCE---\")\n",
+         "    state_dict = state[\"keys\"]\n",
+         "    question = state_dict[\"question\"]\n",
+         "    documents = state_dict[\"documents\"]\n",
+         "    local = state_dict[\"local\"]\n",
+         "\n",
+         "    # LLM\n",
+         "    llm = llm_mid\n",
+         "\n",
+         "    prompt = PromptTemplate(\n",
+         "        template=\"\"\"You are a grader assessing the relevance of a retrieved document to a user question. \\n\n",
+         "        Here is the retrieved document: \\n\\n {context} \\n\\n\n",
+         "        Here is the user question: {question} \\n\n",
+         "        If the document contains keywords related to the user question, grade it as relevant. \\n\n",
+         "        It does not need to be a stringent test. The goal is to filter out erroneous retrievals. \\n\n",
+         "        Give a binary score 'yes' or 'no' to indicate whether the document is relevant to the question. \\n\n",
+         "        Provide the binary score as a JSON with a single key 'score' and no preamble or explanation.\n",
+         "        \"\"\",\n",
+         "        input_variables=[\"question\", \"context\"],\n",
+         "    )\n",
+         "\n",
+         "    chain = prompt | llm | JsonOutputParser()\n",
+         "\n",
+         "    # Score\n",
+         "    filtered_docs = []\n",
+         "    search = \"No\"  # Default: do not run a web search to supplement retrieval\n",
+         "    for d in documents:\n",
+         "        score = chain.invoke(\n",
+         "            {\n",
+         "                \"question\": question,\n",
+         "                \"context\": d.page_content,\n",
+         "            }\n",
+         "        )\n",
+         "        grade = score[\"score\"]\n",
+         "        if grade == \"yes\":\n",
+         "            print(\"---GRADE: DOCUMENT RELEVANT---\")\n",
+         "            filtered_docs.append(d)\n",
+         "        else:\n",
+         "            print(\"---GRADE: DOCUMENT IRRELEVANT---\")\n",
+         "            search = \"Yes\"  # Perform web search\n",
+         "            continue\n",
+         "\n",
+         "    return {\n",
+         "        \"keys\": {\n",
+         "            \"documents\": filtered_docs,\n",
+         "            \"question\": question,\n",
+         "            \"local\": local,\n",
+         "            \"run_web_search\": search,\n",
+         "        }\n",
+         "    }\n",
+         "\n",
+         "def transform_query(state):\n",
+         "    \"\"\"\n",
+         "    Transform the query to produce a better question.\n",
+         "\n",
+         "    Args:\n",
+         "        state (dict): The current graph state\n",
+         "\n",
+         "    Returns:\n",
+         "        state (dict): Updates question key with a re-phrased question\n",
+         "    \"\"\"\n",
+         "    print(\"---TRANSFORM QUERY---\")\n",
+         "    state_dict = state[\"keys\"]\n",
+         "    question = state_dict[\"question\"]\n",
+         "    documents = state_dict[\"documents\"]\n",
+         "    local = state_dict[\"local\"]\n",
+         "\n",
+         "    # Create a prompt template with format instructions and the query\n",
+         "    prompt = PromptTemplate(\n",
+         "        template=\"\"\"You are generating questions that are well optimized for retrieval. \\n\n",
+         "        Look at the input and try to reason about the underlying semantic intent / meaning. \\n\n",
+         "        Here is the initial question:\n",
+         "        \\n -------- \\n\n",
+         "        {question}\n",
+         "        \\n -------- \\n\n",
+         "        Provide an improved question without any preamble, only respond with the updated question: \"\"\",\n",
+         "        input_variables=[\"question\"],\n",
+         "    )\n",
+         "\n",
+         "    # Question re-writer\n",
+         "    # LLM\n",
+         "    llm = llm_mid\n",
+         "\n",
+         "    # Prompt\n",
+         "    chain = prompt | llm | StrOutputParser()\n",
+         "    better_question = chain.invoke({\"question\": question})\n",
+         "\n",
+         "    return {\n",
+         "        \"keys\": {\"documents\": documents, \"question\": better_question, \"local\": local}\n",
+         "    }\n",
+         "\n",
+         "\n",
+         "def web_search(state):\n",
+         "    \"\"\"\n",
+         "    Web search based on the re-phrased question using Google\n",
+         "\n",
+         "    Args:\n",
+         "        state (dict): The current graph state\n",
+         "    Returns:\n",
+         "        state (dict): Web results appended to documents.\n",
+         "    \"\"\"\n",
+         "\n",
+         "    print(\"---WEB SEARCH---\")\n",
+         "    state_dict = state[\"keys\"]\n",
+         "    question = state_dict[\"question\"]\n",
+         "    documents = state_dict[\"documents\"]\n",
+         "    local = state_dict[\"local\"]\n",
+         "\n",
+         "    websearch = GoogleSearchAPIWrapper(k=3)\n",
+         "    google_search = Tool(\n",
+         "        name=\"google_search\",\n",
+         "        description=\"Search Google for recent results.\",\n",
+         "        func=websearch.run,\n",
+         "    )\n",
+         "    search_results = google_search.run(question)  # renamed to avoid shadowing this function\n",
+         "    #filtered_contents = [d[\"page_content\"] for d in search_results if d[\"page_content\"] is not None]\n",
+         "    #web_results = \"\\n\".join(filtered_contents)\n",
+         "    web_results = Document(page_content=search_results)\n",
+         "    documents.append(web_results)\n",
+         "\n",
+         "    return {\"keys\": {\"documents\": documents, \"local\": local, \"question\": question}}"
+       ],
+       "metadata": {
+         "id": "1Sn5NCyl9pRE"
+       },
+       "execution_count": 88,
+       "outputs": []
+     },
+     {
+       "cell_type": "code",
+       "source": [
+         "### Edges ###\n",
+         "\n",
+         "def decide_to_generate(state):\n",
+         "    \"\"\"\n",
+         "    Determines whether to generate an answer or re-phrase the question for a web search.\n",
+         "\n",
+         "    Args:\n",
+         "        state (dict): The current state of the agent, including all keys.\n",
+         "\n",
+         "    Returns:\n",
+         "        str: Next node to call\n",
+         "    \"\"\"\n",
+         "\n",
+         "    print(\"---DECIDE TO GENERATE---\")\n",
+         "    state_dict = state[\"keys\"]\n",
+         "    question = state_dict[\"question\"]\n",
+         "    filtered_documents = state_dict[\"documents\"]\n",
+         "    search = state_dict[\"run_web_search\"]\n",
+         "\n",
+         "    if search == \"Yes\":\n",
+         "        # Some documents were filtered out by grade_documents,\n",
+         "        # so re-phrase the query and supplement with a web search\n",
+         "        print(\"---DECISION: TRANSFORM QUERY and RUN WEB SEARCH---\")\n",
+         "        return \"transform_query\"\n",
+         "    else:\n",
+         "        # We have relevant documents, so generate an answer\n",
+         "        print(\"---DECISION: GENERATE---\")\n",
+         "        return \"generate\""
+       ],
+       "metadata": {
+         "id": "l9djuUIx-_ZK"
+       },
+       "execution_count": 89,
+       "outputs": []
+     },
+     {
+       "cell_type": "markdown",
+       "source": [
+         "### Graph"
+       ],
+       "metadata": {
+         "id": "Z6g94SltdUEc"
+       }
+     },
+     {
+       "cell_type": "code",
+       "source": [
+         "import pprint\n",
+         "from langgraph.graph import END, StateGraph\n",
+         "\n",
+         "workflow = StateGraph(GraphState)\n",
+         "\n",
+         "# Define the nodes\n",
+         "workflow.add_node(\"retrieve\", retrieve)  # retrieve\n",
+         "workflow.add_node(\"grade_documents\", grade_documents)  # grade documents\n",
+         "workflow.add_node(\"generate\", generate)\n",
+         "workflow.add_node(\"transform_query\", transform_query)\n",
+         "workflow.add_node(\"web_search\", web_search)\n",
+         "\n",
+         "# Build graph\n",
+         "workflow.set_entry_point(\"retrieve\")\n",
+         "workflow.add_edge(\"retrieve\", \"grade_documents\")\n",
+         "workflow.add_conditional_edges(\n",
+         "    \"grade_documents\",\n",
+         "    decide_to_generate,\n",
+         "    {\n",
+         "        \"transform_query\": \"transform_query\",\n",
+         "        \"generate\": \"generate\",\n",
+         "    },\n",
+         ")\n",
+         "workflow.add_edge(\"transform_query\", \"web_search\")\n",
+         "workflow.add_edge(\"web_search\", \"generate\")\n",
+         "workflow.add_edge(\"generate\", END)\n",
+         "\n",
+         "# Compile\n",
+         "app = workflow.compile()"
+       ],
+       "metadata": {
+         "id": "5pyAWscidTUt"
+       },
+       "execution_count": 90,
+       "outputs": []
+     },
+     {
+       "cell_type": "markdown",
+       "source": [
+         "### RUN"
+       ],
+       "metadata": {
+         "id": "Yb4oGR4Dfoud"
+       }
+     },
+     {
+       "cell_type": "code",
+       "source": [
+         "# Run\n",
+         "\n",
+         "inputs = {\n",
+         "    \"keys\": {\n",
+         "        \"question\": \"Explain how the different types of agent memory work?\",\n",
+         "        \"local\": \"No\",\n",
+         "    }\n",
+         "}\n",
+         "\n",
+         "for output in app.stream(inputs):\n",
+         "    for key, value in output.items():\n",
+         "        # Node\n",
+         "        pprint.pprint(f\"Node '{key}':\")\n",
+         "        # Optional: print full state at each node\n",
+         "        # pprint.pprint(value[\"keys\"], indent=2, width=80, depth=None)\n",
+         "        pprint.pprint(\"\\n---\\n\")\n",
+         "\n",
+         "# Final generation\n",
+         "pprint.pprint(value['keys']['generation'])"
+       ],
+       "metadata": {
+         "colab": {
+           "base_uri": "https://localhost:8080/"
+         },
+         "id": "bJH68dQffp_e",
+         "outputId": "4318d425-7284-4275-83b1-f1fcd85c9b38"
+       },
+       "execution_count": 92,
+       "outputs": [
+         {
+           "output_type": "stream",
+           "name": "stdout",
+           "text": [
+             "---RETRIEVE---\n",
+             "\"Node 'retrieve':\"\n",
+             "'\\n---\\n'\n",
+             "---CHECK RELEVANCE---\n",
+             "---GRADE: DOCUMENT IRRELEVANT---\n",
+             "---GRADE: DOCUMENT RELEVANT---\n",
+             "---GRADE: DOCUMENT RELEVANT---\n",
+             "---GRADE: DOCUMENT IRRELEVANT---\n",
+             "\"Node 'grade_documents':\"\n",
+             "'\\n---\\n'\n",
+             "---DECIDE TO GENERATE---\n",
+             "---DECISION: TRANSFORM QUERY and RUN WEB SEARCH---\n",
+             "---TRANSFORM QUERY---\n",
+             "\"Node 'transform_query':\"\n",
+             "'\\n---\\n'\n",
+             "---WEB SEARCH---\n",
+             "\"Node 'web_search':\"\n",
+             "'\\n---\\n'\n",
+             "---GENERATE---\n",
+             "\"Node 'generate':\"\n",
+             "'\\n---\\n'\n",
+             "\"Node '__end__':\"\n",
+             "'\\n---\\n'\n",
+             "(' \\n'\n",
+             " '\\n'\n",
+             " 'The functionalities of agent memory include recency, importance, relevance, '\n",
+             " 'reflection mechanism, sensory memory, short-term memory, and long-term '\n",
+             " 'memory. Recency gives higher scores to recent events, while Importance '\n",
+             " 'distinguishes mundane from core memories. Relevance depends on how related '\n",
+             " 'the memory is to the current situation or query. Reflection mechanism '\n",
+             " 'synthesizes memories into higher-level inferences over time. Sensory memory '\n",
+             " 'learns embedding representations for raw inputs, Short-term memory handles '\n",
+             " 'in-context learning, and Long-term memory serves as an external vector store '\n",
+             " 'attended to at query time.')\n"
+           ]
+         }
+       ]
+     }
+   ]
+ }