jfeng1115 committed on
Commit 58d33f0
1 parent: a0fa57f

init commit

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .idea/workspace.xml +82 -0
  2. CITATION.cff +8 -0
  3. Dockerfile +11 -0
  4. LICENSE +21 -0
  5. Makefile +53 -0
  6. Procfile +1 -0
  7. README.md +82 -11
  8. __pycache__/main.cpython-39.pyc +0 -0
  9. app.py +102 -0
  10. langchain/__init__.py +109 -0
  11. langchain/__pycache__/__init__.cpython-39.pyc +0 -0
  12. langchain/__pycache__/cache.cpython-39.pyc +0 -0
  13. langchain/__pycache__/formatting.cpython-39.pyc +0 -0
  14. langchain/__pycache__/input.cpython-39.pyc +0 -0
  15. langchain/__pycache__/python.cpython-39.pyc +0 -0
  16. langchain/__pycache__/requests.cpython-39.pyc +0 -0
  17. langchain/__pycache__/schema.cpython-39.pyc +0 -0
  18. langchain/__pycache__/sql_database.cpython-39.pyc +0 -0
  19. langchain/__pycache__/text_splitter.cpython-39.pyc +0 -0
  20. langchain/__pycache__/utils.cpython-39.pyc +0 -0
  21. langchain/agents/__init__.py +43 -0
  22. langchain/agents/__pycache__/__init__.cpython-39.pyc +0 -0
  23. langchain/agents/__pycache__/agent.cpython-39.pyc +0 -0
  24. langchain/agents/__pycache__/initialize.cpython-39.pyc +0 -0
  25. langchain/agents/__pycache__/load_tools.cpython-39.pyc +0 -0
  26. langchain/agents/__pycache__/loading.cpython-39.pyc +0 -0
  27. langchain/agents/__pycache__/tools.cpython-39.pyc +0 -0
  28. langchain/agents/agent.py +583 -0
  29. langchain/agents/agent_toolkits/__init__.py +39 -0
  30. langchain/agents/agent_toolkits/__pycache__/__init__.cpython-39.pyc +0 -0
  31. langchain/agents/agent_toolkits/__pycache__/base.cpython-39.pyc +0 -0
  32. langchain/agents/agent_toolkits/base.py +15 -0
  33. langchain/agents/agent_toolkits/csv/__init__.py +1 -0
  34. langchain/agents/agent_toolkits/csv/__pycache__/__init__.cpython-39.pyc +0 -0
  35. langchain/agents/agent_toolkits/csv/__pycache__/base.cpython-39.pyc +0 -0
  36. langchain/agents/agent_toolkits/csv/base.py +17 -0
  37. langchain/agents/agent_toolkits/json/__init__.py +1 -0
  38. langchain/agents/agent_toolkits/json/__pycache__/__init__.cpython-39.pyc +0 -0
  39. langchain/agents/agent_toolkits/json/__pycache__/base.cpython-39.pyc +0 -0
  40. langchain/agents/agent_toolkits/json/__pycache__/prompt.cpython-39.pyc +0 -0
  41. langchain/agents/agent_toolkits/json/__pycache__/toolkit.cpython-39.pyc +0 -0
  42. langchain/agents/agent_toolkits/json/base.py +43 -0
  43. langchain/agents/agent_toolkits/json/prompt.py +25 -0
  44. langchain/agents/agent_toolkits/json/toolkit.py +21 -0
  45. langchain/agents/agent_toolkits/openapi/__init__.py +1 -0
  46. langchain/agents/agent_toolkits/openapi/__pycache__/__init__.cpython-39.pyc +0 -0
  47. langchain/agents/agent_toolkits/openapi/__pycache__/base.cpython-39.pyc +0 -0
  48. langchain/agents/agent_toolkits/openapi/__pycache__/prompt.cpython-39.pyc +0 -0
  49. langchain/agents/agent_toolkits/openapi/__pycache__/toolkit.cpython-39.pyc +0 -0
  50. langchain/agents/agent_toolkits/openapi/base.py +46 -0
.idea/workspace.xml ADDED
@@ -0,0 +1,82 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="AutoImportSettings">
+     <option name="autoReloadType" value="SELECTIVE" />
+   </component>
+   <component name="ChangeListManager">
+     <list default="true" id="3dc50299-1dad-4898-8eba-684fc694db9d" name="Changes" comment="" />
+     <option name="SHOW_DIALOG" value="false" />
+     <option name="HIGHLIGHT_CONFLICTS" value="true" />
+     <option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
+     <option name="LAST_RESOLUTION" value="IGNORE" />
+   </component>
+   <component name="Git.Settings">
+     <option name="RECENT_GIT_ROOT_PATH" value="$PROJECT_DIR$" />
+     <option name="RESET_MODE" value="HARD" />
+   </component>
+   <component name="MarkdownSettingsMigration">
+     <option name="stateVersion" value="1" />
+   </component>
+   <component name="ProjectId" id="2VHNEcYX4qIZfWloxtEWlDzDWqk" />
+   <component name="ProjectViewState">
+     <option name="hideEmptyMiddlePackages" value="true" />
+     <option name="showLibraryContents" value="true" />
+   </component>
+   <component name="PropertiesComponent"><![CDATA[{
+   "keyToString": {
+     "RunOnceActivity.ShowReadmeOnStart": "true",
+     "WebServerToolWindowFactoryState": "false",
+     "git-widget-placeholder": "main",
+     "last_opened_file_path": "/Users/jiefeng/Dropbox/Apps/admixer/marketing-analytics-bot",
+     "node.js.detected.package.eslint": "true",
+     "node.js.detected.package.tslint": "true",
+     "node.js.selected.package.eslint": "(autodetect)",
+     "node.js.selected.package.tslint": "(autodetect)",
+     "nodejs_package_manager_path": "npm",
+     "vue.rearranger.settings.migration": "true"
+   }
+ }]]></component>
+   <component name="RunManager">
+     <configuration name="marketing-analytics-bot" type="Python.FlaskServer">
+       <module name="marketing-analytics-bot" />
+       <option name="target" value="$PROJECT_DIR$/app.py" />
+       <option name="targetType" value="PATH" />
+       <option name="INTERPRETER_OPTIONS" value="" />
+       <option name="PARENT_ENVS" value="true" />
+       <option name="SDK_HOME" value="" />
+       <option name="WORKING_DIRECTORY" value="" />
+       <option name="IS_MODULE_SDK" value="false" />
+       <option name="ADD_CONTENT_ROOTS" value="true" />
+       <option name="ADD_SOURCE_ROOTS" value="true" />
+       <EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
+       <option name="launchJavascriptDebuger" value="false" />
+       <method v="2" />
+     </configuration>
+   </component>
+   <component name="SpellCheckerSettings" RuntimeDictionaries="0" Folders="0" CustomDictionaries="0" DefaultDictionary="application-level" UseSingleDictionary="true" transferred="true" />
+   <component name="TaskManager">
+     <task active="true" id="Default" summary="Default task">
+       <changelist id="3dc50299-1dad-4898-8eba-684fc694db9d" name="Changes" comment="" />
+       <created>1694488935255</created>
+       <option name="number" value="Default" />
+       <option name="presentableId" value="Default" />
+       <updated>1694488935255</updated>
+       <workItem from="1694488936855" duration="1127000" />
+     </task>
+     <servers />
+   </component>
+   <component name="TypeScriptGeneratedFilesManager">
+     <option name="version" value="3" />
+   </component>
+   <component name="Vcs.Log.Tabs.Properties">
+     <option name="TAB_STATES">
+       <map>
+         <entry key="MAIN">
+           <value>
+             <State />
+           </value>
+         </entry>
+       </map>
+     </option>
+   </component>
+ </project>
CITATION.cff ADDED
@@ -0,0 +1,8 @@
+ cff-version: 1.2.0
+ message: "If you use this software, please cite it as below."
+ authors:
+ - family-names: "Chase"
+   given-names: "Harrison"
+ title: "LangChain"
+ date-released: 2022-10-17
+ url: "https://github.com/hwchase17/langchain"
Dockerfile ADDED
@@ -0,0 +1,11 @@
+ FROM python:3.9
+ RUN useradd -m -u 1000 user
+ USER user
+ ENV HOME=/home/user \
+     PATH=/home/user/.local/bin:$PATH
+ WORKDIR $HOME/app
+ COPY --chown=user . $HOME/app
+ COPY ./requirements.txt ~/app/requirements.txt
+ RUN pip install -r requirements.txt
+ COPY . .
+ CMD ["python", "app.py"]
LICENSE ADDED
@@ -0,0 +1,21 @@
+ The MIT License
+
+ Copyright (c) Harrison Chase
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
Makefile ADDED
@@ -0,0 +1,53 @@
+ .PHONY: all clean format lint test tests test_watch integration_tests help
+
+ all: help
+
+ coverage:
+ 	poetry run pytest --cov \
+ 		--cov-config=.coveragerc \
+ 		--cov-report xml \
+ 		--cov-report term-missing:skip-covered
+
+ clean: docs_clean
+
+ docs_build:
+ 	cd docs && poetry run make html
+
+ docs_clean:
+ 	cd docs && poetry run make clean
+
+ docs_linkcheck:
+ 	poetry run linkchecker docs/_build/html/index.html
+
+ format:
+ 	poetry run black .
+ 	poetry run ruff --select I --fix .
+
+ lint:
+ 	poetry run mypy .
+ 	poetry run black . --check
+ 	poetry run ruff .
+
+ test:
+ 	poetry run pytest tests/unit_tests
+
+ tests:
+ 	poetry run pytest tests/unit_tests
+
+ test_watch:
+ 	poetry run ptw --now . -- tests/unit_tests
+
+ integration_tests:
+ 	poetry run pytest tests/integration_tests
+
+ help:
+ 	@echo '----'
+ 	@echo 'coverage           - run unit tests and generate coverage report'
+ 	@echo 'docs_build         - build the documentation'
+ 	@echo 'docs_clean         - clean the documentation build artifacts'
+ 	@echo 'docs_linkcheck     - run linkchecker on the documentation'
+ 	@echo 'format             - run code formatters'
+ 	@echo 'lint               - run linters'
+ 	@echo 'test               - run unit tests'
+ 	@echo 'test_watch         - run unit tests in watch mode'
+ 	@echo 'integration_tests  - run integration tests'
Procfile ADDED
@@ -0,0 +1 @@
+ web: gunicorn app:app
README.md CHANGED
@@ -1,11 +1,82 @@
- ---
- title: Marketing Analytics Bot
- emoji: 🦀
- colorFrom: purple
- colorTo: purple
- sdk: docker
- pinned: false
- license: openrail
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # 🦜️🔗 LangChain
+
+ Building applications with LLMs through composability ⚡
+
+ [![lint](https://github.com/hwchase17/langchain/actions/workflows/lint.yml/badge.svg)](https://github.com/hwchase17/langchain/actions/workflows/lint.yml) [![test](https://github.com/hwchase17/langchain/actions/workflows/test.yml/badge.svg)](https://github.com/hwchase17/langchain/actions/workflows/test.yml) [![linkcheck](https://github.com/hwchase17/langchain/actions/workflows/linkcheck.yml/badge.svg)](https://github.com/hwchase17/langchain/actions/workflows/linkcheck.yml) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langchainai.svg?style=social&label=Follow%20%40LangChainAI)](https://twitter.com/langchainai) [![](https://dcbadge.vercel.app/api/server/6adMQxSpJS?compact=true&style=flat)](https://discord.gg/6adMQxSpJS)
+
+ **Production Support:** As you move your LangChains into production, we'd love to offer more comprehensive support.
+ Please fill out [this form](https://forms.gle/57d8AmXBYp8PP8tZA) and we'll set up a dedicated support Slack channel.
+
+ ## Quick Install
+
+ `pip install langchain`
+
+ ## 🤔 What is this?
+
+ Large language models (LLMs) are emerging as a transformative technology, enabling
+ developers to build applications that they previously could not.
+ But using these LLMs in isolation is often not enough to
+ create a truly powerful app - the real power comes when you can combine them with other sources of computation or knowledge.
+
+ This library is aimed at assisting in the development of those types of applications. Common examples of these types of applications include:
+
+ **❓ Question Answering over specific documents**
+
+ - [Documentation](https://langchain.readthedocs.io/en/latest/use_cases/question_answering.html)
+ - End-to-end Example: [Question Answering over Notion Database](https://github.com/hwchase17/notion-qa)
+
+ **💬 Chatbots**
+
+ - [Documentation](https://langchain.readthedocs.io/en/latest/use_cases/chatbots.html)
+ - End-to-end Example: [Chat-LangChain](https://github.com/hwchase17/chat-langchain)
+
+ **🤖 Agents**
+
+ - [Documentation](https://langchain.readthedocs.io/en/latest/use_cases/agents.html)
+ - End-to-end Example: [GPT+WolframAlpha](https://huggingface.co/spaces/JavaFXpert/Chat-GPT-LangChain)
+
+ ## 📖 Documentation
+
+ Please see [here](https://langchain.readthedocs.io/en/latest/?) for full documentation on:
+
+ - Getting started (installation, setting up the environment, simple examples)
+ - How-To examples (demos, integrations, helper functions)
+ - Reference (full API docs)
+ - Resources (high-level explanation of core concepts)
+
+ ## 🚀 What can this help with?
+
+ There are six main areas that LangChain is designed to help with.
+ These are, in increasing order of complexity:
+
+ **📃 LLMs and Prompts:**
+
+ This includes prompt management, prompt optimization, generic interface for all LLMs, and common utilities for working with LLMs.
+
+ **🔗 Chains:**
+
+ Chains go beyond just a single LLM call, and are sequences of calls (whether to an LLM or a different utility). LangChain provides a standard interface for chains, lots of integrations with other tools, and end-to-end chains for common applications.
+
+ **📚 Data Augmented Generation:**
+
+ Data Augmented Generation involves specific types of chains that first interact with an external datasource to fetch data to use in the generation step. Examples of this include summarization of long pieces of text and question/answering over specific data sources.
+
+ **🤖 Agents:**
+
+ Agents involve an LLM making decisions about which Actions to take, taking that Action, seeing an Observation, and repeating that until done. LangChain provides a standard interface for agents, a selection of agents to choose from, and examples of end to end agents.
+
+ **🧠 Memory:**
+
+ Memory is the concept of persisting state between calls of a chain/agent. LangChain provides a standard interface for memory, a collection of memory implementations, and examples of chains/agents that use memory.
+
+ **🧐 Evaluation:**
+
+ [BETA] Generative models are notoriously hard to evaluate with traditional metrics. One new way of evaluating them is using language models themselves to do the evaluation. LangChain provides some prompts/chains for assisting in this.
+
+ For more information on these concepts, please see our [full documentation](https://langchain.readthedocs.io/en/latest/?).
+
+ ## 💁 Contributing
+
+ As an open source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infra, or better documentation.
+
+ For detailed information on how to contribute, see [here](.github/CONTRIBUTING.md).
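
Editor's note (not part of the commit): the README above describes the "LLMs and Prompts" and "Chains" concepts without code. Below is a minimal, hedged sketch of those two concepts using only names exported by the bundled `langchain/__init__.py`; the prompt text and temperature are illustrative, and an OpenAI API key is assumed to be configured.

```python
# Minimal sketch of an LLMChain built from a PromptTemplate and the OpenAI wrapper.
# Assumes OPENAI_API_KEY is set in the environment; prompt and temperature are illustrative.
from langchain import LLMChain, OpenAI, PromptTemplate

prompt = PromptTemplate(
    input_variables=["product"],
    template="What is a good name for a company that makes {product}?",
)
chain = LLMChain(llm=OpenAI(temperature=0.9), prompt=prompt)
print(chain.run("colorful socks"))
```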
__pycache__/main.cpython-39.pyc ADDED
Binary file (2.74 kB). View file
 
app.py ADDED
@@ -0,0 +1,102 @@
+ """Python file to serve as the frontend"""
+ from datetime import datetime
+
+ import wandb
+
+ from langchain.agents.agent_toolkits.sql.simple_sql import create_simple_sql_agent_excutor
+ from langchain.callbacks import WandbCallbackHandler, CallbackManager, StdOutCallbackHandler
+ from langchain.document_loaders import WebBaseLoader
+ from langchain.embeddings import OpenAIEmbeddings
+ # import faiss
+ from langchain import OpenAI, FAISS, LLMChain
+ from langchain.chains import VectorDBQAWithSourcesChain
+ import pickle
+
+ # root_dir = "/Users/jiefeng/Dropbox/Apps/admixer/neon_scrapy/data/"
+ # index_path = "".join([root_dir, "docs.index"])
+ # fass_store_path = "".join([root_dir, "faiss_store.pkl"])
+ # Load the LangChain.
+
+ from langchain.prompts import PromptTemplate
+ import os
+ from langchain import OpenAI, VectorDBQA
+ from flask import Flask, request, jsonify
+ from flask_cors import CORS, cross_origin
+ from langchain.agents.agent_toolkits.sql.toolkit import SimpleSQLDatabaseToolkit
+ from langchain.sql_database import SQLDatabase
+ from langchain.llms.openai import OpenAI
+
+ # create your SocketIO instance
+ # handle chat messages
+
+
+ url = "https://langchain.readthedocs.io/en/latest/"
+ os.environ["OPENAI_API_KEY"] = "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"
+ os.environ["WANDB_API_KEY"] = "7e3c65043f06598e45810ffdd5588f048ec870db"
+ qa = None
+
+ db = SQLDatabase.from_uri(
+     "postgresql+psycopg2://macbttqtwpbkxg:8e00539601577e6d3e73f4781d0d71913dc5a165a9b75229cf930abe79ddaae3@ec2-54-173-77-184.compute-1.amazonaws.com:5432/d8cb6alpt8ft06")
+ toolkit = SimpleSQLDatabaseToolkit(db=db)
+
+ session_group = datetime.now().strftime("%m.%d.%Y_%H.%M.%S")
+ # wandb_callback = WandbCallbackHandler(
+ #     job_type="inference",
+ #     project="marketing_questions",
+ #     group=f"minimal_{session_group}",
+ #     name="llm",
+ #     tags=["test"],
+ # )
+ manager = CallbackManager([StdOutCallbackHandler()])
+
+
+ llm = OpenAI(temperature=0,
+              model_name="gpt-4",
+              callback_manager=manager,
+              verbose=True,
+              )
+
+ agent_executor = create_simple_sql_agent_excutor(
+     llm=llm,
+     toolkit=toolkit,
+     callback_manager=manager,
+     verbose=True
+ )
+ # agent_executor.run("What are the most popular pages visited by our visitors?")
+
+ # agent_executor.run("how many visitors profiles are from the Unite States?")
+ # From here down is all the StreamLit UI.
+
+
+ app = Flask(__name__)
+ cors = CORS(app)
+
+ @app.route('/')
+ @cross_origin()
+ def hello_world():
+     return 'Hello, World!'
+
+
+ @app.route('/api/ask', methods=['POST'])
+ @cross_origin()
+ def submit():
+     print("request received")
+     data = request.get_json()
+     question = data['question']
+     sql_data_result = None
+     if question:
+         print(question)
+         sql_data_result = agent_executor.run(question)
+         #wandb_callback.flush_tracker(agent_executor, reset=False, finish=True)
+
+     # chartPrompt = PromptTemplate(
+     #     template="What chart is best for the data {data}?", input_variables=["data"])
+     #
+     # chartChain = LLMChain(llm=llm, prompt=chartPrompt)
+     # chartChain.run(sql_data_result)
+     result = jsonify(sql_data_result)
+     return result
+
+
+ if __name__ == '__main__':
+     app.run(port=7860)
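
Editor's note (not part of the commit): a hedged client-side sketch of how the `/api/ask` route defined in app.py above can be exercised once the Flask app is running; the question string is illustrative.

```python
# Sketch of a client call against the /api/ask route defined in app.py above.
# Assumes the app is running locally on port 7860 (see app.run(port=7860)).
import requests

resp = requests.post(
    "http://localhost:7860/api/ask",
    json={"question": "What are the most popular pages visited by our visitors?"},
)
print(resp.json())  # the agent's answer, serialized by jsonify()
```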
langchain/__init__.py ADDED
@@ -0,0 +1,109 @@
+ """Main entrypoint into package."""
+
+ from typing import Optional
+
+ from langchain.agents import MRKLChain, ReActChain, SelfAskWithSearchChain
+ from langchain.cache import BaseCache
+ from langchain.callbacks import (
+     set_default_callback_manager,
+     set_handler,
+     set_tracing_callback_manager,
+ )
+ from langchain.chains import (
+     ConversationChain,
+     LLMBashChain,
+     LLMChain,
+     LLMCheckerChain,
+     LLMMathChain,
+     PALChain,
+     QAWithSourcesChain,
+     SQLDatabaseChain,
+     VectorDBQA,
+     VectorDBQAWithSourcesChain,
+ )
+ from langchain.docstore import InMemoryDocstore, Wikipedia
+ from langchain.llms import (
+     Anthropic,
+     Banana,
+     CerebriumAI,
+     Cohere,
+     ForefrontAI,
+     GooseAI,
+     HuggingFaceHub,
+     Modal,
+     OpenAI,
+     Petals,
+     SagemakerEndpoint,
+     StochasticAI,
+     Writer,
+ )
+ from langchain.llms.huggingface_pipeline import HuggingFacePipeline
+ from langchain.prompts import (
+     BasePromptTemplate,
+     FewShotPromptTemplate,
+     Prompt,
+     PromptTemplate,
+ )
+ from langchain.sql_database import SQLDatabase
+ from langchain.utilities.google_search import GoogleSearchAPIWrapper
+ from langchain.utilities.google_serper import GoogleSerperAPIWrapper
+ from langchain.utilities.searx_search import SearxSearchWrapper
+ from langchain.utilities.serpapi import SerpAPIWrapper
+ from langchain.utilities.wikipedia import WikipediaAPIWrapper
+ from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper
+ from langchain.vectorstores import FAISS, ElasticVectorSearch
+
+ verbose: bool = False
+ llm_cache: Optional[BaseCache] = None
+ set_default_callback_manager()
+
+ # For backwards compatibility
+ SerpAPIChain = SerpAPIWrapper
+
+ __all__ = [
+     "LLMChain",
+     "LLMBashChain",
+     "LLMCheckerChain",
+     "LLMMathChain",
+     "SelfAskWithSearchChain",
+     "SerpAPIWrapper",
+     "SerpAPIChain",
+     "SearxSearchWrapper",
+     "GoogleSearchAPIWrapper",
+     "GoogleSerperAPIWrapper",
+     "WolframAlphaAPIWrapper",
+     "WikipediaAPIWrapper",
+     "Anthropic",
+     "Banana",
+     "CerebriumAI",
+     "Cohere",
+     "ForefrontAI",
+     "GooseAI",
+     "Modal",
+     "OpenAI",
+     "Petals",
+     "StochasticAI",
+     "Writer",
+     "BasePromptTemplate",
+     "Prompt",
+     "FewShotPromptTemplate",
+     "PromptTemplate",
+     "ReActChain",
+     "Wikipedia",
+     "HuggingFaceHub",
+     "SagemakerEndpoint",
+     "HuggingFacePipeline",
+     "SQLDatabase",
+     "SQLDatabaseChain",
+     "FAISS",
+     "MRKLChain",
+     "VectorDBQA",
+     "ElasticVectorSearch",
+     "InMemoryDocstore",
+     "ConversationChain",
+     "VectorDBQAWithSourcesChain",
+     "QAWithSourcesChain",
+     "PALChain",
+     "set_handler",
+     "set_tracing_callback_manager",
+ ]
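
Editor's note (not part of the commit): a small sketch of how the module-level names defined above are used; `verbose` and `llm_cache` are plain module attributes, and `SerpAPIChain` is kept as a backwards-compatible alias for `SerpAPIWrapper`.

```python
# Sketch: module-level settings and the backwards-compatibility alias defined above.
import langchain

langchain.verbose = True                                   # toggle global verbose logging
assert langchain.SerpAPIChain is langchain.SerpAPIWrapper  # deprecated alias still resolves
```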
langchain/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (2.35 kB). View file
 
langchain/__pycache__/cache.cpython-39.pyc ADDED
Binary file (5.91 kB). View file
 
langchain/__pycache__/formatting.cpython-39.pyc ADDED
Binary file (1.37 kB). View file
 
langchain/__pycache__/input.cpython-39.pyc ADDED
Binary file (1.57 kB). View file
 
langchain/__pycache__/python.cpython-39.pyc ADDED
Binary file (1.15 kB). View file
 
langchain/__pycache__/requests.cpython-39.pyc ADDED
Binary file (3.72 kB). View file
 
langchain/__pycache__/schema.cpython-39.pyc ADDED
Binary file (13 kB). View file
 
langchain/__pycache__/sql_database.cpython-39.pyc ADDED
Binary file (7.14 kB). View file
 
langchain/__pycache__/text_splitter.cpython-39.pyc ADDED
Binary file (11.9 kB). View file
 
langchain/__pycache__/utils.cpython-39.pyc ADDED
Binary file (808 Bytes). View file
 
langchain/agents/__init__.py ADDED
@@ -0,0 +1,43 @@
+ """Interface for agents."""
+ from langchain.agents.agent import Agent, AgentExecutor
+ from langchain.agents.agent_toolkits import (
+     create_csv_agent,
+     create_json_agent,
+     create_openapi_agent,
+     create_pandas_dataframe_agent,
+     create_sql_agent,
+     create_vectorstore_agent,
+     create_vectorstore_router_agent,
+ )
+ from langchain.agents.conversational.base import ConversationalAgent
+ from langchain.agents.initialize import initialize_agent
+ from langchain.agents.load_tools import get_all_tool_names, load_tools
+ from langchain.agents.loading import load_agent
+ from langchain.agents.mrkl.base import MRKLChain, ZeroShotAgent
+ from langchain.agents.react.base import ReActChain, ReActTextWorldAgent
+ from langchain.agents.self_ask_with_search.base import SelfAskWithSearchChain
+ from langchain.agents.tools import Tool, tool
+
+ __all__ = [
+     "MRKLChain",
+     "SelfAskWithSearchChain",
+     "ReActChain",
+     "AgentExecutor",
+     "Agent",
+     "Tool",
+     "tool",
+     "initialize_agent",
+     "ZeroShotAgent",
+     "ReActTextWorldAgent",
+     "load_tools",
+     "get_all_tool_names",
+     "ConversationalAgent",
+     "load_agent",
+     "create_sql_agent",
+     "create_json_agent",
+     "create_openapi_agent",
+     "create_vectorstore_router_agent",
+     "create_vectorstore_agent",
+     "create_pandas_dataframe_agent",
+     "create_csv_agent",
+ ]
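
Editor's note (not part of the commit): a hedged sketch of the agent interface exported above, where `initialize_agent` wires an LLM and tools into an executor. The `"llm-math"` tool name and the `"zero-shot-react-description"` agent string are assumptions about what `load_tools` and `initialize_agent` accept; those modules are not shown in this diff.

```python
# Hedged sketch of the exported agent interface; the tool and agent names below are
# assumptions about load_tools/initialize_agent, which are not part of this diff.
from langchain.agents import initialize_agent, load_tools
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
tools = load_tools(["llm-math"], llm=llm)
agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True)
agent.run("What is 13 raised to the 0.5 power?")
```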
langchain/agents/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (1.31 kB). View file
 
langchain/agents/__pycache__/agent.cpython-39.pyc ADDED
Binary file (16.6 kB). View file
 
langchain/agents/__pycache__/initialize.cpython-39.pyc ADDED
Binary file (2.5 kB). View file
 
langchain/agents/__pycache__/load_tools.cpython-39.pyc ADDED
Binary file (9.69 kB). View file
 
langchain/agents/__pycache__/loading.cpython-39.pyc ADDED
Binary file (3.53 kB). View file
 
langchain/agents/__pycache__/tools.cpython-39.pyc ADDED
Binary file (3.73 kB). View file
 
langchain/agents/agent.py ADDED
@@ -0,0 +1,583 @@
1
+ """Chain that takes in an input and produces an action and action input."""
2
+ from __future__ import annotations
3
+
4
+ import json
5
+ import logging
6
+ from abc import abstractmethod
7
+ from pathlib import Path
8
+ from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
9
+
10
+ import yaml
11
+ from pydantic import BaseModel, root_validator
12
+
13
+ from langchain.agents.tools import InvalidTool
14
+ from langchain.callbacks.base import BaseCallbackManager
15
+ from langchain.chains.base import Chain
16
+ from langchain.chains.llm import LLMChain
17
+ from langchain.input import get_color_mapping
18
+ from langchain.llms.base import BaseLLM
19
+ from langchain.prompts.base import BasePromptTemplate
20
+ from langchain.prompts.few_shot import FewShotPromptTemplate
21
+ from langchain.prompts.prompt import PromptTemplate
22
+ from langchain.schema import AgentAction, AgentFinish, BaseMessage, AgentClarify
23
+ from langchain.tools.base import BaseTool
24
+
25
+ logger = logging.getLogger()
26
+
27
+
28
+ class Agent(BaseModel):
29
+ """Class responsible for calling the language model and deciding the action.
30
+
31
+ This is driven by an LLMChain. The prompt in the LLMChain MUST include
32
+ a variable called "agent_scratchpad" where the agent can put its
33
+ intermediary work.
34
+ """
35
+
36
+ llm_chain: LLMChain
37
+ allowed_tools: Optional[List[str]] = None
38
+ return_values: List[str] = ["output"]
39
+
40
+ @abstractmethod
41
+ def _extract_tool_and_input(self, text: str) -> Optional[Tuple[str, str]]:
42
+ """Extract tool and tool input from llm output."""
43
+
44
+ def _fix_text(self, text: str) -> str:
45
+ """Fix the text."""
46
+ raise ValueError("fix_text not implemented for this agent.")
47
+
48
+ @property
49
+ def _stop(self) -> List[str]:
50
+ return [
51
+ f"\n{self.observation_prefix.rstrip()}",
52
+ f"\n\t{self.observation_prefix.rstrip()}",
53
+ ]
54
+
55
+ def _construct_scratchpad(
56
+ self, intermediate_steps: List[Tuple[AgentAction, str]]
57
+ ) -> Union[str, List[BaseMessage]]:
58
+ """Construct the scratchpad that lets the agent continue its thought process."""
59
+ thoughts = ""
60
+ for action, observation in intermediate_steps:
61
+ thoughts += action.log
62
+ thoughts += f"\n{self.observation_prefix}{observation}\n{self.llm_prefix}"
63
+ return thoughts
64
+
65
+ def _get_next_action(self, full_inputs: Dict[str, str]) -> AgentAction:
66
+ full_output = self.llm_chain.predict(**full_inputs)
67
+ parsed_output = self._extract_tool_and_input(full_output)
68
+ while parsed_output is None:
69
+ full_output = self._fix_text(full_output)
70
+ full_inputs["agent_scratchpad"] += full_output
71
+ output = self.llm_chain.predict(**full_inputs)
72
+ full_output += output
73
+ parsed_output = self._extract_tool_and_input(full_output)
74
+ return AgentAction(
75
+ tool=parsed_output[0], tool_input=parsed_output[1], log=full_output
76
+ )
77
+
78
+ async def _aget_next_action(self, full_inputs: Dict[str, str]) -> AgentAction:
79
+ full_output = await self.llm_chain.apredict(**full_inputs)
80
+ parsed_output = self._extract_tool_and_input(full_output)
81
+ while parsed_output is None:
82
+ full_output = self._fix_text(full_output)
83
+ full_inputs["agent_scratchpad"] += full_output
84
+ output = await self.llm_chain.apredict(**full_inputs)
85
+ full_output += output
86
+ parsed_output = self._extract_tool_and_input(full_output)
87
+ return AgentAction(
88
+ tool=parsed_output[0], tool_input=parsed_output[1], log=full_output
89
+ )
90
+
91
+ def plan(
92
+ self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any
93
+ ) -> Union[AgentAction, AgentFinish, AgentClarify]:
94
+ """Given input, decided what to do.
95
+
96
+ Args:
97
+ intermediate_steps: Steps the LLM has taken to date,
98
+ along with observations
99
+ **kwargs: User inputs.
100
+
101
+ Returns:
102
+ Action specifying what tool to use.
103
+ """
104
+ full_inputs = self.get_full_inputs(intermediate_steps, **kwargs)
105
+ action = self._get_next_action(full_inputs)
106
+ if action.tool == self.finish_tool_name:
107
+ return AgentFinish({"output": action.tool_input}, action.log)
108
+ return action
109
+
110
+ async def aplan(
111
+ self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any
112
+ ) -> Union[AgentAction, AgentFinish]:
113
+ """Given input, decided what to do.
114
+
115
+ Args:
116
+ intermediate_steps: Steps the LLM has taken to date,
117
+ along with observations
118
+ **kwargs: User inputs.
119
+
120
+ Returns:
121
+ Action specifying what tool to use.
122
+ """
123
+ full_inputs = self.get_full_inputs(intermediate_steps, **kwargs)
124
+ action = await self._aget_next_action(full_inputs)
125
+ if action.tool == self.finish_tool_name:
126
+ return AgentFinish({"output": action.tool_input}, action.log)
127
+ return action
128
+
129
+ def get_full_inputs(
130
+ self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any
131
+ ) -> Dict[str, Any]:
132
+ """Create the full inputs for the LLMChain from intermediate steps."""
133
+ thoughts = self._construct_scratchpad(intermediate_steps)
134
+ new_inputs = {"agent_scratchpad": thoughts, "stop": self._stop}
135
+ full_inputs = {**kwargs, **new_inputs}
136
+ return full_inputs
137
+
138
+ def prepare_for_new_call(self) -> None:
139
+ """Prepare the agent for new call, if needed."""
140
+ pass
141
+
142
+ @property
143
+ def finish_tool_name(self) -> str:
144
+ """Name of the tool to use to finish the chain."""
145
+ return "Final Answer"
146
+
147
+ @property
148
+ def input_keys(self) -> List[str]:
149
+ """Return the input keys.
150
+
151
+ :meta private:
152
+ """
153
+ return list(set(self.llm_chain.input_keys) - {"agent_scratchpad"})
154
+
155
+ @root_validator()
156
+ def validate_prompt(cls, values: Dict) -> Dict:
157
+ """Validate that prompt matches format."""
158
+ prompt = values["llm_chain"].prompt
159
+ if "agent_scratchpad" not in prompt.input_variables:
160
+ logger.warning(
161
+ "`agent_scratchpad` should be a variable in prompt.input_variables."
162
+ " Did not find it, so adding it at the end."
163
+ )
164
+ prompt.input_variables.append("agent_scratchpad")
165
+ if isinstance(prompt, PromptTemplate):
166
+ prompt.template += "\n{agent_scratchpad}"
167
+ elif isinstance(prompt, FewShotPromptTemplate):
168
+ prompt.suffix += "\n{agent_scratchpad}"
169
+ else:
170
+ raise ValueError(f"Got unexpected prompt type {type(prompt)}")
171
+ return values
172
+
173
+ @property
174
+ @abstractmethod
175
+ def observation_prefix(self) -> str:
176
+ """Prefix to append the observation with."""
177
+
178
+ @property
179
+ @abstractmethod
180
+ def llm_prefix(self) -> str:
181
+ """Prefix to append the LLM call with."""
182
+
183
+ @classmethod
184
+ @abstractmethod
185
+ def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate:
186
+ """Create a prompt for this class."""
187
+
188
+ @classmethod
189
+ def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
190
+ """Validate that appropriate tools are passed in."""
191
+ pass
192
+
193
+ @classmethod
194
+ def from_llm_and_tools(
195
+ cls,
196
+ llm: BaseLLM,
197
+ tools: Sequence[BaseTool],
198
+ callback_manager: Optional[BaseCallbackManager] = None,
199
+ **kwargs: Any,
200
+ ) -> Agent:
201
+ """Construct an agent from an LLM and tools."""
202
+ cls._validate_tools(tools)
203
+ llm_chain = LLMChain(
204
+ llm=llm,
205
+ prompt=cls.create_prompt(tools),
206
+ callback_manager=callback_manager,
207
+ )
208
+ tool_names = [tool.name for tool in tools]
209
+ return cls(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
210
+
211
+ def return_stopped_response(
212
+ self,
213
+ early_stopping_method: str,
214
+ intermediate_steps: List[Tuple[AgentAction, str]],
215
+ **kwargs: Any,
216
+ ) -> AgentFinish:
217
+ """Return response when agent has been stopped due to max iterations."""
218
+ if early_stopping_method == "force":
219
+ # `force` just returns a constant string
220
+ return AgentFinish({"output": "Agent stopped due to max iterations."}, "")
221
+ elif early_stopping_method == "generate":
222
+ # Generate does one final forward pass
223
+ thoughts = ""
224
+ for action, observation in intermediate_steps:
225
+ thoughts += action.log
226
+ thoughts += (
227
+ f"\n{self.observation_prefix}{observation}\n{self.llm_prefix}"
228
+ )
229
+ # Adding to the previous steps, we now tell the LLM to make a final pred
230
+ thoughts += (
231
+ "\n\nI now need to return a final answer based on the previous steps:"
232
+ )
233
+ new_inputs = {"agent_scratchpad": thoughts, "stop": self._stop}
234
+ full_inputs = {**kwargs, **new_inputs}
235
+ full_output = self.llm_chain.predict(**full_inputs)
236
+ # We try to extract a final answer
237
+ parsed_output = self._extract_tool_and_input(full_output)
238
+ if parsed_output is None:
239
+ # If we cannot extract, we just return the full output
240
+ return AgentFinish({"output": full_output}, full_output)
241
+ tool, tool_input = parsed_output
242
+ if tool == self.finish_tool_name:
243
+ # If we can extract, we send the correct stuff
244
+ return AgentFinish({"output": tool_input}, full_output)
245
+ else:
246
+ # If we can extract, but the tool is not the final tool,
247
+ # we just return the full output
248
+ return AgentFinish({"output": full_output}, full_output)
249
+ else:
250
+ raise ValueError(
251
+ "early_stopping_method should be one of `force` or `generate`, "
252
+ f"got {early_stopping_method}"
253
+ )
254
+
255
+ @property
256
+ @abstractmethod
257
+ def _agent_type(self) -> str:
258
+ """Return Identifier of agent type."""
259
+
260
+ def dict(self, **kwargs: Any) -> Dict:
261
+ """Return dictionary representation of agent."""
262
+ _dict = super().dict()
263
+ _dict["_type"] = self._agent_type
264
+ return _dict
265
+
266
+ def save(self, file_path: Union[Path, str]) -> None:
267
+ """Save the agent.
268
+
269
+ Args:
270
+ file_path: Path to file to save the agent to.
271
+
272
+ Example:
273
+ .. code-block:: python
274
+
275
+ # If working with agent executor
276
+ agent.agent.save(file_path="path/agent.yaml")
277
+ """
278
+ # Convert file to Path object.
279
+ if isinstance(file_path, str):
280
+ save_path = Path(file_path)
281
+ else:
282
+ save_path = file_path
283
+
284
+ directory_path = save_path.parent
285
+ directory_path.mkdir(parents=True, exist_ok=True)
286
+
287
+ # Fetch dictionary to save
288
+ agent_dict = self.dict()
289
+
290
+ if save_path.suffix == ".json":
291
+ with open(file_path, "w") as f:
292
+ json.dump(agent_dict, f, indent=4)
293
+ elif save_path.suffix == ".yaml":
294
+ with open(file_path, "w") as f:
295
+ yaml.dump(agent_dict, f, default_flow_style=False)
296
+ else:
297
+ raise ValueError(f"{save_path} must be json or yaml")
298
+
299
+
300
+ class AgentExecutor(Chain, BaseModel):
301
+ """Consists of an agent using tools."""
302
+
303
+ agent: Agent
304
+ tools: Sequence[BaseTool]
305
+ return_intermediate_steps: bool = False
306
+ max_iterations: Optional[int] = 15
307
+ early_stopping_method: str = "force"
308
+
309
+ @classmethod
310
+ def from_agent_and_tools(
311
+ cls,
312
+ agent: Agent,
313
+ tools: Sequence[BaseTool],
314
+ callback_manager: Optional[BaseCallbackManager] = None,
315
+ **kwargs: Any,
316
+ ) -> AgentExecutor:
317
+ """Create from agent and tools."""
318
+ return cls(
319
+ agent=agent, tools=tools, callback_manager=callback_manager, **kwargs
320
+ )
321
+
322
+ @root_validator()
323
+ def validate_tools(cls, values: Dict) -> Dict:
324
+ """Validate that tools are compatible with agent."""
325
+ agent = values["agent"]
326
+ tools = values["tools"]
327
+ if agent.allowed_tools is not None:
328
+ if set(agent.allowed_tools) != set([tool.name for tool in tools]):
329
+ raise ValueError(
330
+ f"Allowed tools ({agent.allowed_tools}) different than "
331
+ f"provided tools ({[tool.name for tool in tools]})"
332
+ )
333
+ return values
334
+
335
+ def save(self, file_path: Union[Path, str]) -> None:
336
+ """Raise error - saving not supported for Agent Executors."""
337
+ raise ValueError(
338
+ "Saving not supported for agent executors. "
339
+ "If you are trying to save the agent, please use the "
340
+ "`.save_agent(...)`"
341
+ )
342
+
343
+ def save_agent(self, file_path: Union[Path, str]) -> None:
344
+ """Save the underlying agent."""
345
+ return self.agent.save(file_path)
346
+
347
+ @property
348
+ def input_keys(self) -> List[str]:
349
+ """Return the input keys.
350
+
351
+ :meta private:
352
+ """
353
+ return self.agent.input_keys
354
+
355
+ @property
356
+ def output_keys(self) -> List[str]:
357
+ """Return the singular output key.
358
+
359
+ :meta private:
360
+ """
361
+ if self.return_intermediate_steps:
362
+ return self.agent.return_values + ["intermediate_steps"]
363
+ else:
364
+ return self.agent.return_values
365
+
366
+ def _should_continue(self, iterations: int) -> bool:
367
+ if self.max_iterations is None:
368
+ return True
369
+ else:
370
+ return iterations < self.max_iterations
371
+
372
+ def _return(self, output: AgentFinish, intermediate_steps: list) -> Dict[str, Any]:
373
+ self.callback_manager.on_agent_finish(
374
+ output, color="green", verbose=self.verbose
375
+ )
376
+ final_output = output.return_values
377
+ if self.return_intermediate_steps:
378
+ final_output["intermediate_steps"] = intermediate_steps
379
+ return final_output
380
+
381
+ def _handle_clarify(self, output: AgentClarify, intermediate_steps: list) -> Dict[str, Any]:
382
+ self.callback_manager.on_agent_clarify(
383
+ output, color="yellow", verbose=self.verbose
384
+ )
385
+ final_output = {"clarify_question": output.question}
386
+ if self.return_intermediate_steps:
387
+ final_output["intermediate_steps"] = intermediate_steps
388
+ return final_output
389
+
390
+
391
+ async def _areturn(
392
+ self, output: AgentFinish, intermediate_steps: list
393
+ ) -> Dict[str, Any]:
394
+ if self.callback_manager.is_async:
395
+ await self.callback_manager.on_agent_finish(
396
+ output, color="green", verbose=self.verbose
397
+ )
398
+ else:
399
+ self.callback_manager.on_agent_finish(
400
+ output, color="green", verbose=self.verbose
401
+ )
402
+ final_output = output.return_values
403
+ if self.return_intermediate_steps:
404
+ final_output["intermediate_steps"] = intermediate_steps
405
+ return final_output
406
+
407
+ def _take_next_step(
408
+ self,
409
+ name_to_tool_map: Dict[str, BaseTool],
410
+ color_mapping: Dict[str, str],
411
+ inputs: Dict[str, str],
412
+ intermediate_steps: List[Tuple[AgentAction, str]],
413
+ ) -> Union[AgentFinish, Tuple[AgentAction, str], Tuple[AgentClarify, str]]:
414
+ """Take a single step in the thought-action-observation loop.
415
+
416
+ Override this to take control of how the agent makes and acts on choices.
417
+ """
418
+ # Call the LLM to see what to do.
419
+ output = self.agent.plan(intermediate_steps, **inputs)
420
+ # If the tool chosen is the finishing tool, then we end and return.
421
+ if isinstance(output, AgentFinish):
422
+ return output
423
+ if isinstance(output, AgentClarify):
424
+ return output
425
+ self.callback_manager.on_agent_action(
426
+ output, verbose=self.verbose, color="green"
427
+ )
428
+ # Otherwise we lookup the tool
429
+ if output.tool in name_to_tool_map:
430
+ tool = name_to_tool_map[output.tool]
431
+ return_direct = tool.return_direct
432
+ color = color_mapping[output.tool]
433
+ llm_prefix = "" if return_direct else self.agent.llm_prefix
434
+ # We then call the tool on the tool input to get an observation
435
+ observation = tool.run(
436
+ output.tool_input,
437
+ verbose=self.verbose,
438
+ color=color,
439
+ llm_prefix=llm_prefix,
440
+ observation_prefix=self.agent.observation_prefix,
441
+ )
442
+ else:
443
+ observation = InvalidTool().run(
444
+ output.tool,
445
+ verbose=self.verbose,
446
+ color=None,
447
+ llm_prefix="",
448
+ observation_prefix=self.agent.observation_prefix,
449
+ )
450
+ return output, observation
451
+
452
+ async def _atake_next_step(
453
+ self,
454
+ name_to_tool_map: Dict[str, BaseTool],
455
+ color_mapping: Dict[str, str],
456
+ inputs: Dict[str, str],
457
+ intermediate_steps: List[Tuple[AgentAction, str]],
458
+ ) -> Union[AgentFinish, Tuple[AgentAction, str]]:
459
+ """Take a single step in the thought-action-observation loop.
460
+
461
+ Override this to take control of how the agent makes and acts on choices.
462
+ """
463
+ # Call the LLM to see what to do.
464
+ output = await self.agent.aplan(intermediate_steps, **inputs)
465
+ # If the tool chosen is the finishing tool, then we end and return.
466
+ if isinstance(output, AgentFinish):
467
+ return output
468
+ if self.callback_manager.is_async:
469
+ await self.callback_manager.on_agent_action(
470
+ output, verbose=self.verbose, color="green"
471
+ )
472
+ else:
473
+ self.callback_manager.on_agent_action(
474
+ output, verbose=self.verbose, color="green"
475
+ )
476
+
477
+ # Otherwise we lookup the tool
478
+ if output.tool in name_to_tool_map:
479
+ tool = name_to_tool_map[output.tool]
480
+ return_direct = tool.return_direct
481
+ color = color_mapping[output.tool]
482
+ llm_prefix = "" if return_direct else self.agent.llm_prefix
483
+ # We then call the tool on the tool input to get an observation
484
+ observation = await tool.arun(
485
+ output.tool_input,
486
+ verbose=self.verbose,
487
+ color=color,
488
+ llm_prefix=llm_prefix,
489
+ observation_prefix=self.agent.observation_prefix,
490
+ )
491
+ else:
492
+ observation = await InvalidTool().arun(
493
+ output.tool,
494
+ verbose=self.verbose,
495
+ color=None,
496
+ llm_prefix="",
497
+ observation_prefix=self.agent.observation_prefix,
498
+ )
499
+ return_direct = False
500
+ return output, observation
501
+
502
+ def _call(self, inputs: Dict[str, str]) -> Dict[str, Any]:
503
+ """Run text through and get agent response."""
504
+ # Do any preparation necessary when receiving a new input.
505
+ self.agent.prepare_for_new_call()
506
+ # Construct a mapping of tool name to tool for easy lookup
507
+ name_to_tool_map = {tool.name: tool for tool in self.tools}
508
+ # We construct a mapping from each tool to a color, used for logging.
509
+ color_mapping = get_color_mapping(
510
+ [tool.name for tool in self.tools], excluded_colors=["green"]
511
+ )
512
+ intermediate_steps: List[Tuple[AgentAction, str]] = []
513
+ # Let's start tracking the iterations the agent has gone through
514
+ iterations = 0
515
+ # We now enter the agent loop (until it returns something).
516
+ while self._should_continue(iterations):
517
+ next_step_output = self._take_next_step(
518
+ name_to_tool_map, color_mapping, inputs, intermediate_steps
519
+ )
520
+ if isinstance(next_step_output, AgentFinish):
521
+ return self._return(next_step_output, intermediate_steps)
522
+
523
+ if isinstance(next_step_output, AgentClarify):
524
+ return self._handle_clarify(next_step_output, intermediate_steps)
525
+
526
+ intermediate_steps.append(next_step_output)
527
+ # See if tool should return directly
528
+ tool_return = self._get_tool_return(next_step_output)
529
+ if tool_return is not None:
530
+ return self._return(tool_return, intermediate_steps)
531
+ iterations += 1
532
+ output = self.agent.return_stopped_response(
533
+ self.early_stopping_method, intermediate_steps, **inputs
534
+ )
535
+ return self._return(output, intermediate_steps)
536
+
537
+ async def _acall(self, inputs: Dict[str, str]) -> Dict[str, str]:
538
+ """Run text through and get agent response."""
539
+ # Do any preparation necessary when receiving a new input.
540
+ self.agent.prepare_for_new_call()
541
+ # Construct a mapping of tool name to tool for easy lookup
542
+ name_to_tool_map = {tool.name: tool for tool in self.tools}
543
+ # We construct a mapping from each tool to a color, used for logging.
544
+ color_mapping = get_color_mapping(
545
+ [tool.name for tool in self.tools], excluded_colors=["green"]
546
+ )
547
+ intermediate_steps: List[Tuple[AgentAction, str]] = []
548
+ # Let's start tracking the iterations the agent has gone through
549
+ iterations = 0
550
+ # We now enter the agent loop (until it returns something).
551
+ while self._should_continue(iterations):
552
+ next_step_output = await self._atake_next_step(
553
+ name_to_tool_map, color_mapping, inputs, intermediate_steps
554
+ )
555
+ if isinstance(next_step_output, AgentFinish):
556
+ return await self._areturn(next_step_output, intermediate_steps)
557
+
558
+ intermediate_steps.append(next_step_output)
559
+ # See if tool should return directly
560
+ tool_return = self._get_tool_return(next_step_output)
561
+ if tool_return is not None:
562
+ return await self._areturn(tool_return, intermediate_steps)
563
+
564
+ iterations += 1
565
+ output = self.agent.return_stopped_response(
566
+ self.early_stopping_method, intermediate_steps, **inputs
567
+ )
568
+ return await self._areturn(output, intermediate_steps)
569
+
570
+ def _get_tool_return(
571
+ self, next_step_output: Tuple[AgentAction, str]
572
+ ) -> Optional[AgentFinish]:
573
+ """Check if the tool is a returning tool."""
574
+ agent_action, observation = next_step_output
575
+ name_to_tool_map = {tool.name: tool for tool in self.tools}
576
+ # Invalid tools won't be in the map, so we return False.
577
+ if agent_action.tool in name_to_tool_map:
578
+ if name_to_tool_map[agent_action.tool].return_direct:
579
+ return AgentFinish(
580
+ {self.agent.return_values[0]: observation},
581
+ "",
582
+ )
583
+ return None
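
Editor's note (not part of the commit): a hedged sketch of wiring the `Agent`/`AgentExecutor` classes above together, exercising the executor options defined in this file (`max_iterations`, `early_stopping_method`, `return_intermediate_steps`). The `ZeroShotAgent` and `Tool` imports come from elsewhere in the package, and the echo tool is purely illustrative.

```python
# Sketch of AgentExecutor configuration using the options defined above; the
# ZeroShotAgent/Tool imports are from other modules in this package, and the
# echo tool plus question are illustrative.
from langchain.agents.agent import AgentExecutor
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.agents.tools import Tool
from langchain.llms import OpenAI

tools = [Tool(name="Echo", func=lambda q: q, description="Echoes the input back.")]
agent = ZeroShotAgent.from_llm_and_tools(OpenAI(temperature=0), tools)
executor = AgentExecutor.from_agent_and_tools(
    agent=agent,
    tools=tools,
    max_iterations=5,                  # cap the thought-action-observation loop
    early_stopping_method="generate",  # ask the LLM for a final answer when capped
    return_intermediate_steps=True,    # include (AgentAction, observation) pairs
)
result = executor({"input": "Echo back the word hello."})
print(result["output"], len(result["intermediate_steps"]))
```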
langchain/agents/agent_toolkits/__init__.py ADDED
@@ -0,0 +1,39 @@
+ """Agent toolkits."""
+
+ from langchain.agents.agent_toolkits.csv.base import create_csv_agent
+ from langchain.agents.agent_toolkits.json.base import create_json_agent
+ from langchain.agents.agent_toolkits.json.toolkit import JsonToolkit
+ from langchain.agents.agent_toolkits.openapi.base import create_openapi_agent
+ from langchain.agents.agent_toolkits.openapi.toolkit import OpenAPIToolkit
+ from langchain.agents.agent_toolkits.pandas.base import create_pandas_dataframe_agent
+ from langchain.agents.agent_toolkits.python.base import create_python_agent
+ from langchain.agents.agent_toolkits.sql.base import create_sql_agent
+ from langchain.agents.agent_toolkits.sql.toolkit import SQLDatabaseToolkit
+ from langchain.agents.agent_toolkits.vectorstore.base import (
+     create_vectorstore_agent,
+     create_vectorstore_router_agent,
+ )
+ from langchain.agents.agent_toolkits.vectorstore.toolkit import (
+     VectorStoreInfo,
+     VectorStoreRouterToolkit,
+     VectorStoreToolkit,
+ )
+ from langchain.agents.agent_toolkits.zapier.toolkit import ZapierToolkit
+
+ __all__ = [
+     "create_json_agent",
+     "create_sql_agent",
+     "create_openapi_agent",
+     "create_python_agent",
+     "create_vectorstore_agent",
+     "JsonToolkit",
+     "SQLDatabaseToolkit",
+     "OpenAPIToolkit",
+     "VectorStoreToolkit",
+     "create_vectorstore_router_agent",
+     "VectorStoreInfo",
+     "VectorStoreRouterToolkit",
+     "create_pandas_dataframe_agent",
+     "create_csv_agent",
+     "ZapierToolkit",
+ ]
langchain/agents/agent_toolkits/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (1.46 kB). View file
 
langchain/agents/agent_toolkits/__pycache__/base.cpython-39.pyc ADDED
Binary file (809 Bytes). View file
 
langchain/agents/agent_toolkits/base.py ADDED
@@ -0,0 +1,15 @@
+ """Toolkits for agents."""
+ from abc import abstractmethod
+ from typing import List
+
+ from pydantic import BaseModel
+
+ from langchain.tools import BaseTool
+
+
+ class BaseToolkit(BaseModel):
+     """Class responsible for defining a collection of related tools."""
+
+     @abstractmethod
+     def get_tools(self) -> List[BaseTool]:
+         """Get the tools in the toolkit."""
langchain/agents/agent_toolkits/csv/__init__.py ADDED
@@ -0,0 +1 @@
+ """CSV toolkit."""
langchain/agents/agent_toolkits/csv/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (208 Bytes). View file
 
langchain/agents/agent_toolkits/csv/__pycache__/base.cpython-39.pyc ADDED
Binary file (864 Bytes). View file
 
langchain/agents/agent_toolkits/csv/base.py ADDED
@@ -0,0 +1,17 @@
+ """Agent for working with csvs."""
+ from typing import Any, Optional
+
+ from langchain.agents.agent import AgentExecutor
+ from langchain.agents.agent_toolkits.pandas.base import create_pandas_dataframe_agent
+ from langchain.llms.base import BaseLLM
+
+
+ def create_csv_agent(
+     llm: BaseLLM, path: str, pandas_kwargs: Optional[dict] = None, **kwargs: Any
+ ) -> AgentExecutor:
+     """Create csv agent by loading to a dataframe and using pandas agent."""
+     import pandas as pd
+
+     _kwargs = pandas_kwargs or {}
+     df = pd.read_csv(path, **_kwargs)
+     return create_pandas_dataframe_agent(llm, df, **kwargs)
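
Editor's note (not part of the commit): a hedged usage sketch for `create_csv_agent` as defined above; the CSV path and question are illustrative, and an OpenAI API key is assumed to be configured.

```python
# Usage sketch for create_csv_agent defined above; path and question are illustrative.
from langchain.agents import create_csv_agent
from langchain.llms import OpenAI

agent = create_csv_agent(OpenAI(temperature=0), "data/visitors.csv", verbose=True)
agent.run("How many rows are in this file?")
```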
langchain/agents/agent_toolkits/json/__init__.py ADDED
@@ -0,0 +1 @@
+ """Json agent."""
langchain/agents/agent_toolkits/json/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (208 Bytes). View file
 
langchain/agents/agent_toolkits/json/__pycache__/base.cpython-39.pyc ADDED
Binary file (1.69 kB). View file
 
langchain/agents/agent_toolkits/json/__pycache__/prompt.cpython-39.pyc ADDED
Binary file (1.98 kB). View file
 
langchain/agents/agent_toolkits/json/__pycache__/toolkit.cpython-39.pyc ADDED
Binary file (999 Bytes). View file
 
langchain/agents/agent_toolkits/json/base.py ADDED
@@ -0,0 +1,43 @@
+ """Json agent."""
+ from typing import Any, List, Optional
+
+ from langchain.agents.agent import AgentExecutor
+ from langchain.agents.agent_toolkits.json.prompt import JSON_PREFIX, JSON_SUFFIX
+ from langchain.agents.agent_toolkits.json.toolkit import JsonToolkit
+ from langchain.agents.mrkl.base import ZeroShotAgent
+ from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
+ from langchain.callbacks.base import BaseCallbackManager
+ from langchain.chains.llm import LLMChain
+ from langchain.llms.base import BaseLLM
+
+
+ def create_json_agent(
+     llm: BaseLLM,
+     toolkit: JsonToolkit,
+     callback_manager: Optional[BaseCallbackManager] = None,
+     prefix: str = JSON_PREFIX,
+     suffix: str = JSON_SUFFIX,
+     format_instructions: str = FORMAT_INSTRUCTIONS,
+     input_variables: Optional[List[str]] = None,
+     verbose: bool = False,
+     **kwargs: Any,
+ ) -> AgentExecutor:
+     """Construct a json agent from an LLM and tools."""
+     tools = toolkit.get_tools()
+     prompt = ZeroShotAgent.create_prompt(
+         tools,
+         prefix=prefix,
+         suffix=suffix,
+         format_instructions=format_instructions,
+         input_variables=input_variables,
+     )
+     llm_chain = LLMChain(
+         llm=llm,
+         prompt=prompt,
+         callback_manager=callback_manager,
+     )
+     tool_names = [tool.name for tool in tools]
+     agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
+     return AgentExecutor.from_agent_and_tools(
+         agent=agent, tools=toolkit.get_tools(), verbose=verbose
+     )
langchain/agents/agent_toolkits/json/prompt.py ADDED
@@ -0,0 +1,25 @@
+ # flake8: noqa
+
+ JSON_PREFIX = """You are an agent designed to interact with JSON.
+ Your goal is to return a final answer by interacting with the JSON.
+ You have access to the following tools which help you learn more about the JSON you are interacting with.
+ Only use the below tools. Only use the information returned by the below tools to construct your final answer.
+ Do not make up any information that is not contained in the JSON.
+ Your input to the tools should be in the form of `data["key"][0]` where `data` is the JSON blob you are interacting with, and the syntax used is Python.
+ You should only use keys that you know for a fact exist. You must validate that a key exists by seeing it previously when calling `json_spec_list_keys`.
+ If you have not seen a key in one of those responses, you cannot use it.
+ You should only add one key at a time to the path. You cannot add multiple keys at once.
+ If you encounter a "KeyError", go back to the previous key, look at the available keys, and try again.
+
+ If the question does not seem to be related to the JSON, just return "I don't know" as the answer.
+ Always begin your interaction with the `json_spec_list_keys` tool with input "data" to see what keys exist in the JSON.
+
+ Note that sometimes the value at a given path is large. In this case, you will get an error "Value is a large dictionary, should explore its keys directly".
+ In this case, you should ALWAYS follow up by using the `json_spec_list_keys` tool to see what keys exist at that path.
+ Do not simply refer the user to the JSON or a section of the JSON, as this is not a valid answer. Keep digging until you find the answer and explicitly return it.
+ """
+ JSON_SUFFIX = """Begin!"
+
+ Question: {input}
+ Thought: I should look at the keys that exist in data to see what I have access to
+ {agent_scratchpad}"""
langchain/agents/agent_toolkits/json/toolkit.py ADDED
@@ -0,0 +1,21 @@
+ """Toolkit for interacting with a JSON spec."""
+ from __future__ import annotations
+
+ from typing import List
+
+ from langchain.agents.agent_toolkits.base import BaseToolkit
+ from langchain.tools import BaseTool
+ from langchain.tools.json.tool import JsonGetValueTool, JsonListKeysTool, JsonSpec
+
+
+ class JsonToolkit(BaseToolkit):
+     """Toolkit for interacting with a JSON spec."""
+
+     spec: JsonSpec
+
+     def get_tools(self) -> List[BaseTool]:
+         """Get the tools in the toolkit."""
+         return [
+             JsonListKeysTool(spec=self.spec),
+             JsonGetValueTool(spec=self.spec),
+         ]
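
Editor's note (not part of the commit): a hedged sketch combining `create_json_agent` (json/base.py above) with `JsonToolkit` (json/toolkit.py above). `JsonSpec` is only imported here, not defined in this diff, so its `dict_`/`max_value_length` arguments and the sample data are assumptions.

```python
# Hedged sketch: wiring JsonToolkit into create_json_agent. The JsonSpec
# constructor arguments and the sample dict are assumptions (JsonSpec is not
# part of this diff); an OpenAI API key is assumed to be configured.
from langchain.agents import create_json_agent
from langchain.agents.agent_toolkits.json.toolkit import JsonToolkit
from langchain.llms import OpenAI
from langchain.tools.json.tool import JsonSpec

data = {"info": {"title": "Demo API"}, "paths": {"/users": {"get": {"summary": "List users"}}}}
toolkit = JsonToolkit(spec=JsonSpec(dict_=data, max_value_length=4000))
agent = create_json_agent(llm=OpenAI(temperature=0), toolkit=toolkit, verbose=True)
agent.run("What endpoints are available in this spec?")
```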
langchain/agents/agent_toolkits/openapi/__init__.py ADDED
@@ -0,0 +1 @@
+ """OpenAPI spec agent."""
langchain/agents/agent_toolkits/openapi/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (219 Bytes). View file
 
langchain/agents/agent_toolkits/openapi/__pycache__/base.cpython-39.pyc ADDED
Binary file (1.72 kB). View file
 
langchain/agents/agent_toolkits/openapi/__pycache__/prompt.cpython-39.pyc ADDED
Binary file (1.91 kB). View file
 
langchain/agents/agent_toolkits/openapi/__pycache__/toolkit.cpython-39.pyc ADDED
Binary file (2.6 kB). View file
 
langchain/agents/agent_toolkits/openapi/base.py ADDED
@@ -0,0 +1,46 @@
+ """OpenAPI spec agent."""
+ from typing import Any, List, Optional
+
+ from langchain.agents.agent import AgentExecutor
+ from langchain.agents.agent_toolkits.openapi.prompt import (
+     OPENAPI_PREFIX,
+     OPENAPI_SUFFIX,
+ )
+ from langchain.agents.agent_toolkits.openapi.toolkit import OpenAPIToolkit
+ from langchain.agents.mrkl.base import ZeroShotAgent
+ from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
+ from langchain.callbacks.base import BaseCallbackManager
+ from langchain.chains.llm import LLMChain
+ from langchain.llms.base import BaseLLM
+
+
+ def create_openapi_agent(
+     llm: BaseLLM,
+     toolkit: OpenAPIToolkit,
+     callback_manager: Optional[BaseCallbackManager] = None,
+     prefix: str = OPENAPI_PREFIX,
+     suffix: str = OPENAPI_SUFFIX,
+     format_instructions: str = FORMAT_INSTRUCTIONS,
+     input_variables: Optional[List[str]] = None,
+     verbose: bool = False,
+     **kwargs: Any,
+ ) -> AgentExecutor:
+     """Construct a json agent from an LLM and tools."""
+     tools = toolkit.get_tools()
+     prompt = ZeroShotAgent.create_prompt(
+         tools,
+         prefix=prefix,
+         suffix=suffix,
+         format_instructions=format_instructions,
+         input_variables=input_variables,
+     )
+     llm_chain = LLMChain(
+         llm=llm,
+         prompt=prompt,
+         callback_manager=callback_manager,
+     )
+     tool_names = [tool.name for tool in tools]
+     agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
+     return AgentExecutor.from_agent_and_tools(
+         agent=agent, tools=toolkit.get_tools(), verbose=verbose
+     )