Harrison Chase committed on
Commit
3c87692
1 Parent(s): 1f3224b

initial commit

Files changed (6)
  1. app.py +101 -0
  2. chain.py +127 -0
  3. ingest.py +92 -0
  4. ingest.sh +6 -0
  5. ingest_examples.py +219 -0
  6. requirements.txt +9 -0
app.py ADDED
@@ -0,0 +1,101 @@
+ import datetime
+ import os
+
+ import gradio as gr
+ import langchain
+ import weaviate
+ from langchain.vectorstores import Weaviate
+
+ from chain import get_new_chain1
+
+ WEAVIATE_URL = os.environ["WEAVIATE_URL"]
+
+
+ def get_weaviate_store():
+     client = weaviate.Client(
+         url=WEAVIATE_URL,
+         additional_headers={"X-OpenAI-Api-Key": os.environ["OPENAI_API_KEY"]},
+     )
+     return Weaviate(client, "Paragraph", "content", attributes=["source"])
+
+
+ vectorstore = get_weaviate_store()
+
+
+ def set_openai_api_key(api_key, agent):
+     if api_key:
+         os.environ["OPENAI_API_KEY"] = api_key
+         qa_chain = get_new_chain1(vectorstore)
+         os.environ["OPENAI_API_KEY"] = ""
+         return qa_chain
+
+
+ def chat(inp, history, agent):
+     history = history or []
+     if agent is None:
+         history.append((inp, "Please paste your OpenAI key to use"))
+         return history, history
+     print("\n==== date/time: " + str(datetime.datetime.now()) + " ====")
+     print("inp: " + inp)
+     history = history or []
+     output = agent({"question": inp, "chat_history": history})
+     answer = output["answer"]
+     history.append((inp, answer))
+     print(history)
+     return history, history
+
+
+ block = gr.Blocks(css=".gradio-container {background-color: lightgray}")
+
+ with block:
+     with gr.Row():
+         gr.Markdown("<h3><center>LangChain AI</center></h3>")
+
+         openai_api_key_textbox = gr.Textbox(
+             placeholder="Paste your OpenAI API key (sk-...)",
+             show_label=False,
+             lines=1,
+             type="password",
+         )
+
+     chatbot = gr.Chatbot()
+
+     with gr.Row():
+         message = gr.Textbox(
+             label="What's your question?",
+             placeholder="What's the answer to life, the universe, and everything?",
+             lines=1,
+         )
+         submit = gr.Button(value="Send", variant="secondary").style(full_width=False)
+
+     gr.Examples(
+         examples=[
+             "What are agents?",
+             "How do I summarize a long document?",
+             "What types of memory exist?",
+         ],
+         inputs=message,
+     )
+
+     gr.HTML(
+         """
+     This simple application is an implementation of ChatGPT but over an external dataset (in this case, the LangChain documentation)."""
+     )
+
+     gr.HTML(
+         "<center>Powered by <a href='https://github.com/hwchase17/langchain'>LangChain 🦜️🔗</a></center>"
+     )
+
+     state = gr.State()
+     agent_state = gr.State()
+
+     submit.click(chat, inputs=[message, state, agent_state], outputs=[chatbot, state])
+     message.submit(chat, inputs=[message, state, agent_state], outputs=[chatbot, state])
+
+     openai_api_key_textbox.change(
+         set_openai_api_key,
+         inputs=[openai_api_key_textbox, agent_state],
+         outputs=[agent_state],
+     )
+
+ block.launch(debug=True)
chain.py ADDED
@@ -0,0 +1,127 @@
+ import json
+ import os
+ import pathlib
+ from typing import Dict, List, Tuple
+
+ import weaviate
+ from langchain import OpenAI, PromptTemplate
+ from langchain.chains import LLMChain
+ from langchain.chains.base import Chain
+ from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
+ from langchain.chains.conversation.memory import ConversationBufferMemory
+ from langchain.chains.question_answering import load_qa_chain
+ from langchain.embeddings import OpenAIEmbeddings
+ from langchain.prompts import FewShotPromptTemplate, PromptTemplate
+ from langchain.prompts.example_selector import \
+     SemanticSimilarityExampleSelector
+ from langchain.vectorstores import FAISS, Weaviate
+ from pydantic import BaseModel
+
+ WEAVIATE_URL = os.environ["WEAVIATE_URL"]
+ client = weaviate.Client(
+     url=WEAVIATE_URL,
+     additional_headers={"X-OpenAI-Api-Key": os.environ["OPENAI_API_KEY"]},
+ )
+
+ _eg_template = """## Example:
+
+ Chat History:
+ {chat_history}
+ Follow Up Input: {question}
+ Standalone question: {answer}"""
+ _eg_prompt = PromptTemplate(
+     template=_eg_template,
+     input_variables=["chat_history", "question", "answer"],
+ )
+
+
+ _prefix = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question. You should assume that the question is related to LangChain."""
+ _suffix = """## Example:
+
+ Chat History:
+ {chat_history}
+ Follow Up Input: {question}
+ Standalone question:"""
+ eg_store = Weaviate(
+     client,
+     "Rephrase",
+     "content",
+     attributes=["question", "answer", "chat_history"],
+ )
+ example_selector = SemanticSimilarityExampleSelector(vectorstore=eg_store, k=4)
+ prompt = FewShotPromptTemplate(
+     prefix=_prefix,
+     suffix=_suffix,
+     example_selector=example_selector,
+     example_prompt=_eg_prompt,
+     input_variables=["question", "chat_history"],
+ )
+ llm = OpenAI(temperature=0, model_name="text-davinci-003")
+ key_word_extractor = LLMChain(llm=llm, prompt=prompt)
+
+
+ class CustomChain(Chain, BaseModel):
+
+     vstore: Weaviate
+     chain: BaseCombineDocumentsChain
+
+     @property
+     def input_keys(self) -> List[str]:
+         return ["question"]
+
+     @property
+     def output_keys(self) -> List[str]:
+         return ["answer"]
+
+     def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
+         question = inputs["question"]
+         chat_history_str = _get_chat_history(inputs["chat_history"])
+         if chat_history_str:
+             new_question = key_word_extractor.run(
+                 question=question, chat_history=chat_history_str
+             )
+         else:
+             new_question = question
+         print(new_question)
+         docs = self.vstore.similarity_search(new_question, k=4)
+         new_inputs = inputs.copy()
+         new_inputs["question"] = new_question
+         new_inputs["chat_history"] = chat_history_str
+         answer, _ = self.chain.combine_docs(docs, **new_inputs)
+         return {"answer": answer}
+
+
+ def get_new_chain1(vectorstore) -> Chain:
+
+     EXAMPLE_PROMPT = PromptTemplate(
+         template=">Example:\nContent:\n---------\n{page_content}\n----------\nSource: {source}",
+         input_variables=["page_content", "source"],
+     )
+     template = """You are an AI assistant for the open source library LangChain. The documentation is located at https://langchain.readthedocs.io.
+ You are given the following extracted parts of a long document and a question. Provide a conversational answer with a hyperlink to the documentation.
+ You should only use hyperlinks that are explicitly listed as a source in the context. Do NOT make up a hyperlink that is not listed.
+ If the question includes a request for code, provide a code block directly from the documentation.
+ If you don't know the answer, just say "Hmm, I'm not sure." Don't try to make up an answer.
+ If the question is not about LangChain, politely inform them that you are tuned to only answer questions about LangChain.
+ Question: {question}
+ =========
+ {context}
+ =========
+ Answer in Markdown:"""
+     PROMPT = PromptTemplate(template=template, input_variables=["question", "context"])
+     doc_chain = load_qa_chain(
+         OpenAI(temperature=0, model_name="text-davinci-003", max_tokens=-1),
+         chain_type="stuff",
+         prompt=PROMPT,
+         document_prompt=EXAMPLE_PROMPT,
+     )
+     return CustomChain(chain=doc_chain, vstore=vectorstore)
+
+
+ def _get_chat_history(chat_history: List[Tuple[str, str]]):
+     buffer = ""
+     for human_s, ai_s in chat_history:
+         human = f"Human: " + human_s
+         ai = f"Assistant: " + ai_s
+         buffer += "\n" + "\n".join([human, ai])
+     return buffer
ingest.py ADDED
@@ -0,0 +1,92 @@
+ """Load html from files, clean up, split, ingest into Weaviate."""
+ import os
+ from pathlib import Path
+
+ import weaviate
+ from bs4 import BeautifulSoup
+ from langchain.text_splitter import CharacterTextSplitter
+
+
+ def clean_data(data):
+     soup = BeautifulSoup(data)
+     text = soup.find_all("main", {"id": "main-content"})[0].get_text()
+     return "\n".join([t for t in text.split("\n") if t])
+
+
+ docs = []
+ metadatas = []
+ for p in Path("langchain.readthedocs.io/en/latest/").rglob("*"):
+     if p.is_dir():
+         continue
+     with open(p) as f:
+         docs.append(clean_data(f.read()))
+         metadatas.append({"source": p})
+
+
+ text_splitter = CharacterTextSplitter(
+     separator="\n",
+     chunk_size=1000,
+     chunk_overlap=200,
+     length_function=len,
+ )
+
+ documents = text_splitter.create_documents(docs, metadatas=metadatas)
+
+
+ WEAVIATE_URL = os.environ["WEAVIATE_URL"]
+ client = weaviate.Client(
+     url=WEAVIATE_URL,
+     additional_headers={"X-OpenAI-Api-Key": os.environ["OPENAI_API_KEY"]},
+ )
+
+ client.schema.delete_class("Paragraph")
+ client.schema.get()
+ schema = {
+     "classes": [
+         {
+             "class": "Paragraph",
+             "description": "A written paragraph",
+             "vectorizer": "text2vec-openai",
+             "moduleConfig": {
+                 "text2vec-openai": {
+                     "model": "ada",
+                     "modelVersion": "002",
+                     "type": "text",
+                 }
+             },
+             "properties": [
+                 {
+                     "dataType": ["text"],
+                     "description": "The content of the paragraph",
+                     "moduleConfig": {
+                         "text2vec-openai": {
+                             "skip": False,
+                             "vectorizePropertyName": False,
+                         }
+                     },
+                     "name": "content",
+                 },
+                 {
+                     "dataType": ["text"],
+                     "description": "The link",
+                     "moduleConfig": {
+                         "text2vec-openai": {
+                             "skip": True,
+                             "vectorizePropertyName": False,
+                         }
+                     },
+                     "name": "source",
+                 },
+             ],
+         },
+     ]
+ }
+
+ client.schema.create(schema)
+
+ with client.batch as batch:
+     for text in documents:
+         batch.add_data_object(
+             {"content": text.page_content, "source": str(text.metadata["source"])},
+             "Paragraph",
+         )
ingest.sh ADDED
@@ -0,0 +1,6 @@
+ # Bash script to ingest data
+ # This involves scraping the data from the web, then cleaning it up and putting it in Weaviate.
+ set -eu
+ wget -r -A.html https://langchain.readthedocs.io/en/latest/
+ python3 ingest.py
+ python3 ingest_examples.py
ingest_examples.py ADDED
@@ -0,0 +1,219 @@
+ """Ingest examples into Weaviate."""
+ import os
+ from pathlib import Path
+
+ import weaviate
+
+ WEAVIATE_URL = os.environ["WEAVIATE_URL"]
+ client = weaviate.Client(
+     url=WEAVIATE_URL,
+     additional_headers={"X-OpenAI-Api-Key": os.environ["OPENAI_API_KEY"]},
+ )
+
+ client.schema.delete_class("Rephrase")
+ client.schema.delete_class("QA")
+ client.schema.get()
+ schema = {
+     "classes": [
+         {
+             "class": "Rephrase",
+             "description": "Rephrase Examples",
+             "vectorizer": "text2vec-openai",
+             "moduleConfig": {
+                 "text2vec-openai": {
+                     "model": "ada",
+                     "modelVersion": "002",
+                     "type": "text",
+                 }
+             },
+             "properties": [
+                 {
+                     "dataType": ["text"],
+                     "moduleConfig": {
+                         "text2vec-openai": {
+                             "skip": False,
+                             "vectorizePropertyName": False,
+                         }
+                     },
+                     "name": "content",
+                 },
+                 {
+                     "dataType": ["text"],
+                     "description": "The link",
+                     "moduleConfig": {
+                         "text2vec-openai": {
+                             "skip": True,
+                             "vectorizePropertyName": False,
+                         }
+                     },
+                     "name": "question",
+                 },
+                 {
+                     "dataType": ["text"],
+                     "description": "The link",
+                     "moduleConfig": {
+                         "text2vec-openai": {
+                             "skip": True,
+                             "vectorizePropertyName": False,
+                         }
+                     },
+                     "name": "answer",
+                 },
+                 {
+                     "dataType": ["text"],
+                     "description": "The link",
+                     "moduleConfig": {
+                         "text2vec-openai": {
+                             "skip": True,
+                             "vectorizePropertyName": False,
+                         }
+                     },
+                     "name": "chat_history",
+                 },
+             ],
+         },
+     ]
+ }
+
+ client.schema.create(schema)
+
+ documents = [
+     {
+         "question": "how do i load those?",
+         "chat_history": "Human: What types of memory exist?\nAssistant: \n\nThere are a few different types of memory: Buffer, Summary, and Conversational Memory.",
+         "answer": "How do I load Buffer, Summary, and Conversational Memory",
+     },
+     {
+         "question": "how do i install this package?",
+         "chat_history": "",
+         "answer": "How do I install langchain?",
+     },
+     {
+         "question": "how do I set serpapi_api_key?",
+         "chat_history": "Human: can you write me a code snippet for that?\nAssistant: \n\nYes, you can create an Agent with a custom LLMChain in LangChain. Here is a [link](https://langchain.readthedocs.io/en/latest/modules/agents/examples/custom_agent.html) to the documentation that provides a code snippet for creating a custom Agent.",
+         "answer": "How do I set the serpapi_api_key?",
+     },
+     {
+         "question": "What are some methods for data augmented generation?",
+         "chat_history": "Human: List all methods of an Agent class please\nAssistant: \n\nTo answer your question, you can find a list of all the methods of the Agent class in the [API reference documentation](https://langchain.readthedocs.io/en/latest/modules/agents/reference.html).",
+         "answer": "What are some methods for data augmented generation?",
+     },
+     {
+         "question": "can you write me a code snippet for that?",
+         "chat_history": "Human: how do I create an agent with custom LLMChain?\nAssistant: \n\nTo create an Agent with a custom LLMChain in LangChain, you can use the [Custom Agent example](https://langchain.readthedocs.io/en/latest/modules/agents/examples/custom_agent.html). This example shows how to create a custom LLMChain and use an existing Agent class to parse the output. For more information on Agents and Tools, check out the [Key Concepts](https://langchain.readthedocs.io/en/latest/modules/agents/key_concepts.html) documentation.",
+         "answer": "Can you provide a code snippet for creating an Agent with a custom LLMChain?",
+     },
+ ]
+ from langchain.prompts.example_selector.semantic_similarity import \
+     sorted_values
+
+ for d in documents:
+     d["content"] = " ".join(sorted_values(d))
+ with client.batch as batch:
+     for text in documents:
+         batch.add_data_object(
+             text,
+             "Rephrase",
+         )
+
+ client.schema.get()
+ schema = {
+     "classes": [
+         {
+             "class": "QA",
+             "description": "Rephrase Examples",
+             "vectorizer": "text2vec-openai",
+             "moduleConfig": {
+                 "text2vec-openai": {
+                     "model": "ada",
+                     "modelVersion": "002",
+                     "type": "text",
+                 }
+             },
+             "properties": [
+                 {
+                     "dataType": ["text"],
+                     "moduleConfig": {
+                         "text2vec-openai": {
+                             "skip": False,
+                             "vectorizePropertyName": False,
+                         }
+                     },
+                     "name": "content",
+                 },
+                 {
+                     "dataType": ["text"],
+                     "description": "The link",
+                     "moduleConfig": {
+                         "text2vec-openai": {
+                             "skip": True,
+                             "vectorizePropertyName": False,
+                         }
+                     },
+                     "name": "question",
+                 },
+                 {
+                     "dataType": ["text"],
+                     "description": "The link",
+                     "moduleConfig": {
+                         "text2vec-openai": {
+                             "skip": True,
+                             "vectorizePropertyName": False,
+                         }
+                     },
+                     "name": "answer",
+                 },
+                 {
+                     "dataType": ["text"],
+                     "description": "The link",
+                     "moduleConfig": {
+                         "text2vec-openai": {
+                             "skip": True,
+                             "vectorizePropertyName": False,
+                         }
+                     },
+                     "name": "summaries",
+                 },
+                 {
+                     "dataType": ["text"],
+                     "description": "The link",
+                     "moduleConfig": {
+                         "text2vec-openai": {
+                             "skip": True,
+                             "vectorizePropertyName": False,
+                         }
+                     },
+                     "name": "sources",
+                 },
+             ],
+         },
+     ]
+ }
+
+ client.schema.create(schema)
+
+ documents = [
+     {
+         "question": "how do i install langchain?",
+         "answer": "```pip install langchain```",
+         "summaries": ">Example:\nContent:\n---------\nYou can pip install langchain package by running 'pip install langchain'\n----------\nSource: foo.html",
+         "sources": "foo.html",
+     },
+     {
+         "question": "how do i import an openai LLM?",
+         "answer": "```from langchain.llm import OpenAI```",
+         "summaries": ">Example:\nContent:\n---------\nyou can import the open ai wrapper (OpenAI) from the langchain.llm module\n----------\nSource: bar.html",
+         "sources": "bar.html",
+     },
+ ]
+ from langchain.prompts.example_selector.semantic_similarity import \
+     sorted_values
+
+ for d in documents:
+     d["content"] = " ".join(sorted_values(d))
+ with client.batch as batch:
+     for text in documents:
+         batch.add_data_object(
+             text,
+             "QA",
+         )
+ )
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ langchain==0.0.64
+ beautifulsoup4
+ weaviate-client
+ openai
+ black
+ isort
+ Flask
+ transformers
+ gradio
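
A minimal sketch of how these files could be run together locally, assuming a reachable Weaviate instance and an OpenAI API key. The environment variable names come from app.py and ingest.py; the exported values shown are placeholders, not real endpoints or keys.

# Assumed prerequisites: Python 3, a running Weaviate instance, an OpenAI API key.
pip install -r requirements.txt
export WEAVIATE_URL="https://<your-weaviate-instance>"   # placeholder value
export OPENAI_API_KEY="sk-..."                           # placeholder value
bash ingest.sh    # scrape the docs, then populate the Paragraph, Rephrase, and QA classes
python app.py     # launch the Gradio chat UI (paste the OpenAI key in the textbox to start)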