osellight committed
Commit 40c2c31 · 1 Parent(s): bd2eec6

Upload 2 files

Files changed (2):
  1. app.py +117 -0
  2. requirements.txt +8 -0
app.py ADDED
@@ -0,0 +1,117 @@
+ from langchain.embeddings.openai import OpenAIEmbeddings
+ from langchain.vectorstores import Chroma
+ from langchain.text_splitter import CharacterTextSplitter
+ from langchain.chains.question_answering import load_qa_chain
+ from langchain.llms import OpenAI
+ import os
+
+ import subprocess
+ subprocess.run(["git", "clone", "https://github.com/TheMITTech/shakespeare"], check=True)
+
+ from glob import glob
+ files = glob("./shakespeare/**/*.html")
+
+
+ import shutil
+ import os
+ os.mkdir('./data')
+ destination_folder = './data/'
+ for html_file in files:
+     shutil.move(html_file, destination_folder + html_file.split("/")[-1])
+
+
+ from langchain.document_loaders import BSHTMLLoader, DirectoryLoader
+ from bs4 import BeautifulSoup
+ bshtml_dir_loader = DirectoryLoader('./data/', loader_cls=BSHTMLLoader)
+ data = bshtml_dir_loader.load()
+
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ text_splitter = RecursiveCharacterTextSplitter(
+     chunk_size=1000,
+     chunk_overlap=20,
+     length_function=len,
+ )
+ documents = text_splitter.split_documents(data)
+
+ embeddings = OpenAIEmbeddings()
+
+ from langchain.vectorstores import Chroma
+ persist_directory = "vector_db"
+ vectordb = Chroma.from_documents(documents=documents, embedding=embeddings, persist_directory=persist_directory)
+
+ vectordb.persist()
+ vectordb = None
+ vectordb = Chroma(persist_directory=persist_directory, embedding_function=embeddings)
+
+ from langchain.chat_models import ChatOpenAI
+ llm = ChatOpenAI(temperature=0, model="gpt-4")
+ doc_retriever = vectordb.as_retriever()
+
+ chain = load_qa_chain(OpenAI(temperature=0), chain_type="stuff")
+ from langchain.chains import RetrievalQA
+ shakespeare_qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=doc_retriever)
+
+ from langchain.utilities import SerpAPIWrapper
+ search = SerpAPIWrapper()
+
+ from langchain.agents import initialize_agent, Tool
+ from langchain.agents import AgentType
+ from langchain.tools import BaseTool
+ from langchain.llms import OpenAI
+ from langchain import LLMMathChain, SerpAPIWrapper
+
+ tools = [
+     Tool(
+         name="Shakespeare QA System",
+         func=shakespeare_qa.run,
+         description="useful for when you need to answer questions about Shakespeare's works. Input should be a fully formed question."
+     ),
+     Tool(
+         name="SERP API Search",
+         func=search.run,
+         description="useful for when you need to answer general or current-events questions that are not covered by Shakespeare's works. Input should be a fully formed question."
+     ),
+ ]
+
+ from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory
+ memory = ConversationBufferMemory(memory_key="chat_history")
+ readonlymemory = ReadOnlySharedMemory(memory=memory)
+
+ from langchain.agents import ZeroShotAgent, Tool, AgentExecutor
+
+ prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:"""
+ suffix = """Begin!
+
+ {chat_history}
+ Question: {input}
+ {agent_scratchpad}"""
+
+ prompt = ZeroShotAgent.create_prompt(
+     tools,
+     prefix=prefix,
+     suffix=suffix,
+     input_variables=["input", "chat_history", "agent_scratchpad"]
+ )
+
+
+ from langchain import OpenAI, LLMChain, PromptTemplate
+ llm_chain = LLMChain(llm=llm, prompt=prompt)
+ agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)
+ agent_chain = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True, memory=memory)
+
+ def make_inference(query):
+     return agent_chain.run(input=query)
+
+ if __name__ == "__main__":
+     # make a gradio interface
+     import gradio as gr
+
+     gr.Interface(
+         make_inference,
+         [
+             gr.Textbox(lines=2, label="Query"),
+         ],
+         gr.Textbox(label="Response"),
+         title="🗣️QuestionMyDoc-OpenAI📄",
+         description="🗣️QuestionMyDoc-OpenAI📄 is a tool that allows you to ask questions about a document. In this case, the works of Shakespeare.",
+     ).launch()
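
Everything above the __main__ guard runs at import time: the script clones the MIT Shakespeare mirror, moves the HTML files into ./data, splits and embeds them, builds the Chroma index, and wires up the agent; only the Gradio UI launch is deferred. A minimal local smoke test could therefore look like the sketch below. It is an assumption, not part of the commit: it presumes OPENAI_API_KEY and SERPAPI_API_KEY are exported beforehand (the OpenAI clients and SerpAPIWrapper read them), and that ./shakespeare and ./data do not already exist, since the clone and os.mkdir calls are unconditional.

# smoke_test.py (hypothetical helper, not part of this commit)
# Importing app executes the whole clone/split/embed/index pipeline, so the
# import is slow and incurs OpenAI embedding costs before any question is asked.
from app import make_inference

print(make_inference("In which play does the line 'To be, or not to be' appear?"))
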
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ langchain
+ openai
+ tiktoken
+ chromadb
+ beautifulsoup4
+ lxml
+ bs4
+ google-search-results
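
The requirements are left unpinned, and gradio itself is omitted, which is typical for a Hugging Face Space where the Gradio SDK is supplied by the runtime. Because app.py persists its index to ./vector_db, the store can also be inspected without going through the agent. The sketch below is only an illustration under assumptions: it reuses the same legacy langchain APIs as app.py, calls similarity_search (which the commit itself never calls), and presumes a previous run has built ./vector_db and that OPENAI_API_KEY is set.

# inspect_db.py (hypothetical helper, not part of this commit)
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma

# Reload the index that app.py persisted to ./vector_db.
vectordb = Chroma(
    persist_directory="vector_db",
    embedding_function=OpenAIEmbeddings(),
)

# Print the source file and a short snippet of the four closest chunks.
for doc in vectordb.similarity_search("Who is Ophelia?", k=4):
    print(doc.metadata.get("source"), doc.page_content[:120])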