Graceliying committed on
Commit
3b3e76a
1 Parent(s): ca7cdf9

Upload 3 files

Browse files
Files changed (4) hide show
  1. .gitattributes +1 -0
  2. Toastmasters-CC-Manual.pdf +3 -0
  3. app.py +120 -0
  4. requirements.txt +10 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ Toastmasters-CC-Manual.pdf filter=lfs diff=lfs merge=lfs -text
Toastmasters-CC-Manual.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ec9fdec57c22ead344bac5319e0097990f20df8660285b75256c81427817ace8
3
+ size 5989158
app.py ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import openai
3
+ import os
4
+ from dotenv import load_dotenv
5
+ from PyPDF2 import PdfReader
6
+ from langchain.text_splitter import CharacterTextSplitter
7
+ from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings
8
+ from langchain.vectorstores import FAISS
9
+ from langchain.memory import ConversationBufferMemory
10
+ from langchain.chains import ConversationalRetrievalChain
11
+ from langchain.chat_models import ChatOpenAI
12
+ from htmlTemplates import css, bot_template, user_template
13
+ from PIL import Image
14
+
15
def get_pdf_text(pdf_docs):
    """Extract and concatenate the text of every page in each uploaded PDF.

    Args:
        pdf_docs: iterable of file-like objects (e.g. Streamlit UploadedFile)
            that PyPDF2's PdfReader can open.

    Returns:
        One string containing the text of all pages of all documents,
        concatenated in order. Empty string when *pdf_docs* is empty.
    """
    text = ""
    for pdf in pdf_docs:
        pdf_reader = PdfReader(pdf)
        for page in pdf_reader.pages:
            # extract_text() may return None for pages with no extractable
            # text (e.g. scanned images); guard so concatenation never fails.
            text += page.extract_text() or ""
    return text
22
+
23
# CharacterTextSplitter documentation:
# https://python.langchain.com/en/latest/modules/indexes/text_splitters/examples/character_text_splitter.html
def get_text_chunk(text):
    """Split *text* into overlapping chunks sized for embedding.

    Chunks are ~1000 characters, split on newlines, with a 200-character
    overlap so context is preserved across chunk boundaries.
    """
    splitter = CharacterTextSplitter(
        separator="\n",
        chunk_size=1000,
        chunk_overlap=200,
        length_function=len,
    )
    return splitter.split_text(text)
34
+
35
# Embedding via the OpenAI API. Warning: this is a paid call and will cost money.
def get_vectorstore_openAI(text_chunks):
    """Build a FAISS vector store from *text_chunks* using OpenAI embeddings."""
    return FAISS.from_texts(texts=text_chunks, embedding=OpenAIEmbeddings())
40
+
41
# Embedding with instructor-xl, run locally for free.
# Details: https://huggingface.co/hkunlp/instructor-xl
def get_vectorstore(text_chunks):
    """Build a FAISS vector store from *text_chunks* with local Instructor embeddings."""
    instructor = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl")
    return FAISS.from_texts(texts=text_chunks, embedding=instructor)
47
+
48
def get_conversation_chain(vectorstore):
    """Create a ConversationalRetrievalChain over *vectorstore*.

    Uses ChatOpenAI as the LLM and a buffer memory keyed 'chat_history'
    so prior turns are fed back into retrieval.
    """
    buffer = ConversationBufferMemory(
        memory_key='chat_history',
        return_messages=True,
    )
    chain = ConversationalRetrievalChain.from_llm(
        llm=ChatOpenAI(),
        retriever=vectorstore.as_retriever(),
        memory=buffer,
    )
    return chain
57
+
58
def handle_userinput(user_question):
    """Run *user_question* through the conversation chain and render the chat.

    Stores the updated history in session state, then renders each turn:
    even indices are user messages, odd indices are bot replies.
    """
    response = st.session_state.conversation({'question': user_question})
    st.session_state.chat_history = response['chat_history']

    for idx, msg in enumerate(st.session_state.chat_history):
        template = user_template if idx % 2 == 0 else bot_template
        st.write(template.replace("{{MSG}}", msg.content), unsafe_allow_html=True)
67
+
68
def main():
    """Streamlit entry point: configure the page, ingest PDFs, answer questions.

    Flow: load env vars -> page setup -> init session state -> question box ->
    sidebar uploader that builds the retrieval chain on "Process".
    """
    ##############################################################################
    # Load OPENAI_API_KEY (read implicitly by openai/langchain) from .env.
    load_dotenv()

    ##############################################################################
    # Basic page setup.
    st.set_page_config(page_title="Chat With multiple PDFs", page_icon=":books:")
    st.write(css, unsafe_allow_html=True)

    # Initialise session state so Streamlit reruns don't wipe the conversation.
    if "conversation" not in st.session_state:
        st.session_state.conversation = None
    if "chat_history" not in st.session_state:
        st.session_state.chat_history = None

    st.header("Chat based on PDF you provided :books:")
    user_question = st.text_input("Ask a question about your documents:")

    if user_question:
        if st.session_state.conversation is None:
            # Asking before any PDFs were processed used to crash with
            # "'NoneType' object is not callable" — warn instead.
            st.warning("Please upload and process your PDFs first.")
        else:
            handle_userinput(user_question)

    with st.sidebar:
        st.subheader("Your PDF documents")
        pdf_docs = st.file_uploader(
            "Upload your pdfs here and click on 'Process'",
            accept_multiple_files=True,
        )
        if st.button("Process"):
            if not pdf_docs:
                # Pressing Process with no files would index nothing.
                st.warning("Please upload at least one PDF before processing.")
            else:
                with st.spinner("Processing"):
                    # 1. Extract the raw text from every uploaded PDF.
                    raw_text = get_pdf_text(pdf_docs)

                    # 2. Split the text into overlapping chunks.
                    text_chunks = get_text_chunk(raw_text)

                    # 3. Embed chunks into a FAISS vector store
                    #    (OpenAI embeddings — this API call costs money).
                    vectorstore = get_vectorstore_openAI(text_chunks)

                    # 4. Build the conversational retrieval chain and keep it
                    #    in session state so it survives reruns.
                    st.session_state.conversation = get_conversation_chain(vectorstore)
114
+
115
+
116
+
117
+
118
# To run this application: streamlit run app.py
if __name__ == '__main__':
    main()
requirements.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ streamlit==1.23.1
2
+ PyPDF2==3.0.1
3
+ langchain==0.0.197
4
+ openai==0.27.8
5
+ python-dotenv==1.0.0
6
+ faiss-cpu==1.7.4
7
+ huggingface-hub==0.15.1
8
+ InstructorEmbedding==1.0.1
9
+ sentence-transformers==2.2.2
10
+ tiktoken==0.4.0