qorgh346 committed on
Commit
dfa737c
1 Parent(s): ffdd322

Add application file

Browse files
Files changed (2) hide show
  1. app.py +190 -0
  2. requirements.txt +12 -0
app.py ADDED
@@ -0,0 +1,190 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from dotenv import load_dotenv
3
+ from PyPDF2 import PdfReader
4
+ from langchain.text_splitter import CharacterTextSplitter,RecursiveCharacterTextSplitter
5
+ from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings
6
+ from langchain.vectorstores import FAISS, Chroma
7
+ from langchain.embeddings import HuggingFaceEmbeddings # General embeddings from HuggingFace models.
8
+ from langchain.chat_models import ChatOpenAI
9
+ from langchain.memory import ConversationBufferMemory
10
+ from langchain.chains import ConversationalRetrievalChain
11
+ from htmlTemplates import css, bot_template, user_template
12
+ from langchain.llms import HuggingFaceHub, LlamaCpp,CTransformers # For loading transformer models.
13
+
14
def get_pdf_text(pdf_docs):
    """Extract and concatenate the text of every page of an uploaded PDF.

    Args:
        pdf_docs: a binary file-like object (e.g. a Streamlit UploadedFile)
            accepted by PyPDF2's PdfReader.

    Returns:
        The extracted text of all pages joined into a single string.
    """
    text = ''
    pdf_reader = PdfReader(pdf_docs)
    for page in pdf_reader.pages:
        # extract_text() may return None for pages with no extractable text
        # (e.g. scanned images); guard so the concatenation cannot raise.
        text += page.extract_text() or ''
    return text
23
+
24
+
25
def get_text_chunks(text):
    """Split raw document text into overlapping chunks for embedding.

    Args:
        text: the full document text as one string.

    Returns:
        A list of chunk strings, roughly 256 characters each with a
        50-character overlap between consecutive chunks.
    """
    # Removed debug prints that dumped the entire document and every chunk
    # to stdout on each run, and the dead commented-out CharacterTextSplitter
    # variant (which contained the typo `10f00`).
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=256,
        chunk_overlap=50,
        length_function=len,
    )
    return text_splitter.split_text(text)
41
+
42
+
43
def get_vectorstore(text_chunks):
    """Embed text chunks and index them in an in-memory FAISS store.

    Args:
        text_chunks: list of strings to embed.

    Returns:
        A FAISS vector store built over the embedded chunks.
    """
    # CPU-only sentence-transformers model; small and fast enough for a demo.
    embeddings = HuggingFaceEmbeddings(
        model_name='sentence-transformers/all-MiniLM-L12-v2',
        model_kwargs={'device': 'cpu'},
    )
    # Removed the debug print of the embeddings object and the dead
    # commented-out OpenAI/Instructor/Chroma alternatives.
    vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
    return vectorstore
55
+
56
+
57
def get_conversation_chain(vectorstore):
    """Build a conversational retrieval chain over the given vector store.

    Loads a local quantized Llama-2 model through llama.cpp and wires it to
    the store's retriever with a buffer memory holding the full chat history.

    Args:
        vectorstore: a vector store exposing `.as_retriever()`.

    Returns:
        A ConversationalRetrievalChain ready to be called with
        `{'question': ...}`.
    """
    model_path = 'llama-2-7b-chat.Q2_K.gguf'
    # NOTE(review): the original passed `input={'temperature': ..., ...}`;
    # LlamaCpp has no `input` field — sampling parameters are top-level
    # constructor arguments, so pass them directly. The unused `config`
    # dict (left over from a commented-out CTransformers variant) is gone.
    llm = LlamaCpp(
        model_path=model_path,
        temperature=0.75,
        max_tokens=2000,
        top_p=1,
        verbose=True,
    )
    memory = ConversationBufferMemory(
        memory_key='chat_history', return_messages=True)
    conversation_chain = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=vectorstore.as_retriever(),
        memory=memory,
    )
    return conversation_chain
78
+
79
+
80
def handle_userinput(user_question):
    """Run the user's question through the conversation chain and render the chat.

    Args:
        user_question: the question string entered in the text input.
    """
    # Guard: the chain only exists after the user has clicked "Process".
    # Without this, asking a question first raised TypeError on None.
    if st.session_state.conversation is None:
        st.warning("Please upload and process your documents first.")
        return
    response = st.session_state.conversation({'question': user_question})
    st.session_state.chat_history = response['chat_history']

    # History alternates turns: even indices are the user, odd are the bot.
    for i, message in enumerate(st.session_state.chat_history):
        template = user_template if i % 2 == 0 else bot_template
        st.write(template.replace("{{MSG}}", message.content),
                 unsafe_allow_html=True)
91
+
92
def get_text_file(docs):
    """Read an uploaded plain-text file and decode its bytes as UTF-8."""
    raw_bytes = docs.read()
    return raw_bytes.decode("utf-8")
95
+
96
def get_csv_file(docs):
    """Flatten an uploaded CSV file into one line of text per row.

    Each output line is the first column's value followed immediately
    (no separator — kept for backward compatibility) by
    "<column> is <value> " for every remaining column.

    Args:
        docs: a file-like object accepted by pandas.read_csv.

    Returns:
        The flattened text, one row per '\n'-terminated line.
    """
    import pandas as pd
    text = ''
    data = pd.read_csv(docs)
    for _, row in data.iterrows():
        # .iloc avoids the deprecated positional fallback of `row[0]` on a
        # label-indexed Series (pandas 2.x FutureWarning); str() keeps the
        # concatenation below from raising on a numeric first column.
        row_text = str(row.iloc[0])
        for col_name in data.columns[1:]:
            row_text += '{} is {} '.format(col_name, row[col_name])
        text += row_text + '\n'
    return text
110
+
111
def get_json_file(docs):
    """Serialize an uploaded JSON object into plain text.

    For every top-level key, each element of its value is emitted as
    "<key><element>" on its own line. Assumes the top-level value is an
    object whose values are iterable (e.g. lists) — TODO confirm against
    the uploads this app expects.

    Args:
        docs: a file-like object accepted by json.load.

    Returns:
        The flattened text, one '\n'-terminated entry per element.
    """
    import json
    json_data = json.load(docs)
    entries = []
    for key, values in json_data.items():
        for item in values:
            entries.append(str(key) + str(item))
    return ''.join(entry + '\n' for entry in entries)
123
+
124
def get_hwp_file(docs):
    """Placeholder for .hwp text extraction; not yet implemented.

    Returns an empty string instead of None so that main()'s
    `raw_text += get_hwp_file(file)` no longer raises TypeError
    when an HWP file is uploaded.
    """
    # TODO: implement real HWP parsing (e.g. via olefile/pyhwp).
    return ''
126
+
127
def get_docs_file(docs):
    """Placeholder for .docx text extraction; not yet implemented.

    Returns an empty string instead of None so that main()'s
    `raw_text += get_docs_file(file)` no longer raises TypeError
    when a Word document is uploaded.
    """
    # TODO: implement real .docx parsing (e.g. via python-docx).
    return ''
129
+
130
+
131
def main():
    """Streamlit entry point: upload documents, build the index, and chat."""
    load_dotenv()
    st.set_page_config(page_title="Chat with multiple PDFs",
                       page_icon=":books:")
    st.write(css, unsafe_allow_html=True)

    # Initialise session slots on the first run so later code can read them.
    if "conversation" not in st.session_state:
        st.session_state.conversation = None
    if "chat_history" not in st.session_state:
        st.session_state.chat_history = None

    st.header("Chat with multiple PDFs :books:")
    user_question = st.text_input("Ask a question about your documents:")
    if user_question:
        handle_userinput(user_question)

    with st.sidebar:
        st.subheader("Your documents")
        docs = st.file_uploader(
            "Upload your PDFs here and click on 'Process'", accept_multiple_files=True)
        if st.button("Process"):
            with st.spinner("Processing"):
                # MIME type -> extractor; browsers sometimes report PDFs as
                # application/octet-stream, so both map to the PDF reader.
                extractors = {
                    'text/plain': get_text_file,
                    'application/octet-stream': get_pdf_text,
                    'application/pdf': get_pdf_text,
                    'text/csv': get_csv_file,
                    'application/json': get_json_file,
                    'application/x-hwp': get_hwp_file,
                    'application/vnd.openxmlformats-officedocument.wordprocessingml.document': get_docs_file,
                }

                raw_text = ""
                for file in docs:
                    print('file - type : ', file.type)
                    extractor = extractors.get(file.type)
                    # Unknown types are skipped, same as the original chain.
                    if extractor is not None:
                        raw_text += extractor(file)

                # get the text chunks
                text_chunks = get_text_chunks(raw_text)

                # create vector store
                vectorstore = get_vectorstore(text_chunks)

                # create conversation chain
                st.session_state.conversation = get_conversation_chain(
                    vectorstore)


if __name__ == '__main__':
    main()
requirements.txt ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ langchain
2
+ llama-cpp-python
3
+ PyPDF2==3.0.1
4
+ faiss-cpu==1.7.4
5
+ ctransformers
6
+ pypdf
7
+ chromadb
8
+ tiktoken
9
+ pysqlite3-binary
10
+ streamlit-extras
11
+ InstructorEmbedding
12
+ sentence-transformers