inumulaisk committed on
Commit
bf14292
1 Parent(s): 573e47d

create app.py file

Files changed (1)
  1. app.py +321 -0
app.py ADDED
@@ -0,0 +1,321 @@
from typing import List, Union, Optional

from dotenv import load_dotenv, find_dotenv
from langchain.callbacks import get_openai_callback
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.schema import (SystemMessage, HumanMessage, AIMessage)
from langchain.llms import LlamaCpp, CTransformers
from langchain.embeddings import LlamaCppEmbeddings, HuggingFaceEmbeddings
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.text_splitter import TokenTextSplitter
from langchain.prompts import PromptTemplate
from langchain.vectorstores import Qdrant
from PyPDF2 import PdfReader
import streamlit as st
# import llamapy
# import langchain.llms.
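# Likely runtime dependencies implied by the imports above (an assumption,
# nothing is pinned in this commit): streamlit, langchain, openai,
# python-dotenv, PyPDF2, qdrant-client, llama-cpp-python, ctransformers,
# tiktoken. load_dotenv() in main() presumably picks up OPENAI_API_KEY from a
# local .env file for the GPT models.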

PROMPT_TEMPLATE = """
Use the following pieces of context enclosed by triple backquotes to answer the question at the end.
\n\n
Context:
```
{context}
```
\n\n
Question: [][][][]{question}[][][][]
\n
Answer:"""
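# The "[][][][]" markers around {question} act as delimiters so that
# extract_user_question_part_only() further below can recover the raw user
# question from the combined context+question prompt when the chat history is
# re-rendered.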


def init_page() -> None:
    st.set_page_config(
        page_title="Personal ChatGPT"
    )
    st.sidebar.title("Options")


def init_messages() -> None:
    clear_button = st.sidebar.button("Clear Conversation", key="clear")
    if clear_button or "messages" not in st.session_state:
        st.session_state.messages = [
            SystemMessage(
                content=(
                    "You are a helpful AI QA assistant. "
                    "When answering questions, use the context enclosed by triple backquotes if it is relevant. "
                    "If you don't know the answer, just say that you don't know, "
                    "don't try to make up an answer. "
                    "Reply with your answer in Markdown format.")
            )
        ]
        st.session_state.costs = []


def get_pdf_text() -> Optional[List[str]]:
    """
    Load the uploaded PDF and split its text into chunks.
    """
    st.header("Document Upload")
    uploaded_file = st.file_uploader(
        label="Upload the PDF file you want ChatGPT to use when answering",
        type="pdf"
    )
    if uploaded_file:
        pdf_reader = PdfReader(uploaded_file)
        # extract_text() may return None for pages without extractable text
        text = "\n\n".join([page.extract_text() or "" for page in pdf_reader.pages])
        text_splitter = TokenTextSplitter(chunk_size=100, chunk_overlap=0)
        return text_splitter.split_text(text)
    else:
        return None


# texts: str, embeddings: Union[OpenAIEmbeddings, HuggingFaceEmbeddings]) \


def build_vector_store(
        texts: Optional[List[str]],
        embeddings: Union[OpenAIEmbeddings, LlamaCppEmbeddings]) \
        -> Optional[Qdrant]:
    """
    Store the embedding vectors of the text chunks in a vector store (Qdrant).
    """
    if texts:
        with st.spinner("Loading PDF ..."):
            qdrant = Qdrant.from_texts(
                texts,
                embeddings,
                path=":memory:",
                collection_name="my_collection",
                force_recreate=True
            )
        st.success("File Loaded Successfully!!")
    else:
        qdrant = None
    return qdrant


def select_llm() -> tuple[str, float]:
    """
    Read the user's model and temperature selection from the Streamlit sidebar.
    """
    model_name = st.sidebar.radio("Choose LLM:",
                                  ("gpt-3.5-turbo-0613",
                                   "gpt-3.5-turbo-16k-0613",
                                   "gpt-4",
                                   "llama-2-7b-chat.ggmlv3.q2_K"))
    temperature = st.sidebar.slider("Temperature:", min_value=0.0,
                                    max_value=1.0, value=0.0, step=0.01)
    print('Returning:--->', model_name)
    return model_name, temperature


# def load_llm(model_name: str, temperature: float) -> Union[ChatOpenAI, LlamaCpp]:
#     """
#     Load LLM.
#     """
#     if model_name.startswith("gpt-"):
#         return ChatOpenAI(temperature=temperature, model_name=model_name)
#     elif model_name.startswith("llama-2-"):
#         callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
#         return LlamaCpp(
#             model_path=f"C:Users/SravanthK/Downloads/{model_name}.bin",
#             input={"temperature": temperature,
#                    "max_length": 2048,
#                    "top_p": 1
#                    },
#             n_ctx=2048,
#             callback_manager=callback_manager,
#             verbose=False,  # True
#         )


def load_llm(model_name: str, temperature: float) -> Union[ChatOpenAI, LlamaCpp]:
    """
    Load the selected LLM.
    """
    if model_name.startswith("gpt-"):
        return ChatOpenAI(temperature=temperature, model_name=model_name)
    elif model_name.startswith("llama-2-"):
        print('At else---->', model_name)
        callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
        return LlamaCpp(
            model_path=r"C:\Users\SravanthK\Desktop\ISK\ggl_project\models\llama-2-7b-chat.ggmlv3.q2_K.bin",
            input={"temperature": temperature,
                   "max_length": 2048,
                   "top_p": 1
                   },
            n_ctx=2048,
            callback_manager=callback_manager,
            verbose=False,  # True
        )
        # return CTransformers(
        #     model=r"C:\Users\SravanthK\Downloads\llama-2-7b-chat.ggmlv3.q2_K.bin",
        #     model_type="llama",
        #     max_new_tokens=256,
        #     temperature=0.5,
        #     context_length=512,  # Set to the model's maximum context length
        #     verbose=False,
        #     callback_manager=callback_manager
        # )


# def load_embeddings(model_name: str) -> Union[OpenAIEmbeddings, HuggingFaceEmbeddings]:

def load_embeddings(model_name: str) -> Union[OpenAIEmbeddings, LlamaCppEmbeddings]:
    """
    Load embedding model.
    """
    if model_name.startswith("gpt-"):
        return OpenAIEmbeddings()
    elif model_name.startswith("llama-2-"):
        # return LlamaCppEmbeddings(model_path=f"./models/{model_name}.bin")
        return LlamaCppEmbeddings(model_path=r'C:\Users\SravanthK\Downloads\llama-2-7b-chat.ggmlv3.q2_K.bin')
        # print(f'---> Selected model: {model_name}')
        # # HuggingFaceEmbeddings(model_path=f"C:/Users/SravanthK/Downloads/{model_name}.bin")
        # print('YES')
        # return HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2')


def get_answer(llm, messages) -> tuple[str, float]:
    """
    Get the AI answer to user questions.
    """
    if isinstance(llm, ChatOpenAI):
        with get_openai_callback() as cb:
            answer = llm(messages)
        return answer.content, cb.total_cost
    # if isinstance(llm, CTransformers):
    #     return llm(llama_v2_prompt(convert_langchainschema_to_dict(messages))), 0.0
    if isinstance(llm, LlamaCpp):
        return llm(llama_v2_prompt(convert_langchainschema_to_dict(messages))), 0.0


def find_role(message: Union[SystemMessage, HumanMessage, AIMessage]) -> str:
    """
    Identify role name from langchain.schema object.
    """
    if isinstance(message, SystemMessage):
        return "system"
    if isinstance(message, HumanMessage):
        return "user"
    if isinstance(message, AIMessage):
        return "assistant"
    raise TypeError("Unknown message type.")


def convert_langchainschema_to_dict(
        messages: List[Union[SystemMessage, HumanMessage, AIMessage]]) \
        -> List[dict]:
    """
    Convert the chain of chat messages in list of langchain.schema format to
    list of dictionary format.
    """
    return [{"role": find_role(message),
             "content": message.content
             } for message in messages]


def llama_v2_prompt(messages: List[dict]) -> str:
    """
    Convert the messages in list of dictionary format to Llama2 compliant
    format.
    """
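    # Example (illustrative): for
    #   [{"role": "system", "content": SYS}, {"role": "user", "content": "Hi"}]
    # this produces roughly
    #   "<s>[INST] <<SYS>>\nSYS\n<</SYS>>\n\nHi [/INST]"
    # i.e. the system prompt is folded into the first user turn, and each
    # (user, assistant) pair becomes one "<s>[INST] ... [/INST] ... </s>" block.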
    B_INST, E_INST = "[INST]", "[/INST]"
    B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
    BOS, EOS = "<s>", "</s>"
    DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."""

    if messages[0]["role"] != "system":
        messages = [
            {
                "role": "system",
                "content": DEFAULT_SYSTEM_PROMPT,
            }
        ] + messages
    messages = [
        {
            "role": messages[1]["role"],
            "content": B_SYS + messages[0]["content"] + E_SYS + messages[1]["content"],
        }
    ] + messages[2:]

    messages_list = [
        f"{BOS}{B_INST} {(prompt['content']).strip()} {E_INST} {(answer['content']).strip()} {EOS}"
        for prompt, answer in zip(messages[::2], messages[1::2])
    ]
    messages_list.append(
        f"{BOS}{B_INST} {(messages[-1]['content']).strip()} {E_INST}")

    return "".join(messages_list)


def extract_user_question_part_only(content):
    """
    Extract only the user question part from the question content that
    combines the user question and the PDF context.
    """
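    # Example (illustrative): a prompt built from PROMPT_TEMPLATE such as
    # "... Question: [][][][]What is X?[][][][] ..." splits into three parts
    # on "[][][][]", and the middle part ("What is X?") is returned.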
    content_split = content.split("[][][][]")
    if len(content_split) == 3:
        return content_split[1]
    return content


def main() -> None:
    _ = load_dotenv(find_dotenv())

    init_page()

    model_name, temperature = select_llm()
    llm = load_llm(model_name, temperature)
    embeddings = load_embeddings(model_name)

    texts = get_pdf_text()
    qdrant = build_vector_store(texts, embeddings)

    init_messages()

    st.header("Personal ChatGPT")
    # Handle user input
    if user_input := st.chat_input("Input your question!"):
        if qdrant:
            context = [c.page_content for c in qdrant.similarity_search(
                user_input, k=10)]
            user_input_w_context = PromptTemplate(
                template=PROMPT_TEMPLATE,
                input_variables=["context", "question"]) \
                .format(
                    context=context, question=user_input)
        else:
            user_input_w_context = user_input
        st.session_state.messages.append(
            HumanMessage(content=user_input_w_context))
        with st.spinner("ChatGPT is typing ..."):
            print(type(llm), type(st.session_state.messages))
            answer, cost = get_answer(llm, st.session_state.messages)
        st.session_state.messages.append(AIMessage(content=answer))
        st.session_state.costs.append(cost)

    # Display chat history
    messages = st.session_state.get("messages", [])
    for message in messages:
        if isinstance(message, AIMessage):
            with st.chat_message("assistant"):
                st.markdown(message.content)
        elif isinstance(message, HumanMessage):
            with st.chat_message("user"):
                st.markdown(extract_user_question_part_only(message.content))

    costs = st.session_state.get("costs", [])
    st.sidebar.markdown("## Costs")
    st.sidebar.markdown(f"**Total cost: ${sum(costs):.5f}**")
    for cost in costs:
        st.sidebar.markdown(f"- ${cost:.5f}")


# streamlit run app.py
if __name__ == "__main__":
    main()