eliujl committed
Commit 891293a
1 Parent(s): 11c3099

Added 'Chat' and 'Task' usages


Added 'Chat' and 'Task' usages. 'Chat' does not load or ingest any file, but keeps the chat history. 'Task' loads file(s) and performs a user-defined task, such as proofreading or translation, on each chunk of the file(s).
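
For orientation only (this sketch is not part of the commit), the snippet below mirrors how the diff wires the two new usages: 'Chat' builds an LLMChain over a prompt that takes only the chat history and the question, while 'Task' builds a load_qa_chain of type "stuff" and runs it once per ingested chunk. The llm object, the helper names (build_chat_chain, build_task_chain, run_task_on_chunks), and the sample chunks are illustrative assumptions, not code from app.py.

# Illustrative sketch of the 'Chat' and 'Task' paths added in this commit.
# Assumptions (not from app.py): `llm` is any LangChain-compatible chat model;
# the helper names and sample chunks are hypothetical.
from langchain.chains import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.docstore.document import Document
from langchain.prompts import PromptTemplate


def build_chat_chain(llm):
    # 'Chat': no retrieval and no file context; only chat history + question.
    template = (
        "Answer the question in your own words.\n"
        "Chat history: {chat_history}\n"
        "User: {question}\n"
        "Bot: answer "
    )
    prompt = PromptTemplate(input_variables=["chat_history", "question"], template=template)
    return LLMChain(llm=llm, prompt=prompt)


def build_task_chain(llm):
    # 'Task': per-chunk context plus a task description; no chat history.
    template = (
        "You will be given a task, and you are an expert in that task.\n"
        "Perform the task for the given context and output only the result.\n"
        "Context: {context}\n"
        "User: {question}\n"
        "Bot: answer "
    )
    prompt = PromptTemplate(input_variables=["context", "question"], template=template)
    return load_qa_chain(llm=llm, chain_type="stuff", prompt=prompt)


def run_task_on_chunks(task_chain, chunks, task):
    # Mirrors the app's loop: one call per chunk, one output per chunk.
    return [
        task_chain.run(input_documents=[Document(page_content=chunk)], question=task)
        for chunk in chunks
    ]


# Hypothetical usage:
#   chat_chain = build_chat_chain(llm)
#   answer = chat_chain({"question": "Hello", "chat_history": []})["text"]
#   outputs = run_task_on_chunks(build_task_chain(llm), ["chunk one ...", "chunk two ..."],
#                                "Proofread the text.")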

Files changed (1)
  1. app.py +134 -67
app.py CHANGED
@@ -9,7 +9,8 @@ from langchain.embeddings.openai import OpenAIEmbeddings
 from langchain.embeddings.huggingface import HuggingFaceEmbeddings
 from langchain.chat_models import ChatOpenAI
 from langchain.vectorstores import Pinecone, Chroma
-from langchain.chains import ConversationalRetrievalChain
+from langchain.chains import ConversationalRetrievalChain, LLMChain
+from langchain.chains.question_answering import load_qa_chain
 from langchain.prompts import PromptTemplate
 from langchain.memory import ConversationBufferMemory
 import os
@@ -196,12 +197,12 @@ def use_local_llm(r_llm, local_llm_path):
     return llm
 
 
-def setup_prompt(r_llm):
+def setup_prompt(r_llm, usage):
     B_INST, E_INST = "[INST]", "[/INST]"
     B_SYS_LLAMA, E_SYS_LLAMA = "<<SYS>>\n", "\n<</SYS>>\n\n"
     B_SYS_MIS, E_SYS_MIS = "<s> ", "</s> "
     B_SYS_MIXTRAL, E_SYS_MIXTRAL = "<s>[INST]", "[/INST]</s>[INST]"
-    system_prompt = """Answer the question in your own words as truthfully as possible from the context given to you.
+    system_prompt_rag = """Answer the question in your own words as truthfully as possible from the context given to you.
     Supply sufficient information, evidence, reasoning, source from the context, etc., to justify your answer with details and logic.
     Think step by step and do not jump to conclusion during your reasoning at the beginning.
     Sometimes user's question may appear to be directly related to the context but may still be indirectly related,
@@ -209,12 +210,35 @@ def setup_prompt(r_llm):
     If questions are asked where there is no relevant context available,
     respond using out-of-context knowledge with
     "This question does not seem to be relevant to the documents. I am trying to explore knowledge outside the context." """
-    instruction = """
-    Context: {context}
-
-    Chat history: {chat_history}
-    User: {question}
-    Bot: answer """
+    system_prompt_chat = """Answer the question in your own words.
+    Supply sufficient information, evidence, reasoning, source from the context, etc., to justify your answer with details and logic.
+    Think step by step and do not jump to conclusion during your reasoning at the beginning.
+    """
+    system_prompt_task = """You will be given a task, and you are an expert in that task.
+    Perform the task for the given context, and output the result. Do not include extra descriptions. Just output the desired result defined by the task.
+    Example: You are a professional translator and are given a translation task. Then you translate the text in the context and output only the translated text.
+    Example: You are a professional proofreader and are given a proofreading task. Then you proofread the text in the context and output only the translated text.
+    """
+    if usage == 'RAG':
+        system_prompt = system_prompt_rag
+        instruction = """
+        Context: {context}
+
+        Chat history: {chat_history}
+        User: {question}
+        Bot: answer """
+    elif usage == 'Chat':
+        system_prompt = system_prompt_chat
+        instruction = """
+        Chat history: {chat_history}
+        User: {question}
+        Bot: answer """
+    elif usage == 'Task':
+        system_prompt = system_prompt_task
+        instruction = """
+        Context: {context}
+        User: {question}
+        Bot: answer """
     if r_llm == gpt3p5 or r_llm == gpt4:
         template = system_prompt + instruction
     else:
@@ -228,9 +252,18 @@ def setup_prompt(r_llm):
     else:
         # Handle other models or raise an exception
         pass
-    prompt = PromptTemplate(
-        input_variables=["context", "chat_history", "question"], template=template
-    )
+    if usage == 'RAG':
+        prompt = PromptTemplate(
+            input_variables=["context", "chat_history", "question"], template=template
+        )
+    elif usage == 'Chat':
+        prompt = PromptTemplate(
+            input_variables=["chat_history", "question"], template=template
+        )
+    elif usage == 'Task':
+        prompt = PromptTemplate(
+            input_variables=["context", "question"], template=template
+        )
     return prompt
 
 def setup_em_llm(OPENAI_API_KEY, temperature, r_llm, local_llm_path):
@@ -273,49 +306,59 @@ def main(pinecone_index_name, chroma_collection_name, persist_directory, docsear
     reply = ''
     source = ''
     LLMs = [gpt3p5, gpt4] + local_model_names
+    usage = 'RAG'
     local_llm_path = './models/'
     user_llm_path = ''
     # Get user input of whether to use Pinecone or not
     col1, col2, col3 = st.columns([1, 1, 1])
     # create the radio buttons and text input fields
     with col1:
+        usage = st.radio('Usage: RAG for ingested files, chat (no files), or task (for all ingested texts)', ('RAG', 'Chat', 'Task'))
+        temperature = st.slider('Temperature', 0.0, 1.0, 0.1)
+        if usage == 'RAG':
+            r_pinecone = st.radio('Vector store:', ('Pinecone (online)', 'Chroma (local)'))
+            k_sources = st.slider('# source(s) to print out', 0, 20, 2)
+            r_ingest = st.radio('Ingest file(s)?', ('Yes', 'No'))
+            if r_pinecone == 'Pinecone (online)':
+                use_pinecone = True
+            else:
+                use_pinecone = False
+        if usage == 'Task':
+            r_ingest = 'Yes'
+
+    with col2:
         r_llm = st.radio(label='LLM:', options=LLMs)
         if r_llm == gpt3p5 or r_llm == gpt4:
             use_openai = True
         else:
-            use_openai = False
-        r_pinecone = st.radio('Vector store:', ('Pinecone (online)', 'Chroma (local)'))
-        r_ingest = st.radio(
-            'Ingest file(s)?', ('Yes', 'No'))
-        if r_pinecone == 'Pinecone (online)':
-            use_pinecone = True
-        else:
-            use_pinecone = False
-    with col2:
-        temperature = st.slider('Temperature', 0.0, 1.0, 0.1)
-        k_sources = st.slider('# source(s) to print out', 0, 20, 2)
+            use_openai = False
        if use_openai == True:
            OPENAI_API_KEY = st.text_input(
                "OpenAI API key:", type="password")
        else:
            OPENAI_API_KEY = ''
-        if use_pinecone == True:
+        if usage == 'RAG' and use_pinecone == True:
            st.write('Local GPT model (and local embedding model) is selected. Online vector store is selected.')
-        else:
+        elif usage == 'RAG' and use_pinecone == False:
            st.write('Local GPT model (and local embedding model) and local vector store are selected. All info remains local.')
+        else:
+            st.write('Local GPT model is selected. All info remains local.')
    with col3:
-        if use_pinecone == True:
-            PINECONE_API_KEY = st.text_input(
-                "Pinecone API key:", type="password")
-            PINECONE_API_ENV = st.text_input(
-                "Pinecone API env:", type="password")
-            pinecone_index_name = st.text_input('Pinecone index:')
-            pinecone.init(api_key=PINECONE_API_KEY,
-                          environment=PINECONE_API_ENV)
+        if usage == 'RAG':
+            if use_pinecone == True:
+                PINECONE_API_KEY = st.text_input(
+                    "Pinecone API key:", type="password")
+                PINECONE_API_ENV = st.text_input(
+                    "Pinecone API env:", type="password")
+                pinecone_index_name = st.text_input('Pinecone index:')
+                pinecone.init(api_key=PINECONE_API_KEY,
+                              environment=PINECONE_API_ENV)
+            else:
+                chroma_collection_name = st.text_input(
+                    '''Chroma collection name of 3-63 characters:''')
+                persist_directory = "./vectorstore"
        else:
-            chroma_collection_name = st.text_input(
-                '''Chroma collection name of 3-63 characters:''')
-            persist_directory = "./vectorstore"
+            hist_fn = st.text_input('Chat history filename')
        if use_openai == False:
            user_llm_path = st.text_input(
                "Path for local model (TO BE DOWNLOADED IF NOT EXISTING), type 'default' to use default path:",
@@ -323,45 +366,61 @@ def main(pinecone_index_name, chroma_collection_name, persist_directory, docsear
            if 'default' in user_llm_path:
                user_llm_path = local_llm_path
 
-    if ( (pinecone_index_name or chroma_collection_name)
+    if ( (pinecone_index_name or chroma_collection_name or usage == 'Task' or usage == 'Chat')
        and ( (use_openai and OPENAI_API_KEY) or (not use_openai and user_llm_path) ) ):
        embeddings, llm = setup_em_llm(OPENAI_API_KEY, temperature, r_llm, user_llm_path)
        #if ( pinecone_index_name or chroma_collection_name ) and embeddings and llm:
-        session_name = pinecone_index_name + chroma_collection_name
-        if r_ingest.lower() == 'yes':
-            files = st.file_uploader(
-                'Upload Files', accept_multiple_files=True)
-            if files:
-                save_file(files)
-                all_texts, n_texts = load_files()
-                docsearch = ingest(all_texts, use_pinecone, embeddings, pinecone_index_name,
-                                   chroma_collection_name, persist_directory)
+        session_name = pinecone_index_name + chroma_collection_name + hist_fn
+        if usage != 'Chat':
+            if r_ingest.lower() == 'yes':
+                files = st.file_uploader(
+                    'Upload Files', accept_multiple_files=True)
+                if files:
+                    save_file(files)
+                    all_texts, n_texts = load_files()
+                    if usage == 'RAG':
+                        docsearch = ingest(all_texts, use_pinecone, embeddings, pinecone_index_name,
+                                           chroma_collection_name, persist_directory)
+                    docsearch_ready = True
+            else:
+                st.write(
+                    'No data is to be ingested. Make sure the Pinecone index or Chroma collection name you provided contains data.')
+                docsearch, n_texts = setup_docsearch(use_pinecone, pinecone_index_name,
+                                                     embeddings, chroma_collection_name, persist_directory)
                docsearch_ready = True
        else:
-            st.write(
-                'No data is to be ingested. Make sure the Pinecone index or Chroma collection name you provided contains data.')
-            docsearch, n_texts = setup_docsearch(use_pinecone, pinecone_index_name,
-                                                 embeddings, chroma_collection_name, persist_directory)
            docsearch_ready = True
        if docsearch_ready:
-            # number of sources (split-documents when ingesting files); default is 4
-            k = min([20, n_texts])
-            retriever = setup_retriever(docsearch, k)
-            prompt = setup_prompt(r_llm)
+            prompt = setup_prompt(r_llm, usage)
+            #if usage == 'RAG' or usage == 'Chat':
            memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True, output_key='answer')
-            CRqa = ConversationalRetrievalChain.from_llm(
-                llm,
-                chain_type="stuff",
-                retriever=retriever,
-                memory=memory,
-                return_source_documents=True,
-                combine_docs_chain_kwargs={'prompt': prompt},
-            )
-
+            if usage == 'RAG':
+                # number of sources (split-documents when ingesting files); default is 4
+                k = min([20, n_texts])
+                retriever = setup_retriever(docsearch, k)
+                CRqa = ConversationalRetrievalChain.from_llm(
+                    llm,
+                    chain_type="stuff",
+                    retriever=retriever,
+                    memory=memory,
+                    return_source_documents=True,
+                    combine_docs_chain_kwargs={'prompt': prompt},
+                )
+            elif usage == 'Chat':
+                CRqa = LLMChain(
+                    llm=llm,
+                    prompt=prompt,
+                )
+            elif usage == 'Task':
+                CRqa = load_qa_chain(
+                    llm=llm,
+                    chain_type="stuff",
+                    prompt=prompt
+                )
            st.title(':blue[Chatbot]')
            # Get user input
            query = st.text_area('Enter your question:', height=10,
-                placeholder='''Summarize the context.
+                placeholder='''Summarize the context.
                \nAfter typing your question, click on SUBMIT to send it to the bot.''')
            submitted = st.button('SUBMIT')
 
@@ -373,8 +432,16 @@ def main(pinecone_index_name, chroma_collection_name, persist_directory, docsear
            # Generate a reply based on the user input and chat history
            chat_history = [(user, bot)
                            for user, bot in chat_history]
-            reply, source = get_response(query, chat_history, CRqa)
-
+            if usage == 'RAG':
+                reply, source = get_response(query, chat_history, CRqa)
+            elif usage == 'Chat':
+                reply = CRqa({"question": query, "chat_history": chat_history, "return_only_outputs": True})
+                reply = reply['text']
+            elif usage == 'Task':
+                reply = []
+                for a_text in all_texts:
+                    output_text = CRqa.run(input_documents=[a_text], question=query )
+                    reply.append ( output_text )
            # Update the chat history with the user input and system response
            chat_history.append(('User', query))
            chat_history.append(('Bot', reply))
@@ -389,7 +456,7 @@ def main(pinecone_index_name, chroma_collection_name, persist_directory, docsear
            chat_history_str1 = '<br>'.join([f'<span class=\"my_title\">{x[0]}:</span> {x[1]}' for x in latest_chats])
            st.markdown(f'<div class=\"chat-record\">{chat_history_str1}</div>', unsafe_allow_html=True)
 
-            if reply and source:
+            if usage == 'RAG' and reply and source:
                # Display sources
                for i, source_i in enumerate(source):
                    if i < k_sources: