clementsan committed
Commit eb94a8f
1 Parent(s): 8aeeae1

Improve PDF chatbot description

Files changed (1): app.py (+6 -7)
app.py CHANGED
@@ -98,7 +98,7 @@ def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, pr
         "trust_remote_code": True, "torch_dtype": "auto"}
     )
 
-    progress(0.5, desc="Defining buffer memory...")
+    progress(0.75, desc="Defining buffer memory...")
     memory = ConversationBufferMemory(
         memory_key="chat_history",
         return_messages=True
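The hunk above bumps the progress fraction (0.5 to 0.75, presumably because the memory step now sits later in the initialization) and keeps the ConversationBufferMemory setup unchanged. As a hedged sketch of how such a memory object is typically wired into a LangChain conversational QA chain (the build_qa_chain helper, the from_llm call, and as_retriever() are illustrative assumptions, not code from this commit):

```python
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain

def build_qa_chain(llm, vector_db):
    # Same memory arguments as in the diff: history is stored under
    # "chat_history" and returned as message objects, not one flat string.
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        return_messages=True,
        output_key="answer",  # required when the chain also returns sources
    )
    # Assumed wiring (not shown in this hunk): retriever + LLM + memory.
    return ConversationalRetrievalChain.from_llm(
        llm,
        retriever=vector_db.as_retriever(),
        memory=memory,
        return_source_documents=True,
    )
```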
@@ -185,9 +185,11 @@ def demo():
     # qa_chain = gr.Variable()
 
     gr.Markdown(
-    """<center><h2>Document-based chatbot with memory</center></h2>
-    <h3>Ask any questions (and follow-up) about your PDF documents</h3>
-    <i>Note: this chatbot leverages LangChain for retrieval-augmented generation with memory.</i>
+    """<center><h2>PDF-based chatbot, powered by LangChain and open-source LLMs</center></h2>
+    <h3>Ask any questions about your PDF documents, along with follow-ups</h3>
+    <b>Note:</b> This AI assistant performs retrieval-augmented generation from your PDF documents. \
+    When generating answers, it takes past questions into account (via conversational memory), and points to specific document sources for clarity purposes</i>
+    <b>Warning:</b> This space uses the free CPU Basic hardware from Hugging Face. Some steps and LLM models used below (free inference endpoints) can take some time to generate a reply.
     """)
     with gr.Tab("Step 1 - Document pre-processing"):
         with gr.Row():
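For context, a minimal stand-alone sketch of the Gradio Blocks pattern this header lives in; only the <h2> line is taken from the new side of the diff, while the gr.Files widget and variable name are illustrative assumptions:

```python
import gradio as gr

with gr.Blocks() as demo:
    # Header text from the new side of the diff; layout below is simplified.
    gr.Markdown(
        """<center><h2>PDF-based chatbot, powered by LangChain and open-source LLMs</center></h2>"""
    )
    with gr.Tab("Step 1 - Document pre-processing"):
        with gr.Row():
            # Illustrative placeholder; the real app adds splitting,
            # embedding, and vector-database creation after upload.
            document = gr.Files(label="Upload your PDF documents", file_types=[".pdf"])

if __name__ == "__main__":
    demo.launch()
```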
@@ -206,9 +208,6 @@ def demo():
     db_btn = gr.Button("Generating vector database...")
 
     with gr.Tab("Step 2 - QA chain initialization"):
-        gr.Markdown(
-        """<b>Note:</b> This space uses the free CPU Basic hardware from Hugging Face. The LLM models used below (free inference endpoints) can take some time to generate a reply.
-        """)
         with gr.Row():
             llm_btn = gr.Radio(list_llm_simple, \
                 label="LLM models", value = list_llm_simple[0], type="index", info="Choose your LLM model")
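This hunk folds the per-tab hardware note into the global header added above; the surviving context shows the gr.Radio model selector created with type="index". A hedged sketch of that pattern (the model IDs, the pick_model helper, and the event wiring are assumptions for illustration, not code from this commit):

```python
import gradio as gr

# Hypothetical stand-ins for the app's paired model lists: full repo IDs
# plus the shortened display names shown in the Radio.
list_llm = ["mistralai/Mistral-7B-Instruct-v0.2", "HuggingFaceH4/zephyr-7b-beta"]
list_llm_simple = [name.split("/")[-1] for name in list_llm]

def pick_model(idx: int) -> str:
    # Because the Radio uses type="index", the handler receives the
    # position of the chosen label, which maps back onto list_llm.
    return f"Selected model: {list_llm[idx]}"

with gr.Blocks() as demo:
    llm_btn = gr.Radio(
        list_llm_simple,
        label="LLM models",
        value=list_llm_simple[0],
        type="index",
        info="Choose your LLM model",
    )
    out = gr.Textbox(label="Selection")
    llm_btn.change(pick_model, inputs=llm_btn, outputs=out)
```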
 