CCCDev committed on
Commit
8defb93
·
verified ·
1 Parent(s): e80e929

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -7
app.py CHANGED
@@ -22,8 +22,13 @@ import tqdm
22
  import accelerate
23
  import re
24
 
25
- list_llm = ["mistralai/Mistral-7B-Instruct-v0.2"]
26
- list_llm_simple = [os.path.basename(llm) for llm in list_llm]
 
 
 
 
 
27
  pdf_url = "https://huggingface.co/spaces/CCCDev/PDFChat/resolve/main/Privacy-Policy%20(1).pdf" # Replace with your static PDF URL or path
28
 
29
 
@@ -106,9 +111,8 @@ def initialize_database(pdf_url, chunk_size, chunk_overlap, progress=gr.Progress
106
  return vector_db, collection_name, "Complete!"
107
 
108
 
109
- def initialize_LLM(llm_option, llm_temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
110
- llm_name = list_llm[llm_option]
111
- qa_chain = initialize_llmchain(llm_name, llm_temperature, max_tokens, top_k, vector_db, progress)
112
  return qa_chain, "Complete!"
113
 
114
 
@@ -188,8 +192,8 @@ def demo():
188
  llm_progress = gr.Textbox(value="None", label="QA chain initialization")
189
 
190
  def auto_initialize():
191
- vector_db, collection_name, db_status = initialize_database(pdf_url, 1024, 24)
192
- qa_chain, llm_status = initialize_LLM(0, 0.1, 1024, 20, vector_db)
193
  return vector_db, collection_name, db_status, qa_chain, llm_status, "Initialization complete."
194
 
195
  demo.load(auto_initialize, [], [vector_db, collection_name, db_progress, qa_chain, llm_progress])
 
22
  import accelerate
23
  import re
24
 
25
# --- Static configuration for the PDF-chat demo ---------------------------
LLM_MODEL = "mistralai/Mistral-7B-Instruct-v0.2"  # HF model id used to build the QA chain
LLM_MAX_TOKEN = 512   # token budget passed to initialize_LLM at startup
DB_CHUNK_SIZE = 512   # chunk size passed to initialize_database
CHUNK_OVERLAP = 24    # chunk overlap passed to initialize_database
TEMPERATURE = 0.1     # sampling temperature passed to initialize_LLM
MAX_TOKENS = 512      # NOTE(review): duplicates LLM_MAX_TOKEN; kept for compatibility — confirm which one callers use
TOP_K = 20            # intended top-k retrieval setting
pdf_url = "https://huggingface.co/spaces/CCCDev/PDFChat/resolve/main/Privacy-Policy%20(1).pdf" # Replace with your static PDF URL or path
33
 
34
 
 
111
  return vector_db, collection_name, "Complete!"
112
 
113
 
114
def initialize_LLM(llm_temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
    """Build the QA chain for the module-level LLM_MODEL.

    Delegates to ``initialize_llmchain`` with the fixed model name and the
    given generation/retrieval settings, and returns the chain together with
    a status string for the UI textbox.
    """
    chain = initialize_llmchain(
        LLM_MODEL, llm_temperature, max_tokens, top_k, vector_db, progress
    )
    return chain, "Complete!"
117
 
118
 
 
192
  llm_progress = gr.Textbox(value="None", label="QA chain initialization")
193
 
194
def auto_initialize():
    """Initialize the vector DB from the static PDF, then the QA chain.

    Runs once on page load (wired via ``demo.load``) and returns the values
    that populate the corresponding Gradio state/progress components.
    """
    vector_db, collection_name, db_status = initialize_database(pdf_url, DB_CHUNK_SIZE, CHUNK_OVERLAP)
    # Fix: use the TOP_K constant (value 20) instead of a hard-coded 20, so the
    # retrieval setting defined at the top of the file is actually honored.
    qa_chain, llm_status = initialize_LLM(TEMPERATURE, LLM_MAX_TOKEN, TOP_K, vector_db)
    return vector_db, collection_name, db_status, qa_chain, llm_status, "Initialization complete."
198
 
199
  demo.load(auto_initialize, [], [vector_db, collection_name, db_progress, qa_chain, llm_progress])