Update app.py
app.py
CHANGED
@@ -1,7 +1,93 @@
def demo():
    with gr.Blocks(theme="base") as demo:
        vector_db = gr.State()
        qa_chain = gr.State()
        collection_name = gr.State()

        gr.Markdown(
        """<center><h2>PDF-based chatbot (powered by LangChain and open-source LLMs)</h2></center>
        <h3>Ask any questions about your PDF documents, along with follow-ups</h3>
        <b>Note:</b> This AI assistant performs retrieval-augmented generation from your PDF documents. \
        When generating answers, it takes past questions into account (via conversational memory), and includes document references for clarity purposes.
        <br><b>Warning:</b> This space uses the free CPU Basic hardware from Hugging Face. Some steps and LLM models used below (free inference endpoints) can take some time to generate an output.<br>
        """)
        with gr.Tab("Step 1 - Document pre-processing"):
            with gr.Row():
                document = gr.Files(height=100, file_count="multiple", file_types=["pdf"], interactive=True, label="Upload your PDF documents (single or multiple)")
                # upload_btn = gr.UploadButton("Loading document...", height=100, file_count="multiple", file_types=["pdf"], scale=1)
            with gr.Row():
                db_btn = gr.Radio(["ChromaDB"], label="Vector database type", value="ChromaDB", type="index", info="Choose your vector database")
            with gr.Accordion("Advanced options - Document text splitter", open=False):
                with gr.Row():
                    slider_chunk_size = gr.Slider(minimum=100, maximum=1000, value=600, step=20, label="Chunk size", info="Chunk size", interactive=True)
                with gr.Row():
                    slider_chunk_overlap = gr.Slider(minimum=10, maximum=200, value=40, step=10, label="Chunk overlap", info="Chunk overlap", interactive=True)
            with gr.Row():
                db_progress = gr.Textbox(label="Vector database initialization", value="None")
            with gr.Row():
                db_btn = gr.Button("Generate vector database...")

        with gr.Tab("Step 2 - QA chain initialization"):
            with gr.Row():
                llm_btn = gr.Radio(list_llm_simple,
                    label="LLM models", value=list_llm_simple[0], type="index", info="Choose your LLM model")
            with gr.Accordion("Advanced options - LLM model", open=False):
                with gr.Row():
                    slider_temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.7, step=0.1, label="Temperature", info="Model temperature", interactive=True)
                with gr.Row():
                    slider_maxtokens = gr.Slider(minimum=224, maximum=4096, value=1024, step=32, label="Max Tokens", info="Model max tokens", interactive=True)
                with gr.Row():
                    slider_topk = gr.Slider(minimum=1, maximum=10, value=3, step=1, label="top-k samples", info="Model top-k samples", interactive=True)
            with gr.Row():
                llm_progress = gr.Textbox(value="None", label="QA chain initialization")
            with gr.Row():
                qachain_btn = gr.Button("Initialize question-answering chain...")

        with gr.Tab("Step 3 - Conversation with chatbot"):
            chatbot = gr.Chatbot(height=300)
            with gr.Accordion("Advanced - Document references", open=False):
                with gr.Row():
                    doc_source1 = gr.Textbox(label="Reference 1", lines=2, container=True, scale=20)
                    source1_page = gr.Number(label="Page", scale=1)
                with gr.Row():
                    doc_source2 = gr.Textbox(label="Reference 2", lines=2, container=True, scale=20)
                    source2_page = gr.Number(label="Page", scale=1)
                with gr.Row():
                    doc_source3 = gr.Textbox(label="Reference 3", lines=2, container=True, scale=20)
                    source3_page = gr.Number(label="Page", scale=1)
            with gr.Row():
                msg = gr.Textbox(placeholder="Type message", container=True)
            with gr.Row():
                submit_btn = gr.Button("Submit")
                clear_btn = gr.ClearButton([msg, chatbot])

        # Preprocessing events
        # upload_btn.upload(upload_file, inputs=[upload_btn], outputs=[document])
        db_btn.click(initialize_database,
            inputs=[document, slider_chunk_size, slider_chunk_overlap],
            outputs=[vector_db, collection_name, db_progress])
        qachain_btn.click(initialize_LLM,
            inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk, vector_db],
            outputs=[qa_chain, llm_progress]).then(lambda: [None, "", 0, "", 0, "", 0],
            inputs=None,
            outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
            queue=False)

        # Chatbot events
        msg.submit(conversation,
            inputs=[qa_chain, msg, chatbot],
            outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
            queue=False)
        submit_btn.click(conversation,
            inputs=[qa_chain, msg, chatbot],
            outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
            queue=False)
        clear_btn.click(lambda: [None, "", 0, "", 0, "", 0],
            inputs=None,
            outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
            queue=False)
    demo.queue().launch(debug=True)


if __name__ == "__main__":
    demo()
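The handlers wired above (initialize_database, initialize_LLM, conversation) and the list_llm_simple model list are defined elsewhere in app.py and are not part of this hunk. The sketch below shows one way such helpers are commonly written with LangChain, Chroma and Hugging Face inference endpoints; the imports, model name, and return shapes are illustrative assumptions matched to the Gradio outputs in demo(), not the Space's actual implementation, and a Hugging Face API token is assumed to be available in the environment.

# Hypothetical sketch of the helpers referenced by demo(); not the Space's actual code.
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.llms import HuggingFaceEndpoint
from langchain_community.vectorstores import Chroma
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain.text_splitter import RecursiveCharacterTextSplitter

# Assumed model list; the real app defines its own list_llm / list_llm_simple.
list_llm = ["mistralai/Mistral-7B-Instruct-v0.2"]
list_llm_simple = [name.split("/")[-1] for name in list_llm]


def initialize_database(file_objs, chunk_size, chunk_overlap):
    """Load the uploaded PDFs, split them, and index the chunks in Chroma."""
    docs = []
    for file_obj in file_objs:
        # gr.Files may hand back plain paths or tempfile wrappers depending on Gradio version
        path = file_obj if isinstance(file_obj, str) else file_obj.name
        docs.extend(PyPDFLoader(path).load())
    splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
    splits = splitter.split_documents(docs)
    vector_db = Chroma.from_documents(documents=splits, embedding=HuggingFaceEmbeddings(),
                                      collection_name="pdf_chat")
    # Matches outputs=[vector_db, collection_name, db_progress] in demo()
    return vector_db, "pdf_chat", "Complete!"


def initialize_LLM(llm_index, temperature, max_tokens, top_k, vector_db):
    """Build a ConversationalRetrievalChain on top of a Hugging Face inference endpoint."""
    llm = HuggingFaceEndpoint(repo_id=list_llm[llm_index], temperature=temperature,
                              max_new_tokens=max_tokens, top_k=top_k)
    memory = ConversationBufferMemory(memory_key="chat_history", output_key="answer",
                                      return_messages=True)
    qa_chain = ConversationalRetrievalChain.from_llm(
        llm=llm, retriever=vector_db.as_retriever(), memory=memory,
        return_source_documents=True)
    # Matches outputs=[qa_chain, llm_progress] in demo()
    return qa_chain, "Complete!"


def conversation(qa_chain, message, history):
    """Answer one user turn and surface the top three source chunks with page numbers."""
    # The chain's memory tracks past turns; `history` is only used to update the Chatbot display.
    result = qa_chain.invoke({"question": message})
    answer, sources = result["answer"], result["source_documents"][:3]
    refs = [(doc.page_content.strip(), doc.metadata.get("page", 0) + 1) for doc in sources]
    while len(refs) < 3:  # pad so the returned tuple always matches the 9 outputs
        refs.append(("", 0))
    new_history = history + [(message, answer)]
    # Matches outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, ...] in demo()
    return (qa_chain, "", new_history,
            refs[0][0], refs[0][1], refs[1][0], refs[1][1], refs[2][0], refs[2][1])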