Upload folder using huggingface_hub
- README +0 -0
- README.md +2 -8
- __pycache__/gradio_app.cpython-38.pyc +0 -0
- __pycache__/gradio_llm_example.cpython-38.pyc +0 -0
- __pycache__/test_gradio.cpython-38.pyc +0 -0
- flagged/log.csv +2 -0
- gradio_app.py +205 -0
- gradio_cached_examples/16/component 0/tmp95ia8keq.json +1 -0
- gradio_cached_examples/16/component 0/tmpf1gyji4c.json +1 -0
- gradio_cached_examples/16/component 0/tmpleed6aum.json +1 -0
- gradio_cached_examples/16/log.csv +4 -0
- gradio_cached_examples/35/component 0/tmphlap_ssj.json +1 -0
- gradio_cached_examples/35/component 0/tmpqmrmr545.json +1 -0
- gradio_cached_examples/35/component 0/tmpu4y3h8za.json +1 -0
- gradio_cached_examples/35/log.csv +4 -0
- gradio_cached_examples/41/component 0/tmp1gnhmzn0.json +1 -0
- gradio_cached_examples/41/component 0/tmp9r3rafm7.json +1 -0
- gradio_cached_examples/41/component 0/tmphbjuw0z9.json +1 -0
- gradio_cached_examples/41/log.csv +4 -0
- gradio_cached_examples/54/component 0/tmpaz_c2ond.json +1 -0
- gradio_cached_examples/54/component 0/tmpnl_8qi5t.json +1 -0
- gradio_cached_examples/54/component 0/tmpo6iaiydn.json +1 -0
- gradio_cached_examples/54/log.csv +4 -0
- gradio_cached_examples/60/component 0/tmp8z177n1e.json +1 -0
- gradio_cached_examples/60/component 0/tmpcjfuu9nz.json +1 -0
- gradio_cached_examples/60/component 0/tmpzem5dzus.json +1 -0
- gradio_cached_examples/60/log.csv +4 -0
- gradio_cached_examples/79/component 0/tmp00bsbvnd.json +1 -0
- gradio_cached_examples/79/component 0/tmp3b5s5lev.json +1 -0
- gradio_cached_examples/79/component 0/tmpmasy_kj7.json +1 -0
- gradio_cached_examples/79/log.csv +4 -0
- gradio_llm_example.py +162 -0
- logo_neovision.png +0 -0
- streamlit_app.py +170 -0
- temp/aze.pdf +0 -0
- temp/document-1.pdf +0 -0
- temp/document.pdf +0 -0
- temp/erty.pdf +0 -0
- test_gradio.py +188 -0
- test_streamlit.py +10 -0
README ADDED
File without changes
README.md CHANGED
@@ -1,12 +1,6 @@
 ---
 title: Gradio
-
-colorFrom: yellow
-colorTo: indigo
+app_file: gradio_llm_example.py
 sdk: gradio
-sdk_version: 3.46.
-app_file: app.py
-pinned: false
+sdk_version: 3.46.0
 ---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
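The net effect of this README change is to repoint the Space's entry point from app.py to gradio_llm_example.py and to pin the SDK version. For reference, a sketch of the resulting front matter as one block, in its native YAML form (only the keys visible in the diff above are confirmed; the annotations are editorial):

```yaml
---
title: Gradio
app_file: gradio_llm_example.py   # entry point the Space now launches
sdk: gradio
sdk_version: 3.46.0
# colorFrom / colorTo / pinned / the docs link were dropped in this commit
---
```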
__pycache__/gradio_app.cpython-38.pyc ADDED
Binary file (5.46 kB)

__pycache__/gradio_llm_example.cpython-38.pyc ADDED
Binary file (4.87 kB)

__pycache__/test_gradio.cpython-38.pyc ADDED
Binary file (1.45 kB)
flagged/log.csv ADDED
@@ -0,0 +1,2 @@
input_file,Error,flag,username,timestamp
,Error occurred while writing the file: 'Textbox' object has no attribute 'configure',,,2023-10-04 11:29:37.852423
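The flagged error above ('Textbox' object has no attribute 'configure') appears to have been hit when a handler tried to update a Textbox by returning a freshly constructed component, which is what gradio_app.py's file_upload does below. A minimal sketch of the gradio 3.x idiom that avoids this, assuming sdk_version 3.46.0 as pinned in the README (gr.update is the documented way to change component properties from a handler in 3.x):

```python
import gradio as gr

def file_upload(input_file):
    # Return property updates instead of a new gr.Textbox instance.
    if input_file is None:
        return gr.update(value="No file uploaded.", visible=True)
    return gr.update(value="File uploaded successfully.", visible=True)

with gr.Blocks() as demo:
    status_box = gr.Textbox(label="Status", visible=False)
    upload_button = gr.UploadButton("Upload a PDF", file_types=[".pdf"])
    upload_button.upload(file_upload, upload_button, status_box)

demo.launch()
```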
gradio_app.py ADDED
@@ -0,0 +1,205 @@
import gradio as gr
import numpy as np
import random
# NOTE: load_docs/setup_dbqa below depend on these commented-out imports;
# they must be restored before qa_bot can run.
# import torch
# from langchain import HuggingFacePipeline
# from langchain.chains import LLMChain, RetrievalQA
# from langchain.document_loaders import (
#     DirectoryLoader,
#     PyPDFLoader,
#     TextLoader,
#     UnstructuredPDFLoader,
# )
# from langchain.embeddings import HuggingFaceEmbeddings, LlamaCppEmbeddings
# from langchain.llms import LlamaCpp
# from langchain.prompts import PromptTemplate
# from langchain.text_splitter import (
#     CharacterTextSplitter,
#     RecursiveCharacterTextSplitter,
# )
# from langchain.vectorstores import Chroma
# from PIL import Image


def file_upload(input_file):
    # Process the uploaded file
    if input_file is not None:
        # Save the uploaded file or perform any desired operations
        # NOTE: error_box is defined in the (currently commented-out) layout below.
        file_path = "/tmp/file.pdf"
        content = input_file.read()
        try:
            with open(file_path, 'wb') as file:
                file.write(content)
            return {error_box: gr.Textbox(label="Completed",
                                          value=f"File uploaded successfully in {file_path}.",
                                          visible=True)}
        except Exception as e:
            return {error_box: gr.Textbox(label="Error",
                                          value=f"Error occurred while writing the file: {e}",
                                          visible=True)}


def respond(message, chat_history):
    # No LLM here, just respond with a random pre-made message
    bot_message = random.choice(["Tell me more about it",
                                 "Cool, but I'm not interested",
                                 "Hmmmm, ok then"])
    chat_history.append((message, bot_message))
    return "", chat_history


# Gradio interface
def qa_bot(pdf_file, question):
    texts = load_docs(pdf_file)
    model = setup_dbqa(texts)
    answer = model({'query': question})
    return f"Question: {answer['query']}\nAnswer: {answer['result']}\nSource documents: {answer['source_documents']}"


# Helper function to load documents from PDF files
def load_docs(file_path):
    loader = DirectoryLoader(file_path,
                             glob="*.pdf",
                             loader_cls=UnstructuredPDFLoader)
    documents = loader.load()

    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000,
                                                   chunk_overlap=0,
                                                   length_function=len)
    texts = text_splitter.split_documents(documents)
    return texts


# Helper function to set up the question-answering model
def setup_dbqa(texts):
    print("Setting up DBQA ...")
    llm = HuggingFacePipeline.from_model_id(
        model_id="NousResearch/Llama-2-13b-chat-hf",
        task="text-generation",
        model_kwargs={"max_length": 1500, "load_in_8bit": True},
    )

    embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2',
                                       model_kwargs={'device': 'cpu'})

    vectorstore = Chroma.from_documents(texts, embeddings, persist_directory="vectorstore")

    prompt = set_qa_prompt()

    return build_retrieval_qa(llm, prompt, vectorstore)


def set_qa_prompt():
    # Set prompt template
    prompt_template = """<s>[INST] <<SYS>> Use the following pieces of context closed between $ to answer the question closed between |. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
${context}$ <</SYS>>
Question: |{question}|
Answer:[/INST]</s>"""
    prompt = PromptTemplate(
        template=prompt_template, input_variables=["context", "question"]
    )
    return prompt


# Build RetrievalQA object
def build_retrieval_qa(_llm, _prompt, _vectorstore):
    dbqa = RetrievalQA.from_chain_type(llm=_llm,
                                       chain_type='stuff',
                                       retriever=_vectorstore.as_retriever(search_kwargs={'k': 3}),
                                       return_source_documents=True,
                                       chain_type_kwargs={'prompt': _prompt})
    return dbqa


if __name__ == "__main__":
    # How to RUN code ==> gradio gradio_app.py
    gr.themes.builder()

    # # Define text and title information
    # title1 = "## QA App"
    # title2 = "## Gradio QA Bot"
    #
    # intro = """
    # Welcome! This is not just any bot, it's a special one equipped with state-of-the-art natural language processing capabilities, and ready to answer your queries.
    #
    # Ready to explore? Let's get started!
    #
    # * Step 1: Upload a PDF document.
    # * Step 2: Type in a question related to your document's content.
    # * Step 3: Get your answer!
    #
    # Push clear cache before uploading a new doc!
    # """
    #
    # about = """
    # ## About
    # This app is an LLM-powered chatbot built using:
    # - [Streamlit](<https://streamlit.io/>)
    # - [HugChat](<https://github.com/Soulter/hugging-chat-api>)
    # - Chat Model = llama2-chat-hf 7B
    # - Retriever model = all-MiniLM-L6-v2
    #
    # 💡 Note: No API key required!
    # """
    #
    # # Define theme ==> see gr.themes.builder()
    # theme = gr.themes.Soft(
    #     primary_hue="green",
    #     secondary_hue="blue",
    #     neutral_hue="indigo"
    # ).set(
    #     background_fill_primary='*primary_50',
    #     shadow_drop='*shadow_spread',
    #     button_border_width='*block_border_width',
    #     button_border_width_dark='*block_label_border_width'
    # )
    #
    # with gr.Blocks(theme=theme) as demo:
    #     with gr.Row():
    #         with gr.Column(scale=2, min_width=400):
    #             title1_gr = gr.Markdown(title1)
    #             intro_gr = gr.Markdown(intro)
    #             # Create a Gradio interface with a file upload input
    #             error_box = gr.Textbox(label="Error", visible=False)
    #             # upload_button = gr.Interface(fn=file_upload,
    #             #                              inputs=gr.File(),
    #             #                              outputs=error_box,
    #             #                              description="Drag and drop your document here")
    #             upload_button = gr.UploadButton("Drag and drop your document here",
    #                                             size="lg", scale=3, min_width=240,
    #                                             file_types=["pdf"])
    #             upload_button.upload(file_upload, upload_button, error_box)
    #
    #         with gr.Column(scale=2, min_width=800):
    #             title2_gr = gr.Markdown(title2)
    #             chatbot = gr.Chatbot(label="Bot", height=500)
    #             msg = gr.Textbox(label="User", placeholder="Ask a question about the uploaded PDF document.")
    #             chatbot_btn = gr.Button("Submit")
    #             clear = gr.ClearButton(components=[msg, chatbot], value="Clear console")
    #             chatbot_btn.click(respond, inputs=[msg, chatbot], outputs=[msg, chatbot])
    #
    #         with gr.Column(scale=3, min_width=600):
    #             with gr.Row():
    #                 about_gr = gr.Markdown(about)
    #                 logo_gr = gr.Markdown(""" </br> </br>
    #                     <img src="file/logo_neovision.png" alt="logo" style="width:600px;"/>""")
    #             # gr.Image("./logo_neovision.png")
    #
    # gr.close_all()
    # demo.launch(share=True, enable_queue=True)
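As committed, gradio_app.py only runs gr.themes.builder(); the Blocks layout is commented out and the langchain imports its QA helpers depend on are disabled. A minimal sketch of wiring the two working pieces (an upload handler and the placeholder respond) into a running app, assuming gradio 3.x; the handler bodies here are simplified stand-ins, not the file's exact code:

```python
import random
import gradio as gr

def report_upload(input_file):
    # Simplified stand-in for gradio_app.file_upload: just report status.
    if input_file is None:
        return gr.update(value="No file uploaded.", visible=True)
    return gr.update(value=f"Received {input_file.name}.", visible=True)

def respond(message, chat_history):
    # Same placeholder logic as gradio_app.respond: no LLM, canned replies.
    bot_message = random.choice(["Tell me more about it",
                                 "Cool, but I'm not interested",
                                 "Hmmmm, ok then"])
    chat_history.append((message, bot_message))
    return "", chat_history

with gr.Blocks() as demo:
    status_box = gr.Textbox(label="Status", visible=False)
    upload_button = gr.UploadButton("Drag and drop your document here",
                                    file_types=[".pdf"])
    chatbot = gr.Chatbot(label="Bot", height=500)
    msg = gr.Textbox(label="User",
                     placeholder="Ask a question about the uploaded PDF document.")

    upload_button.upload(report_upload, upload_button, status_box)
    msg.submit(respond, inputs=[msg, chatbot], outputs=[msg, chatbot])

demo.launch()
```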
gradio_cached_examples/16/component 0/tmp95ia8keq.json ADDED
@@ -0,0 +1 @@
[["Am I cool?", "Yes"]]

gradio_cached_examples/16/component 0/tmpf1gyji4c.json ADDED
@@ -0,0 +1 @@
[["Are tomatoes vegetables?", "Yes"]]

gradio_cached_examples/16/component 0/tmpleed6aum.json ADDED
@@ -0,0 +1 @@
[["Hello", "Ask me anything!"]]

gradio_cached_examples/16/log.csv ADDED
@@ -0,0 +1,4 @@
component 0,flag,username,timestamp
/home/tamara/Documents/Gradio/gradio_cached_examples/16/component 0/tmpleed6aum.json,,,2023-10-03 13:53:06.546567
/home/tamara/Documents/Gradio/gradio_cached_examples/16/component 0/tmp95ia8keq.json,,,2023-10-03 13:53:06.547644
/home/tamara/Documents/Gradio/gradio_cached_examples/16/component 0/tmpf1gyji4c.json,,,2023-10-03 13:53:06.548535

gradio_cached_examples/35/component 0/tmphlap_ssj.json ADDED
@@ -0,0 +1 @@
[["Are tomatoes vegetables?", "Yes"]]

gradio_cached_examples/35/component 0/tmpqmrmr545.json ADDED
@@ -0,0 +1 @@
[["Hello", "Ask me anything!"]]

gradio_cached_examples/35/component 0/tmpu4y3h8za.json ADDED
@@ -0,0 +1 @@
[["Am I cool?", "Yes"]]

gradio_cached_examples/35/log.csv ADDED
@@ -0,0 +1,4 @@
component 0,flag,username,timestamp
/home/tamara/Documents/Gradio/gradio_cached_examples/35/component 0/tmpqmrmr545.json,,,2023-10-03 13:52:57.596267
/home/tamara/Documents/Gradio/gradio_cached_examples/35/component 0/tmpu4y3h8za.json,,,2023-10-03 13:52:57.596899
/home/tamara/Documents/Gradio/gradio_cached_examples/35/component 0/tmphlap_ssj.json,,,2023-10-03 13:52:57.597399

gradio_cached_examples/41/component 0/tmp1gnhmzn0.json ADDED
@@ -0,0 +1 @@
[["Are tomatoes vegetables?", "Yes"]]

gradio_cached_examples/41/component 0/tmp9r3rafm7.json ADDED
@@ -0,0 +1 @@
[["Am I cool?", "Yes"]]

gradio_cached_examples/41/component 0/tmphbjuw0z9.json ADDED
@@ -0,0 +1 @@
[["Hello", "Ask me anything!"]]

gradio_cached_examples/41/log.csv ADDED
@@ -0,0 +1,4 @@
component 0,flag,username,timestamp
/home/tamara/Documents/Gradio/gradio_cached_examples/41/component 0/tmphbjuw0z9.json,,,2023-10-03 14:12:40.387069
/home/tamara/Documents/Gradio/gradio_cached_examples/41/component 0/tmp9r3rafm7.json,,,2023-10-03 14:12:40.388121
/home/tamara/Documents/Gradio/gradio_cached_examples/41/component 0/tmp1gnhmzn0.json,,,2023-10-03 14:12:40.388983

gradio_cached_examples/54/component 0/tmpaz_c2ond.json ADDED
@@ -0,0 +1 @@
[["Hello", "Ask me anything!"]]

gradio_cached_examples/54/component 0/tmpnl_8qi5t.json ADDED
@@ -0,0 +1 @@
[["Am I cool?", "Yes"]]

gradio_cached_examples/54/component 0/tmpo6iaiydn.json ADDED
@@ -0,0 +1 @@
[["Are tomatoes vegetables?", "Yes"]]

gradio_cached_examples/54/log.csv ADDED
@@ -0,0 +1,4 @@
component 0,flag,username,timestamp
/home/tamara/Documents/Gradio/gradio_cached_examples/54/component 0/tmpaz_c2ond.json,,,2023-10-03 13:52:57.635762
/home/tamara/Documents/Gradio/gradio_cached_examples/54/component 0/tmpnl_8qi5t.json,,,2023-10-03 13:52:57.636386
/home/tamara/Documents/Gradio/gradio_cached_examples/54/component 0/tmpo6iaiydn.json,,,2023-10-03 13:52:57.636821

gradio_cached_examples/60/component 0/tmp8z177n1e.json ADDED
@@ -0,0 +1 @@
[["Am I cool?", "Yes"]]

gradio_cached_examples/60/component 0/tmpcjfuu9nz.json ADDED
@@ -0,0 +1 @@
[["Are tomatoes vegetables?", "Yes"]]

gradio_cached_examples/60/component 0/tmpzem5dzus.json ADDED
@@ -0,0 +1 @@
[["Hello", "Ask me anything!"]]

gradio_cached_examples/60/log.csv ADDED
@@ -0,0 +1,4 @@
component 0,flag,username,timestamp
/home/tamara/Documents/Gradio/gradio_cached_examples/60/component 0/tmpzem5dzus.json,,,2023-10-03 14:12:40.417087
/home/tamara/Documents/Gradio/gradio_cached_examples/60/component 0/tmp8z177n1e.json,,,2023-10-03 14:12:40.418079
/home/tamara/Documents/Gradio/gradio_cached_examples/60/component 0/tmpcjfuu9nz.json,,,2023-10-03 14:12:40.418965

gradio_cached_examples/79/component 0/tmp00bsbvnd.json ADDED
@@ -0,0 +1 @@
[["Hello", "Ask me anything!"]]

gradio_cached_examples/79/component 0/tmp3b5s5lev.json ADDED
@@ -0,0 +1 @@
[["Am I cool?", "Yes"]]

gradio_cached_examples/79/component 0/tmpmasy_kj7.json ADDED
@@ -0,0 +1 @@
[["Are tomatoes vegetables?", "Yes"]]

gradio_cached_examples/79/log.csv ADDED
@@ -0,0 +1,4 @@
component 0,flag,username,timestamp
/home/tamara/Documents/Gradio/gradio_cached_examples/79/component 0/tmp00bsbvnd.json,,,2023-10-03 14:12:43.671670
/home/tamara/Documents/Gradio/gradio_cached_examples/79/component 0/tmp3b5s5lev.json,,,2023-10-03 14:12:43.672661
/home/tamara/Documents/Gradio/gradio_cached_examples/79/component 0/tmpmasy_kj7.json,,,2023-10-03 14:12:43.673551
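The gradio_cached_examples/ trees above are generated artifacts rather than hand-written files: launching an interface with cache_examples=True runs the function on every example at startup and stores each result as JSON plus a log.csv index (hence the absolute /home/tamara/... paths). A minimal sketch that reproduces outputs like [["Am I cool?", "Yes"]], using the yes_man function that appears later in this commit in test_gradio.py:

```python
import gradio as gr

def yes_man(message, history):
    # Answers "Yes" to anything that looks like a question.
    if message.endswith("?"):
        return "Yes"
    return "Ask me anything!"

gr.ChatInterface(
    yes_man,
    examples=["Hello", "Am I cool?", "Are tomatoes vegetables?"],
    cache_examples=True,  # runs yes_man on each example at launch and caches the results
).launch()
```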
gradio_llm_example.py ADDED
@@ -0,0 +1,162 @@
import gradio as gr
import numpy as np
import random
import time
import os
import shutil
import codecs

# How to RUN code ==> gradio gradio_llm_example.py


# Define text and title information
title1 = "## </br> </br> </br> 🤗💬 QA App"

title2 = "## </br> </br> </br> Gradio QA Bot"

intro = """ Welcome! This is not just any bot, it's a special one equipped with state-of-the-art natural language processing capabilities, and ready to answer your queries.

Ready to explore? Let's get started!

* Step 1: Upload a PDF document.
* Step 2: Type in a question related to your document's content.
* Step 3: Get your answer!

Push clear cache before uploading a new doc!
"""

about = """
## </br> About
This app is an LLM-powered chatbot built using:
- [Streamlit](<https://streamlit.io/>)
- [HugChat](<https://github.com/Soulter/hugging-chat-api>)
- Chat Model = llama2-chat-hf 7B
- Retriever model = all-MiniLM-L6-v2

</br>
💡 Note: No API key required!

</br>
Made with ❤️ by us
"""


# Define theme ==> see gr.themes.builder()
# NOTE: this theme is defined but unused; the layout below passes gr.themes.Soft() directly.
theme = gr.themes.Soft(
    primary_hue="emerald",
    secondary_hue="emerald",
    neutral_hue="slate",
).set(
    body_background_fill_dark='*primary_50',
    shadow_drop='*shadow_spread',
    button_border_width='*block_border_width',
    button_border_width_dark='*block_label_border_width'
)


def upload_file(files_obj):
    """ Upload several files from drag and drop, and save them in a local temp folder.
    files_obj (type: list): list of tempfile._TemporaryFileWrapper
    Returns a radio component listing the uploaded documents. """
    # Create local copy
    temp_file_path = "./temp"
    if not os.path.exists(temp_file_path):
        os.makedirs(temp_file_path)
    # Save each file in the given list of files
    file_name_list = list()
    for file_obj in files_obj:
        file_name = os.path.basename(file_obj.name)
        file_name_list.append(file_name)
        shutil.copyfile(file_obj.name, os.path.join(temp_file_path, file_name))
    return {uploaded_check: gr.Radio(choices=file_name_list, visible=True),
            choose_btn: gr.Button(value="Choose", visible=True)}


def read_content(file_name):
    print(file_name, type(file_name))
    temp_file_path = "./temp"
    file_path = os.path.join(temp_file_path, file_name)
    with open(file_path, "rb") as file:
        try:
            content = file.read()
            print(content)
            print(codecs.decode(content, 'utf-8'))
            return {error_box: gr.Textbox(value="File ready to be used. \nYou can ask a question about the uploaded PDF document.", visible=True)}
        except Exception as e:
            print(f"Error occurred while reading the file: {e}")
            return {error_box: gr.Textbox(value=f"Error occurred while reading the file: {e}", visible=True)}


def respond(message, chat_history,
            language_choice, max_length, temperature,
            num_return_sequences, top_p, no_repeat_ngram_size):
    # No LLM here, just respond with a random pre-made message
    # NOTE: `content` is the module-level variable initialized in the layout below;
    # read_content never reassigns it, so it stays empty at runtime.
    if content == "":
        bot_message = f"I have {max_length} " + random.choice(["Tell me more about it",
                                                               "Cool, but I'm not interested",
                                                               "Hmmmm, ok then"])
    else:
        bot_message = f"I need a model to read {content[:3]}"
    chat_history.append((message, bot_message))
    return "", chat_history


# Layout
with gr.Blocks(theme=gr.themes.Soft()) as gradioApp:
    with gr.Row():
        with gr.Column(scale=1, min_width=100):
            logo_gr = gr.Markdown(""" <img src="file/logo_neovision.png" alt="logo" style="width:400px;"/>""")
            # gr.Image("./logo_neovision.png")
            about_gr = gr.Markdown(about)

        with gr.Column(scale=2, min_width=500):
            title1_gr = gr.Markdown(title1)
            intro_gr = gr.Markdown(intro)

            # Upload several documents
            content = ""
            upload_button = gr.UploadButton("Browse files", label="Drag and drop your documents here",
                                            size="lg", scale=0, min_width=100,
                                            file_types=["pdf"], file_count="multiple")
            uploaded_check = gr.Radio(label="Uploaded documents", visible=False,
                                      info="Do you want to use a supporting document?")
            choose_btn = gr.Button(value="Choose", visible=False)
            upload_button.upload(upload_file, upload_button, [uploaded_check, choose_btn])

            # Read only one document
            error_box = gr.Textbox(label="Reading files... ", visible=False)
            choose_btn.click(read_content, inputs=uploaded_check, outputs=error_box)

            # Select advanced options
            gr.Markdown(""" ## Toolbox """)
            with gr.Accordion(label="Select advanced options", open=False):
                language_choice = gr.Dropdown(["English", "French"], label="Language", info="Choose your language")
                max_length = gr.Slider(label="Token length", minimum=1, maximum=100, value=50, step=1)
                temperature = gr.Slider(label="Temperature", minimum=0.1, maximum=1, value=0.8, step=0.1)
                num_return_sequences = gr.Slider(label="Number of return sequences", minimum=1, maximum=50, value=1, step=1)
                top_p = gr.Slider(label="Top p", minimum=0.1, maximum=1, value=0.8, step=0.1)
                no_repeat_ngram_size = gr.Slider(label="No-repeat n-gram size", minimum=1, maximum=10, value=3, step=1)

        # Chat
        with gr.Column(scale=2, min_width=600):
            title2_gr = gr.Markdown(title2)
            chatbot = gr.Chatbot(label="Bot", height=500)
            msg = gr.Textbox(label="User", placeholder="Ask any question.")
            chatbot_btn = gr.Button("Submit")
            chatbot_btn.click(respond, inputs=[msg, chatbot,
                                               language_choice, max_length, temperature,
                                               num_return_sequences, top_p, no_repeat_ngram_size],
                              outputs=[msg, chatbot])
            clear = gr.ClearButton(components=[msg, chatbot], value="Clear console")


gr.close_all()
gradioApp.launch(share=True, enable_queue=True)
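One weak point in gradio_llm_example.py above: respond reads the module-level content variable, but read_content never assigns it, so the document branch can never trigger once the app is running. A sketch of threading the selected document's text through gr.State instead (assuming gradio 3.x; the component names mirror the file, the handler bodies are simplified):

```python
import os
import gradio as gr

TEMP_DIR = "./temp"

def read_content(file_name):
    # Return the document bytes decoded to text; gradio stores it in the State.
    with open(os.path.join(TEMP_DIR, file_name), "rb") as f:
        return f.read().decode("utf-8", errors="ignore")

def respond(message, chat_history, doc_text):
    if doc_text:
        bot_message = f"I need a model to read: {doc_text[:30]}..."
    else:
        bot_message = "No document selected yet."
    chat_history.append((message, bot_message))
    return "", chat_history

with gr.Blocks() as demo:
    doc_state = gr.State("")  # holds the chosen document's text between events
    uploaded_check = gr.Radio(label="Uploaded documents", choices=[])
    choose_btn = gr.Button("Choose")
    chatbot = gr.Chatbot(label="Bot")
    msg = gr.Textbox(label="User")

    choose_btn.click(read_content, inputs=uploaded_check, outputs=doc_state)
    msg.submit(respond, inputs=[msg, chatbot, doc_state], outputs=[msg, chatbot])

demo.launch()
```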
logo_neovision.png ADDED
Binary file
streamlit_app.py ADDED
@@ -0,0 +1,170 @@
import streamlit as st

import torch
from langchain import HuggingFacePipeline
from langchain.chains import LLMChain, RetrievalQA
from langchain.document_loaders import (
    DirectoryLoader,
    PyPDFLoader,
    TextLoader,
    UnstructuredPDFLoader,
)
from langchain.embeddings import HuggingFaceEmbeddings, LlamaCppEmbeddings
from langchain.llms import LlamaCpp
from langchain.prompts import PromptTemplate
from langchain.text_splitter import (
    CharacterTextSplitter,
    RecursiveCharacterTextSplitter,
)
from langchain.vectorstores import Chroma
from PIL import Image
from streamlit_extras.add_vertical_space import add_vertical_space

st.set_page_config(page_title="Welcome to our AI Question Answering Bot")

with st.sidebar:
    st.title('🤗💬 QA App')
    st.markdown('''
    ## About
    This app is an LLM-powered chatbot built using:
    - [Streamlit](<https://streamlit.io/>)
    - [HugChat](<https://github.com/Soulter/hugging-chat-api>)
    - Chat Model = llama2-chat-hf 7B
    - Retriever model = all-MiniLM-L6-v2

    💡 Note: No API key required!
    ''')
    add_vertical_space(5)
    st.write('Made with ❤️ by us')

    # logo = Image.open('logo.png')
    # st.image(logo, use_column_width=True)


# Introduction
st.markdown("""
Welcome! This is not just any bot, it's a special one equipped with state-of-the-art natural language processing capabilities, and ready to answer your queries.

Ready to explore? Let's get started!

* Step 1: Upload a PDF document.
* Step 2: Type in a question related to your document's content.
* Step 3: Get your answer!

Push clear cache before uploading a new doc!
""")


def write_text_file(content, file_path):
    try:
        with open(file_path, 'wb') as file:
            file.write(content)
        return True
    except Exception as e:
        print(f"Error occurred while writing the file: {e}")
        return False


# Wrap the prompt template in a PromptTemplate object
def set_qa_prompt():
    # Set prompt template
    prompt_template = """<s>[INST] <<SYS>> Use the following pieces of context closed between $ to answer the question closed between |. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
${context}$ <</SYS>>
Question: |{question}|
Answer:[/INST]</s>"""
    prompt = PromptTemplate(
        template=prompt_template, input_variables=["context", "question"]
    )
    return prompt


# Build RetrievalQA object
def build_retrieval_qa(_llm, _prompt, _vectorstore):
    dbqa = RetrievalQA.from_chain_type(llm=_llm,
                                       chain_type='stuff',
                                       retriever=_vectorstore.as_retriever(search_kwargs={'k': 3}),
                                       return_source_documents=True,
                                       chain_type_kwargs={'prompt': _prompt})
    return dbqa


# Instantiate QA object
# @st.cache_resource()
@st.cache(allow_output_mutation=True)
def setup_dbqa(_texts):
    print("setup_dbqa ...")
    llm = HuggingFacePipeline.from_model_id(
        model_id="NousResearch/Llama-2-13b-chat-hf",
        task="text-generation",
        model_kwargs={"max_length": 1500, "load_in_8bit": True},
    )

    embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2',
                                       model_kwargs={'device': 'cpu'})

    vectorstore = Chroma.from_documents(_texts, embeddings, persist_directory="vectorstore")

    prompt = set_qa_prompt()

    return build_retrieval_qa(llm, prompt, vectorstore)


def load_docs(uploaded_file):
    print("loading docs ...")
    content = uploaded_file.read()
    file_path_aux = "./temp/file.pdf"
    write_text_file(content, file_path_aux)
    file_path = "./temp/"

    loader = DirectoryLoader(file_path,
                             glob="*.pdf",
                             loader_cls=UnstructuredPDFLoader)
    documents = loader.load()

    # Split text from PDF into chunks
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000,
                                                   chunk_overlap=0,
                                                   length_function=len)
    texts = text_splitter.split_documents(documents)
    return texts


# Load a PDF file
uploaded_file = st.file_uploader("Choose a PDF file", type="pdf")

if uploaded_file is not None:
    st.write('Loading file')

    texts = load_docs(uploaded_file)
    model = setup_dbqa(texts)  # the vector store is built and persisted inside setup_dbqa

    question = st.text_input('Ask a question:')

    if question:
        answer = model({'query': question})
        print(question)
        print(answer)

        st.write('Question: ', answer["query"])
        st.write('Answer: ', answer["result"])
        st.write('Source documents: ', answer["source_documents"])

    # if st.button("Clear cache before loading new document"):
    #     # Clears all st.cache_resource caches:
    #     st.cache_resource.clear()
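The @st.cache(allow_output_mutation=True) decorator on setup_dbqa is the legacy Streamlit caching API; the commented-out @st.cache_resource is its modern replacement for caching unhashable resources such as models and vector stores. A sketch, assuming Streamlit >= 1.18 (load_llm and build_model are hypothetical stand-ins for the real body above):

```python
import streamlit as st

@st.cache_resource  # caches the returned object across reruns
def setup_dbqa(_texts):
    # A leading underscore excludes a parameter from the cache key, which
    # matters here because document chunks are not hashable.
    llm = load_llm()                 # hypothetical helper
    return build_model(llm, _texts)  # hypothetical helper
```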
temp/aze.pdf ADDED
Binary file (149 kB)

temp/document-1.pdf ADDED
Binary file (45.8 kB)

temp/document.pdf ADDED
Binary file (45.8 kB)

temp/erty.pdf ADDED
Binary file (149 kB)
test_gradio.py ADDED
@@ -0,0 +1,188 @@
# Run from this folder with: gradio test_gradio.py
# (this may not provide the automatic reload mechanism)

import gradio as gr
import numpy as np
import random
import time

# def greet(name):
#     return "Hello " + name + "!"
#
# demo = gr.Interface(fn=greet, inputs=gr.Textbox(lines=2, placeholder="Name Here..."), outputs="text")
# demo.launch()


# def greet2(name, is_morning, temperature):
#     salutation = "Good morning" if is_morning else "Good evening"
#     greeting = f"{salutation} {name}. It is {temperature} degrees today"
#     celsius = (temperature - 32) * 5 / 9
#     return greeting, round(celsius, 2)
#
# demo = gr.Interface(
#     fn=greet2,
#     inputs=["text", "checkbox", gr.Slider(0, 100)],
#     outputs=["text", "number"],
# )
# demo.launch()


# def sepia(input_img):
#     sepia_filter = np.array([
#         [0.393, 0.769, 0.189],
#         [0.349, 0.686, 0.168],
#         [0.272, 0.534, 0.131]
#     ])
#     sepia_img = input_img.dot(sepia_filter.T)
#     sepia_img /= sepia_img.max()
#     return sepia_img
#
# demo = gr.Interface(sepia, gr.Image(shape=(200, 200)), "image")
# demo.launch()


def yes_man(message, history):
    if message.endswith("?"):
        return "Yes"
    else:
        return "Ask me anything!"


# gr.ChatInterface(
#     yes_man,
#     chatbot=gr.Chatbot(height=300),
#     textbox=gr.Textbox(placeholder="Ask a question about the uploaded PDF document.", container=False, scale=7),
#     title="Gradio QA Bot",
#     description=f"{intro}",
#     theme="soft",
#     examples=["What is the title of the document?", "Summarize the main ideas of the documents"],
#     cache_examples=True,
#     retry_btn=None,
#     undo_btn="Delete Previous",
#     clear_btn="Clear",
# ).launch()


# intro = "Welcome! This is not just any bot, ..."
title1 = "QA App"
title2 = "Gradio QA Bot"


def file_upload(input_file):
    # Process the uploaded file
    file_path = "/tmp/file.pdf"
    if input_file is not None:
        # Save the uploaded file or perform any desired operations
        content = input_file.read()
        try:
            with open(file_path, 'wb') as file:
                file.write(content)
            return [f"File '{input_file.name}' uploaded successfully in {file_path}.", file_path]
        except Exception as e:
            return f"Error occurred while writing the file: {e}"
    return ["No file uploaded.", file_path]


def crash(test, file):
    return "ok"


gr.ChatInterface(
    yes_man,
    chatbot=gr.Chatbot(height=300),
    textbox=gr.Textbox(placeholder="Ask a question about the uploaded PDF document.", container=False, scale=7),
    title="Gradio QA Bot",
    description="blabla",
    theme="soft",
    examples=["What is the title of the document?", "Summarize the main ideas of the documents"],
    cache_examples=True,
    retry_btn=None,
    undo_btn="Delete Previous",
    clear_btn="Clear",
).launch()


# with gr.Blocks() as demo:
#     intro = gr.Markdown("""Welcome! This is not just any bot, it's a special one equipped with state-of-the-art natural language processing capabilities, and ready to answer your queries.
#
#     Ready to explore? Let's get started!
#
#     * Step 1: Upload a PDF document.
#     * Step 2: Type in a question related to your document's content.
#     * Step 3: Get your answer!
#
#     Push clear cache before uploading a new doc!
#     """)
#
#     # Create a Gradio interface with a file upload input
#     iface = gr.Interface(
#         fn=file_upload,
#         inputs=gr.File(),
#         outputs=["text", gr.File()],
#         title=title1,
#         description="Drag and drop your document here")
#
#     # bot = gr.Interface(crash,
#     #                    inputs=[gr.Textbox(lines=2, placeholder="Ask a question about the uploaded PDF document."), gr.File()],
#     #                    outputs=[gr.Chatbot(height=300)],
#     #                    title="Gradio QA Bot",
#     #                    description=f"{intro}",
#     #                    theme="soft",
#     #                    examples=["What is the title of the document?", "Summarize the main ideas of the documents"],
#     #                    cache_examples=True,
#     #                    retry_btn=None,
#     #                    undo_btn="Delete Previous",
#     #                    clear_btn="Clear")
#
#     # gr.ChatInterface(
#     #     yes_man,
#     #     chatbot=gr.Chatbot(height=300),
#     #     textbox=gr.Textbox(placeholder="Ask a question about the uploaded PDF document.", container=False, scale=7),
#     #     title="Gradio QA Bot",
#     #     description=f"{intro}",
#     #     theme="soft",
#     #     examples=["What is the title of the document?", "Summarize the main ideas of the documents"],
#     #     cache_examples=True,
#     #     retry_btn=None,
#     #     undo_btn="Delete Previous",
#     #     clear_btn="Clear",
#     #     )
#
# demo.launch()


# bot
# NOTE: qa_bot is not defined in this file; it lives in gradio_app.py.
iface = gr.Interface(qa_bot,
                     inputs=["file", gr.Textbox(placeholder="Ask a question about the uploaded PDF document.", container=False, scale=7)],
                     outputs="text",
                     title=title2,
                     description="Ask a question about the uploaded PDF document.",
                     theme="soft",
                     examples=["What is the title of the document?", "Summarize the main ideas of the documents"],
                     cache_examples=True,
                     retry_btn=None,
                     undo_btn="Delete Previous",
                     clear_btn="Clear")
#### OR
iface = gr.ChatInterface(
    qa_bot,
    chatbot=gr.Chatbot(height=300),
    textbox=gr.Textbox(placeholder="Ask a question about the uploaded PDF document.", container=False, scale=7),
    title=title2,
    description="Ask a question about the uploaded PDF document.",
    theme="soft",
    examples=["What is the title of the document?", "Summarize the main ideas of the documents"],
    cache_examples=True,
    retry_btn=None,
    undo_btn="Delete Previous",
    clear_btn="Clear",
)
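Note that test_gradio.py references qa_bot at the bottom without defining or importing it, so running the file as committed raises a NameError there. A one-line fix, assuming the intent was to reuse the helper defined in gradio_app.py in this same commit (which itself needs its commented-out langchain imports restored):

```python
from gradio_app import qa_bot  # assumed import; qa_bot lives in gradio_app.py
```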
test_streamlit.py ADDED
@@ -0,0 +1,10 @@
import streamlit as st
import pandas as pd

st.write("""
# My first app
Hello *world!*
""")

df = pd.read_csv("my_data.csv")
st.line_chart(df)
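test_streamlit.py reads my_data.csv, which is not included in this commit, so the script fails unless that file is supplied alongside it. A guarded sketch that degrades gracefully when the CSV is missing (the filename is the one the script already assumes):

```python
import os
import pandas as pd
import streamlit as st

if os.path.exists("my_data.csv"):
    st.line_chart(pd.read_csv("my_data.csv"))
else:
    st.warning("my_data.csv not found; place it next to this script.")
```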