dupuyta committed on
Commit
bc816c6
•
1 Parent(s): 6e2f032
Files changed (2)
  1. gradio_llm_example.py +205 -106
  2. metrics.csv +5 -0
gradio_llm_example.py CHANGED
@@ -1,67 +1,56 @@
  import gradio as gr
- import time
  import random
  import os
  import shutil
  # How to RUN code ==> gradio gradio_llm_example.py


- from langchain import HuggingFacePipeline
- def load_llm_model(model: str = "google/flan-t5-large") -> HuggingFacePipeline:
-     llm = HuggingFacePipeline.from_model_id(
-         model_id=model,
-         task="text2text-generation",
-         model_kwargs={"max_length": 1500, "load_in_8bit": True},
-     )
-     return llm
-
  # Define text and title information
- title1 = "## </br> </br> </br> 🤗💬 QA App"

- title2 = " ## </br> </br> </br> Gradio QA Bot"

- intro = """ Welcome! This is not just any bot, it's a special one equipped with state-of-the-art natural language processing capabilities, and ready to answer your queries.
-
- Ready to explore? Let's get started!
-
- * Step 1: Upload a PDF document.
- * Step 2: Type in a question related to your document's content.
- * Step 3: Get your answer!
-
- Push clear cache before uploading a new doc!
-
- """
-
- about = """
- ## </br> About
- This app is an LLM-powered chatbot built using:
- - [Streamlit](<https://streamlit.io/>)
- - [HugChat](<https://github.com/Soulter/hugging-chat-api>)
- - Chat Model = llama2-chat-hf 7B
- - Retriever model = all-MiniLM-L6-v2
-
- </br>
- 💡 Note: No API key required!
-
- </br>
- Made with ❤️ by us
- """

  # Define theme ==> see gr.themes.builder()
  theme = gr.themes.Soft(
      primary_hue="emerald",
-     secondary_hue="emerald",
      neutral_hue="slate",
  ).set(
-     body_background_fill_dark='*primary_50',
-     shadow_drop='*shadow_spread',
-     button_border_width='*block_border_width',
-     button_border_width_dark='*block_label_border_width'
  )

@@ -69,8 +58,8 @@ def upload_file(files_obj):
      """ Upload several files from drag and drop, and save them in local temp folder
      files_obj (type:list) : list of tempfile._TemporaryFileWrapper
      return checkbox to display uploaded documents """
-     # Create local copy
      temp_file_path = "./temp"
      if not os.path.exists(temp_file_path):
          os.makedirs(temp_file_path)
      # Save each file among list of given files
@@ -81,19 +70,21 @@ def upload_file(files_obj):
          shutil.copyfile(file_obj.name, os.path.join(temp_file_path, file_name))
      # return visible button for next selection
      return {uploaded_check : gr.CheckboxGroup(choices=file_name_list, visible=True),
-             choose_btn : gr.Button(value="Choose", visible=True)}


- def read_content(content, files_name):
      """ Read and update the content variable (state) according to the several files_names to read from temp folder
      return updated content_var (type : list of str)
      return visible error_box to display logs error """
      content_list = list()
      text_list = list()
      # Parse one or several docs among the selected ones
      for file_name in files_name :
-         print(file_name, type(file_name))
-         temp_file_path = "./temp"
          file_path = os.path.join(temp_file_path, file_name)
          # Read doc
          with open(file_path, "rb") as file:
@@ -102,28 +93,73 @@ def read_content(content, files_name):
              #### YOUR FUNCTION FOR CONTENT ==> must be str
              my_content = str(content[:10])
              content_list.append(my_content)
-             text_list.append(f"File {file_name} ready to be used. \n")
          except Exception as e:
              print(f"Error occurred while writing the file: {e}")
-             text_list.append(f"Error occurred while writing the file {file_name}: {e}")
      return {content_var : content_list,
-             error_box : gr.Textbox(value=f"""{" and ".join(text_list)} \n You can ask a question about the uploaded PDF document.""", visible=True)}


  ### YOUR model using the same input and returning output
  def my_model(message, chat_history, content_var,
               language_choice, model_choice, max_length, temperature,
               num_return_sequences, top_p, no_repeat_ngram_size):
      # No LLM here, just respond with a random pre-made message
      if content_var == []:
-         bot_message = f"No context: {content_var}" + random.choice(["Tell me more about it",
-                                                                     "Cool, but I'm not interested",
-                                                                     "Hmmmm, ok then"])
      else:
-         bot_message = f"Here is the context: {content_var}"
      chat_history.append((message, bot_message))

-     return "", chat_history


  def queue_bot(history):
@@ -132,71 +168,134 @@ def queue_bot(history):
      history[-1][1] = ""
      for character in bot_message:
          history[-1][1] += character
-         time.sleep(0.05)
          yield history


  # App
- with gr.Blocks(theme=gr.themes.Soft()) as gradioApp:

      # Initialize the document context variable as empty without any drag and drop
      content_var = gr.State([])

      # Layout
-     with gr.Row():
-         # Row 1 : About
-         with gr.Column(scale=1, min_width=100):
-             # gr.Image("./logo_neovision.png")
-             logo_gr = gr.Markdown(""" <img src="file/logo_neovision.png" alt="logo" style="width:400px;"/>""")
-             about_gr = gr.Markdown(about)
-
-         # Row 2 : Param
-         with gr.Column(scale=2, min_width=500):
-             title1_gr= gr.Markdown(title1)
-             intro_gr = gr.Markdown(intro)
-
-             # Upload several documents
-             upload_button = gr.UploadButton("Browse files", label="Drag and drop your documents here",
-                                             size="lg", scale=0, min_width=100,
-                                             file_types=["pdf"], file_count="multiple")
-             # invisible button while no documents uploaded
-             uploaded_check = gr.CheckboxGroup(label="Uploaded documents", visible=False,
-                                               info="Do you want to use a supporting document?")
-             choose_btn = gr.Button(value="Choose", visible=False)
-             # uploading one or several docs and display other buttons
-             upload_button.upload(upload_file, upload_button, [uploaded_check, choose_btn])
-
-             # Read only one document
-             error_box = gr.Textbox(label="Reading files... ", visible=False) # display only when ready or error
-             choose_btn.click(read_content, inputs=[content_var, uploaded_check], outputs=[content_var, error_box])
-
-             # Select advanced options, to be given as input for your model
-             gr.Markdown(""" ## Toolbox """)
-             with gr.Accordion(label="Select advanced options",open=False):
-                 model_choice = gr.Dropdown(["LLM", "Other"], label="Model", info="Choose your AI model")
-                 language_choice = gr.Dropdown(["English", "French"], label="Language", info="Choose your language")
-                 max_length = gr.Slider(label="Token length", minimum=1, maximum=100, value=50, step=1)
-                 temperature= gr.Slider(label="Temperature", minimum=0.1, maximum=1, value=0.8, step=0.1)
-                 num_return_sequences= gr.Slider(label="Return Sequence", minimum=0.1, maximum=50, value=1, step=0.1)
-                 top_p= gr.Slider(label="top p", minimum=0.1, maximum=1, value=0.8, step=0.1)
-                 no_repeat_ngram_size= gr.Slider(label="repeat", minimum=0.1, maximum=1, value=3, step=0.1)

-         # Row 3 : Chat
-         with gr.Column(scale=2, min_width=600):
              title2_gr = gr.Markdown(title2)
-             chatbot = gr.Chatbot(label="Bot", height=500)
              msg = gr.Textbox(label="User", placeholder="Ask any question.")
-             ### YOUR MODEL TO ADAPT
-             msg.submit(my_model,
-                        inputs=[msg, chatbot, content_var,
-                                language_choice, model_choice, max_length, temperature,
-                                num_return_sequences, top_p, no_repeat_ngram_size],
-                        outputs=[msg, chatbot]).then(queue_bot, chatbot, chatbot)
-             clear = gr.ClearButton(components=[msg, chatbot], value="Clear console")

  gr.close_all()
  gradioApp.queue()
- gradioApp.launch(share=True, auth=("neovision", "gradio2023"))
  #auth=("neovision", "gradio2023") to be placed inside the launch parameters

  import gradio as gr
+ import datetime
  import random
+ import time
  import os
  import shutil
+ import pandas as pd
  # How to RUN code ==> gradio gradio_llm_example.py


  # Define text and title information
+ title1 = "## 🤗 About QA App"

+ title2 = " ## 💬 Chat with QA Bot"

+ title3 = " ## 🔧 Toolbox "

+ title4 = " ## ⚙️ Parameters"

+ title5 = " ## 📊 Evaluation"

+ intro = """ Welcome! This is not just any bot, it's a special one equipped with state-of-the-art natural language processing capabilities, and ready to answer your queries with/without the support of some additional documents.
+
+
+ **Ready to explore? Let's get started!** 🏁🎉
+
+ * Chat with the bot by typing any question you want
+   and get your answers!
+ * You can load and select one or more documents to reinforce the bot's knowledge.
+   Don't forget to validate and update your selection according to your choices.
+ * You can customize your model by selecting advanced options in the toolbox."""

+ final_info = """ Made with ❤️ by us 🚀"""

  # Define theme ==> see gr.themes.builder()
  theme = gr.themes.Soft(
      primary_hue="emerald",
      neutral_hue="slate",
+     text_size=gr.themes.sizes.text_md,
  ).set(
+     body_text_color='*secondary_900',
+     body_text_size='*text_lg',
+     body_text_weight='500',
+     border_color_accent='*secondary_950',
+     link_text_color='*secondary_300',
+     block_border_color='*neutral_200',
+     block_border_width='*block_label_border_width',
+     block_label_background_fill='*primary_200',
+     block_title_text_color='*primary_350',
+     checkbox_border_color='*primary_300',
+     checkbox_border_color_selected_dark='*secondary_200',
+     button_primary_border_color='*primary_350'
  )


      """ Upload several files from drag and drop, and save them in local temp folder
      files_obj (type:list) : list of tempfile._TemporaryFileWrapper
      return checkbox to display uploaded documents """
      temp_file_path = "./temp"
+     # Create local copy
      if not os.path.exists(temp_file_path):
          os.makedirs(temp_file_path)
      # Save each file among list of given files

          shutil.copyfile(file_obj.name, os.path.join(temp_file_path, file_name))
      # return visible button for next selection
      return {uploaded_check : gr.CheckboxGroup(choices=file_name_list, visible=True),
+             choose_btn : gr.Button(visible=True),
+             clear_folder_btn : gr.Button(visible=True)}
+
+

+ def read_PDFcontent(content, files_name):
      """ Read and update the content variable (state) according to the several files_names to read from temp folder
      return updated content_var (type : list of str)
      return visible error_box to display logs error """
      content_list = list()
      text_list = list()
      # Parse one or several docs among the selected ones
+     temp_file_path = "./temp"
      for file_name in files_name :
          file_path = os.path.join(temp_file_path, file_name)
          # Read doc
          with open(file_path, "rb") as file:

              #### YOUR FUNCTION FOR CONTENT ==> must be str
              my_content = str(content[:10])
              content_list.append(my_content)
+             text_list.append(f" {file_name} : ready ✅ \n ")
+             # print(content)
          except Exception as e:
              print(f"Error occurred while writing the file: {e}")
+             text_list.append(f" {file_name} : error ❌ \n")
      return {content_var : content_list,
+             error_box : gr.Textbox(value=f"""{"".join(text_list)} """, visible=True)}
+
+
+ def clear_folder():
+     temp_file_path = "./temp"
+     shutil.rmtree(temp_file_path)
+     return {uploaded_check : gr.CheckboxGroup(choices=[], visible=False),
+             error_box : gr.Textbox("", visible=False),
+             choose_btn : gr.Button(visible=False),
+             clear_folder_btn : gr.Button(visible=False),
+             context_box : gr.Textbox(""),
+             content_var : []}
+
+ # def write_content (chat_history, download_counter):
+ #     temp_file_path = "./Download_chat"
+ #     if not os.path.exists(temp_file_path):
+ #         os.makedirs(temp_file_path)
+ #     file_name = str(download_counter)+"chat_Conversation.txt"
+ #     file_path = os.path.join(temp_file_path, file_name)
+ #     # write doc
+ #     with open(file_path, "w") as file:
+ #         for query_answer in chat_history :
+ #             file.write(" \n ".join(query_answer))
+ #             file.write(" \n ")
+ #     new_count = int(download_counter)+1
+ #     return { download_counter : gr.Number(new_count, visible=False)}

  ### YOUR model using the same input and returning output
  def my_model(message, chat_history, content_var,
               language_choice, model_choice, max_length, temperature,
               num_return_sequences, top_p, no_repeat_ngram_size):
+
      # No LLM here, just respond with a random pre-made message
      if content_var == []:
+         bot_message = f"No context: {content_var}" + "I'm not interested"
      else:
+         bot_message = f"Here is the context: {content_var}"
      chat_history.append((message, bot_message))
+
+     return "", chat_history, {context_box : gr.Textbox(visible=True, value=f'{"and".join(content_var)}')}

+
+ def evaluate_my_model(answer):
+     # true_answer.update("")
+     return {true_answer : gr.Textbox.update("") ,
+             updated_check : gr.Button(f"Model updated ✅ at : {datetime.datetime.now().strftime('%H:%M:%S')} " ,
+                                       visible = True)}
+
+
+ def display_my_metrics(metric_csv="./metrics.csv"):
+     df = pd.read_csv(metric_csv)
+     # df_metrics[0]=["Model", "P", "R", "F1", "⏳"]
+     return {df_metrics : gr.DataFrame(df, visible=True)}
+
+
+ def clear_all():
+     return {msg: gr.Textbox("") ,
+             chatbot: gr.Chatbot(""),
+             context_box: gr.Textbox("") }


  def queue_bot(history):

      history[-1][1] = ""
      for character in bot_message:
          history[-1][1] += character
+         time.sleep(0.005)
          yield history


+
+ # Params
+ temp_file_path = "./temp"
+
+
  # App
+ with gr.Blocks(theme=theme) as gradioApp:

      # Initialize the document context variable as empty without any drag and drop
      content_var = gr.State([])
+     download_counter = gr.Number(0, visible=False)
+
      # Layout
+     gr.Markdown(""" <img src="file/logo_neovision.png" alt="logo" style="width:350px;"/>""")
+
+     with gr.Column():
+         gr.Markdown(title1)
+         gr.Markdown(intro)
+         # gr.Markdown(final_info)
+
+     # Row 1 : Intro + Param
+     with gr.Row(equal_height=True):

+         # with gr.Column(min_width=80, scale = 0):
+         #     gr.Markdown(" ")

+         # Row 2 : Chat
+         with gr.Column(min_width= 300, scale = 3):
              title2_gr = gr.Markdown(title2)
+             chatbot = gr.Chatbot(label="Bot", height=300)
              msg = gr.Textbox(label="User", placeholder="Ask any question.")
+             with gr.Row():
+                 with gr.Column():
+                     # clear = gr.ClearButton(components=[msg, chatbot, context_box], value="Clear console")
+                     clear = gr.Button( value="🗑️ Clear console")
+                     download = gr.Button( value="📩 Download chat")
+                     updated_download = gr.Button(f"Last download ✅ at : {datetime.datetime.now().strftime('%H:%M:%S')} " ,
+                                                  visible = False)
+
+                 with gr.Column():
+                     upload_button = gr.UploadButton("📁 Browse files", label="Drag and drop your documents here",
+                                                     file_types=["pdf"], file_count="multiple")
+                     uploaded_check = gr.CheckboxGroup(label=" 📁 Uploaded documents", visible=False,
+                                                       info="Do you want to use a supporting document?")
+                     with gr.Row():
+                         choose_btn = gr.Button(value="🖱️ Choose docs", visible=False)
+                         clear_folder_btn = gr.Button(value="🗑️ Clear docs", visible=False)
+                     # upload_iface = gr.Interface(fn=upload_file,
+                     #                             inputs=gr.File(file_count="multiple", file_types=["pdf"]),
+                     #                             outputs=[uploaded_check],
+                     #                             description="📁 Browse files", allow_flagging="never")
+                     error_box = gr.Textbox(label="Files state... ", visible=False) # display only when ready or error
+
+         # Row 3 : Toolbox
+         with gr.Column(min_width= 100, scale = 1):
+             gr.Markdown(title4)
+             with gr.Accordion(label="Select advanced options",open=False):
+                 model_choice = gr.Dropdown(["LLM", "Other"], label="Model", info="Choose your AI model")
+                 language_choice = gr.Dropdown(["English", "French"], label="Language", info="Choose your language")
+                 max_length = gr.Slider(label="Token length", minimum=1, maximum=100, value=50, step=1)
+                 temperature= gr.Slider(label="Temperature", minimum=0.1, maximum=1, value=0.8, step=0.5)
+                 num_return_sequences= gr.Slider(label="Return Sequence", minimum=0.1, maximum=50, value=1, step=0.5)
+                 top_p= gr.Slider(label="top p", minimum=0.1, maximum=1, value=0.8, step=0.5)
+                 no_repeat_ngram_size= gr.Slider(label="repeat", minimum=0.1, maximum=1, value=3, step=0.5)
+
+         # Evaluate
+         with gr.Column(min_width= 100, scale = 1):
+             gr.Markdown(title5)
+             with gr.Accordion(label=" Evaluate the model",open=True):
+                 context_box = gr.Textbox(label= "📚 Context",
+                                          placeholder=" The used context" )
+                 true_answer = gr.Textbox(lines = 2, label= "📝 Right Answer",
+                                          placeholder="Give an example of right answer and update the model")
+                 update_btn = gr.Button(value = "Update ⚠️ 🔄" )
+                 updated_check = gr.Button(value= "", visible = False)
+                 df_metrics = gr.DataFrame(row_count=(5, "fixed"), col_count=(3, "fixed"), visible=False)
+
+
+     ###### Chatbot ######
+     # Flag
+     logger = gr.CSVLogger()
+     logger.setup([chatbot], "Download_flagged")
+     # hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, "crowdsourced-calculator-demo")
+     download.click(lambda *args: logger.flag(args), [chatbot], None, preprocess=False)
+     download.click(lambda : {updated_download:gr.Button(visible=True)}, None, updated_download, preprocess=False)
+
+     # YOUR MODEL TO ADAPT
+     msg.submit(my_model, inputs=[msg, chatbot, content_var,
+                                  language_choice, model_choice, max_length, temperature,
+                                  num_return_sequences, top_p, no_repeat_ngram_size],
+                outputs=[msg, chatbot, context_box]).then(queue_bot, chatbot, chatbot)
+     # Chatbot clear
+     clear.click(clear_all, inputs=[] , outputs=[msg, chatbot, context_box])
+     # Chatbot examples
+     example = gr.Examples(examples = [ ["What are the payment terms?"],
+                                        ["Do I become the owner of the developments made?"],
+                                        [" Can Neovision use a subcontractor and if so, under what conditions?"],
+                                        ["What are the termination conditions?"]],
+                           fn = my_model, inputs=[msg, chatbot, content_var,
+                                                  language_choice, model_choice, max_length, temperature,
+                                                  num_return_sequences, top_p, no_repeat_ngram_size],
+                           outputs=[msg, chatbot, context_box])
+
+
+     ###### DOCS ######
+     # uploading one or several docs and display other buttons
+     upload_button.upload(upload_file, [upload_button], [uploaded_check, choose_btn, clear_folder_btn])
+     # Read only one document
+     choose_btn.click(read_PDFcontent, inputs=[content_var, uploaded_check], outputs=[content_var, error_box])
+     # # clear
+     clear_folder_btn.click(clear_folder, inputs=[] ,
+                            outputs=[uploaded_check, error_box, choose_btn, clear_folder_btn,
+                                     context_box, content_var])

+
+     # evaluate and update model
+     update_btn.click(evaluate_my_model, inputs=true_answer, outputs=[true_answer, updated_check])
+     update_btn.click(display_my_metrics, inputs=None, outputs=df_metrics)

  gr.close_all()
  gradioApp.queue()
+ gradioApp.launch(share=True)
  #auth=("neovision", "gradio2023") to be placed inside the launch parameters
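
Note: this commit removes the langchain-based load_llm_model helper and leaves my_model as a canned-response placeholder ("YOUR MODEL TO ADAPT"). Below is a minimal sketch of how a real pipeline could be wired back in, reusing the call signature shown in the removed lines; the model id, generation kwargs and prompt construction are illustrative assumptions, not part of the commit:

    from langchain import HuggingFacePipeline

    def load_llm_model(model: str = "google/flan-t5-large") -> HuggingFacePipeline:
        # Same factory call as the helper deleted in this commit
        return HuggingFacePipeline.from_model_id(
            model_id=model,
            task="text2text-generation",
            model_kwargs={"max_length": 1500, "load_in_8bit": True},
        )

    llm = load_llm_model()

    def my_model(message, chat_history, content_var,
                 language_choice, model_choice, max_length, temperature,
                 num_return_sequences, top_p, no_repeat_ngram_size):
        # Prepend the extracts selected via "Choose docs" (content_var) as context
        prompt = "\n".join(content_var + [message]) if content_var else message
        bot_message = llm(prompt)
        chat_history.append((message, bot_message))
        # msg is cleared, chatbot gets the new turn, context_box shows the context used
        return "", chat_history, " and ".join(content_var)
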
metrics.csv ADDED
@@ -0,0 +1,5 @@
+ models,M1,M2
+ P,895,905
+ R,891,878
+ F1,893,891
+ Time (s),28.04,32.67
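
The new metrics.csv backs display_my_metrics(), which loads it with pandas and reveals the hidden df_metrics table when the Update button is clicked. A quick way to check what that table will contain (a sketch; it assumes the file sits next to the script, matching the default metric_csv="./metrics.csv"):

    import pandas as pd

    # Rows are the reported metrics (P, R, F1, Time (s));
    # columns M1 and M2 are the two models being compared.
    df = pd.read_csv("./metrics.csv")
    print(df.to_string(index=False))
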