anand004 committed
Commit 17672b0
1 Parent(s): 3f98f11

dynamic references gradio

Files changed (1):
  app.py  +16 -34
app.py CHANGED
@@ -35,7 +35,7 @@ def image_to_bytes(image):
     return base64.b64encode(img_byte_arr.getvalue()).decode("utf-8")
 
 
-@spaces.GPU
+@spaces.GPU(duration=60*4)
 def get_image_descriptions(images):
     torch.cuda.empty_cache()
     gc.collect()
@@ -244,8 +244,9 @@ def conversation(vectordb_client, msg, num_context, img_context, history):
     """
     prompt = PromptTemplate(template=template, input_variables=["context", "question"])
     context = "\n\n".join(results)
+    # references = [gr.Textbox(i, visible=True, interactive=False) for i in results]
     response = llm(prompt.format(context=context, question=msg, images=img_desc))
-    return history + [(msg, response)], context, images_and_locs
+    return history + [(msg, response)], results, images_and_locs
 
 
 def check_validity_and_llm(session_states):
@@ -272,6 +273,8 @@ with gr.Blocks(css=CSS) as demo:
     vectordb = gr.State()
     doc_collection = gr.State(value=[])
     session_states = gr.State(value={})
+    references = gr.State(value=[])
+
     gr.Markdown(
         """<h2><center>Multimodal PDF Chatbot</center></h2>
         <h3><center><b>Interact With Your PDF Documents</b></center></h3>"""
@@ -330,27 +333,7 @@ with gr.Blocks(css=CSS) as demo:
                         label="Sample Extracted Images", columns=1, rows=2
                     )
 
-                # with gr.Row():
-                #     image_desc = gr.Textbox(label="Image Descriptions", interactive=False)
-                # with gr.Row(variant="panel"):
-                #     ext_tables = gr.HTML("<h3>Sample Tables</h3>", label="Extracted Tables")
-
-        # with gr.TabItem("Embeddings", id=3) as embed_tab:
-        #     with gr.Row():
-        #         with gr.Column():
-        #             back_p2 = gr.Button(value="Back")
-        #         with gr.Column():
-        #             view_stats = gr.Button(value="View Stats")
-        #         with gr.Column():
-        #             next_p2 = gr.Button(value="Next")
-
-        #     with gr.Row():
-        #         with gr.Column():
-        #             text_stats = gr.Textbox(label="Text Stats", interactive=False)
-        #         with gr.Column():
-        #             table_stats = gr.Textbox(label="Table Stats", interactive=False)
-        #         with gr.Column():
-        #             image_stats = gr.Textbox(label="Image Stats", interactive=False)
+
 
         with gr.TabItem("Chat", id=2) as chat_tab:
             with gr.Column():
@@ -382,8 +365,14 @@ with gr.Blocks(css=CSS) as demo:
                 with gr.Column():
                     chatbot = gr.Chatbot(height=400)
                     with gr.Accordion("Text References", open=False):
-                        with gr.Row():
-                            text_context = gr.Textbox(interactive=False, lines=10)
+                        # text_context = gr.Row()
+
+                        @gr.render(inputs=[references])
+                        def gen_refs(refs):
+                            n = len(refs)
+                            for i in range(n):
+                                gr.Textbox(label=f"Ref-{i+1}", value=refs[i], lines=3)
+
 
                     with gr.Row():
                         msg = gr.Textbox(
@@ -408,27 +397,20 @@ with gr.Blocks(css=CSS) as demo:
             session_states,
             sample_data,
             ext_text,
-            # ext_tables,
             images,
             prog,
-            # image_desc
         ],
     )
 
     submit_btn.click(
         conversation,
         [vectordb, msg, num_context, img_context, chatbot],
-        [chatbot, text_context, ret_images],
+        [chatbot, references, ret_images],
     )
 
-    # view_stats.click(
-    #     get_stats, [vectordb], outputs=[text_stats, table_stats, image_stats]
-    # )
-
-    # Page Navigation
 
     back_p1.click(lambda: gr.Tabs(selected=0), None, tabs)
 
     next_p1.click(check_validity_and_llm, session_states, tabs)
 if __name__ == "__main__":
-    demo.launch(share=True)
+    demo.launch(share=True)
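For context on the first hunk: on ZeroGPU Spaces, @spaces.GPU attaches a GPU only for the duration of the decorated call, and the duration argument raises the per-call time budget (here 60*4 = 240 seconds), presumably so longer captioning runs are not cut off. A minimal sketch of that decorator usage, assuming the Hugging Face `spaces` package on a ZeroGPU Space; the function name and body below are hypothetical stand-ins, only the decorator line mirrors the commit:

import spaces
import torch

# duration is the per-call GPU allocation window in seconds; 60 * 4
# asks for up to four minutes instead of the shorter default window.
@spaces.GPU(duration=60 * 4)
def describe_images(images):
    # On ZeroGPU, CUDA is only available while a @spaces.GPU call runs.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    ...  # hypothetical body; the real app runs its captioning model here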
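The remaining hunks are one change: instead of joining the retrieved passages into a single text_context textbox, conversation now returns the raw results list, the list is stored in a references gr.State, and the @gr.render callback inside the accordion rebuilds one textbox per passage whenever that state changes. Below is a self-contained sketch of the same State-plus-render pattern, assuming a recent Gradio 4.x release where @gr.render is available; the query box and stub retrieval are illustrative only, not from the commit:

import gradio as gr

with gr.Blocks() as demo:
    references = gr.State(value=[])  # list of retrieved passages

    # Re-executed whenever `references` changes: builds one read-only
    # textbox per passage, however many the retriever returned.
    @gr.render(inputs=[references])
    def show_refs(refs):
        for i, ref in enumerate(refs):
            gr.Textbox(label=f"Ref-{i + 1}", value=ref, lines=3, interactive=False)

    query = gr.Textbox(label="Query")
    # Stub retrieval: any event that writes the State re-triggers the render.
    query.submit(
        lambda q: [f"passage {n + 1} mentioning {q!r}" for n in range(3)],
        [query],
        [references],
    )

if __name__ == "__main__":
    demo.launch()

This is also why the submit_btn.click outputs swap text_context for references: writing the results list into the State is what triggers the re-render.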