ysharma (HF staff) committed
Commit 7b617af · Parent(s): 7e1cabd

update heading w.r.t. accordion

Files changed (1): app.py (+7, -7)
app.py CHANGED
@@ -75,19 +75,19 @@ with gr.Blocks(css = """#label_mid {padding-top: 2px; padding-bottom: 2px;}
     Get BLIP2 captions from <a href="https://langchain.readthedocs.io/en/latest/" target="_blank">Niels space</a> via API call,<br>
     Use LangChain to create vector space with PlaygroundAI prompts</h4><br>
     </div>""")
-    with gr.Accordion(label="Details about the working of the App", open=False, elem_id='accordion'):
-        gr.HTML("""<div style="text-align: center; max-width: 700px; margin: 0 auto;">
-        <p style="margin-bottom: 10px; font-size: 90%">
-        Do you see the "view api" link located in the footer of this application?
+    with gr.Accordion(label="Details about the working:", open=False, elem_id='accordion'):
+        gr.HTML("""
+        <p style="margin-bottom: 10px; font-size: 90%"><br>
+        ▶️ Do you see the "view api" link located in the footer of this application?
         By clicking on this link, a page will open which provides documentation on the REST API that developers can use to query the Interface function / Block events.<br>
-        In this demo, the first step involves making an API call to the BLIP2 Gradio demo to retrieve image captions.
+        ▶️ In this demo, the first step involves making an API call to the BLIP2 Gradio demo to retrieve image captions.
         Next, LangChain is used to create an embedding and vector space for the image prompts and their respective "source" from the PlaygroundAI dataset.
         Finally, a similarity search is performed over the vector space and the top result is returned.
-        """)
+        </p></div>""")
     #with gr.Column(scale=3):
     #    pass
     with gr.Column(elem_id="col-container"):
-        label_top = gr.HTML(value="<center>🖼️ Please upload an Image here 👇 that will be used as your search query</center>", elem_id="label_top")
+        label_top = gr.HTML(value="<center>🖼️ Please upload an Image here 👇 that will be processed as your search query</center>", elem_id="label_top")
         image_in = gr.Image(label="Upload an Image for search", type='filepath', elem_id="image_in")
         label_mid = gr.HTML(value="<p style='text-align: center; color: red;'>Or</p>", elem_id='label_mid')
         label_bottom = gr.HTML(value="<center>🔍 Type in your search query and press Enter 👇</center>", elem_id="label_bottom")
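
The accordion text in this diff outlines the app's three-step flow: call a BLIP2 Space's REST API to caption the uploaded image, build a LangChain vector space over PlaygroundAI prompts keyed by their "source", then run a similarity search and return the top hit. Below is a minimal sketch of that flow using the classic (pre-0.1) LangChain API; the Space id, embedding model, and the sample prompts/sources are placeholder assumptions, not the app's actual values.

# Sketch only: assumes `pip install gradio_client langchain faiss-cpu sentence-transformers`.
from gradio_client import Client
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS

# Step 1: caption the query image by calling a hosted BLIP2 demo over its REST API
# (the same interface the "view api" footer link documents).
# "nielsr/blip2" is a hypothetical Space id, not necessarily the one this app calls.
blip2 = Client("nielsr/blip2")
caption = blip2.predict("query_image.jpg", api_name="/predict")

# Step 2: embed the PlaygroundAI prompts and index them in a vector store,
# keeping each prompt's "source" URL as metadata. These two entries are
# stand-ins for the real dataset.
prompts = ["a cyberpunk city at night, ultra detailed",
           "a watercolor fox in a misty forest"]
sources = ["https://playgroundai.com/post/abc",
           "https://playgroundai.com/post/xyz"]
db = FAISS.from_texts(
    prompts,
    HuggingFaceEmbeddings(),  # default sentence-transformers model; any embedder works
    metadatas=[{"source": s} for s in sources],
)

# Step 3: similarity search over the vector space; surface the top result.
top = db.similarity_search(caption, k=1)[0]
print(top.page_content, top.metadata["source"])

In the app itself, the matched prompt and its source link are what get rendered back to the user as the search result.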