# NOTE(review): the three lines below were a scraped Hugging Face Spaces page
# header ("Spaces: / Runtime error / Runtime error"), not Python. Preserved
# here as a comment so the file parses.
import gradio as gr
import os
from PIL import Image
import base64
import requests
from langchain.embeddings import HuggingFaceEmbeddings
#from langchain.llms import OpenAI
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings
# FAISS must be importable so the pickled vector stores below can be unpickled.
from langchain.vectorstores.faiss import FAISS
import pickle

# Fails fast with KeyError if the token is not configured in the Space secrets.
HUGGINGFACEHUB_API_TOKEN = os.environ["HUGGINGFACEHUB_API_TOKEN"]

model_name = "sentence-transformers/all-mpnet-base-v2"
hf = HuggingFaceEmbeddings(model_name=model_name)

#Loading FAISS search index from disk
#This is a vector space of embeddings from one-tenth of PlaygrondAI image-prompts
#PlaygrondAI open-sourced dataset is a collection of around 1.3 mil generated images and caption pairs
def _load_search_index(path):
    """Load one pickled FAISS vector store from disk."""
    # SECURITY: pickle.load executes arbitrary code from the file; these index
    # files must be trusted, locally bundled artifacts — never user uploads.
    with open(path, "rb") as f:
        return pickle.load(f)

search_index = _load_search_index("search_index0.pickle")
search_index1 = _load_search_index("search_index1.pickle")
search_index2 = _load_search_index("search_index2.pickle")
search_index3 = _load_search_index("search_index3.pickle")
#Defining methods for inference
def encode(img):
    """Encode an image file as a base64 string.

    Args:
        img: Filesystem path to the image file.

    Returns:
        The file's raw bytes base64-encoded and decoded to an ASCII str.
    """
    with open(img, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode('utf-8')
def get_caption(image_in):
    """Fetch an image caption from the BLIP2 comparison Gradio Space via its REST API.

    Args:
        image_in: Filesystem path to the image to caption.

    Returns:
        The caption string (the last element of the API response's "data" list).

    Raises:
        requests.RequestException: on network failure or timeout.
    """
    #Sending requests to BLIP2 Gradio-space API
    BLIP2_GRADIO_API_URL = "https://nielsr-comparing-captioning-models.hf.space/run/predict"
    # The Space expects a base64 data-URI payload. A timeout is set so a
    # stalled remote demo cannot hang this app forever.
    response = requests.post(
        BLIP2_GRADIO_API_URL,
        json={"data": ["data:image/jpg;base64," + encode(image_in)]},
        timeout=60,
    ).json()
    return response["data"][-1]
def Image_similarity_search(image_in, search_query):
    """Search the PlaygroundAI FAISS indices and render the top hits as HTML.

    Args:
        image_in: Path to the uploaded query image; used only when
            search_query is empty (a BLIP2 caption of it becomes the query).
        search_query: Free-text query. Empty string means "caption the image".

    Returns:
        An HTML string containing one <img> tag per index's top match.
    """
    if search_query == '':
        #Get image caption from Bip2 Gradio space
        img_caption = get_caption(image_in)
    else:
        img_caption = search_query
    print(f"Image caption from Blip2 Gradio Space or the search_query is - {img_caption}")

    #Searching the vector space: top-1 hit from each of the four index shards.
    top_docs = [
        index.similarity_search(img_caption)[0]
        for index in (search_index, search_index1, search_index2, search_index3)
    ]
    # Each hit is a langchain Document: .page_content holds the PlaygroundAI
    # prompt and .metadata['source'] the image URL. This replaces the original
    # fragile positional indexing over the Document's field tuples
    # (list(doc)[0][1] / list(doc)[-2][-1]['source']), which breaks if the
    # Document field order ever changes.
    img_tags = "\n".join(
        f"<img src='{doc.metadata['source']}' alt='{img_caption}' style='display: block; margin: auto;'>"
        for doc in top_docs
    )
    html_tag = f"""<div style="display: flex; flex-direction: row; overflow-x: auto;">
    {img_tags}
    </div>"""
    return html_tag
#Defining Gradio Blocks
with gr.Blocks(css="""#label_mid {padding-top: 2px; padding-bottom: 2px;}
#label_results {padding-top: 5px; padding-bottom: 1px;}
#col-container {max-width: 580px; margin-left: auto; margin-right: auto;}
#accordion {max-width: 580px; margin-left: auto; margin-right: auto;}
#img_search img {margin: 10px; max-width: 300px; max-height: 300px;}
""") as demo:
    # Page header. Fixed: the "Niels space" link previously pointed at the
    # langchain docs instead of the BLIP2 captioning Space it names.
    gr.HTML("""<div style="text-align: center; max-width: 700px; margin: 0 auto;">
              <div
                style="
                  display: inline-flex;
                  align-items: center;
                  gap: 0.8rem;
                  font-size: 1.75rem;
                "
              >
                <h1 style="font-weight: 900; margin-bottom: 7px; margin-top: 5px;">
                  Using Gradio Demos as API</h1><br></div>
              <div><h4 style="font-weight: 500; margin-bottom: 7px; margin-top: 5px;">
              Get BLIP2 captions from <a href="https://huggingface.co/spaces/nielsr/comparing-captioning-models" target="_blank">Niels space</a> via API call,<br>
              Use LangChain to create vector space with PlaygroundAI prompts</h4><br>
              </div>""")
    with gr.Accordion(label="Details about the working:", open=False, elem_id='accordion'):
        gr.HTML("""
        <p style="margin-bottom: 10px; font-size: 90%"><br>
        ▶️Do you see the "view api" link located in the footer of this application?
        By clicking on this link, a page will open which provides documentation on the REST API that developers can use to query the Interface function / Block events.<br>
        ▶️In this demo, the first step involves making an API call to the BLIP2 Gradio demo to retrieve image captions.
        Next, Langchain is used to create an embedding and vector space for the image prompts and their respective "source" from the PlaygroundAI dataset.
        Finally, a similarity search is performed over the vector space and the top result is returned.
        </p></div>""")
    #with gr.Column(scale=3):
    #    pass
    with gr.Column(elem_id="col-container"):
        label_top = gr.HTML(value="<center>🖼️ Please upload an Image here👇 that will be used as your search query</center>", elem_id="label_top")
        # Typos fixed in user-facing labels: "Upoload" -> "Upload", "serch" -> "search".
        image_in = gr.Image(label="Upload an Image for search", type='filepath', elem_id="image_in")
        label_mid = gr.HTML(value="<p style='text-align: center; color: red;'>Or</center></p>", elem_id='label_mid')
        label_bottom = gr.HTML(value="<center>🔍Type in your search query and press Enter 👇</center>", elem_id="label_bottom")
        search_query = gr.Textbox(placeholder="Example: A small cat sitting", label="", elem_id="search_query", value='')
        label_results = gr.HTML(value="<p style='text-align: center; color: blue; font-weight: bold;'>👇These Search results are from PlaygroundAI 'Liked_Images' dataset available on <a href='https://github.com/playgroundai/liked_images' _target='blank'>github</a></center></p>", elem_id="label_results")
        img_search = gr.HTML(label='Image search results from PlaygroundAI dataset', elem_id="img_search")
        #pai_prompt = gr.Textbox(label="Image prompt from PlaygroundAI dataset", elem_id="pai_prompt")
        #b1 = gr.Button("Retry").style(full_width=False)
    gr.HTML('''<center><a href="https://huggingface.co/spaces/ysharma/Blip_PlaygroundAI?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a></center>
    </p></div>''')

    # Both an image upload and a submitted text query trigger the same search;
    # api_name exposes each as a named REST endpoint on the Space.
    image_in.change(Image_similarity_search, [image_in, search_query], [img_search], api_name="PlaygroundAI_image_search")
    search_query.submit(Image_similarity_search, [image_in, search_query], [img_search], api_name='PlaygroundAI_text_search')
    #b1.click(Image_similarity_search, [image_in, search_query], [pai_prompt, img_search] )

demo.launch(debug=True)