import gradio as gr
import os
from PIL import Image
import base64
import requests
from langchain.embeddings import HuggingFaceEmbeddings
#from langchain.llms import OpenAI
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.faiss import FAISS
import pickle

HUGGINGFACEHUB_API_TOKEN = os.environ["HUGGINGFACEHUB_API_TOKEN"]

model_name = "sentence-transformers/all-mpnet-base-v2"
hf = HuggingFaceEmbeddings(model_name=model_name)

# Loading the FAISS search indexes from disk.
# Each index is a vector space of embeddings built from one-tenth of the PlaygroundAI image prompts.
# The open-sourced PlaygroundAI dataset is a collection of around 1.3 million generated image and caption pairs.
with open("search_index0.pickle", "rb") as f:
    search_index = pickle.load(f)
with open("search_index1.pickle", "rb") as f:
    search_index1 = pickle.load(f)
with open("search_index2.pickle", "rb") as f:
    search_index2 = pickle.load(f)
with open("search_index3.pickle", "rb") as f:
    search_index3 = pickle.load(f)
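# The pickled indexes above are assumed to have been built offline with LangChain's FAISS
# wrapper and the same HuggingFace embeddings, roughly as sketched below. This is a minimal,
# hypothetical sketch (the prompt/URL pairs and the output file name are placeholders), not the
# exact build script used for this Space:
#
#   docs = [
#       Document(page_content=prompt, metadata={"source": image_url})
#       for prompt, image_url in playgroundai_prompt_url_pairs
#   ]
#   index = FAISS.from_documents(docs, hf)
#   with open("search_index0.pickle", "wb") as f:
#       pickle.dump(index, f)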

# Defining methods for inference
def encode(img):
    # Encode the source image file to a base64 string
    with open(img, "rb") as image_file:
        encoded_string = base64.b64encode(image_file.read()).decode('utf-8')
    # Returning the image as an encoded string
    return encoded_string


def get_caption(image_in):
    # Sending a request to the BLIP2 Gradio Space API
    BLIP2_GRADIO_API_URL = "https://nielsr-comparing-captioning-models.hf.space/run/predict"
    response = requests.post(BLIP2_GRADIO_API_URL, json={
        "data": ["data:image/jpg;base64," + encode(image_in)]
    }).json()
    data = response["data"][-1]
    return data


def Image_similarity_search(image_in, search_query):
    if search_query == '':
        # Get the image caption from the BLIP2 Gradio Space
        img_caption = get_caption(image_in)
    else:
        img_caption = search_query
    print(f"Image caption from the BLIP2 Gradio Space or the search_query is - {img_caption}")

    # Searching the vector spaces; each call returns the closest match first
    search_result = search_index.similarity_search(img_caption)[0]
    search_result1 = search_index1.similarity_search(img_caption)[0]
    search_result2 = search_index2.similarity_search(img_caption)[0]
    search_result3 = search_index3.similarity_search(img_caption)[0]

    # Formatting the search results: each result is a LangChain Document, so iterating it
    # yields (field, value) pairs - [0][1] is the page_content (the PlaygroundAI prompt)
    # and [-2][-1]['source'] is the image link stored in the metadata
    pai_prompt = list(search_result)[0][1]
    pai_prompt1 = list(search_result1)[0][1]
    pai_prompt2 = list(search_result2)[0][1]
    pai_prompt3 = list(search_result3)[0][1]

    pai_img_link = list(search_result)[-2][-1]['source']
    pai_img_link1 = list(search_result1)[-2][-1]['source']
    pai_img_link2 = list(search_result2)[-2][-1]['source']
    pai_img_link3 = list(search_result3)[-2][-1]['source']

    # Building an HTML gallery of the four matched PlaygroundAI images,
    # using the caption / search query as the alt text
    html_tag = f"""<div class='gallery'>
        <img src='{pai_img_link}' alt='{img_caption}' />
        <img src='{pai_img_link1}' alt='{img_caption}' />
        <img src='{pai_img_link2}' alt='{img_caption}' />
        <img src='{pai_img_link3}' alt='{img_caption}' />
        </div>"""
    return html_tag  # pai_prompt

# Defining the Gradio Blocks UI
with gr.Blocks(css="""#label_mid {padding-top: 2px; padding-bottom: 2px;}
                      #label_results {padding-top: 5px; padding-bottom: 1px;}
                      #col-container {max-width: 580px; margin-left: auto; margin-right: auto;}
                      #accordion {max-width: 580px; margin-left: auto; margin-right: auto;}
                      #img_search img {margin: 10px; max-width: 300px; max-height: 300px;}
                      """) as demo:
    gr.HTML("""<div>
        <h1>Using Gradio Demos as API</h1>
        <p>Get BLIP2 captions from Niels' Space via an API call,<br>
        use LangChain to create a vector space with PlaygroundAI prompts</p>
        </div>""")

    with gr.Accordion(label="Details about the working:", open=False, elem_id='accordion'):
        gr.HTML("""<p>
        ▶️ Do you see the "view api" link located in the footer of this application? Clicking that link opens a page documenting the REST API that developers can use to query the Interface function / Block events.<br><br>
        ▶️ In this demo, the first step is an API call to the BLIP2 Gradio demo to retrieve an image caption. Next, LangChain is used to create an embedding and a vector space for the image prompts and their respective "source" links from the PlaygroundAI dataset. Finally, a similarity search is performed over the vector space and the top result is returned.
        </p>""")

    #with gr.Column(scale=3):
    #    pass
    with gr.Column(elem_id="col-container"):
        label_top = gr.HTML(value="🖼️ Please upload an Image here👇 that will be used as your search query", elem_id="label_top")
        image_in = gr.Image(label="Upload an Image for search", type='filepath', elem_id="image_in")
        label_mid = gr.HTML(value="Or", elem_id='label_mid')
        label_bottom = gr.HTML(value="🔍 Type in your search query and press Enter 👇", elem_id="label_bottom")
        search_query = gr.Textbox(placeholder="Example: A small cat sitting", label="", elem_id="search_query", value='')
        label_results = gr.HTML(value="👇 These search results are from the PlaygroundAI 'Liked_Images' dataset available on GitHub", elem_id="label_results")
        img_search = gr.HTML(label='Image search results from the PlaygroundAI dataset', elem_id="img_search")
        #pai_prompt = gr.Textbox(label="Image prompt from PlaygroundAI dataset", elem_id="pai_prompt")
        #b1 = gr.Button("Retry").style(full_width=False)
        gr.HTML('''Duplicate Space''')

    # Wiring the events: an image upload or a submitted text query both trigger the search
    image_in.change(Image_similarity_search, [image_in, search_query], [img_search], api_name="PlaygroundAI_image_search")
    search_query.submit(Image_similarity_search, [image_in, search_query], [img_search], api_name='PlaygroundAI_text_search')
    #b1.click(Image_similarity_search, [image_in, search_query], [pai_prompt, img_search])

demo.launch(debug=True)
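# Because the two events above carry api_name values, other clients can query this demo the same
# way this script queries the BLIP2 Space (see the "view api" footer link for the exact schema).
# A minimal, hypothetical sketch - the host URL is a placeholder and the payload layout follows
# the Gradio 3.x /run/<api_name> convention that get_caption() above also relies on:
#
#   import requests
#   SPACE_URL = "https://<owner>-<space-name>.hf.space"      # placeholder, not the real URL
#   resp = requests.post(
#       f"{SPACE_URL}/run/PlaygroundAI_text_search",
#       json={"data": [None, "a small cat sitting"]},        # [image_in, search_query]
#   ).json()
#   html_result = resp["data"][0]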