import requests
import torch
import gradio as gr
from PIL import Image
from transformers import AutoProcessor, BlipForImageTextRetrieval
from transformers.utils import logging

logging.set_verbosity_error()

# Load the BLIP image-text matching (ITM) model and its processor
model = BlipForImageTextRetrieval.from_pretrained("Salesforce/blip-itm-base-coco")
processor = AutoProcessor.from_pretrained("Salesforce/blip-itm-base-coco")


def process_image(input_type, image_url, image_upload, text):
    # Load the image either from the given URL or from the uploaded file
    if input_type == "URL":
        raw_image = Image.open(requests.get(image_url, stream=True).raw).convert("RGB")
    else:
        raw_image = image_upload

    # Run the ITM head and convert its logits into a match probability
    inputs = processor(images=raw_image, text=text, return_tensors="pt")
    itm_scores = model(**inputs)[0]
    itm_score = torch.nn.functional.softmax(itm_scores, dim=1)
    itm_score = itm_score[0][1]  # probability that the text matches the image
    print(itm_score)

    if itm_score <= 0.35:
        cmnt = "which is not that great. Try again."
    elif itm_score <= 0.75:
        cmnt = "which is good, but you can improve it. Try again."
    elif itm_score == 1.0:
        cmnt = "and that is an unbelievable perfect score. You have achieved the near impossible. Congratulations!"
    else:
        cmnt = "which is excellent. Can you improve on it?"

    formatted_text = f"""
Your description score is {itm_score * 100:.2f}/100, {cmnt}
"""
    return formatted_text


def display_image_from_url(image_url):
    # Preview the image fetched from the URL
    if image_url:
        image = Image.open(requests.get(image_url, stream=True).raw).convert("RGB")
        return image
    return None


def toggle_inputs(input_type):
    # Show either the URL inputs or the upload widget, depending on the selected input type
    if input_type == "URL":
        return gr.update(visible=True), gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)
    else:
        return gr.update(visible=False), gr.update(visible=False), gr.update(visible=True), gr.update(visible=True)


with gr.Blocks() as demo:
    gr.Markdown(
        """
        # Challenge yourself by describing the image - test & demo app by Srinivas.V..
        Paste an image URL or upload an image, describe the image as well as you can, and submit to see your score.
        """
    )
    input_type = gr.Radio(choices=["URL", "Upload"], label="Input Type")
    image_url = gr.Textbox(label="Image URL", visible=False)
    url_image = gr.Image(type="pil", label="URL Image", visible=False)
    image_upload = gr.Image(type="pil", label="Upload Image", visible=False)
    description = gr.Textbox(label="Describe the image", visible=False, lines=3)

    input_type.change(fn=toggle_inputs, inputs=input_type, outputs=[image_url, url_image, image_upload, description])
    image_url.change(fn=display_image_from_url, inputs=image_url, outputs=url_image)

    submit_btn = gr.Button("Submit")
    processed_image = gr.HTML(label="Your challenge result")
    submit_btn.click(fn=process_image, inputs=[input_type, image_url, image_upload, description], outputs=processed_image)

demo.launch(debug=True, share=True)