Added another example
app.py CHANGED
@@ -6,9 +6,7 @@ processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
 
 
 def inference(input_img, captions):
-    captions_list = captions.split(",")
-    #url = "http://images.cocodataset.org/val2017/000000039769.jpg"
-    #image = Image.open(requests.get(url, stream=True).raw)
+    captions_list = captions.split(",")
     inputs = processor(text=captions_list, images=input_img, return_tensors="pt", padding=True)
     outputs = model(**inputs)
     logits_per_image = outputs.logits_per_image # this is the image-text similarity score
@@ -19,7 +17,8 @@ def inference(input_img, captions):
 title = "TSAI S18 Assignment: Use a pretrained CLIP model and give a demo on its workig"
 description = "A simple Gradio interface that accepts an image and some captions, and gives a score as to how much the caption describes the image "
 
-examples = [["cats.jpg","a photo of a cat, a photo of a dog"]
+examples = [["cats.jpg","a photo of a cat, a photo of a dog"],
+            ["personBicycle.jpg","person riding bicycle, person driving car, photo of a dog"]
             ]
 
 demo = gr.Interface(
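
For context, here is a minimal sketch of how the surrounding app.py could fit together. The diff only shows the lines above; the softmax step, the caption-to-probability dictionary returned for display, the gr.Interface arguments, and demo.launch() are assumptions added for illustration, not part of the commit.

import gradio as gr
from transformers import CLIPModel, CLIPProcessor

# Pretrained CLIP model and processor (shown in the diff context)
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")


def inference(input_img, captions):
    captions_list = captions.split(",")
    inputs = processor(text=captions_list, images=input_img, return_tensors="pt", padding=True)
    outputs = model(**inputs)
    logits_per_image = outputs.logits_per_image  # image-text similarity scores
    # Assumption: convert scores to probabilities and map each caption to its score
    probs = logits_per_image.softmax(dim=1)
    return {caption.strip(): float(prob) for caption, prob in zip(captions_list, probs[0])}


title = "TSAI S18 Assignment: Use a pretrained CLIP model and give a demo on its workig"
description = "A simple Gradio interface that accepts an image and some captions, and gives a score as to how much the caption describes the image "

examples = [["cats.jpg", "a photo of a cat, a photo of a dog"],
            ["personBicycle.jpg", "person riding bicycle, person driving car, photo of a dog"]]

# Assumption: interface wiring; only "demo = gr.Interface(" appears in the diff
demo = gr.Interface(
    fn=inference,
    inputs=[gr.Image(type="pil"), gr.Textbox(label="Comma-separated captions")],
    outputs=gr.Label(),
    title=title,
    description=description,
    examples=examples,
)

demo.launch()

With this wiring, the new example added by the commit ("personBicycle.jpg" with three candidate captions) shows up as a clickable preset in the Gradio demo, assuming that image file exists in the Space repository.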