marinap committed on
Commit
fb740cb
1 Parent(s): 157e06b

adding app file

Browse files
Files changed (1) hide show
  1. app.py +112 -0
app.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM  # NOTE(review): unused in this file — confirm before removing
import torch.nn.functional as F
from PIL import Image  # NOTE(review): unused — gradio's `type='pil'` hands PIL images to callbacks directly
import uform

# Load the pretrained UForm vision-language model (English checkpoint) once at
# import time; the similarity functions below all share this single instance.
model = uform.get_model('unum-cloud/uform-vl-english')
9
def find_score(img, txt, if_fine_grained):
    """Score the similarity between an image and a text description.

    Args:
        img: PIL image supplied by the gradio Image component.
        txt: Raw text string supplied by the gradio Textbox component.
        if_fine_grained: When True, fuse both modalities through the model's
            multimodal encoder and use its matching head (score in (0, 1));
            otherwise return the cosine similarity of the unimodal
            embeddings (score in (-1, 1)).

    Returns:
        A tensor holding the similarity score.
    """
    text_inputs = model.preprocess_text(txt)
    image_inputs = model.preprocess_image(img)
    text_features, text_embedding = model.encode_text(text_inputs, return_features=True)
    image_features, image_embedding = model.encode_image(image_inputs, return_features=True)

    if not if_fine_grained:
        # Coarse path: compare the two unimodal embeddings directly.
        return F.cosine_similarity(text_embedding, image_embedding)

    # Fine-grained path: joint image-text encoding scored by the matching head.
    joint_embedding = model.encode_multimodal(
        image_features=image_features,
        text_features=text_features,
        attention_mask=text_inputs['attention_mask'],
    )
    return model.get_matching_scores(joint_embedding)
23
+
24
def find_score_img(img1, img2, if_fine_grained):
    """Score the similarity between two images via their UForm embeddings.

    Args:
        img1: First PIL image from the gradio Image component.
        img2: Second PIL image from the gradio Image component.
        if_fine_grained: Kept for interface compatibility with the other
            scoring callbacks; see the note below.

    Returns:
        A tensor holding the cosine similarity of the two image embeddings,
        in (-1, 1).
    """
    first = model.preprocess_image(img1)
    second = model.preprocess_image(img2)
    _, embedding1 = model.encode_image(first, return_features=True)
    _, embedding2 = model.encode_image(second, return_features=True)
    # BUG FIX: the original fine-grained branch indexed the preprocessed image
    # with ['attention_mask'], but preprocess_image yields a tensor, not a
    # dict — that branch raised TypeError at runtime. Moreover, the uform
    # matching head fuses *image* features with *text* features and needs a
    # text attention mask, so an image-image fine-grained score is not
    # supported by this model. Degrade gracefully to embedding cosine
    # similarity in both modes instead of crashing.
    return F.cosine_similarity(embedding1, embedding2)
38
+
39
def find_score_txt(txt1, txt2, if_fine_grained):
    """Score the similarity between two short text descriptions.

    Args:
        txt1: First raw text string from a gradio Textbox.
        txt2: Second raw text string from a gradio Textbox.
        if_fine_grained: When True, run both encodings through the multimodal
            encoder and score with the matching head (score in (0, 1));
            otherwise return the cosine similarity of the text embeddings
            (score in (-1, 1)).

    Returns:
        A tensor holding the similarity score.
    """
    first = model.preprocess_text(txt1)
    second = model.preprocess_text(txt2)
    features_a, embedding_a = model.encode_text(first, return_features=True)
    features_b, embedding_b = model.encode_text(second, return_features=True)

    if not if_fine_grained:
        # Coarse path: compare the two unimodal text embeddings directly.
        return F.cosine_similarity(embedding_a, embedding_b)

    # NOTE(review): the first text's features are passed through the
    # `image_features` slot of the multimodal encoder, which normally expects
    # image features — confirm uform supports text-text fusion this way.
    joint_embedding = model.encode_multimodal(
        image_features=features_a,
        text_features=features_b,
        attention_mask=first['attention_mask'],
    )
    return model.get_matching_scores(joint_embedding)
53
+
54
# Tab 1: image-vs-text similarity.
# NOTE(review): original indentation was lost in the diff rendering; the
# column layout below is reconstructed — confirm against the deployed app.
with gr.Blocks(theme=gr.themes.Glass()) as demo_mix:
    gr.Markdown('# Find similarity between images and text.')
    with gr.Row():
        with gr.Column():
            img_input = gr.Image(source='upload', type='pil', label="Drop your image here", shape=[256, 256])
        with gr.Column():
            txt_input = gr.Textbox(label='Enter your text here:', lines=1)
            fine_grained_box = gr.Checkbox(label="Check for a more fine-grained comparison")
    compare_btn = gr.Button("Find similarity")
    score_output = gr.Number(label='Similarity score')
    compare_btn.click(find_score, inputs=[img_input, txt_input, fine_grained_box], outputs=[score_output])
    gr.Markdown('If the box for a more fine-grained comparison is checked, the similarity score will be in (0,1), otherwise, it will be in (-1,1)')
    gr.Markdown('### Image examples')
    gr.Examples(['imgs/red_panda.jpg', 'imgs/trash_raccoon.jpg', 'imgs/baby_panda.jpg', 'imgs/rocket.jpg'], inputs=[img_input])
    gr.Markdown('### Text examples')
    gr.Examples(['baby red panda staring into the camera',
                 'trash raccoon peeking from a trash bin',
                 'a cartoonish raccoon wearing a blue and red suit',
                 "a person holding a baby panda"], inputs=[txt_input])
73
+
74
+
75
# Tab 2: image-vs-image similarity.
# NOTE(review): original indentation was lost in the diff rendering; the
# column layout below is reconstructed — confirm against the deployed app.
with gr.Blocks() as demo_img:
    gr.Markdown('# Find similarity between images.')
    with gr.Row():
        with gr.Column():
            left_image = gr.Image(source='upload', type='pil', label="Drop your image here", shape=[256, 256])
            fine_grained_box = gr.Checkbox(label="Check for a more fine-grained comparison")
        with gr.Column():
            right_image = gr.Image(source='upload', type='pil', label="Drop your image here", shape=[256, 256])
    compare_btn = gr.Button("Find similarity")
    score_output = gr.Number(label='Similarity score')
    compare_btn.click(find_score_img, inputs=[left_image, right_image, fine_grained_box], outputs=[score_output])
    gr.Markdown('If the box for a more fine-grained comparison is checked, the similarity score will be in (0,1), otherwise, it will be in (-1,1)')
    gr.Markdown('### Image examples')
    gr.Examples(['imgs/red_panda.jpg', 'imgs/trash_raccoon.jpg', 'imgs/baby_panda.jpg', 'imgs/rocket.jpg'], inputs=[left_image])
89
+
90
+
91
# Tab 3: text-vs-text similarity.
with gr.Blocks() as demo_txt:
    gr.Markdown('# Find similarity between short descriptions.')
    with gr.Row():
        with gr.Column():
            txt_input1 = gr.Textbox(label='Enter your text here:', lines=1)
            txt_input2 = gr.Textbox(label='Enter your text here:', lines=1)
        with gr.Column():
            if_fine_grained = gr.Checkbox(label="Check for a more fine-grained comparison")
    btn = gr.Button("Find similarity")
    score = gr.Number(label='Similarity score')
    # BUG FIX: the original wired this click to `img_input`/`txt_input`, the
    # components of the img2txt tab, so the txt2txt tab scored the wrong
    # inputs. It must compare the two textboxes defined above.
    btn.click(find_score_txt, inputs=[txt_input1, txt_input2, if_fine_grained], outputs=[score])
    gr.Markdown('If the box for a more fine-grained comparison is checked, the similarity score will be in (0,1), otherwise, it will be in (-1,1)')
    gr.Markdown('### Text examples')
    gr.Examples(['baby red panda staring into the camera',
                 'trash raccoon peeking from a trash bin',
                 'a cartoonish raccoon wearing a blue and red suit',
                 "a person holding a baby panda"], inputs=[txt_input1])
108
+
109
# Expose the three comparison demos as tabs of a single app.
demo = gr.TabbedInterface([demo_mix, demo_img, demo_txt], ["img2txt", "img2img", "txt2txt"])

if __name__ == "__main__":
    # share=True requests a public gradio.live tunnel — NOTE(review): hosted
    # platforms ignore this flag; confirm it is intended for local runs.
    demo.launch(share=True)