Martijn van Beers committed
Commit f992b4c
1 Parent(s): 17ad4ef

Make run_demo work with partial examples


Because of the automatic caching gradio performs for gradio.Examples,
run_demo can get called without the default value for model_name.
Rearrange the inputs and the handler so it still works in that case too.
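
For clarity, a minimal sketch of the fallback pattern this commit introduces. DEFAULT_MODEL is an illustrative name; the real app.py hard-codes "ViT-L/14" inline and goes on to load CLIP and produce the grounding output.

DEFAULT_MODEL = "ViT-L/14"  # illustrative constant; matches the Dropdown's default value

def run_demo(*args):
    # Live UI call: (image, text, model_name); cached-example call: (image, text) only.
    if len(args) == 3:
        image, text, model_name = args
    elif len(args) == 2:
        image, text = args
        model_name = DEFAULT_MODEL
    else:
        raise ValueError("Unexpected number of parameters")
    # The real function then loads the chosen CLIP model and runs the explainer.
    return image, text, model_name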

Files changed (1)
  1. app.py +23 -16
app.py CHANGED
@@ -53,7 +53,14 @@ import en_core_web_sm
 nlp = en_core_web_sm.load()
 
 # Gradio Section:
-def run_demo(model_name, image, text):
+def run_demo(*args):
+    if len(args) == 3:
+        image, text, model_name = args
+    elif len(args) == 2:
+        image, text = args
+        model_name = "ViT-L/14"
+    else:
+        raise ValueError("Unexpected number of parameters")
 
     model, preprocess = clip.load(model_name, device=device, jit=False)
     orig_image = pad_to_square(image)
@@ -77,9 +84,9 @@ def run_demo(model_name, image, text):
 # Default demo:
 
 default_inputs = [
-    gr.Dropdown(label="CLIP Model", choices=['ViT-B/16', 'ViT-B/32', 'ViT-L/14'], value="ViT-L/14"),
     gr.components.Image(type='pil', label="Original Image"),
     gr.components.Textbox(label="Image description"),
+    gr.Dropdown(label="CLIP Model", choices=['ViT-B/16', 'ViT-B/32', 'ViT-L/14'], value="ViT-L/14"),
 ]
 
 default_outputs = [
@@ -102,17 +109,17 @@ iface = gr.Interface(fn=run_demo,
                      outputs=default_outputs,
                      title="CLIP Grounding Explainability",
                      description=description,
-                     examples=[[None, "example_images/London.png", "London Eye"],
-                               [None, "example_images/London.png", "Big Ben"],
-                               [None, "example_images/harrypotter.png", "Harry"],
-                               [None, "example_images/harrypotter.png", "Hermione"],
-                               [None, "example_images/harrypotter.png", "Ron"],
-                               [None, "example_images/Amsterdam.png", "Amsterdam canal"],
-                               [None, "example_images/Amsterdam.png", "Old buildings"],
-                               [None, "example_images/Amsterdam.png", "Pink flowers"],
-                               [None, "example_images/dogs_on_bed.png", "Two dogs"],
-                               [None, "example_images/dogs_on_bed.png", "Book"],
-                               [None, "example_images/dogs_on_bed.png", "Cat"]])
+                     examples=[["example_images/London.png", "London Eye"],
+                               ["example_images/London.png", "Big Ben"],
+                               ["example_images/harrypotter.png", "Harry"],
+                               ["example_images/harrypotter.png", "Hermione"],
+                               ["example_images/harrypotter.png", "Ron"],
+                               ["example_images/Amsterdam.png", "Amsterdam canal"],
+                               ["example_images/Amsterdam.png", "Old buildings"],
+                               ["example_images/Amsterdam.png", "Pink flowers"],
+                               ["example_images/dogs_on_bed.png", "Two dogs"],
+                               ["example_images/dogs_on_bed.png", "Book"],
+                               ["example_images/dogs_on_bed.png", "Cat"]])
 
 # NER demo:
 def add_label_to_img(img, label, add_entity_label=True):
@@ -162,9 +169,9 @@ def NER_demo(image, text):
     return labeled_text, gallery_images
 
 inputs_NER = [
-    gr.Dropdown(label="CLIP Model", choices=['ViT-B/16', 'ViT-B/32', 'ViT-L/14'], value="ViT-L/14"),
     gr.Image(type='pil', label="Original Image"),
     gr.components.Textbox(label="Descriptive text"),
+    gr.Dropdown(label="CLIP Model", choices=['ViT-B/16', 'ViT-B/32', 'ViT-L/14'], value="ViT-L/14"),
 ]
 
 #colours = highlighter._style["color_map"]
@@ -184,8 +191,8 @@ iface_NER = gr.Interface(fn=NER_demo,
                          title="Named Entity Grounding explainability using CLIP",
                          description=description_NER,
                          examples=[
-                             [None, "example_images/London.png", "In this image we see Big Ben and the London Eye, on both sides of the river Thames."],
-                             [None, "example_images/harrypotter.png", "Hermione, Harry and Ron in their school uniform"],
+                             ["example_images/London.png", "In this image we see Big Ben and the London Eye, on both sides of the river Thames."],
+                             ["example_images/harrypotter.png", "Hermione, Harry and Ron in their school uniform"],
                          ],
                          cache_examples=False)
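
As a quick sanity check of the two call shapes, exercising only the argument-dispatch sketch above with illustrative values (the real run_demo additionally needs a PIL image and a working CLIP install):

# Full call, as the live UI makes it with the Dropdown value included.
print(run_demo("example_images/London.png", "London Eye", "ViT-B/32"))

# Shorter call, as the cached examples reportedly make it; falls back to "ViT-L/14".
print(run_demo("example_images/London.png", "London Eye"))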