paul hilders committed on
Commit 9775911 · 1 Parent(s): 9ee9e02

Update descriptions again

Files changed (1)
  1. app.py +4 -1
app.py CHANGED
@@ -62,7 +62,7 @@ outputs = [gr.inputs.Image(type='pil', label="Output Image"), "highlight"]
 
 description = """A demonstration based on the Generic Attention-model Explainability method for Interpreting Bi-Modal
 Transformers by Chefer et al. (2021): https://github.com/hila-chefer/Transformer-MM-Explainability.
-
+<br> <br>
 This demo shows attributions scores on both the image and the text input when presented CLIP with a
 <text,image> pair. Attributions are computed as Gradient-weighted Attention Rollout (Chefer et al.,
 2021), and can be thought of as an estimate of the effective attention CLIP pays to its input when
@@ -125,11 +125,14 @@ inputs_NER = [input_img_NER, input_txt_NER]
 
 outputs_NER = ["highlight", gr.Gallery(type='pil', label="NER Entity explanations")]
 
+description_NER = """Automatically generated CLIP grounding explanations for
+named entities, retrieved from the spacy NER model."""
 
 iface_NER = gr.Interface(fn=NER_demo,
                          inputs=inputs_NER,
                          outputs=outputs_NER,
                          title="Named Entity Grounding explainability using CLIP",
+                         description=description_NER,
                          examples=[["example_images/London.png", "In this image we see Big Ben and the London Eye, on both sides of the river Thames."]],
                          cache_examples=False)
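
For reference, a minimal sketch (not this Space's actual implementation) of the Gradient-weighted Attention Rollout computation the updated description refers to (Chefer et al., 2021); the tensor shapes and the function name gradient_weighted_rollout are illustrative assumptions:

import torch

def gradient_weighted_rollout(attentions, grads):
    # attentions / grads: per-layer self-attention maps and their gradients
    # w.r.t. the chosen output score, each of shape (heads, tokens, tokens).
    num_tokens = attentions[0].shape[-1]
    # Relevance starts as the identity: each token is initially relevant only to itself.
    relevance = torch.eye(num_tokens)
    for attn, grad in zip(attentions, grads):
        # Weight each head's attention by its gradient, keep positive contributions,
        # and average over heads.
        cam = (grad * attn).clamp(min=0).mean(dim=0)
        # Rollout update: propagate this layer's relevance through the accumulated map.
        relevance = relevance + cam @ relevance
    return relevance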
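
The new description_NER mentions entities retrieved from the spaCy NER model; a hypothetical sketch of that retrieval step (the en_core_web_sm model name is an assumption, not taken from this commit):

import spacy

# Hypothetical: load a small English pipeline and extract named entities
# that could then be grounded one by one with CLIP.
nlp = spacy.load("en_core_web_sm")
doc = nlp("In this image we see Big Ben and the London Eye, on both sides of the river Thames.")
entities = [ent.text for ent in doc.ents]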