wzuidema committed
Commit
330a2ff
Parent: 141b0e4

Update app.py

Files changed (1):
  app.py +3 -4
app.py CHANGED
@@ -60,9 +60,8 @@ inputs = [input_img, input_txt]
 outputs = [gr.inputs.Image(type='pil', label="Output Image"), "highlight"]
 
 
-description = """A demonstration based on the Generic Attention-model Explainability method for Interpreting Bi-Modal
-Transformers by Chefer et al. (2021): https://github.com/hila-chefer/Transformer-MM-Explainability.
-<br> <br>
+description = """This demo is a copy of the demo CLIPGroundingExlainability built by Paul Hilders, Danilo de Goede and Piyush Bagad, as part of the course Interpretability and Explainability in AI (MSc AI, UvA, June 2022).
+<br> <br>
 This demo shows attributions scores on both the image and the text input when presenting CLIP with a
 <text,image> pair. Attributions are computed as Gradient-weighted Attention Rollout (Chefer et al.,
 2021), and can be thought of as an estimate of the effective attention CLIP pays to its input when
@@ -155,4 +154,4 @@ with demo_tabs:
 \[2\]: Abnar, S., & Zuidema, W. (2020). Quantifying attention flow in transformers. arXiv preprint arXiv:2005.00928. <br>
 \[3\]: [https://samiraabnar.github.io/articles/2020-04/attention_flow](https://samiraabnar.github.io/articles/2020-04/attention_flow) <br>
 """)
-demo_tabs.launch(show_error=True)
+demo_tabs.launch(show_error=True)
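
For context on the description kept in this diff: Gradient-weighted Attention Rollout combines attention rollout (Abnar & Zuidema, 2020, reference \[2\] in the demo) with gradients of the model's output score, following Chefer et al. (2021). Below is a minimal sketch of the per-layer update rule for a self-attention stack. The function name, tensor shapes, and the assumption that attention maps and their gradients have already been captured (e.g. via forward/backward hooks) are illustrative, not the demo's actual code.

```python
import torch

def gradient_weighted_rollout(attentions, gradients):
    """Sketch of Gradient-weighted Attention Rollout (Chefer et al., 2021).

    attentions: per-layer attention maps, each [heads, tokens, tokens]
    gradients:  gradients of the model's score w.r.t. each attention map,
                same shapes (assumed captured via hooks; plumbing omitted)
    Returns a [tokens, tokens] relevance map.
    """
    num_tokens = attentions[0].shape[-1]
    # Start from the identity: each token is initially relevant only to itself.
    relevance = torch.eye(num_tokens)
    for attn, grad in zip(attentions, gradients):
        # Weight attention by its gradient, keep positive contributions,
        # and average over heads.
        cam = (grad * attn).clamp(min=0).mean(dim=0)
        # Propagate relevance through this layer; the additive identity term
        # accounts for the residual (skip) connection.
        relevance = relevance + cam @ relevance
    return relevance

# Toy usage with random maps (stand-ins for hooked CLIP attentions):
layers, heads, tokens = 12, 8, 50
attns = [torch.rand(heads, tokens, tokens).softmax(-1) for _ in range(layers)]
grads = [torch.randn(heads, tokens, tokens) for _ in range(layers)]
rel = gradient_weighted_rollout(attns, grads)  # [tokens, tokens]
```

Plain attention rollout (reference \[2\]) can be seen as the special case where the gradient weighting is dropped and the head-averaged attention is mixed with the identity before multiplying across layers.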