paul hilders committed
Commit 929c841
Parent(s): 5d101e4

Add NER highlighting

Files changed (1): app.py (+35 -4)
app.py CHANGED
@@ -48,6 +48,36 @@ def run_demo(image, text):
     for i, token in enumerate(text_tokens_decoded):
         highlighted_text.append((str(token), float(text_scores[i])))
 
+    return overlapped, highlighted_text
+
+
+# Default demo:
+input_img = gr.inputs.Image(type='pil', label="Original Image")
+input_txt = "text"
+inputs = [input_img, input_txt]
+
+outputs = [gr.inputs.Image(type='pil', label="Output Image"), "highlight"]
+
+
+iface = gr.Interface(fn=run_demo,
+                     inputs=inputs,
+                     outputs=outputs,
+                     title="CLIP Grounding Explainability",
+                     description="A demonstration based on the Generic Attention-model Explainability method for Interpreting Bi-Modal Transformers by Chefer et al. (2021): https://github.com/hila-chefer/Transformer-MM-Explainability.",
+                     examples=[["example_images/London.png", "London Eye"],
+                               ["example_images/London.png", "Big Ben"],
+                               ["example_images/harrypotter.png", "Harry"],
+                               ["example_images/harrypotter.png", "Hermione"],
+                               ["example_images/harrypotter.png", "Ron"],
+                               ["example_images/Amsterdam.png", "Amsterdam canal"],
+                               ["example_images/Amsterdam.png", "Old buildings"],
+                               ["example_images/Amsterdam.png", "Pink flowers"],
+                               ["example_images/dogs_on_bed.png", "Two dogs"],
+                               ["example_images/dogs_on_bed.png", "Book"],
+                               ["example_images/dogs_on_bed.png", "Cat"]])
+
+# NER demo:
+def NER_demo(image, text):
     # Apply NER to extract named entities, and run the explainability method
     # for each named entity.
     highlighed_entities = []
@@ -58,16 +88,16 @@ def run_demo(image, text):
 
     print(highlighed_entities)
 
-    return overlapped, highlighted_text
+    return highlighed_entities
 
 input_img = gr.inputs.Image(type='pil', label="Original Image")
 input_txt = "text"
 inputs = [input_img, input_txt]
 
-outputs = [gr.inputs.Image(type='pil', label="Output Image"), "highlight"]
+outputs = ["highlight"]
 
 
-iface = gr.Interface(fn=run_demo,
+iface_NER = gr.Interface(fn=NER_demo,
                      inputs=inputs,
                      outputs=outputs,
                      title="CLIP Grounding Explainability",
@@ -84,5 +114,6 @@ iface = gr.Interface(fn=run_demo,
                               ["example_images/dogs_on_bed.png", "Book"],
                               ["example_images/dogs_on_bed.png", "Cat"]])
 
-demo_tabs = gr.TabbedInterface([iface, iface], ["Default", "NER"])
+
+demo_tabs = gr.TabbedInterface([iface, iface_NER], ["Default", "NER"])
 demo_tabs.launch(debug=True)
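
Note: the diff elides the body of NER_demo (new lines 84-87), where the "Apply NER to extract named entities" comment is implemented. Below is a minimal sketch of one plausible shape for that step, assuming spaCy with the en_core_web_sm model and reusing run_demo from the existing code; extract_entities and ner_demo_body are hypothetical names, not the committed implementation.

# Sketch only: one way the elided NER step could be written (assumes spaCy is installed).
import spacy

nlp = spacy.load("en_core_web_sm")  # assumption: small English NER model is available

def extract_entities(text):
    # Return the named-entity strings spaCy finds in the prompt text.
    doc = nlp(text)
    return [ent.text for ent in doc.ents]

def ner_demo_body(image, text):
    # Run the existing explainability demo once per extracted entity
    # and collect each entity's per-token highlighting scores.
    highlighed_entities = []
    for entity in extract_entities(text):
        overlapped, highlighted_text = run_demo(image, entity)
        highlighed_entities.append((entity, highlighted_text))
    return highlighed_entities

Each entry pairs an entity string with the (token, score) tuples that run_demo already builds for the "highlight" output, so the NER tab could reuse the same highlighting data per entity.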