pawlo2013 committed
Commit 0eadccf · Parent: 56c6b0d

disabled cache

Files changed (1):
  app.py  +7 -7
app.py CHANGED
@@ -52,10 +52,7 @@ def classify_and_visualize(img, device="cpu", discard_ratio=0.9, head_fusion="me
 
 
 def format_output(output):
-    return (
-        output["probabilities"],
-        output["heatmap"] if output["heatmap"] is not None else None,
-    )
+    return (output["probabilities"], output["heatmap"])
 
 
 # Function to load examples from a folder
@@ -76,9 +73,9 @@ def show_final_layer_attention_maps(
     with torch.no_grad():
        outputs = model(**tensor, output_attentions=True)
 
-    if outputs.attentions[0] is None:
-        print("Attention outputs are None.")
-        return None
+    # if outputs.attentions[0] is None:
+    #     print("Attention outputs are None.")
+    #     return None
 
     image = image - image.min()
     image = image / image.max()
@@ -144,6 +141,9 @@ iface = gr.Interface(
        gr.Image(label="Attention Heatmap"),
    ],
    examples=examples,
+    cache_examples=False,
+    allow_flagging=False,
+    concurrency_limit=1,
    title="Pneumonia X-Ray 3-Class Classification with Vision Transformer (ViT) using data augmentation",
    description="Upload an X-ray image to classify it as normal, viral or bacterial pneumonia. Checkout the model in more details [here](https://huggingface.co/pawlo2013/vit-pneumonia-x-ray_3_class). The examples presented are taken from the test set of [Kermany et al. (2018) dataset.](https://data.mendeley.com/datasets/rscbjbr9sj/2.) The attention heatmap over all layers of the transfomer done by the attention rollout techinique by the implementation of [jacobgil](https://github.com/jacobgil/vit-explain).",
 )
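
For context, a minimal sketch of what the gr.Interface call might look like after this commit. This is an assumption-laden reconstruction, not the full app.py: only examples=, cache_examples=, allow_flagging=, concurrency_limit=, title= and description= come from the diff; the fn, inputs and outputs wiring below is assumed, and it presumes a Gradio 4.x API.

# Sketch only: assumed Gradio 4.x; fn/inputs/outputs are guesses, the new kwargs mirror the diff.
import gradio as gr

iface = gr.Interface(
    fn=classify_and_visualize,               # inference function defined earlier in app.py
    inputs=gr.Image(type="pil"),             # assumed input component
    outputs=[
        gr.Label(label="Probabilities"),     # assumed first output component
        gr.Image(label="Attention Heatmap"),
    ],
    examples=examples,
    cache_examples=False,    # don't pre-run the model on every example when the Space builds
    allow_flagging=False,    # as in the commit; Gradio treats False like "never" (no flag button)
    concurrency_limit=1,     # handle one request at a time
    title="Pneumonia X-Ray 3-Class Classification with Vision Transformer (ViT) using data augmentation",
    description="...",       # full markdown description string as in app.py
)

iface.launch()

With cache_examples=False the model is no longer run on every example at startup; examples are evaluated live when clicked, which is presumably what the "disabled cache" commit message refers to.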