Saghir committed on
Commit 45532a7
1 parent: 3e2e594

Update app.py

Files changed (1)
  1. app.py +33 -15
app.py CHANGED
@@ -16,33 +16,51 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 # Load PathDino model and image transforms
 model, image_transforms = get_pathDino_model("PathDino512.pth")
 
-# Increase the width of the sidebar
-st.write(
-    f"""
-    <style>
-    .sidebar .sidebar-content {{
-        width: 420px;
-    }}
-    </style>
-    """
-)
 
 st.sidebar.markdown("### PathDino")
 st.sidebar.markdown(
-    "PathDino is a lightweight Histopathology transformer consisting of just five small vision transformer blocks. "
+    "PathDino is a lightweight histopathology transformer consisting of just five small vision transformer blocks. "
     "PathDino is a customized ViT architecture, finely tuned to the nuances of histology images. It not only exhibits "
     "superior performance but also effectively reduces susceptibility to overfitting, a common challenge in histology "
     "image analysis.\n\n"
 )
 
+
 default_image_url_compare = "images/HistRotate.png"
-st.sidebar.image(default_image_url_compare, caption='A 360 rotation augmentation for training models on histopathology images. Unlike training on natural images where the rotation may change the context of the visual data, rotating a histopathology patch does not change the context and it improves the learning process for better reliable embedding learning.', width=300)
+st.sidebar.image(default_image_url_compare, caption='A 360 rotation augmentation for training models on histopathology images. Unlike training on natural images where the rotation may change the context of the visual data, rotating a histopathology patch does not change the context and it improves the learning process for better reliable embedding learning.', width=500)
 
 default_image_url_compare = "images/FigPathDino_parameters_FLOPs_compare.png"
-st.sidebar.image(default_image_url_compare, caption='PathDino Vs its counterparts. Number of Parameters (Millions) vs the patch-level retrieval with macro avg F-score of majority vote (MV@5) on CAMELYON16 dataset. The bubble size represents the FLOPs.', width=300)
+st.sidebar.image(default_image_url_compare, caption='PathDino Vs its counterparts. Number of Parameters (Millions) vs the patch-level retrieval with macro avg F-score of majority vote (MV@5) on CAMELYON16 dataset. The bubble size represents the FLOPs.', width=500)
 
 default_image_url_compare = "images/ActivationMap.png"
-st.sidebar.image(default_image_url_compare, caption='Attention Visualization. When visualizing attention patterns, our PathDino transformer outperforms HIPT-small and DinoSSLPath, despite being trained on a smaller dataset of 6 million TCGA patches. In contrast, DinoSSLPath and HIPT were trained on much larger datasets, with 19 million and 104 million TCGA patches, respectively.', width=300)
+st.sidebar.image(default_image_url_compare, caption='Attention Visualization. When visualizing attention patterns, our PathDino transformer outperforms HIPT-small and DinoSSLPath, despite being trained on a smaller dataset of 6 million TCGA patches. In contrast, DinoSSLPath and HIPT were trained on much larger datasets, with 19 million and 104 million TCGA patches, respectively.', width=500)
+
+
+st.sidebar.markdown("### Citation")
+# Create a code block for citations
+st.sidebar.markdown("""
+```markdown
+@article{alfasly2023PathDino,
+    title={Rotation-Agnostic Representation Learning for Histopathological Image Analysis},
+    author={Saghir, Alfasly and Abubakr, Shafique and Peyman, Nejat and Jibran, Khan and Areej, Alsaafin and Ghazal, Alabtah and H.R.Tizhoosh},
+    journal={arXiv preprint arXiv:xxxx.xxxxx},
+    year={2023}""")
+
+# Rhazes Lab and Mayo Clinic Logos
+st.sidebar.markdown("\n\n")
+# Create a two-column layout for logos and text
+col1, col2 = st.sidebar.columns(2)
+
+# Logo and text for My Logo Lab
+url_logo_lab = "images/rhazes_lab_logo.png"
+with col1:
+    st.image(url_logo_lab, width=150)
+# Logo and text for My Logo Company
+url_logo_company = "images/Mayo_Clinic_logo.png"
+with col2:
+    st.image(url_logo_company, width=150)
+st.sidebar.markdown("Rhazes Lab, \n Department of Artificial Intelligence and Informatics, \n Mayo Clinic, \n Rochester, MN, USA")
+
 
 
 
@@ -77,7 +95,7 @@ def generate_activation_maps(image):
     return attention_list
 
 # Streamlit UI
-st.title("PathDino - Compact ViT for Histopathology Image Analysis")
+st.title("PathDino - Compact ViT for histopathology Image Analysis")
 st.write("Upload a histology image to view the activation maps.")
 
 # uploaded_image = st.file_uploader("Upload an image", type=["jpg", "png", "jpeg"])
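
The hunks above only touch the sidebar and the page title; the upload-and-visualization flow that follows the commented-out `st.file_uploader` line is not part of this diff. Below is a minimal, hypothetical sketch of how such a flow is commonly wired in a Streamlit app, assuming `generate_activation_maps(image)` from app.py (which returns `attention_list`, per the second hunk's context) is in scope. It is not the actual continuation of app.py.

```python
# Hypothetical sketch, not part of this commit: a typical Streamlit upload flow
# for an attention-map viewer. Assumes generate_activation_maps(image) returns a
# list of attention maps, as suggested by the "def generate_activation_maps" hunk
# header and the "return attention_list" context line above.
import streamlit as st
from PIL import Image

uploaded_image = st.file_uploader("Upload an image", type=["jpg", "png", "jpeg"])

if uploaded_image is not None:
    # Decode the uploaded file and show it alongside its attention maps.
    image = Image.open(uploaded_image).convert("RGB")
    st.image(image, caption="Uploaded histology patch", width=300)

    # generate_activation_maps is assumed to be defined earlier in app.py.
    attention_list = generate_activation_maps(image)
    for idx, attention_map in enumerate(attention_list):
        st.image(attention_map, caption=f"Attention head {idx}", width=300)
```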