Commit 08fb6d9 by vinid
Parent(s): f58e4d2

some updates

Files changed (6):
  1. app.py +8 -14
  2. details.py +61 -0
  3. home.py +20 -48
  4. introduction.md +0 -6
  5. requirements.txt +1 -1
  6. text2image.py +28 -45
app.py CHANGED
@@ -3,31 +3,25 @@ import text2image
 import image2image
 import visualization
 import streamlit as st
-import streamlit.components.v1 as components
-
-
+import details

 st.set_page_config(layout="wide")

 st.sidebar.title("WebPLIP")
-
-components.html('''
-    <!-- Google tag (gtag.js) -->
-    <script async src="https://www.googletagmanager.com/gtag/js?id=G-KPF04V95FN"></script>
-    <script>
-      window.dataLayer = window.dataLayer || [];
-      function gtag(){dataLayer.push(arguments);}
-      gtag('js', new Date());
-      gtag('config', 'G-KPF04V95FN');
-    </script>
-''')
+st.sidebar.markdown("## Menu")

 PAGES = {
     "Introduction": home,
+    "Details": details,
     "Text to Image": text2image,
     "Image to Image": image2image,
     "Visualization": visualization,
 }

 page = st.sidebar.radio("", list(PAGES.keys()))
+st.sidebar.markdown("## Links")
+
+st.sidebar.markdown("[PLIP Model](https://huggingface.co/vinid/plip)")
+st.sidebar.markdown("[OpenPath Dataset]()")
+st.sidebar.markdown("[PLIP Code](https://github.com/vinid/path_eval)")
 PAGES[page].app()
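Note: the sidebar dispatch above assumes each entry in `PAGES` is a module that exposes a top-level `app()` callable. A minimal sketch of that contract (the body is illustrative only; the real `details` page is the next file in this commit):

```python
# Minimal page-module contract assumed by app.py's PAGES dispatch:
# the module just needs a top-level app() function that renders the page.
import streamlit as st

def app():
    # Illustrative placeholder body; the actual details.py renders the
    # full write-up shown in the next file of this commit.
    st.markdown("## Details")
```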
details.py ADDED
@@ -0,0 +1,61 @@
+from pathlib import Path
+import streamlit as st
+import streamlit.components.v1 as components
+from PIL import Image
+
+def read_markdown_file(markdown_file):
+    return Path(markdown_file).read_text()
+
+
+def app():
+    #intro_markdown = read_markdown_file("introduction.md")
+    #st.markdown(intro_markdown, unsafe_allow_html=True)
+    st.markdown("# Leveraging medical Twitter to build a visual-language foundation model for pathology")
+
+    st.markdown("The lack of annotated publicly available medical images is a major barrier for innovations. At the same time, many de-identified images and much knowledge are shared by clinicians on public forums such as medical Twitter. Here we harness these crowd platforms to curate OpenPath, a large dataset of <b>208,414</b> pathology images paired with natural language descriptions. This is the largest public dataset for pathology images annotated with natural text. We demonstrate the value of this resource by developing PLIP, a multimodal AI with both image and text understanding, which is trained on OpenPath. PLIP achieves state-of-the-art zero-shot and few-shot performance for classifying new pathology images across diverse tasks. Moreover, PLIP enables users to retrieve similar cases by either image or natural language search, greatly facilitating knowledge sharing. Our approach demonstrates that publicly shared medical data is a tremendous opportunity that can be harnessed to advance biomedical AI.", unsafe_allow_html=True)
+
+    fig1ab = Image.open('resources/4x/Fig1ab.png')
+    st.image(fig1ab, caption='OpenPath Dataset', output_format='png')
+    st.caption('An example tweet')
+    components.html('''
+        <blockquote class="twitter-tweet">
+            <a href="https://twitter.com/xxx/status/1580753362059788288"></a>
+        </blockquote>
+        <script async src="https://platform.twitter.com/widgets.js" charset="utf-8">
+        </script>
+        ''',
+        height=500)
+
+
+    st.markdown("#### PLIP is trained on the largest public vision–language pathology dataset: OpenPath")
+
+    col1, col2 = st.columns([1, 1])
+    with col1:
+        st.markdown("Following the usage policy and guidelines from Twitter and other entities, we established the largest public vision–language pathology dataset to date. To ensure the quality of the data, OpenPath followed rigorous protocols for cohort inclusion and exclusion, including the removal of retweets, sensitive tweets, and non-pathology images, as well as text cleaning.", unsafe_allow_html=True)
+        st.markdown("The final OpenPath dataset consists of:", unsafe_allow_html=True)
+        st.markdown("- Tweets: 116,504 image–text pairs from Twitter posts (tweets) during Mar. 21, 2006 – Nov. 15, 2022 across 32 pathology subspecialty-specific hashtags;", unsafe_allow_html=True)
+        st.markdown("- Replies: 59,869 image–text pairs from the associated replies that received the highest number of likes in the tweet, if applicable;", unsafe_allow_html=True)
+        st.markdown("- PathLAION: 32,041 additional image–text pairs from outside the Twitter community, extracted from the LAION dataset.", unsafe_allow_html=True)
+        st.markdown("Leveraging this largest publicly available pathology dataset, which contains image–text pairs across 32 different pathology subspecialty-specific hashtags and in which each image has a detailed text description, we fine-tuned a pre-trained CLIP model and propose a multimodal deep learning model for pathology, PLIP.", unsafe_allow_html=True)
+    with col2:
+        fig1c = Image.open('resources/4x/Fig1c.png')
+        st.image(fig1c, caption='Pathology hashtags on Twitter', output_format='png')
+        fig1d = Image.open('resources/4x/Fig1d.png')
+        st.image(fig1d, caption='Number of words per sentence', output_format='png')
+
+
+
+    st.markdown("#### PLIP is trained by connecting images and text via contrastive learning")
+
+    col1, col2 = st.columns([3, 1])
+    with col1:
+        st.markdown("The PLIP model generates two embedding vectors, one from the text encoder and one from the image encoder. Via contrastive learning, these vectors are trained to be similar for paired images and texts and dissimilar for non-paired ones.", unsafe_allow_html=True)
+        fig1e = Image.open('resources/4x/Fig1e.png')
+        st.image(fig1e, caption='PLIP training', output_format='png')
+
+    with col2:
+        fig1f = Image.open('resources/4x/Fig1f.png')
+        st.image(fig1f, caption='Training illustration', output_format='png')
+
+
+
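Note: the contrastive-training description in the new page can be made concrete with a short sketch. This is an illustrative CLIP-style objective, not the actual PLIP training code; the tensor names and the fixed temperature are assumptions.

```python
import torch
import torch.nn.functional as F

def clip_style_contrastive_loss(image_emb, text_emb, temperature=0.07):
    """Illustrative CLIP-style loss: paired image/text embeddings are pulled
    together and non-paired ones pushed apart. Inputs are assumed to be
    L2-normalized (batch_size, dim) tensors from the two encoders."""
    logits = image_emb @ text_emb.T / temperature           # pairwise similarities
    targets = torch.arange(image_emb.size(0), device=image_emb.device)
    loss_i2t = F.cross_entropy(logits, targets)             # image -> matching text
    loss_t2i = F.cross_entropy(logits.T, targets)           # text -> matching image
    return (loss_i2t + loss_t2i) / 2
```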
home.py CHANGED
@@ -1,9 +1,6 @@
 from pathlib import Path
 import streamlit as st
 import streamlit.components.v1 as components
-import plotly.figure_factory as ff
-import numpy as np
-import pandas as pd
 from PIL import Image

 def read_markdown_file(markdown_file):
@@ -11,59 +8,34 @@ def read_markdown_file(markdown_file):


 def app():
-    #intro_markdown = read_markdown_file("introduction.md")
-    #st.markdown(intro_markdown, unsafe_allow_html=True)
-    st.markdown("# Leveraging medical Twitter to build a visual-language foundation model for pathology")

-    col1, col2 = st.columns([2, 1])
-    with col1:
-        st.markdown("The lack of annotated publicly available medical images is a major barrier for innovations. At the same time, many de-identified images and much knowledge are shared by clinicians on public forums such as medical Twitter. Here we harness these crowd platforms to curate OpenPath, a large dataset of <b>208,414</b> pathology images paired with natural language descriptions. This is the largest public dataset for pathology images annotated with natural text. We demonstrate the value of this resource by developing PLIP, a multimodal AI with both image and text understanding, which is trained on OpenPath. PLIP achieves state-of-the-art zero-shot and few-short performance for classifying new pathology images across diverse tasks. Moreover, PLIP enables users to retrieve similar cases by either image or natural language search, greatly facilitating knowledge sharing. Our approach demonstrates that publicly shared medical data is a tremendous opportunity that can be harnessed to advance biomedical AI.", unsafe_allow_html=True)
-
-        fig1ab = Image.open('resources/4x/Fig1ab.png')
-        st.image(fig1ab, caption='OpenPath Dataset', output_format='png')
-    with col2:
-        st.caption('An example of tweet')
-        components.html('''
-        <blockquote class="twitter-tweet">
-        <a href="https://twitter.com/xxx/status/1580753362059788288"></a>
-        </blockquote>
-        <script async src="https://platform.twitter.com/widgets.js" charset="utf-8">
-        </script>
-        ''',
-        height=500)
+    st.markdown("# A visual-language foundation model for pathology")
+    st.markdown("This is a webapp for PLIP, our new foundational AI model for pathology, and OpenPath, our new dataset, **from our recent work**: Leveraging medical Twitter to build a visual-language foundation model for pathology.")
+    st.markdown("### Pathology Language and Image Pretraining (PLIP)\n We develop PLIP, a multimodal AI with both image and text understanding. PLIP achieves state-of-the-art zero-shot and few-shot performance for classifying new pathology images across diverse tasks. Moreover, PLIP enables users to retrieve similar cases by either image or natural language search, greatly facilitating knowledge sharing. Our approach demonstrates that publicly shared medical data is a tremendous opportunity that can be harnessed to advance biomedical AI.")

+    fig1e = Image.open('resources/4x/Fig1e.png')
+    st.image(fig1e, caption='PLIP training procedure', output_format='png')

-    st.markdown("#### PLIP is trained on the largest public vision–language pathology dataset: OpenPath")
+    st.markdown("### OpenPath Dataset\nThe lack of annotated publicly available medical images is a major barrier for innovations. At the same time, many de-identified images and much knowledge are shared by clinicians on public forums such as medical Twitter. Here we harness these crowd platforms to curate OpenPath, a large dataset of **208,414** pathology images paired with natural language descriptions.")

-    col1, col2 = st.columns([1, 1])
-    with col1:
-        st.markdown("Following the usage policy and guidelines from Twitter and other entities, we established so far the largest public vision–language pathology dataset. To ensure the quality of the data, OpenPath followed rigorous protocols for cohort inclusion and exclusion, including the removal of retweets, sensitive tweets, and non-pathology images, as well as text cleaning.", unsafe_allow_html=True)
-        st.markdown("The final OpenPath dataset consists of:", unsafe_allow_html=True)
-        st.markdown("- Tweets: 116,504 image–text pairs from Twitter posts (tweets) during Mar. 21, 2006 – Nov. 15, 2022 across 32 pathology subspecialty-specific hashtags;", unsafe_allow_html=True)
-        st.markdown("- Replies: 59,869 image–text pairs from the associated replies that received the highest number of likes in the tweet, if applicable;", unsafe_allow_html=True)
-        st.markdown("- PathLAION: 32,041 additional image–text pairs from the Internet which are outside from the Twitter community extracted from the LAION dataset.", unsafe_allow_html=True)
-        st.markdown("Leveraging the largest publicly available pathology dataset which contains image–text pairs across 32 different pathology subspecialty-specific hashtags, where each image has detailed text descriptions, we fine-tuned a pre-trained CLIP model and proposed a multimodal deep learning model for pathology, PLIP.", unsafe_allow_html=True)
-    with col2:
-        fig1c = Image.open('resources/4x/Fig1c.png')
-        st.image(fig1c, caption='Pathology hashtags in Twitter', output_format='png')
-        fig1d = Image.open('resources/4x/Fig1d.png')
-        st.image(fig1d, caption='Number of words in sentence', output_format='png')
+    fig1ab = Image.open('resources/4x/Fig1ab.png')
+    st.image(fig1ab, caption='OpenPath Dataset', output_format='png')


-    st.markdown("#### PLIP is trained with connecting the image and text via contrastive learning")
-
-    col1, col2 = st.columns([3, 1])
-    with col1:
-        st.markdown("The proposed PLIP model generates two embedding vectors from both the text and image encoders. These vectors were then forced to be similar for each of the paired image and text vectors and dissimilar for non-paired image and text pairs via contrastive learning.", unsafe_allow_html=True)
-        fig1e = Image.open('resources/4x/Fig1e.png')
-        st.image(fig1e, caption='PLIP training', output_format='png')
-
-    with col2:
-        fig1f = Image.open('resources/4x/Fig1f.png')
-        st.image(fig1f, caption='Training illustration', output_format='png')
-
+    st.markdown("### Documentation\n"
+                "This webapp comes with different functionalities.\n"
+                "* Details: the Details page guides you through our work.\n"
+                "* Text to Image: allows users to perform text search on a database of images.\n"
+                "* Image to Image: allows users to perform image search on a database of images.\n"
+                "")

+    st.markdown("### Other Links\n"
+                "* Download OpenPath\n"
+                "* Code to reproduce [PLIP](https://github.com/vinid/path_eval) results\n"
+                "* Link to the [PLIP Model](https://huggingface.co/vinid/plip)\n"
+                "")

+    st.markdown("### Important Information\n")

     st.markdown('Disclaimer')
     st.caption('Please be advised that this function has been developed in compliance with the Twitter policy of data usage and sharing. It is important to note that the results obtained from this function are not intended to constitute medical advice or replace consultation with a qualified medical professional. The use of this function is solely at your own risk and should be consistent with applicable laws, regulations, and ethical considerations. We do not warrant or guarantee the accuracy, completeness, suitability, or usefulness of this function for any particular purpose, and we hereby disclaim any liability arising from any reliance placed on this function or any results obtained from its use. If you wish to review the original Twitter post, you should access the source page directly on Twitter.')
introduction.md DELETED
@@ -1,6 +0,0 @@
-
-# Leveraging medical Twitter to build a visual-language foundation model for pathology
-
-The lack of annotated publicly available medical images is a major barrier for innovations. At the same time, many de-identified images and much knowledge are shared by clinicians on public forums such as medical Twitter. Here we harness these crowd platforms to curate OpenPath, a large dataset of 208,414 pathology images paired with natural language descriptions. This is the largest public dataset for pathology images annotated with natural text. We demonstrate the value of this resource by developing PLIP, a multimodal AI with both image and text understanding, which is trained on OpenPath. PLIP achieves state-of-the-art zero-shot and few-short performance for classifying new pathology images across diverse tasks. Moreover, PLIP enables users to retrieve similar cases by either image or natural language search, greatly facilitating knowledge sharing. Our approach demonstrates that publicly shared medical data is a tremendous opportunity that can be harnessed to advance biomedical AI.
-
-![Alt Text](resources/4x/Fig1.png)
requirements.txt CHANGED
@@ -1,7 +1,7 @@
 git+https://github.com/openai/CLIP.git
 torch
 transformers
-pandas
+pandas==1.4.1
 numpy
 Pillow
 streamlit==1.19.0
text2image.py CHANGED
@@ -2,15 +2,12 @@ import streamlit as st
 import pandas as pd
 import numpy as np
 from PIL import Image
-import requests
 import pickle
 import tokenizers
-from io import BytesIO
 import torch
+
 from transformers import (
-    VisionTextDualEncoderModel,
-    AutoFeatureExtractor,
-    AutoTokenizer,
+
     CLIPModel,
     AutoProcessor
 )
@@ -63,7 +60,7 @@ def embed_texts(model, texts, processor):
 def app():

     st.title('Text to Image Retrieval')
-    st.markdown('#### A pathology image search engine that correlate texts directly with images.')
+    st.markdown('#### A pathology image search engine that goes from text to images.')

     col1, col2 = st.columns([1,1])
     with col1:
@@ -76,33 +73,33 @@ def app():
     meta, image_embedding, text_embedding, validation_subset_index = init()
     model, processor = load_path_clip()

-    st.markdown('#### Demo')
+    st.markdown('### Search')
+    st.markdown('How to use this: first of all, select a dataset on which to do retrieval.\n'
+                'Then, either select a predefined search query or input one yourself.')
+

     col1, col2 = st.columns(2)
     with col1:
-        data_options = ["All twitter data (03/21/2006 — 01/15/2023)",
-                        "Twitter validation data (11/16/2022 — 01/15/2023)"]
-        st.radio(
-            "Choose dataset for image retrieval 👉",
+        data_options = ["All Twitter Data (03/21/2006 — 01/15/2023)",
+                        "Validation Twitter data (11/16/2022 — 01/15/2023)"]
+        st.selectbox(
+            "Dataset",
             key="datapool",
             options=data_options,
         )
+
     with col2:
         retrieval_options = ["Image only",
-                             "Text and image (beta)",
+                             "Text and image (beta)",
                              ]
         st.radio(
-            "Similarity calcuation Mapping input with 👉",
+            "Similarity calculation 👉",
             key="calculation_option",
             options=retrieval_options,
         )

-
-
     col1, col2 = st.columns(2)
-    #query = st.text_input('Search Query', '')
-    col1_submit = False
-    show = False
+
     with col1:
         # Create selectbox
         examples = ['Breast tumor surrounded by fat',
@@ -112,42 +109,36 @@ def app():
                     'Breast cancer DCIS',
                     'Papillary carcinoma in breast tissue',
                     ]
-        query_1 = st.selectbox("Please select an example query", options=examples)
-        #st.info(f":white_check_mark: The written option is {query_1} ")
+        query_1 = st.selectbox("Select an example", options=examples)
+
         col1_submit = True
-        show = True
-
+
     with col2:
         form = st.form(key='my_form')
         query_2 = form.text_input(label='Or input your custom query:')
         submit_button = form.form_submit_button(label='Submit')
-
+
     if submit_button:
         col1_submit = False
-        show = True
-

     if col1_submit:
         query = query_1
     else:
         query = query_2

-
     input_text = embed_texts(model, [query], processor)[0].detach().cpu().numpy()
     input_text = input_text/np.linalg.norm(input_text)
-
-
-    if st.session_state.calculation_option == retrieval_options[0]: # Image only
+
+    # Sort IDs by cosine similarity from high to low
+
+    if st.session_state.calculation_option == retrieval_options[0]: # Image only
         similarity_scores = input_text.dot(image_embedding.T)
-    else: # Text and Image
+    else: # Text and Image
         similarity_scores_i = input_text.dot(image_embedding.T)
         similarity_scores_t = input_text.dot(text_embedding.T)
-        similarity_scores_i = similarity_scores_i/np.max(similarity_scores_i)
-        similarity_scores_t = similarity_scores_t/np.max(similarity_scores_t)
-        similarity_scores = (similarity_scores_i + similarity_scores_t)/2
-
-
-
+        similarity_scores_i = similarity_scores_i / np.max(similarity_scores_i)
+        similarity_scores_t = similarity_scores_t / np.max(similarity_scores_t)
+        similarity_scores = (similarity_scores_i + similarity_scores_t) / 2

     ############################################################
     # Get top results
@@ -166,7 +157,7 @@ def app():
     ############################################################
     # Display results
     ############################################################
-
+
     st.markdown('Your input query: %s' % query)
     st.markdown('#### Top 5 results:')
     topk_options = ['1st', '2nd', '3rd', '4th', '5th']
@@ -203,14 +194,6 @@ def app():



-
-
-
-
-
-
-
-
     st.markdown('Disclaimer')
     st.caption('Please be advised that this function has been developed in compliance with the Twitter policy of data usage and sharing. It is important to note that the results obtained from this function are not intended to constitute medical advice or replace consultation with a qualified medical professional. The use of this function is solely at your own risk and should be consistent with applicable laws, regulations, and ethical considerations. We do not warrant or guarantee the accuracy, completeness, suitability, or usefulness of this function for any particular purpose, and we hereby disclaim any liability arising from any reliance placed on this function or any results obtained from its use. If you wish to review the original Twitter post, you should access the source page directly on Twitter.')
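Note: the retrieval step above reduces to cosine similarity between the normalized query embedding and precomputed image (and optionally text) embeddings, followed by a top-k selection. A self-contained sketch with dummy data; array shapes and names are illustrative stand-ins for what `init()` and `embed_texts()` produce.

```python
import numpy as np

rng = np.random.default_rng(0)

# Stand-ins for the precomputed, L2-normalized embedding matrices loaded in init().
image_embedding = rng.normal(size=(1000, 512))
image_embedding /= np.linalg.norm(image_embedding, axis=1, keepdims=True)
text_embedding = rng.normal(size=(1000, 512))
text_embedding /= np.linalg.norm(text_embedding, axis=1, keepdims=True)

# Query embedding as produced by embed_texts(...), then normalized.
input_text = rng.normal(size=512)
input_text /= np.linalg.norm(input_text)

# "Image only" mode: cosine similarity against the image embeddings.
similarity_scores = input_text.dot(image_embedding.T)

# "Text and image" mode: average the two max-normalized score vectors.
scores_i = input_text.dot(image_embedding.T)
scores_t = input_text.dot(text_embedding.T)
similarity_scores = (scores_i / scores_i.max() + scores_t / scores_t.max()) / 2

# Top-5 indices, highest similarity first.
top_k = np.argsort(similarity_scores)[::-1][:5]
print(top_k, similarity_scores[top_k])
```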