import nltk
nltk.download('stopwords')
nltk.download('punkt')
import pandas as pd
import classify_abs
import extract_abs
#pd.set_option('display.max_colwidth', None)
import streamlit as st
import spacy
import tensorflow as tf
import pickle
import plotly.graph_objects as go

#### LOGO ####
st.markdown('''NIH Genetic and Rare Diseases Information Center Logo''', unsafe_allow_html=True)
#st.markdown("![National Center for Advancing Translational Sciences (NCATS) Logo](https://huggingface.co/spaces/ncats/EpiPipeline4GARD/resolve/main/NCATS_logo.png)")
#st.markdown('''National Center for Advancing Translational Sciences Logo''', unsafe_allow_html=True)

#### TITLE ####
st.title("Epidemiology Extraction Pipeline for Rare Diseases")
#st.subheader("National Center for Advancing Translational Sciences (NIH/NCATS)")

#### CHANGE SIDEBAR WIDTH ####
st.markdown(
    """ """,
    unsafe_allow_html=True,
)

#### DESCRIPTION ####
st.markdown("This application was built by the [National Center for Advancing Translational Sciences (NCATS)](https://ncats.nih.gov/) for the [National Institutes of Health (NIH)](https://www.nih.gov/) [Genetic and Rare Diseases Information Center](https://rarediseases.info.nih.gov/) to automatically search PubMed abstracts for rare disease epidemiology information.")

#### SIDEBAR WIDGETS ####
# max_results is the maximum number of PubMed IDs to retrieve BEFORE filtering
max_results = st.sidebar.number_input("Maximum number of articles to find in PubMed", min_value=1, max_value=None, value=50)
filtering = st.sidebar.radio("What type of filtering would you like?", ('Strict', 'Lenient', 'None')).lower()
extract_diseases = st.sidebar.checkbox("Extract Rare Diseases", value=False)

#### MODEL LOADING ####
@st.experimental_singleton(show_spinner=False)
def load_models_experimental():
    classify_model_vars = classify_abs.init_classify_model()
    NER_pipeline, entity_classes = extract_abs.init_NER_pipeline()
    GARD_dict, max_length = extract_abs.load_GARD_diseases()
    return classify_model_vars, NER_pipeline, entity_classes, GARD_dict, max_length

#### DOWNLOAD FUNCTION ####
@st.cache
def convert_df(df):
    # IMPORTANT: Cache the conversion to prevent computation on every rerun
    return df.to_csv().encode('utf-8')

#### SANKEY FUNCTION ####
@st.cache(allow_output_mutation=True)
def epi_sankey(sankey_data, disease_or_gard_id):
    gathered, relevant, epidemiologic = sankey_data
    fig = go.Figure(data=[go.Sankey(
        node = dict(
            pad = 15,
            thickness = 20,
            line = dict(color = "white", width = 0.5),
            label = ["PubMed IDs Gathered", "Irrelevant Abstracts", "Relevant Abstracts Gathered", "Epidemiologic Abstracts", "Not Epidemiologic"],
            color = "purple"
        ),
        #label = ["A1", "A2", "B1", "B2", "C1", "C2"]
        link = dict(
            source = [0, 0, 2, 2],
            target = [2, 1, 3, 4],
            value = [relevant, gathered-relevant, epidemiologic, relevant-epidemiologic]
        ))])

    fig.update_layout(
        hovermode = 'x',
        title="Search for the Epidemiology of "+disease_or_gard_id,
        font=dict(size = 10, color = 'black'),
    )

    return fig

#### BEGIN APP ####
with st.spinner('Loading Epidemiology Models and Dependencies...'):
    classify_model_vars, NER_pipeline, entity_classes, GARD_dict, max_length = load_models_experimental()
    #classify_tokenizer, classify_model, NER_pipeline, entity_classes, GARD_dict, max_length = load_models()
    #Load spaCy models which cannot be cached due to hash function error
    #nlp = spacy.load('en_core_web_lg')
    #nlpSci = spacy.load("en_ner_bc5cdr_md")
    #nlpSci2 = spacy.load('en_ner_bionlp13cg_md')
    #classify_model_vars = (nlp, nlpSci, nlpSci2, classify_model, classify_tokenizer)
loaded = st.success('All Models and Dependencies Loaded!')

st.markdown("Input a rare disease term or GARD ID.")
disease_or_gard_id = st.text_input('')
loaded.empty()

st.markdown("Examples of rare diseases include [**Fellman syndrome**](https://rarediseases.info.nih.gov/diseases/1/gracile-syndrome), [**Classic Homocystinuria**](https://rarediseases.info.nih.gov/diseases/6667/classic-homocystinuria), [**7383**](https://rarediseases.info.nih.gov/diseases/7383/phenylketonuria), and [**GARD:0009941**](https://rarediseases.info.nih.gov/diseases/9941/fshmd1a).")
st.markdown("A full list of rare diseases tracked by GARD can be found [here](https://rarediseases.info.nih.gov/diseases/browse-by-first-letter).")

if disease_or_gard_id:
    df, sankey_data = extract_abs.streamlit_extraction(disease_or_gard_id, max_results, filtering, NER_pipeline, entity_classes, extract_diseases, GARD_dict, max_length, classify_model_vars)
    st.dataframe(df, height=100)

    csv = convert_df(df)
    st.download_button(
        label="Download epidemiology results for "+disease_or_gard_id+" as CSV",
        data=csv,
        file_name=disease_or_gard_id+'.csv',
        mime='text/csv',
    )
    #st.dataframe(data=None, width=None, height=None)

    fig = epi_sankey(sankey_data, disease_or_gard_id)
    #if st.button('Display Sankey Diagram of Automated Search'):
    st.plotly_chart(fig, use_container_width=True)
    # st.code(body, language="python")
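
# Usage note (a sketch, not part of the original script): this Streamlit app is
# launched from the command line. The filename "app.py" is an assumption; the
# local classify_abs / extract_abs modules and the spaCy models referenced in
# the commented-out loading code above (en_core_web_lg, en_ner_bc5cdr_md,
# en_ner_bionlp13cg_md) must be installed in the environment first.
#
#   streamlit run app.py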