import streamlit as st
import sparknlp
import os
import pandas as pd
from sparknlp.base import *
from sparknlp.annotator import *
from pyspark.ml import Pipeline
from sparknlp.pretrained import PretrainedPipeline
from annotated_text import annotated_text

# Page configuration
st.set_page_config(
    layout="wide",
    initial_sidebar_state="auto"
)

# CSS for styling
st.markdown("""
""", unsafe_allow_html=True)

@st.cache_resource
def init_spark():
    return sparknlp.start()

@st.cache_resource
def create_pipeline(model):
    document_assembler = DocumentAssembler() \
        .setInputCol("text") \
        .setOutputCol("document")

    tokenizer = Tokenizer() \
        .setInputCols(["document"]) \
        .setOutputCol("token")

    embeddings = WordEmbeddingsModel.pretrained("glove_840B_300", "xx") \
        .setInputCols(["document", "token"]) \
        .setOutputCol("embeddings")

    ner_model = NerDLModel.pretrained("nerdl_atis_840b_300d", "en") \
        .setInputCols(["document", "token", "embeddings"]) \
        .setOutputCol("ner")

    ner_converter = NerConverter() \
        .setInputCols(["document", "token", "ner"]) \
        .setOutputCol("ner_chunk")

    # Chain the stages: raw text -> document -> tokens -> embeddings -> NER tags -> entity chunks
    pipeline = Pipeline(stages=[
        document_assembler,
        tokenizer,
        embeddings,
        ner_model,
        ner_converter
    ])
    return pipeline

def fit_data(pipeline, data):
    # Fit on an empty DataFrame (the pretrained stages need no training),
    # then wrap the model in a LightPipeline for fast single-text inference.
    empty_df = spark.createDataFrame([['']]).toDF('text')
    pipeline_model = pipeline.fit(empty_df)
    model = LightPipeline(pipeline_model)
    result = model.fullAnnotate(data)
    return result

def annotate(data):
    # Interleave plain text with (chunk, label) tuples so annotated_text()
    # renders the recognized entities inline with the original sentence.
    document, chunks, labels = data["Document"], data["NER Chunk"], data["NER Label"]
    annotated_words = []
    for chunk, label in zip(chunks, labels):
        parts = document.split(chunk, 1)
        if parts[0]:
            annotated_words.append(parts[0])
        annotated_words.append((chunk, label))
        document = parts[1]
    if document:
        annotated_words.append(document)
    annotated_text(*annotated_words)
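# A minimal sketch of how the helpers above compose outside Streamlit.
# `fullAnnotate` returns one dict per input text, keyed by output column
# ("document", "token", "ner", "ner_chunk", ...). The chunk values and
# entity labels shown are illustrative assumptions, not captured output:
#
#   spark = init_spark()
#   out = fit_data(create_pipeline("nerdl_atis_840b_300d"),
#                  "show me flights from boston to denver")
#   out[0]["ner_chunk"][0].result                  # e.g. "boston"
#   out[0]["ner_chunk"][0].metadata["entity"]      # e.g. "fromloc.city_name"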
# Set up the page layout
st.markdown('<div class="main-title">Automate Question Answering of Airline Traffic Information Systems</div>', unsafe_allow_html=True)
st.markdown("""
<div class="section">
<p>Understand user questions related to Airline Traffic, classify them into broad categories, and find and tag the relevant entities to get a structured representation of each question for automation.</p>
</div>
""", unsafe_allow_html=True)
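# What that "structured representation" looks like in practice. This example
# is illustrative only; the labels follow the ATIS slot-filling scheme the
# model was trained on, and exact output depends on the pretrained model:
#
#   "what flights are available from pittsburgh to baltimore on thursday morning"
#     fromloc.city_name          -> "pittsburgh"
#     toloc.city_name            -> "baltimore"
#     depart_date.day_name       -> "thursday"
#     depart_time.period_of_day  -> "morning"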
""", unsafe_allow_html=True) # Sidebar content model = st.sidebar.selectbox( "Choose the pretrained model", ["nerdl_atis_840b_300d"], help="For more info about the models visit: https://sparknlp.org/models" ) # Reference notebook link in sidebar link = """ Open In Colab """ st.sidebar.markdown('Reference notebook:') st.sidebar.markdown(link, unsafe_allow_html=True) # Load examples folder_path = f"inputs/{model}" examples = [ lines[1].strip() for filename in os.listdir(folder_path) if filename.endswith('.txt') for lines in [open(os.path.join(folder_path, filename), 'r', encoding='utf-8').readlines()] if len(lines) >= 2 ] selected_text = st.selectbox("Select an example", examples) custom_input = st.text_input("Try it with your own Sentence!") text_to_analyze = custom_input if custom_input else selected_text st.subheader('Full example text') HTML_WRAPPER = """
{}
""" st.markdown(HTML_WRAPPER.format(text_to_analyze), unsafe_allow_html=True) # Initialize Spark and create pipeline spark = init_spark() pipeline = create_pipeline(model) output = fit_data(pipeline, text_to_analyze) # Display matched sentence st.subheader("Processed output:") results = { 'Document': output[0]['document'][0].result, 'NER Chunk': [n.result for n in output[0]['ner_chunk']], "NER Label": [n.metadata['entity'] for n in output[0]['ner_chunk']] } annotate(results) with st.expander("View DataFrame"): df = pd.DataFrame({'NER Chunk': results['NER Chunk'], 'NER Label': results['NER Label']}) df.index += 1 st.dataframe(df)