import os

import pandas as pd
import streamlit as st

import sparknlp
from sparknlp.base import *
from sparknlp.annotator import *
from sparknlp.pretrained import PretrainedPipeline
from pyspark.ml import Pipeline
from annotated_text import annotated_text
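
# Streamlit page configuration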
|
st.set_page_config(
    layout="wide",
    initial_sidebar_state="auto"
)
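
# Custom CSS for the main title and description sections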
st.markdown("""
|
|
<style>
|
|
.main-title {
|
|
font-size: 36px;
|
|
color: #4A90E2;
|
|
font-weight: bold;
|
|
text-align: center;
|
|
}
|
|
.section {
|
|
background-color: #f9f9f9;
|
|
padding: 10px;
|
|
border-radius: 10px;
|
|
margin-top: 10px;
|
|
}
|
|
.section p, .section ul {
|
|
color: #666666;
|
|
}
|
|
</style>
|
|
""", unsafe_allow_html=True)
|
|
|
|
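

# Start (or reuse) the Spark NLP session; cached so it is created only once
# per Streamlit server process.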
@st.cache_resource
def init_spark():
    return sparknlp.start()
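

# Build the NER pipeline: document assembly -> sentence detection ->
# tokenization -> BERT token classification -> NER chunk conversion.
# Cached so the pretrained model is downloaded and loaded only once.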
@st.cache_resource
def create_pipeline(model):
    document_assembler = DocumentAssembler() \
        .setInputCol('text') \
        .setOutputCol('document')

    sentence_detector = SentenceDetector() \
        .setInputCols(['document']) \
        .setOutputCol('sentence')

    tokenizer = Tokenizer() \
        .setInputCols(['sentence']) \
        .setOutputCol('token')

    token_classifier = BertForTokenClassification.pretrained("bert_token_classifier_hi_en_ner", "hi") \
        .setInputCols(['sentence', 'token']) \
        .setOutputCol('ner')

    ner_converter = NerConverter() \
        .setInputCols(['sentence', 'token', 'ner']) \
        .setOutputCol('ner_chunk')

    pipeline = Pipeline(stages=[
        document_assembler,
        sentence_detector,
        tokenizer,
        token_classifier,
        ner_converter
    ])
    return pipeline
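

# Fit the pipeline on an empty DataFrame (required before it can transform data),
# then wrap the fitted model in a LightPipeline for fast in-memory annotation.
# Relies on the module-level `spark` session created further below.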
def fit_data(pipeline, data):
    empty_df = spark.createDataFrame([['']]).toDF('text')
    pipeline_model = pipeline.fit(empty_df)
    model = LightPipeline(pipeline_model)
    result = model.fullAnnotate(data)
    return result
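

# Render the document with annotated_text, highlighting each NER chunk
# with its predicted label while keeping the surrounding text intact.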
def annotate(data):
    document, chunks, labels = data["Document"], data["NER Chunk"], data["NER Label"]
    annotated_words = []
    for chunk, label in zip(chunks, labels):
        parts = document.split(chunk, 1)
        if len(parts) < 2:
            # Chunk not found in the remaining text; skip it instead of raising.
            continue
        if parts[0]:
            annotated_words.append(parts[0])
        annotated_words.append((chunk, label))
        document = parts[1]
    if document:
        annotated_words.append(document)
    annotated_text(*annotated_words)
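

# Sidebar: model selection and a link to the reference notebook.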
model = st.sidebar.selectbox(
    "Choose the pretrained model",
    ["bert_token_classifier_hi_en_ner"],
    help="For more info about the models visit: https://sparknlp.org/models"
)
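
# Page title and description.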
title, sub_title = (
    'Named Entity Recognition for Hindi+English Text',
    'This model was imported from Hugging Face to perform Named Entity Recognition on code-mixed Hindi-English text, as provided by the LinCE repository.'
)
st.markdown(f'<div class="main-title">{title}</div>', unsafe_allow_html=True)
|
|
st.markdown(f'<div class="section"><p>{sub_title}</p></div>', unsafe_allow_html=True)
|
|
|
|
|
|
link = """
|
|
<a href="https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/streamlit_notebooks/NER_HINDI_ENGLISH.ipynb">
|
|
<img src="https://colab.research.google.com/assets/colab-badge.svg" style="zoom: 1.3" alt="Open In Colab"/>
|
|
</a>
|
|
"""
|
|
st.sidebar.markdown('Reference notebook:')
|
|
st.sidebar.markdown(link, unsafe_allow_html=True)
|
|
|
|
|
|
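
# Load example sentences: the second line of each .txt file in the model's
# input folder serves as one example.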
folder_path = f"inputs/{model}"
examples = []
for filename in os.listdir(folder_path):
    if not filename.endswith('.txt'):
        continue
    with open(os.path.join(folder_path, filename), 'r', encoding='utf-8') as f:
        lines = f.readlines()
    if len(lines) >= 2:
        examples.append(lines[1].strip())

selected_text = st.selectbox("Select an example", examples)
custom_input = st.text_input("Try it with your own sentence!")

text_to_analyze = custom_input if custom_input else selected_text

st.subheader('Full example text')
HTML_WRAPPER = """<div class="scroll entities" style="overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem; margin-bottom: 2.5rem; white-space:pre-wrap">{}</div>"""
st.markdown(HTML_WRAPPER.format(text_to_analyze), unsafe_allow_html=True)
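
# Start Spark, build the pipeline for the selected model, and run it on the text.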
spark = init_spark()
pipeline = create_pipeline(model)
output = fit_data(pipeline, text_to_analyze)
st.subheader("Processed output:")
|
|
|
|
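
# Collect the full document text plus each recognized chunk and its entity label.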
results = {
    'Document': output[0]['document'][0].result,
    'NER Chunk': [n.result for n in output[0]['ner_chunk']],
    'NER Label': [n.metadata['entity'] for n in output[0]['ner_chunk']]
}

annotate(results)

with st.expander("View DataFrame"):
    df = pd.DataFrame({'NER Chunk': results['NER Chunk'], 'NER Label': results['NER Label']})
    df.index += 1
    st.dataframe(df)