import io
import os
import zipfile

import docx
import pandas as pd
import plotly.express as px
import streamlit as st
from comet_ml import Experiment
from PyPDF2 import PdfReader
from streamlit_extras.stylable_container import stylable_container
from transformers import pipeline
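# Assumed local setup (illustrative; the file name is a placeholder):
#   export COMET_API_KEY=<your-key>
#   export COMET_WORKSPACE=<your-workspace>
#   export COMET_PROJECT_NAME=<your-project>
#   streamlit run app.py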

st.set_page_config(layout="wide", page_title="Named Entity Recognition App")

# Comet ML credentials come from the environment so that no secrets are
# hard-coded; experiment logging is skipped when any of them is missing.
COMET_API_KEY = os.environ.get("COMET_API_KEY")
COMET_WORKSPACE = os.environ.get("COMET_WORKSPACE")
COMET_PROJECT_NAME = os.environ.get("COMET_PROJECT_NAME")

comet_initialized = bool(COMET_API_KEY and COMET_WORKSPACE and COMET_PROJECT_NAME)

# Per-session counter limiting how many times results can be requested.
if 'file_upload_attempts' not in st.session_state:
    st.session_state['file_upload_attempts'] = 0

max_attempts = 10
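# Note: st.session_state lives only as long as the browser session, so this
# counter resets on reload; a persistent store would be needed to enforce a
# strict daily limit.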


@st.cache_resource
def load_ner_model():
    """Loads the pre-trained NER model and caches it across reruns."""
    return pipeline("token-classification", model="h2oai/deberta_finetuned_pii", aggregation_strategy="first")
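# With aggregation_strategy="first", the pipeline returns one dict per merged
# entity span; keys are entity_group, score, word, start, and end, e.g.
# (illustrative values):
#   [{'entity_group': 'EMAIL', 'score': 0.99, 'word': 'jane@example.com',
#     'start': 17, 'end': 33}]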


st.subheader("9-Personal Data Named Entity Recognition Web App", divider="rainbow")
st.link_button("by nlpblogs", "https://nlpblogs.com", type="tertiary")

expander = st.expander("**Important notes on the 9-Personal Data Named Entity Recognition Web App**")
expander.write('''
**Named Entities:**
This 9-Personal Data Named Entity Recognition Web App predicts nine (9) categories:

1. **Account-related information**: Account name, account number, and transaction amounts
2. **Banking details**: BIC, IBAN, and Bitcoin or Ethereum addresses
3. **Personal information**: Full name, first name, middle name, last name, gender, and date of birth
4. **Contact information**: Email, phone number, and street address (including building number, city, county, state, and zip code)
5. **Job-related data**: Job title, job area, job descriptor, and job type
6. **Financial data**: Credit card number, issuer, CVV, and currency information (code, name, and symbol)
7. **Digital identifiers**: IP addresses (IPv4 and IPv6), MAC addresses, and user agents
8. **Online presence**: URL, usernames, and passwords
9. **Other sensitive data**: SSN, vehicle VIN and VRM, phone IMEI, and nearby GPS coordinates

Results are presented in an easy-to-read table, visualized in an interactive tree map, pie chart, and bar chart, and are available for download along with a Glossary of tags.

**How to Use:**
Upload your .pdf or .docx file. Then, click the 'Results' button to extract and tag entities in your text data.

**Usage Limits:**
You can request results up to 10 times.

**Customization:**
To change the app's background color to white or black, click the three-dot menu on the right-hand side of your app, go to 'Settings', and then choose the app theme, colors, and fonts.

**Technical issues:**
If your connection times out, please refresh the page or reopen the app's URL.

For any errors or inquiries, please contact us at info@nlpblogs.com.
''')

with st.sidebar:
    container = st.container(border=True)
    container.write("**Named Entity Recognition (NER)** is the task of extracting and tagging entities in text data. Entities can be persons, organizations, locations, countries, products, events, etc.")
    st.subheader("Related NLP Web Apps", divider="rainbow")
    st.link_button("8-Named Entity Recognition Web App", "https://nlpblogs.com/shop/named-entity-recognition-ner/8-named-entity-recognition-web-app/", type="primary")

upload_file = st.file_uploader("Upload your file. Accepted file formats include: .pdf, .docx", type=['pdf', 'docx'])
text = None
df = None

if upload_file is not None:
    file_extension = upload_file.name.split('.')[-1].lower()
    if file_extension == 'pdf':
        try:
            pdf_reader = PdfReader(upload_file)
            text = ""
            for page in pdf_reader.pages:
                # extract_text() can return None for pages without a text
                # layer, so guard against concatenating None.
                text += page.extract_text() or ""
            st.write("File uploaded successfully. Due to security protocols, the file content is hidden.")
        except Exception as e:
            st.error(f"An error occurred while reading the PDF: {e}")
            text = None
    elif file_extension == 'docx':
        try:
            doc = docx.Document(upload_file)
            text = "\n".join([para.text for para in doc.paragraphs])
            st.write("File uploaded successfully. Due to security protocols, the file content is hidden.")
        except Exception as e:
            st.error(f"An error occurred while reading the .docx file: {e}")
            text = None
    else:
        st.warning("Unsupported file type.")
        text = None
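# Note: PyPDF2 can only extract text from PDFs with an embedded text layer;
# scanned, image-only PDFs yield an empty string and hence no entities.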

st.divider()

if st.button("Results"):
    if not comet_initialized:
        st.warning("Comet ML is not initialized. Check the environment variables if you wish to log data.")

    if st.session_state['file_upload_attempts'] >= max_attempts:
        st.error(f"You have requested results {max_attempts} times. You have reached your daily request limit.")
        st.stop()

    if text is None:
        st.warning("Please upload a supported file (.pdf or .docx) before requesting results.")
        st.stop()

    st.session_state['file_upload_attempts'] += 1

    with st.spinner("Analyzing text...", show_time=True):
        model = load_ner_model()
        text_entities = model(text)
        df = pd.DataFrame(text_entities)

    # Strip punctuation from the extracted entity strings, then mark entries
    # that became empty as 'Unknown' and drop rows with missing values.
    pattern = r'[^\w\s]'
    df['word'] = df['word'].replace(pattern, '', regex=True)
    df = df.replace('', 'Unknown').dropna()

    if df.empty:
        st.warning("No entities were extracted from the uploaded text.")
        st.stop()
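    # Illustrative effect of the cleanup above: "192.168.0.1" -> "19216801",
    # "jane@example.com" -> "janeexamplecom" (punctuation is removed outright).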

    if comet_initialized:
        # One Comet experiment per "Results" click; it is closed with
        # experiment.end() at the bottom of this block.
        experiment = Experiment(
            api_key=COMET_API_KEY,
            workspace=COMET_WORKSPACE,
            project_name=COMET_PROJECT_NAME,
        )
        experiment.log_parameter("input_text_length", len(text))
        # log_table() expects a filename with an extension such as .csv.
        experiment.log_table("predicted_entities.csv", df)

    # Render the entity table with some light styling.
    properties = {"border": "2px solid gray", "color": "blue", "font-size": "16px"}
    df_styled = df.style.set_properties(**properties)
    st.dataframe(df_styled, use_container_width=True)

    with st.expander("See Glossary of tags"):
        st.write('''
- **word**: entity extracted from your text data
- **score**: confidence score; how confidently a tag has been assigned to a given entity
- **entity_group**: label (tag) assigned to a given extracted entity
- **start**: index of the start of the corresponding entity
- **end**: index of the end of the corresponding entity
        ''')

    st.subheader("Tree map", divider="rainbow")
    fig_treemap = px.treemap(df, path=[px.Constant("all"), 'word', 'entity_group'],
                             values='score', color='entity_group')
    fig_treemap.update_layout(margin=dict(t=50, l=25, r=25, b=25))
    st.plotly_chart(fig_treemap)
    if comet_initialized:
        experiment.log_figure(figure=fig_treemap, figure_name="entity_treemap")

    # With pandas >= 2.0 (already required by the 'count' column used in the
    # charts below), value_counts().reset_index() yields the columns
    # 'entity_group' and 'count' directly, so no renaming is needed.
    final_df_counts = df['entity_group'].value_counts().reset_index()
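    # Illustrative shape of final_df_counts:
    #   entity_group  count
    #   EMAIL             4
    #   USERNAME          2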

    col1, col2 = st.columns(2)
    with col1:
        st.subheader("Pie Chart", divider="rainbow")
        fig_pie = px.pie(final_df_counts, values='count', names='entity_group',
                         hover_data=['count'], title='Percentage of predicted labels')
        fig_pie.update_traces(textposition='inside', textinfo='percent+label')
        st.plotly_chart(fig_pie)
        if comet_initialized:
            experiment.log_figure(figure=fig_pie, figure_name="label_pie_chart")

    with col2:
        st.subheader("Bar Chart", divider="rainbow")
        fig_bar = px.bar(final_df_counts, x="count", y="entity_group", color="entity_group",
                         text_auto=True, title='Occurrences of predicted labels')
        st.plotly_chart(fig_bar)
        if comet_initialized:
            experiment.log_figure(figure=fig_bar, figure_name="label_bar_chart")

    # Glossary shipped alongside the results in the downloadable zip archive.
    dfa = pd.DataFrame(
        data={
            'word': ['entity extracted from your text data'],
            'score': ['confidence score; how confidently a tag has been assigned to a given entity'],
            'entity_group': ['label (tag) assigned to a given extracted entity'],
            'start': ['index of the start of the corresponding entity'],
            'end': ['index of the end of the corresponding entity'],
        })

    # Bundle the results and the glossary into an in-memory zip archive.
    buf = io.BytesIO()
    with zipfile.ZipFile(buf, "w") as myzip:
        myzip.writestr("Summary of the results.csv", df.to_csv(index=False))
        myzip.writestr("Glossary of tags.csv", dfa.to_csv(index=False))

    with stylable_container(
        key="download_button",
        css_styles="""button { background-color: yellow; border: 1px solid black; padding: 5px; color: black; }""",
    ):
        st.download_button(
            label="Download zip file",
            data=buf.getvalue(),
            file_name="nlpblogs_ner_results.zip",
            mime="application/zip",
        )
    if comet_initialized:
        # log_asset() accepts a file path or a file-like object; rewind the
        # buffer and pass it directly rather than raw bytes.
        buf.seek(0)
        experiment.log_asset(buf, file_name="downloadable_results.zip")

    st.divider()
    if comet_initialized:
        experiment.end()

st.write(f"Number of times you requested results: **{st.session_state['file_upload_attempts']}/{max_attempts}**")