# Streamlit demo app (scraped from a Hugging Face Space, revision 884e39e,
# ~14,369 bytes). Page chrome and the line-number gutter from the scrape
# were removed so the file parses as Python.
import streamlit as st
import os
import json
import pandas as pd
import numpy as np
import random
import base64
import ast
import sparknlp
import pyspark.sql.functions as F
from pyspark.ml import Pipeline
from pyspark.sql import SparkSession
from sparknlp.annotator import *
from sparknlp.base import *
from sparknlp.pretrained import PretrainedPipeline
from pyspark.sql.types import StringType, IntegerType
@st.cache_resource
def init_spark():
    """Start (or reuse) the Spark NLP session; cached across Streamlit reruns."""
    return sparknlp.start()
@st.cache_resource
def create_pipeline(model):
    """Build the Spark NLP pipeline for the given pretrained NER model name.

    Stages: document assembly -> tokenization -> GloVe 100d word embeddings
    -> pretrained NER tagger -> entity-chunk conversion. Cached per model
    name by Streamlit so pretrained weights are downloaded only once.
    """
    document_stage = (
        DocumentAssembler()
        .setInputCol('text')
        .setOutputCol('document')
    )
    token_stage = (
        Tokenizer()
        .setInputCols(['document'])
        .setOutputCol('token')
    )
    embedding_stage = (
        WordEmbeddingsModel.pretrained('glove_100d')
        .setInputCols(["document", 'token'])
        .setOutputCol("embeddings")
    )
    tagger_stage = (
        NerDLModel.pretrained(model, 'en')
        .setInputCols(['document', 'token', 'embeddings'])
        .setOutputCol('ner')
    )
    chunk_stage = (
        NerConverter()
        .setInputCols(['document', 'token', 'ner'])
        .setOutputCol('ner_chunk')
    )
    return Pipeline(stages=[
        document_stage,
        token_stage,
        embedding_stage,
        tagger_stage,
        chunk_stage,
    ])
def fit_data(pipeline, data):
    """Fit `pipeline` on an empty frame and annotate `data` with a LightPipeline.

    NOTE(review): relies on the module-level `spark` session having been
    initialized (via init_spark()) before this is called.
    """
    empty_df = spark.createDataFrame([['']]).toDF('text')
    fitted_model = pipeline.fit(empty_df)
    light_pipeline = LightPipeline(fitted_model)
    return light_pipeline.fullAnnotate(data)
def get_color(l):
    """Return a hex color string for NER label `l`.

    Known labels come from the module-level LABEL_COLORS map (whose keys are
    lowercase); unknown labels get a random color with channels capped at 200
    so overlaid text stays readable.
    """
    # Bug fix: the original tested membership with `str(l).lower()` but then
    # indexed with `l.lower()`, which raised AttributeError for any
    # non-string label. Normalize once and reuse the key.
    key = str(l).lower()
    if key in LABEL_COLORS:
        return LABEL_COLORS[key]
    r = lambda: random.randint(0, 200)
    return '#%02X%02X%02X' % (r(), r(), r())
def simplified_display_annotations(text, annotations, labels):
    """Render `text` as HTML with each annotated chunk highlighted.

    `annotations` is a list of parsed annotation lists where index 1 is the
    begin offset, index 2 the (inclusive) end offset, index 3 the chunk text
    and index 4 a metadata dict carrying the 'entity' key. `labels` lists the
    entity types that receive a background color.
    """
    def span_html(content, background_color=None, entity_name=None):
        style = f'style="background-color: {background_color};"' if background_color else ""
        if entity_name:
            return f'<span {style}>{content} <span>({entity_name})</span></span>'
        return f'<span {style}>{content}</span>'

    colors = {label: get_color(label) for label in labels}
    pieces = []
    cursor = 0
    for chunk in annotations:
        begin, end = chunk[1], chunk[2]
        entity = chunk[4]['entity']
        # Emit the un-annotated text between the previous chunk and this one.
        if cursor < begin:
            pieces.append(span_html(text[cursor:begin]))
        pieces.append(span_html(chunk[3], colors.get(entity), entity if entity in colors else None))
        cursor = end + 1
    # Trailing text after the last chunk.
    if cursor < len(text):
        pieces.append(span_html(text[cursor:]))
    return "".join(pieces)
def parse_text_to_complex_list(text):
    """Parse the string form of a Spark NLP Annotation into a flat list.

    The input looks like "Annotation(chunk, 0, 4, Peter, {'entity': 'PER'}, [])".
    The leading "Annotation(" (11 characters) and trailing ")" are stripped,
    the remainder is split on top-level commas (commas nested inside {...} or
    [...] are preserved), and each element is coerced: integers -> int,
    quoted strings -> unquoted str, dict/list literals -> ast.literal_eval,
    anything else stays a raw string.
    """
    # Fixes vs. original: removed the dead nested helper
    # `parse_inner_structure` (defined but never called), and negative
    # integers such as "-1" are now parsed as int instead of falling through
    # as raw strings (`isdigit()` rejects a leading '-').
    body = text.strip()[11:-1]  # drop "Annotation(" prefix and ")" suffix

    # Split on commas that are not nested inside braces/brackets, tracking
    # nesting with a simple depth counter.
    elements = []
    buffer = ''
    depth = 0
    for char in body:
        if char in '{[':
            depth += 1
        elif char in '}]':
            depth -= 1
        elif char == ',' and depth == 0:
            elements.append(buffer.strip())
            buffer = ''
            continue
        buffer += char
    elements.append(buffer.strip())

    # Coerce each element to the most specific type we can.
    parsed_elements = []
    for element in elements:
        element = element.strip()
        digits = element[1:] if element.startswith('-') else element
        if digits.isdigit():
            parsed_elements.append(int(element))
        elif element.startswith(('\'', '"')):
            parsed_elements.append(element.strip('\'"'))
        elif element.startswith(('{', '[')):
            parsed_elements.append(ast.literal_eval(element))
        else:
            parsed_elements.append(element)
    return parsed_elements
############ SETTING UP THE PAGE LAYOUT ############
### SIDEBAR CONTENT ###
language_info = {
"EN": {
"title": "Recognize entities in text",
"description": "Recognize Persons, Locations, Organizations and Misc entities using out of the box pretrained Deep Learning models based on BERT (ner_dl_bert) word embeddings.",
}
}
model = st.sidebar.selectbox("Choose the pretrained model", ["ner_dl_bert", "ner_dl"], help="For more info about the models visit: https://sparknlp.org/models")
st.title(language_info["EN"]["title"])
link = """<a href="https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/streamlit_notebooks/NER_EN.ipynb">
<img src="https://colab.research.google.com/assets/colab-badge.svg" style="zoom: 1.3" alt="Open In Colab"/></a>"""
st.sidebar.markdown('Reference notebook:')
st.sidebar.markdown(link, unsafe_allow_html=True)
### MAIN CONTENT ###
# Canned input texts offered in the example selectbox below.
examples = [
    "William Henry Gates III (born October 28, 1955) is an American business magnate, software developer, investor, and philanthropist. He is best known as the co-founder of Microsoft Corporation. During his career at Microsoft, Gates held the positions of chairman, chief executive officer (CEO), president and chief software architect, while also being the largest individual shareholder until May 2014. He is one of the best-known entrepreneurs and pioneers of the microcomputer revolution of the 1970s and 1980s. Born and raised in Seattle, Washington, Gates co-founded Microsoft with childhood friend Paul Allen in 1975, in Albuquerque, New Mexico; it went on to become the world's largest personal computer software company. Gates led the company as chairman and CEO until stepping down as CEO in January 2000, but he remained chairman and became chief software architect. During the late 1990s, Gates had been criticized for his business tactics, which have been considered anti-competitive. This opinion has been upheld by numerous court rulings. In June 2006, Gates announced that he would be transitioning to a part-time role at Microsoft and full-time work at the Bill & Melinda Gates Foundation, the private charitable foundation that he and his wife, Melinda Gates, established in 2000.[9] He gradually transferred his duties to Ray Ozzie and Craig Mundie. He stepped down as chairman of Microsoft in February 2014 and assumed a new post as technology adviser to support the newly appointed CEO Satya Nadella.",
    "The Mona Lisa is a 16th century oil painting created by Leonardo. It's held at the Louvre in Paris.",
    "When Sebastian Thrun started working on self-driving cars at Google in 2007, few people outside of the company took him seriously. “I can tell you very senior CEOs of major American car companies would shake my hand and turn away because I wasn’t worth talking to,” said Thrun, now the co-founder and CEO of online higher education startup Udacity, in an interview with Recode earlier this week.",
    "Facebook is a social networking service launched as TheFacebook on February 4, 2004. It was founded by Mark Zuckerberg with his college roommates and fellow Harvard University students Eduardo Saverin, Andrew McCollum, Dustin Moskovitz and Chris Hughes. The website's membership was initially limited by the founders to Harvard students, but was expanded to other colleges in the Boston area, the Ivy League, and gradually most universities in the United States and Canada.",
    "The history of natural language processing generally started in the 1950s, although work can be found from earlier periods. In 1950, Alan Turing published an article titled 'Computing Machinery and Intelligence' which proposed what is now called the Turing test as a criterion of intelligence",
    "Geoffrey Everest Hinton is an English Canadian cognitive psychologist and computer scientist, most noted for his work on artificial neural networks. Since 2013 he divides his time working for Google and the University of Toronto. In 2017, he cofounded and became the Chief Scientific Advisor of the Vector Institute in Toronto.",
    "When I told John that I wanted to move to Alaska, he warned me that I'd have trouble finding a Starbucks there.",
    "Steven Paul Jobs was an American business magnate, industrial designer, investor, and media proprietor. He was the chairman, chief executive officer (CEO), and co-founder of Apple Inc., the chairman and majority shareholder of Pixar, a member of The Walt Disney Company's board of directors following its acquisition of Pixar, and the founder, chairman, and CEO of NeXT. Jobs is widely recognized as a pioneer of the personal computer revolution of the 1970s and 1980s, along with Apple co-founder Steve Wozniak. Jobs was born in San Francisco, California, and put up for adoption. He was raised in the San Francisco Bay Area. He attended Reed College in 1972 before dropping out that same year, and traveled through India in 1974 seeking enlightenment and studying Zen Buddhism.",
    "Titanic is a 1997 American epic romance and disaster film directed, written, co-produced, and co-edited by James Cameron. Incorporating both historical and fictionalized aspects, it is based on accounts of the sinking of the RMS Titanic, and stars Leonardo DiCaprio and Kate Winslet as members of different social classes who fall in love aboard the ship during its ill-fated maiden voyage.",
    "Other than being the king of the north, John Snow is a an english physician and a leader in the development of anaesthesia and medical hygiene. He is considered for being the first one using data to cure cholera outbreak in 1834."
]
# Example picker with an optional free-text override.
st.subheader(language_info["EN"]["description"])
selected_text = st.selectbox("Select an example", examples)
custom_input = st.text_input("Try it for yourself!")
if custom_input:
    selected_text = custom_input
st.subheader('Selected Text')
st.write(selected_text)
# Run the NER pipeline on the chosen text.
spark = init_spark()
# Bug fix: the result used to be bound to `Pipeline`, shadowing the
# pyspark.ml Pipeline class that create_pipeline() instantiates; on a
# Streamlit rerun with a different model, create_pipeline would then call
# the pipeline *instance* instead of the class and crash.
nlp_pipeline = create_pipeline(model)
output = fit_data(nlp_pipeline, selected_text)
# Human-readable explanation for each NER label the selected models may emit;
# looked up when building the `entities` table below.
ner_mapping = {
    'PERSON': 'People, including fictional.',
    'PER': 'People, including fictional.',
    'HUM': 'Humans',
    'IND': 'Individuals',
    'NORP': 'Nationalities or religious or political groups.',
    'FAC': 'Buildings, airports, highways, bridges, etc.',
    'FACILITY': 'Buildings, airports, highways, bridges, etc.',
    'STRUCTURE': 'Structures like buildings, bridges, etc.',
    'ORG': 'Companies, agencies, institutions, etc.',
    'ORGANIZATION': 'Companies, agencies, institutions, etc.',
    'INSTITUTION': 'Educational, governmental, and other organizations.',
    'LOC': 'Countries, cities, states, mountain ranges, bodies of water.',
    'LOCATION': 'Countries, cities, states, mountain ranges, bodies of water.',
    'PLACE': 'Specific locations.',
    'GPE': 'Geopolitical entities, such as countries, cities, states.',
    'PRODUCT': 'Objects, vehicles, foods, etc. (Not services.)',
    'PROD': 'Product',
    'GOOD': 'Goods and products.',
    'EVENT': 'Named hurricanes, battles, wars, sports events, etc.',
    'OCCURRENCE': 'Occurrences and events.',
    'WORK_OF_ART': 'Titles of books, songs, etc.',
    'ART': 'Works of art, including books, paintings, songs, etc.',
    'LAW': 'Named documents made into laws.',
    'LEGISLATION': 'Laws and legal documents.',
    'LANGUAGE': 'Any named language.',
    'DATE': 'Absolute or relative dates or periods.',
    'TIME': 'Times smaller than a day.',
    'PERCENT': 'Percentage, including ”%“.',
    'MONEY': 'Monetary values, including unit.',
    'CURRENCY': 'Monetary values, including unit.',
    'QUANTITY': 'Measurements, as of weight or distance.',
    'MEASURE': 'Measurements and quantities.',
    'ORDINAL': '“first”, “second”, etc.',
    'CARDINAL': 'Numerals that do not fall under another type.',
    'NUMBER': 'Numbers and numerals.',
    'MISC': 'Miscellaneous entities, e.g. events, nationalities, products or works of art.',
    'MISCELLANEOUS': 'Miscellaneous entities.',
    'ENT': 'Entity (generic label).',
    'GPE_LOC': 'Geopolitical Entity',
    'ANIMAL': 'Animals, including fictional.',
    'PLANT': 'Plants, including fictional.',
    'SUBSTANCE': 'Substances and materials.',
    'DISEASE': 'Diseases and medical conditions.',
    'SYMPTOM': 'Symptoms and medical signs.',
    'MEDICAL': 'Medical terms and conditions.',
    'FOOD': 'Food items.',
    'DRINK': 'Drinks and beverages.',
    'VEHICLE': 'Types of vehicles.',
    'WEAPON': 'Weapons and armaments.',
    'TECHNOLOGY': 'Technological terms and devices.',
    'GAME': 'Games and sports.',
    'HOBBY': 'Hobbies and recreational activities.',
    'RELIGION': 'Religious terms and entities.',
    'MYTH': 'Mythological entities.',
    'ASTRONOMICAL': 'Astronomical entities (e.g., planets, stars).',
    'NATURAL_PHENOMENON': 'Natural phenomena (e.g., earthquakes, storms).',
    'CELESTIAL_BODY': 'Celestial bodies (e.g., stars, planets).',
    'DRV': 'Driver'
}
# One row per detected entity chunk, for the results table at the bottom.
# Robustness fix: the original indexed ner_mapping directly and raised
# KeyError for any label the map doesn't cover; fall back to a generic note.
entities = [
    {
        'text': ent.result,
        'start': ent.begin,
        'end': ent.end,
        'label': ent.metadata['entity'],
        'Explain NER Labels': ner_mapping.get(ent.metadata['entity'], 'No description available.'),
    }
    for ent in output[0]['ner_chunk']
]
# Fixed colors for the common CoNLL-style entity families; get_color() falls
# back to a random color for anything not listed. Keys must be lowercase.
LABEL_COLORS = {
    'per': '#0C8888', 'pers': '#0C8888', 'person': '#0C8888',
    'org': '#FF33C1',
    'misc': '#3196D4', 'mis': '#3196D4',
    'loc': '#5B00A3', 'location': '#5B00A3'
}
# Scrollable container into which the annotated HTML is injected.
HTML_WRAPPER = """<div class="scroll entities" style="overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem; margin-bottom: 2.5rem; white-space:pre-wrap">{}</div>"""
# Parse each annotation's string representation into a flat list suitable
# for simplified_display_annotations().
chunk_list = [parse_text_to_complex_list(str(chunk)) for chunk in output[0]['ner_chunk']]
st.subheader("Text annotated with identified Named Entities")
labels = [chunk.metadata['entity'] for chunk in output[0]['ner_chunk']]
ner = simplified_display_annotations(selected_text, chunk_list, labels)
st.markdown(HTML_WRAPPER.format(ner), unsafe_allow_html=True)
st.write(pd.DataFrame(entities))