abdullahmubeen10 commited on
Commit
aad3dc5
1 Parent(s): a332df1

Update Demo.py

Browse files
Files changed (1) hide show
  1. Demo.py +311 -311
Demo.py CHANGED
@@ -1,312 +1,312 @@
1
- import streamlit as st
2
- import sparknlp
3
- import os
4
- import pandas as pd
5
-
6
- from sparknlp.base import *
7
- from sparknlp.annotator import *
8
- from pyspark.ml import Pipeline
9
- from sparknlp.pretrained import PretrainedPipeline
10
- from annotated_text import annotated_text
11
- from streamlit_tags import st_tags
12
-
13
# Page configuration
st.set_page_config(
    layout="wide",
    initial_sidebar_state="auto"
)

# CSS for styling: the centered page title plus the grey description
# sections rendered below it. Injected once per rerun.
st.markdown("""
    <style>
        .main-title {
            font-size: 36px;
            color: #4A90E2;
            font-weight: bold;
            text-align: center;
        }
        .section {
            background-color: #f9f9f9;
            padding: 10px;
            border-radius: 10px;
            margin-top: 10px;
        }
        .section p, .section ul {
            color: #666666;
        }
    </style>
""", unsafe_allow_html=True)
39
-
40
@st.cache_resource
def init_spark():
    """Start the Spark NLP session via sparknlp.start(); cached across Streamlit reruns."""
    return sparknlp.start()
43
-
44
@st.cache_resource
def create_pipeline(model, task, zeroShotLables=None):
    """Build and return a Spark NLP ``Pipeline`` for the selected task.

    Args:
        model: Name of the model chosen in the sidebar. Currently unused:
            each branch loads a fixed pretrained model for its task.
        task: One of "Token Classification", "Zero-Shot Classification",
            "Sequence Classification" or "Question Answering".
        zeroShotLables: Candidate labels for zero-shot classification.
            Defaults to [''] when not supplied (same value as before).

    Returns:
        A ``pyspark.ml.Pipeline`` for the task, or ``None`` for an
        unrecognized task string.
    """
    # FIX: the default was a mutable list literal (['']). Use a None
    # sentinel so no list object is shared across calls; behavior for
    # existing callers is unchanged.
    if zeroShotLables is None:
        zeroShotLables = ['']

    document_assembler = DocumentAssembler() \
        .setInputCol('text') \
        .setOutputCol('document')

    # Multilingual sentence splitter; only the token-classification branch
    # consumes the 'sentence' column it produces.
    sentence_detector = SentenceDetectorDLModel.pretrained("sentence_detector_dl", "xx")\
        .setInputCols(["document"])\
        .setOutputCol("sentence")

    tokenizer = Tokenizer() \
        .setInputCols(['sentence']) \
        .setOutputCol('token')

    if task == "Token Classification":
        TCclassifier = RoBertaForTokenClassification \
            .pretrained("roberta_ner_roberta_large_ner_english", "en") \
            .setInputCols(["token", "sentence"]) \
            .setOutputCol("ner")

        # Groups IOB token tags into entity chunks for display.
        ner_converter = NerConverter() \
            .setInputCols(['sentence', 'token', 'ner']) \
            .setOutputCol('ner_chunk')

        TCpipeline = Pipeline(stages=[document_assembler, sentence_detector, tokenizer, TCclassifier, ner_converter])
        return TCpipeline

    elif task == "Zero-Shot Classification":
        # Zero-shot tokenizes the whole document, not per sentence.
        ZSCtokenizer = Tokenizer() \
            .setInputCols(['document']) \
            .setOutputCol('token')

        zeroShotClassifier = RoBertaForZeroShotClassification \
            .pretrained('roberta_base_zero_shot_classifier_nli', 'en') \
            .setInputCols(['token', 'document']) \
            .setOutputCol('class') \
            .setCaseSensitive(False) \
            .setMaxSentenceLength(512) \
            .setCandidateLabels(zeroShotLables)

        ZSCpipeline = Pipeline(stages=[document_assembler, ZSCtokenizer, zeroShotClassifier])
        return ZSCpipeline

    elif task == "Sequence Classification":
        SCtokenizer = Tokenizer() \
            .setInputCols(['document']) \
            .setOutputCol('token')

        sequence_classifier = RoBertaForSequenceClassification \
            .pretrained("roberta_classifier_acts_feedback1", "en") \
            .setInputCols(["document", "token"]) \
            .setOutputCol("class")

        SCpipeline = Pipeline(stages=[document_assembler, SCtokenizer, sequence_classifier])
        return SCpipeline

    elif task == "Question Answering":
        # QA takes two input columns (question, context) instead of 'text'.
        QAdocument_assembler = MultiDocumentAssembler()\
            .setInputCols(["question", "context"]) \
            .setOutputCols(["document_question", "document_context"])

        spanClassifier = RoBertaForQuestionAnswering.pretrained("roberta_qa_deepset_base_squad2","en") \
            .setInputCols(["document_question","document_context"]) \
            .setOutputCol("answer")

        QApipeline = Pipeline(stages=[QAdocument_assembler, spanClassifier])
        return QApipeline
111
-
112
def fit_data(pipeline, data, task, ques='', cont=''):
    """Fit *pipeline* and annotate the user input for the given task.

    Args:
        pipeline: Pipeline built by ``create_pipeline``.
        data: The single text to annotate (non-QA tasks).
        task: Task name selected in the sidebar.
        ques, cont: Question and context (Question Answering only).

    Returns:
        ``fullAnnotate`` output for single-text tasks, or the collected
        ``answer.result`` rows for Question Answering.

    Uses the module-level ``spark`` session.
    """
    # BUG FIX: 'Zero-Shot Classification' was missing from this list, so
    # zero-shot requests fell through to the QA branch below and were fed
    # an empty question/context DataFrame. All single-text tasks must go
    # through the LightPipeline path.
    if task in ['Token Classification', 'Zero-Shot Classification', 'Sequence Classification']:
        empty_df = spark.createDataFrame([['']]).toDF('text')
        pipeline_model = pipeline.fit(empty_df)
        # LightPipeline annotates plain strings without a Spark job.
        model = LightPipeline(pipeline_model)
        result = model.fullAnnotate(data)
        return result
    else:
        df = spark.createDataFrame([[ques, cont]]).toDF("question", "context")
        result = pipeline.fit(df).transform(df)
        return result.select('answer.result').collect()
123
-
124
def annotate(data):
    """Render the document with its NER chunks highlighted.

    *data* carries three parallel entries: the full text ("Document"),
    the recognised chunks ("NER Chunk") and their labels ("NER Label").
    Builds the interleaved plain-text / (chunk, label) sequence expected
    by ``annotated_text`` and renders it.
    """
    text = data["Document"]
    segments = []
    for chunk, label in zip(data["NER Chunk"], data["NER Label"]):
        # Consume the text up to and including the next occurrence of chunk.
        pieces = text.split(chunk, 1)
        if pieces[0]:
            segments.append(pieces[0])
        segments.append((chunk, label))
        text = pieces[1]
    # Trailing text after the last chunk, if any.
    if text:
        segments.append(text)
    annotated_text(*segments)
136
-
137
# Task -> {models, description} shown in the sidebar and page header.
# The keys drive the task selectbox and all task dispatching below.
tasks_models_descriptions = {
    "Token Classification": {
        "models": ["roberta_ner_roberta_large_ner_english"],
        "description": "The 'roberta_ner_roberta_large_ner_english' model is adept at token classification tasks, including named entity recognition (NER). It identifies and categorizes tokens in text, such as names, dates, and locations, enhancing the extraction of meaningful information from unstructured data."
    },
    "Zero-Shot Classification": {
        "models": ["roberta_base_zero_shot_classifier_nli"],
        "description": "The 'roberta_base_zero_shot_classifier_nli' model provides flexible text classification without needing training data for specific categories. It is ideal for dynamic scenarios where text needs to be categorized into topics like urgent issues, technology, or sports without prior labeling."
    },
    "Sequence Classification": {
        "models": ["roberta_classifier_acts_feedback1"],
        "description": "The 'roberta_classifier_acts_feedback1' model is proficient in sequence classification tasks, such as sentiment analysis and document categorization. It effectively determines the sentiment of reviews, classifies text, and sorts documents based on their content and context."
    },
    "Question Answering": {
        "models": ["roberta_qa_deepset_base_squad2"],
        "description": "The 'roberta_qa_deepset_base_squad2' model, based on RoBERTa, is designed for precise question answering. They excel in extracting answers from a given context, making them suitable for developing advanced QA systems, enhancing customer support, and retrieving specific information from text."
    }
}
155
-
156
# Sidebar content: task and model pickers. `task` selects the pipeline
# branch everywhere below; `model` is display-only (see create_pipeline).
task = st.sidebar.selectbox("Choose the task", list(tasks_models_descriptions.keys()))
model = st.sidebar.selectbox("Choose the pretrained model", tasks_models_descriptions[task]["models"], help="For more info about the models visit: https://sparknlp.org/models")

# Reference notebook link in sidebar (Colab badge).
link = """
<a href="https://github.com/JohnSnowLabs/spark-nlp-workshop/blob/d85f7594f0cedb324c6b56fd63b9f969a32e0f83/tutorials/streamlit_notebooks/RobertaTokenClassifier.ipynb">
    <img src="https://colab.research.google.com/assets/colab-badge.svg" style="zoom: 1.3" alt="Open In Colab"/>
</a>
"""
st.sidebar.markdown('Reference notebook:')
st.sidebar.markdown(link, unsafe_allow_html=True)

# Page content: title styled by the .main-title CSS rule, plus the
# task description in a bordered container.
title, sub_title = (f'RoBERTa for {task}', tasks_models_descriptions[task]["description"])
st.markdown(f'<div class="main-title">{title}</div>', unsafe_allow_html=True)
container = st.container(border=True)
container.write(sub_title)
174
-
175
# Load examples. Non-QA tasks map to a list of example texts; Question
# Answering maps to a {question: context} dict (note the different shape,
# which the selection logic below depends on).
examples_mapping = {
    "Token Classification": [
        "William Henry Gates III (born October 28, 1955) is an American business magnate, software developer, investor, and philanthropist. He is best known as the co-founder of Microsoft Corporation. During his career at Microsoft, Gates held the positions of chairman, chief executive officer (CEO), president and chief software architect, while also being the largest individual shareholder until May 2014. He is one of the best-known entrepreneurs and pioneers of the microcomputer revolution of the 1970s and 1980s. Born and raised in Seattle, Washington, Gates co-founded Microsoft with childhood friend Paul Allen in 1975, in Albuquerque, New Mexico; it went on to become the world's largest personal computer software company. Gates led the company as chairman and CEO until stepping down as CEO in January 2000, but he remained chairman and became chief software architect. During the late 1990s, Gates had been criticized for his business tactics, which have been considered anti-competitive. This opinion has been upheld by numerous court rulings. In June 2006, Gates announced that he would be transitioning to a part-time role at Microsoft and full-time work at the Bill & Melinda Gates Foundation, the private charitable foundation that he and his wife, Melinda Gates, established in 2000.[9] He gradually transferred his duties to Ray Ozzie and Craig Mundie. He stepped down as chairman of Microsoft in February 2014 and assumed a new post as technology adviser to support the newly appointed CEO Satya Nadella.",
        "The Mona Lisa is a 16th century oil painting created by Leonardo. It's held at the Louvre in Paris.",
        "When Sebastian Thrun started working on self-driving cars at Google in 2007, few people outside of the company took him seriously. “I can tell you very senior CEOs of major American car companies would shake my hand and turn away because I wasn’t worth talking to,” said Thrun, now the co-founder and CEO of online higher education startup Udacity, in an interview with Recode earlier this week.",
        "Facebook is a social networking service launched as TheFacebook on February 4, 2004. It was founded by Mark Zuckerberg with his college roommates and fellow Harvard University students Eduardo Saverin, Andrew McCollum, Dustin Moskovitz and Chris Hughes. The website's membership was initially limited by the founders to Harvard students, but was expanded to other colleges in the Boston area, the Ivy League, and gradually most universities in the United States and Canada.",
        "The history of natural language processing generally started in the 1950s, although work can be found from earlier periods. In 1950, Alan Turing published an article titled 'Computing Machinery and Intelligence' which proposed what is now called the Turing test as a criterion of intelligence",
        "Geoffrey Everest Hinton is an English Canadian cognitive psychologist and computer scientist, most noted for his work on artificial neural networks. Since 2013 he divides his time working for Google and the University of Toronto. In 2017, he cofounded and became the Chief Scientific Advisor of the Vector Institute in Toronto.",
        "When I told John that I wanted to move to Alaska, he warned me that I'd have trouble finding a Starbucks there.",
        "Steven Paul Jobs was an American business magnate, industrial designer, investor, and media proprietor. He was the chairman, chief executive officer (CEO), and co-founder of Apple Inc., the chairman and majority shareholder of Pixar, a member of The Walt Disney Company's board of directors following its acquisition of Pixar, and the founder, chairman, and CEO of NeXT. Jobs is widely recognized as a pioneer of the personal computer revolution of the 1970s and 1980s, along with Apple co-founder Steve Wozniak. Jobs was born in San Francisco, California, and put up for adoption. He was raised in the San Francisco Bay Area. He attended Reed College in 1972 before dropping out that same year, and traveled through India in 1974 seeking enlightenment and studying Zen Buddhism.",
        "Titanic is a 1997 American epic romance and disaster film directed, written, co-produced, and co-edited by James Cameron. Incorporating both historical and fictionalized aspects, it is based on accounts of the sinking of the RMS Titanic, and stars Leonardo DiCaprio and Kate Winslet as members of different social classes who fall in love aboard the ship during its ill-fated maiden voyage.",
        "Other than being the king of the north, John Snow is a an english physician and a leader in the development of anaesthesia and medical hygiene. He is considered for being the first one using data to cure cholera outbreak in 1834."
    ],
    # Trailing comments name the label each sentence is expected to match.
    "Zero-Shot Classification" : [
        "In today’s world, staying updated with urgent information is crucial as events can unfold rapidly and require immediate attention.", # Urgent
        "Mobile technology has become indispensable, allowing us to access news, updates, and connect with others no matter where we are.", # Mobile
        "For those who love to travel, the convenience of mobile apps has transformed how we plan and experience trips, providing real-time updates on flights, accommodations, and local attractions.", # Travel
        "The entertainment industry continually offers new movies that captivate audiences with their storytelling and visuals, providing a wide range of genres to suit every taste.", # Movie
        "Music is an integral part of modern life, with streaming platforms making it easy to discover new artists and enjoy favorite tunes anytime, anywhere.", # Music
        "Sports enthusiasts follow games and matches closely, with live updates and detailed statistics available at their fingertips, enhancing the excitement of every game.", # Sport
        "Weather forecasts play a vital role in daily planning, offering accurate and timely information to help us prepare for various weather conditions and adjust our plans accordingly.", # Weather
        "Technology continues to evolve rapidly, driving innovation across all sectors and improving our everyday lives through smarter devices, advanced software, and enhanced connectivity." # Technology
    ],
    "Sequence Classification": [
        "I had a fantastic day at the park with my friends and family, enjoying the beautiful weather and fun activities.", # Positive
        "The movie was a complete waste of time, with a terrible plot and poor acting.", # Negative
        "The meeting was rescheduled to next week due to a conflict in everyone's schedule.", # Neutral
        "I am thrilled with the service I received at the restaurant; the food was delicious and the staff were very friendly.", # Positive
        "The traffic was horrible this morning, causing me to be late for work and miss an important meeting.", # Negative
        "The report was submitted on time and included all the necessary information.", # Neutral
        "I love the new features on my phone; they make it so much easier to stay organized and connected.", # Positive
        "The customer service was disappointing, and I won't be returning to that store.", # Negative
        "The weather forecast predicts mild temperatures for the rest of the week.", # Neutral
        "My vacation was amazing, with stunning views and great experiences at every destination.", # Positive
        "I received a defective product and had a lot of trouble getting it replaced.", # Negative
        "The new policy will be implemented starting next month and applies to all employees.", # Neutral
    ],
    "Question Answering": {
        """What does increased oxygen concentrations in the patient’s lungs displace?""": """Hyperbaric (high-pressure) medicine uses special oxygen chambers to increase the partial pressure of O 2 around the patient and, when needed, the medical staff. Carbon monoxide poisoning, gas gangrene, and decompression sickness (the ’bends’) are sometimes treated using these devices. Increased O 2 concentration in the lungs helps to displace carbon monoxide from the heme group of hemoglobin. Oxygen gas is poisonous to the anaerobic bacteria that cause gas gangrene, so increasing its partial pressure helps kill them. Decompression sickness occurs in divers who decompress too quickly after a dive, resulting in bubbles of inert gas, mostly nitrogen and helium, forming in their blood. Increasing the pressure of O 2 as soon as possible is part of the treatment.""",
        """What category of game is Legend of Zelda: Twilight Princess?""": """The Legend of Zelda: Twilight Princess (Japanese: ゼルダの伝説 トワイライトプリンセス, Hepburn: Zeruda no Densetsu: Towairaito Purinsesu?) is an action-adventure game developed and published by Nintendo for the GameCube and Wii home video game consoles. It is the thirteenth installment in the The Legend of Zelda series. Originally planned for release on the GameCube in November 2005, Twilight Princess was delayed by Nintendo to allow its developers to refine the game, add more content, and port it to the Wii. The Wii version was released alongside the console in North America in November 2006, and in Japan, Europe, and Australia the following month. The GameCube version was released worldwide in December 2006.""",
        """Who is founder of Alibaba Group?""": """Alibaba Group founder Jack Ma has made his first appearance since Chinese regulators cracked down on his business empire. His absence had fuelled speculation over his whereabouts amid increasing official scrutiny of his businesses. The billionaire met 100 rural teachers in China via a video meeting on Wednesday, according to local government media. Alibaba shares surged 5% on Hong Kong's stock exchange on the news.""",
        """For what instrument did Frédéric write primarily for?""": """Frédéric François Chopin (/ˈʃoʊpæn/; French pronunciation: ​[fʁe.de.ʁik fʁɑ̃.swa ʃɔ.pɛ̃]; 22 February or 1 March 1810 – 17 October 1849), born Fryderyk Franciszek Chopin,[n 1] was a Polish and French (by citizenship and birth of father) composer and a virtuoso pianist of the Romantic era, who wrote primarily for the solo piano. He gained and has maintained renown worldwide as one of the leading musicians of his era, whose "poetic genius was based on a professional technique that was without equal in his generation." Chopin was born in what was then the Duchy of Warsaw, and grew up in Warsaw, which after 1815 became part of Congress Poland. A child prodigy, he completed his musical education and composed his earlier works in Warsaw before leaving Poland at the age of 20, less than a month before the outbreak of the November 1830 Uprising.""",
        """The most populated city in the United States is which city?""": """New York—often called New York City or the City of New York to distinguish it from the State of New York, of which it is a part—is the most populous city in the United States and the center of the New York metropolitan area, the premier gateway for legal immigration to the United States and one of the most populous urban agglomerations in the world. A global power city, New York exerts a significant impact upon commerce, finance, media, art, fashion, research, technology, education, and entertainment, its fast pace defining the term New York minute. Home to the headquarters of the United Nations, New York is an important center for international diplomacy and has been described as the cultural and financial capital of the world."""
    }
}
221
-
222
# Per-task input widgets. Question Answering needs a question/context
# pair (sets QUESTION and CONTEXT); every other task takes a single text
# (sets text_to_analyze via the try/except at the bottom).
if task == 'Question Answering':
    examples = list(examples_mapping[task].keys())
    selected_text = st.selectbox('Select an Example:', examples)
    st.subheader('Try it yourself!')
    custom_input_question = st.text_input('Create a question')
    custom_input_context = st.text_input("Create it's context")

    custom_examples = {}

    st.subheader('Selected Text')

    # A fully specified custom question/context pair takes precedence
    # over the selected example.
    if custom_input_question and custom_input_context:
        QUESTION = custom_input_question
        CONTEXT = custom_input_context
    elif selected_text:
        QUESTION = selected_text
        CONTEXT = examples_mapping[task][selected_text]

    st.markdown(f"**Question:** {QUESTION}")
    st.markdown(f"**Context:** {CONTEXT}")

else:
    examples = examples_mapping[task]
    selected_text = st.selectbox("Select an example", examples)
    custom_input = st.text_input("Try it with your own Sentence!")

    # Zero-shot additionally needs the candidate label set; editable tags
    # widget seeded with the eight labels the example sentences target.
    if task == 'Zero-Shot Classification':
        zeroShotLables = ["urgent", "mobile", "travel", "movie", "music", "sport", "weather", "technology"]
        lables = st_tags(
            label='Select labels',
            text='Press enter to add more',
            value=zeroShotLables,
            suggestions=[
                "Positive", "Negative", "Neutral",
                "Urgent", "Mobile", "Travel", "Movie", "Music", "Sport", "Weather", "Technology",
                "Happiness", "Sadness", "Anger", "Fear", "Surprise", "Disgust",
                "Informational", "Navigational", "Transactional", "Commercial Investigation",
                "Politics", "Business", "Sports", "Entertainment", "Health", "Science",
                "Product Quality", "Delivery Experience", "Customer Service", "Pricing", "Return Policy",
                "Education", "Finance", "Lifestyle", "Fashion", "Food", "Art", "History",
                "Culture", "Environment", "Real Estate", "Automotive", "Travel", "Fitness", "Career"],
            maxtags = -1)

# On the QA path `custom_input` is undefined, so the first line raises
# NameError and the except falls back to the selected question.
# NOTE(review): a bare `except:` hides every other failure here too —
# prefer `except NameError:`.
try:
    text_to_analyze = custom_input if custom_input else selected_text
    st.subheader('Full example text')
    HTML_WRAPPER = """<div class="scroll entities" style="overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem; margin-bottom: 2.5rem; white-space:pre-wrap">{}</div>"""
    st.markdown(HTML_WRAPPER.format(text_to_analyze), unsafe_allow_html=True)
except:
    text_to_analyze = selected_text
272
-
273
# Initialize Spark and create pipeline (both cached via st.cache_resource).
spark = init_spark()

if task == 'Zero-Shot Classification':
    pipeline = create_pipeline(model, task, zeroShotLables)
else:
    pipeline = create_pipeline(model, task)

# QUESTION/CONTEXT only exist on the Question Answering path; on the
# other paths the first call raises NameError and the except retries
# without them. NOTE(review): an explicit task check would be clearer
# than a bare `except:` used as control flow.
try:
    output = fit_data(pipeline, text_to_analyze, task, QUESTION, CONTEXT)
except:
    output = fit_data(pipeline, text_to_analyze, task)

# Display matched sentence
st.subheader("Prediction:")

if task == 'Token Classification':
    # Maps the value found in metadata['entity'] back to standard NER
    # labels; presumably the model's metadata carries these shortened
    # codes rather than PER/ORG/LOC/MISC — TODO confirm against output.
    abbreviation_mapping = {'R': 'PER', 'G': 'ORG', 'C': 'LOC', 'SC': 'MISC'}
    results = {
        'Document': output[0]['document'][0].result,
        'NER Chunk': [n.result for n in output[0]['ner_chunk']],
        'NER Label': [abbreviation_mapping.get(n.metadata['entity'], 'UNKNOWN') for n in output[0]['ner_chunk']]
    }
    # Inline highlighted view plus a 1-indexed chunk/label table.
    annotate(results)
    df = pd.DataFrame({'NER Chunk': results['NER Chunk'], 'NER Label': results['NER Label']})
    df.index += 1
    st.dataframe(df)

elif task == 'Zero-Shot Classification':
    st.markdown(f"Document Classified as: **{output[0]['class'][0].result}**")

elif task == 'Sequence Classification':
    st.markdown(f"Classified as : **{output[0]['class'][0].result}**")

elif task == "Question Answering":
    # QA output is collected Rows; output[0][0] is the answer string list.
    output_text = "".join(output[0][0])
    st.markdown(f"Answer: **{output_text}**")
310
-
311
-
312
 
 
1
+ import streamlit as st
2
+ import sparknlp
3
+ import os
4
+ import pandas as pd
5
+
6
+ from sparknlp.base import *
7
+ from sparknlp.annotator import *
8
+ from pyspark.ml import Pipeline
9
+ from sparknlp.pretrained import PretrainedPipeline
10
+ from annotated_text import annotated_text
11
+ from streamlit_tags import st_tags
12
+
13
+ # Page configuration
14
+ st.set_page_config(
15
+ layout="wide",
16
+ initial_sidebar_state="auto"
17
+ )
18
+
19
+ # CSS for styling
20
+ st.markdown("""
21
+ <style>
22
+ .main-title {
23
+ font-size: 36px;
24
+ color: #4A90E2;
25
+ font-weight: bold;
26
+ text-align: center;
27
+ }
28
+ .section {
29
+ background-color: #f9f9f9;
30
+ padding: 10px;
31
+ border-radius: 10px;
32
+ margin-top: 10px;
33
+ }
34
+ .section p, .section ul {
35
+ color: #666666;
36
+ }
37
+ </style>
38
+ """, unsafe_allow_html=True)
39
+
40
+ @st.cache_resource
41
+ def init_spark():
42
+ return sparknlp.start()
43
+
44
+ @st.cache_resource
45
+ def create_pipeline(model, task, zeroShotLables=['']):
46
+ document_assembler = DocumentAssembler() \
47
+ .setInputCol('text') \
48
+ .setOutputCol('document')
49
+
50
+ sentence_detector = SentenceDetectorDLModel.pretrained("sentence_detector_dl", "xx")\
51
+ .setInputCols(["document"])\
52
+ .setOutputCol("sentence")
53
+
54
+ tokenizer = Tokenizer() \
55
+ .setInputCols(['sentence']) \
56
+ .setOutputCol('token')
57
+
58
+ if task == "Token Classification":
59
+ TCclassifier = RoBertaForTokenClassification \
60
+ .pretrained("roberta_ner_roberta_large_ner_english", "en") \
61
+ .setInputCols(["token", "sentence"]) \
62
+ .setOutputCol("ner")
63
+
64
+ ner_converter = NerConverter() \
65
+ .setInputCols(['sentence', 'token', 'ner']) \
66
+ .setOutputCol('ner_chunk')
67
+
68
+ TCpipeline = Pipeline(stages=[document_assembler, sentence_detector, tokenizer, TCclassifier, ner_converter])
69
+ return TCpipeline
70
+
71
+ elif task == "Zero-Shot Classification":
72
+ ZSCtokenizer = Tokenizer() \
73
+ .setInputCols(['document']) \
74
+ .setOutputCol('token')
75
+
76
+ zeroShotClassifier = RoBertaForZeroShotClassification \
77
+ .pretrained('roberta_base_zero_shot_classifier_nli', 'en') \
78
+ .setInputCols(['token', 'document']) \
79
+ .setOutputCol('class') \
80
+ .setCaseSensitive(False) \
81
+ .setMaxSentenceLength(512) \
82
+ .setCandidateLabels(zeroShotLables)
83
+
84
+ ZSCpipeline = Pipeline(stages=[document_assembler, ZSCtokenizer, zeroShotClassifier])
85
+ return ZSCpipeline
86
+
87
+ elif task == "Sequence Classification":
88
+ SCtokenizer = Tokenizer() \
89
+ .setInputCols(['document']) \
90
+ .setOutputCol('token')
91
+
92
+ sequence_classifier = RoBertaForSequenceClassification \
93
+ .pretrained("roberta_classifier_acts_feedback1", "en") \
94
+ .setInputCols(["document", "token"]) \
95
+ .setOutputCol("class")
96
+
97
+ SCpipeline = Pipeline(stages=[document_assembler, SCtokenizer, sequence_classifier])
98
+ return SCpipeline
99
+
100
+ elif task == "Question Answering":
101
+ QAdocument_assembler = MultiDocumentAssembler()\
102
+ .setInputCols(["question", "context"]) \
103
+ .setOutputCols(["document_question", "document_context"])
104
+
105
+ spanClassifier = RoBertaForQuestionAnswering.pretrained("roberta_qa_deepset_base_squad2","en") \
106
+ .setInputCols(["document_question","document_context"]) \
107
+ .setOutputCol("answer")
108
+
109
+ QApipeline = Pipeline(stages=[QAdocument_assembler, spanClassifier])
110
+ return QApipeline
111
+
112
def fit_data(pipeline, data, task, ques='', cont=''):
    """Fit *pipeline* and annotate the user input for the given task.

    Single-text tasks (including Zero-Shot, added by this commit) go
    through LightPipeline.fullAnnotate; Question Answering builds a
    (question, context) DataFrame and collects ``answer.result``.
    Uses the module-level ``spark`` session.
    """
    if task in ['Token Classification', 'Zero-Shot Classification', 'Sequence Classification']:
        empty_df = spark.createDataFrame([['']]).toDF('text')
        pipeline_model = pipeline.fit(empty_df)
        # LightPipeline annotates plain strings without a Spark job.
        model = LightPipeline(pipeline_model)
        result = model.fullAnnotate(data)
        return result
    else:
        df = spark.createDataFrame([[ques, cont]]).toDF("question", "context")
        result = pipeline.fit(df).transform(df)
        return result.select('answer.result').collect()
123
+
124
+ def annotate(data):
125
+ document, chunks, labels = data["Document"], data["NER Chunk"], data["NER Label"]
126
+ annotated_words = []
127
+ for chunk, label in zip(chunks, labels):
128
+ parts = document.split(chunk, 1)
129
+ if parts[0]:
130
+ annotated_words.append(parts[0])
131
+ annotated_words.append((chunk, label))
132
+ document = parts[1]
133
+ if document:
134
+ annotated_words.append(document)
135
+ annotated_text(*annotated_words)
136
+
137
+ tasks_models_descriptions = {
138
+ "Token Classification": {
139
+ "models": ["roberta_ner_roberta_large_ner_english"],
140
+ "description": "The 'roberta_ner_roberta_large_ner_english' model is adept at token classification tasks, including named entity recognition (NER). It identifies and categorizes tokens in text, such as names, dates, and locations, enhancing the extraction of meaningful information from unstructured data."
141
+ },
142
+ "Zero-Shot Classification": {
143
+ "models": ["roberta_base_zero_shot_classifier_nli"],
144
+ "description": "The 'roberta_base_zero_shot_classifier_nli' model provides flexible text classification without needing training data for specific categories. It is ideal for dynamic scenarios where text needs to be categorized into topics like urgent issues, technology, or sports without prior labeling."
145
+ },
146
+ "Sequence Classification": {
147
+ "models": ["roberta_classifier_acts_feedback1"],
148
+ "description": "The 'roberta_classifier_acts_feedback1' model is proficient in sequence classification tasks, such as sentiment analysis and document categorization. It effectively determines the sentiment of reviews, classifies text, and sorts documents based on their content and context."
149
+ },
150
+ "Question Answering": {
151
+ "models": ["roberta_qa_deepset_base_squad2"],
152
+ "description": "The 'roberta_qa_deepset_base_squad2' model, based on RoBERTa, is designed for precise question answering. They excel in extracting answers from a given context, making them suitable for developing advanced QA systems, enhancing customer support, and retrieving specific information from text."
153
+ }
154
+ }
155
+
156
+ # Sidebar content
157
+ task = st.sidebar.selectbox("Choose the task", list(tasks_models_descriptions.keys()))
158
+ model = st.sidebar.selectbox("Choose the pretrained model", tasks_models_descriptions[task]["models"], help="For more info about the models visit: https://sparknlp.org/models")
159
+
160
+ # Reference notebook link in sidebar
161
+ link = """
162
+ <a href="https://github.com/JohnSnowLabs/spark-nlp-workshop/blob/d85f7594f0cedb324c6b56fd63b9f969a32e0f83/tutorials/streamlit_notebooks/RobertaTokenClassifier.ipynb">
163
+ <img src="https://colab.research.google.com/assets/colab-badge.svg" style="zoom: 1.3" alt="Open In Colab"/>
164
+ </a>
165
+ """
166
+ st.sidebar.markdown('Reference notebook:')
167
+ st.sidebar.markdown(link, unsafe_allow_html=True)
168
+
169
+ # Page content
170
+ title, sub_title = (f'RoBERTa for {task}', tasks_models_descriptions[task]["description"])
171
+ st.markdown(f'<div class="main-title">{title}</div>', unsafe_allow_html=True)
172
+ container = st.container(border=True)
173
+ container.write(sub_title)
174
+
175
+ # Load examples
176
+ examples_mapping = {
177
+ "Token Classification": [
178
+ "William Henry Gates III (born October 28, 1955) is an American business magnate, software developer, investor, and philanthropist. He is best known as the co-founder of Microsoft Corporation. During his career at Microsoft, Gates held the positions of chairman, chief executive officer (CEO), president and chief software architect, while also being the largest individual shareholder until May 2014. He is one of the best-known entrepreneurs and pioneers of the microcomputer revolution of the 1970s and 1980s. Born and raised in Seattle, Washington, Gates co-founded Microsoft with childhood friend Paul Allen in 1975, in Albuquerque, New Mexico; it went on to become the world's largest personal computer software company. Gates led the company as chairman and CEO until stepping down as CEO in January 2000, but he remained chairman and became chief software architect. During the late 1990s, Gates had been criticized for his business tactics, which have been considered anti-competitive. This opinion has been upheld by numerous court rulings. In June 2006, Gates announced that he would be transitioning to a part-time role at Microsoft and full-time work at the Bill & Melinda Gates Foundation, the private charitable foundation that he and his wife, Melinda Gates, established in 2000.[9] He gradually transferred his duties to Ray Ozzie and Craig Mundie. He stepped down as chairman of Microsoft in February 2014 and assumed a new post as technology adviser to support the newly appointed CEO Satya Nadella.",
179
+ "The Mona Lisa is a 16th century oil painting created by Leonardo. It's held at the Louvre in Paris.",
180
+ "When Sebastian Thrun started working on self-driving cars at Google in 2007, few people outside of the company took him seriously. “I can tell you very senior CEOs of major American car companies would shake my hand and turn away because I wasn’t worth talking to,” said Thrun, now the co-founder and CEO of online higher education startup Udacity, in an interview with Recode earlier this week.",
181
+ "Facebook is a social networking service launched as TheFacebook on February 4, 2004. It was founded by Mark Zuckerberg with his college roommates and fellow Harvard University students Eduardo Saverin, Andrew McCollum, Dustin Moskovitz and Chris Hughes. The website's membership was initially limited by the founders to Harvard students, but was expanded to other colleges in the Boston area, the Ivy League, and gradually most universities in the United States and Canada.",
182
+ "The history of natural language processing generally started in the 1950s, although work can be found from earlier periods. In 1950, Alan Turing published an article titled 'Computing Machinery and Intelligence' which proposed what is now called the Turing test as a criterion of intelligence",
183
+ "Geoffrey Everest Hinton is an English Canadian cognitive psychologist and computer scientist, most noted for his work on artificial neural networks. Since 2013 he divides his time working for Google and the University of Toronto. In 2017, he cofounded and became the Chief Scientific Advisor of the Vector Institute in Toronto.",
184
+ "When I told John that I wanted to move to Alaska, he warned me that I'd have trouble finding a Starbucks there.",
185
+ "Steven Paul Jobs was an American business magnate, industrial designer, investor, and media proprietor. He was the chairman, chief executive officer (CEO), and co-founder of Apple Inc., the chairman and majority shareholder of Pixar, a member of The Walt Disney Company's board of directors following its acquisition of Pixar, and the founder, chairman, and CEO of NeXT. Jobs is widely recognized as a pioneer of the personal computer revolution of the 1970s and 1980s, along with Apple co-founder Steve Wozniak. Jobs was born in San Francisco, California, and put up for adoption. He was raised in the San Francisco Bay Area. He attended Reed College in 1972 before dropping out that same year, and traveled through India in 1974 seeking enlightenment and studying Zen Buddhism.",
186
+ "Titanic is a 1997 American epic romance and disaster film directed, written, co-produced, and co-edited by James Cameron. Incorporating both historical and fictionalized aspects, it is based on accounts of the sinking of the RMS Titanic, and stars Leonardo DiCaprio and Kate Winslet as members of different social classes who fall in love aboard the ship during its ill-fated maiden voyage.",
187
+ "Other than being the king of the north, John Snow is a an english physician and a leader in the development of anaesthesia and medical hygiene. He is considered for being the first one using data to cure cholera outbreak in 1834."
188
+ ],
189
+ "Zero-Shot Classification" : [
190
+ "In today’s world, staying updated with urgent information is crucial as events can unfold rapidly and require immediate attention.", # Urgent
191
+ "Mobile technology has become indispensable, allowing us to access news, updates, and connect with others no matter where we are.", # Mobile
192
+ "For those who love to travel, the convenience of mobile apps has transformed how we plan and experience trips, providing real-time updates on flights, accommodations, and local attractions.", # Travel
193
+ "The entertainment industry continually offers new movies that captivate audiences with their storytelling and visuals, providing a wide range of genres to suit every taste.", # Movie
194
+ "Music is an integral part of modern life, with streaming platforms making it easy to discover new artists and enjoy favorite tunes anytime, anywhere.", # Music
195
+ "Sports enthusiasts follow games and matches closely, with live updates and detailed statistics available at their fingertips, enhancing the excitement of every game.", # Sport
196
+ "Weather forecasts play a vital role in daily planning, offering accurate and timely information to help us prepare for various weather conditions and adjust our plans accordingly.", # Weather
197
+ "Technology continues to evolve rapidly, driving innovation across all sectors and improving our everyday lives through smarter devices, advanced software, and enhanced connectivity." # Technology
198
+ ],
199
+ "Sequence Classification": [
200
+ "I had a fantastic day at the park with my friends and family, enjoying the beautiful weather and fun activities.", # Positive
201
+ "The movie was a complete waste of time, with a terrible plot and poor acting.", # Negative
202
+ "The meeting was rescheduled to next week due to a conflict in everyone's schedule.", # Neutral
203
+ "I am thrilled with the service I received at the restaurant; the food was delicious and the staff were very friendly.", # Positive
204
+ "The traffic was horrible this morning, causing me to be late for work and miss an important meeting.", # Negative
205
+ "The report was submitted on time and included all the necessary information.", # Neutral
206
+ "I love the new features on my phone; they make it so much easier to stay organized and connected.", # Positive
207
+ "The customer service was disappointing, and I won't be returning to that store.", # Negative
208
+ "The weather forecast predicts mild temperatures for the rest of the week.", # Neutral
209
+ "My vacation was amazing, with stunning views and great experiences at every destination.", # Positive
210
+ "I received a defective product and had a lot of trouble getting it replaced.", # Negative
211
+ "The new policy will be implemented starting next month and applies to all employees.", # Neutral
212
+ ],
213
+ "Question Answering": {
214
+ """What does increased oxygen concentrations in the patient’s lungs displace?""": """Hyperbaric (high-pressure) medicine uses special oxygen chambers to increase the partial pressure of O 2 around the patient and, when needed, the medical staff. Carbon monoxide poisoning, gas gangrene, and decompression sickness (the ’bends’) are sometimes treated using these devices. Increased O 2 concentration in the lungs helps to displace carbon monoxide from the heme group of hemoglobin. Oxygen gas is poisonous to the anaerobic bacteria that cause gas gangrene, so increasing its partial pressure helps kill them. Decompression sickness occurs in divers who decompress too quickly after a dive, resulting in bubbles of inert gas, mostly nitrogen and helium, forming in their blood. Increasing the pressure of O 2 as soon as possible is part of the treatment.""",
215
+ """What category of game is Legend of Zelda: Twilight Princess?""": """The Legend of Zelda: Twilight Princess (Japanese: ゼルダの伝説 トワイライトプリンセス, Hepburn: Zeruda no Densetsu: Towairaito Purinsesu?) is an action-adventure game developed and published by Nintendo for the GameCube and Wii home video game consoles. It is the thirteenth installment in the The Legend of Zelda series. Originally planned for release on the GameCube in November 2005, Twilight Princess was delayed by Nintendo to allow its developers to refine the game, add more content, and port it to the Wii. The Wii version was released alongside the console in North America in November 2006, and in Japan, Europe, and Australia the following month. The GameCube version was released worldwide in December 2006.""",
216
+ """Who is founder of Alibaba Group?""": """Alibaba Group founder Jack Ma has made his first appearance since Chinese regulators cracked down on his business empire. His absence had fuelled speculation over his whereabouts amid increasing official scrutiny of his businesses. The billionaire met 100 rural teachers in China via a video meeting on Wednesday, according to local government media. Alibaba shares surged 5% on Hong Kong's stock exchange on the news.""",
217
+ """For what instrument did Frédéric write primarily for?""": """Frédéric François Chopin (/ˈʃoʊpæn/; French pronunciation: ​[fʁe.de.ʁik fʁɑ̃.swa ʃɔ.pɛ̃]; 22 February or 1 March 1810 – 17 October 1849), born Fryderyk Franciszek Chopin,[n 1] was a Polish and French (by citizenship and birth of father) composer and a virtuoso pianist of the Romantic era, who wrote primarily for the solo piano. He gained and has maintained renown worldwide as one of the leading musicians of his era, whose "poetic genius was based on a professional technique that was without equal in his generation." Chopin was born in what was then the Duchy of Warsaw, and grew up in Warsaw, which after 1815 became part of Congress Poland. A child prodigy, he completed his musical education and composed his earlier works in Warsaw before leaving Poland at the age of 20, less than a month before the outbreak of the November 1830 Uprising.""",
218
+ """The most populated city in the United States is which city?""": """New York—often called New York City or the City of New York to distinguish it from the State of New York, of which it is a part—is the most populous city in the United States and the center of the New York metropolitan area, the premier gateway for legal immigration to the United States and one of the most populous urban agglomerations in the world. A global power city, New York exerts a significant impact upon commerce, finance, media, art, fashion, research, technology, education, and entertainment, its fast pace defining the term New York minute. Home to the headquarters of the United Nations, New York is an important center for international diplomacy and has been described as the cultural and financial capital of the world."""
219
+ }
220
+ }
221
+
222
# --- Task-specific input selection ---------------------------------------
# Question Answering needs a (question, context) pair; every other task
# takes a single text to analyze.
if task == 'Question Answering':
    examples = list(examples_mapping[task].keys())
    selected_text = st.selectbox('Select an Example:', examples)
    st.subheader('Try it yourself!')
    custom_input_question = st.text_input('Create a question')
    custom_input_context = st.text_input("Create it's context")

    st.subheader('Selected Text')

    # Prefer the user-supplied pair; otherwise fall back to the chosen example.
    if custom_input_question and custom_input_context:
        QUESTION = custom_input_question
        CONTEXT = custom_input_context
    elif selected_text:
        QUESTION = selected_text
        CONTEXT = examples_mapping[task][selected_text]
    else:
        # Defensive fallback: keep QUESTION/CONTEXT bound even if the
        # selectbox somehow yields an empty value, so the markdown calls
        # below (and fit_data later) never hit a NameError.
        QUESTION = ''
        CONTEXT = ''

    st.markdown(f"**Question:** {QUESTION}")
    st.markdown(f"**Context:** {CONTEXT}")

else:
    # Single-text tasks: pick an example or type a custom sentence.
    examples = examples_mapping[task]
    selected_text = st.selectbox("Select an example", examples)
    custom_input = st.text_input("Try it with your own Sentence!")
248
if task == 'Zero-Shot Classification':
    # Default candidate labels for zero-shot classification; the user can
    # add/remove labels through the tag widget below.
    zeroShotLables = ["urgent", "mobile", "travel", "movie", "music", "sport", "weather", "technology"]
    # BUG FIX: the widget's return value was previously stored in an unused,
    # misspelled variable ('lables'), so user edits never reached the
    # pipeline. Feed the edited tag list back into zeroShotLables, which is
    # what create_pipeline() consumes.
    zeroShotLables = st_tags(
        label='Select labels',
        text='Press enter to add more',
        value=zeroShotLables,
        suggestions=[
            "Positive", "Negative", "Neutral",
            "Urgent", "Mobile", "Travel", "Movie", "Music", "Sport", "Weather", "Technology",
            "Happiness", "Sadness", "Anger", "Fear", "Surprise", "Disgust",
            "Informational", "Navigational", "Transactional", "Commercial Investigation",
            "Politics", "Business", "Sports", "Entertainment", "Health", "Science",
            "Product Quality", "Delivery Experience", "Customer Service", "Pricing", "Return Policy",
            "Education", "Finance", "Lifestyle", "Fashion", "Food", "Art", "History",
            "Culture", "Environment", "Real Estate", "Automotive", "Travel", "Fitness", "Career"],
        maxtags=-1)
264
+
265
try:
    # 'custom_input' is only bound on the non-QA path above; on the
    # Question Answering path referencing it raises NameError and we fall
    # back to the selected example. Catch NameError explicitly instead of
    # a bare except, which would also swallow unrelated failures.
    text_to_analyze = custom_input if custom_input else selected_text
    st.subheader('Full example text')
    HTML_WRAPPER = """<div class="scroll entities" style="overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem; margin-bottom: 2.5rem; white-space:pre-wrap">{}</div>"""
    st.markdown(HTML_WRAPPER.format(text_to_analyze), unsafe_allow_html=True)
except NameError:
    text_to_analyze = selected_text
272
+
273
# Start (or reuse the cached) Spark session, then build the task pipeline.
# Zero-shot classification is the only task whose pipeline also needs the
# candidate label list.
spark = init_spark()

pipeline_args = (
    (model, task, zeroShotLables)
    if task == 'Zero-Shot Classification'
    else (model, task)
)
pipeline = create_pipeline(*pipeline_args)
280
+
281
try:
    # QUESTION/CONTEXT are bound only on the Question Answering path;
    # elsewhere this call raises NameError before fit_data runs, and we
    # retry without them. Catch NameError explicitly — a bare except would
    # also hide genuine fit_data failures on the QA path.
    output = fit_data(pipeline, text_to_analyze, task, QUESTION, CONTEXT)
except NameError:
    output = fit_data(pipeline, text_to_analyze, task)
285
+
286
# Render the model's prediction in a task-appropriate format.
st.subheader("Prediction:")

if task == 'Token Classification':
    # Translate the model's abbreviated entity tags into conventional NER
    # labels. NOTE(review): assumes metadata['entity'] carries these short
    # codes — anything unrecognized renders as 'UNKNOWN'.
    label_map = {'R': 'PER', 'G': 'ORG', 'C': 'LOC', 'SC': 'MISC'}
    chunks = output[0]['ner_chunk']
    results = {
        'Document': output[0]['document'][0].result,
        'NER Chunk': [chunk.result for chunk in chunks],
        'NER Label': [label_map.get(chunk.metadata['entity'], 'UNKNOWN') for chunk in chunks],
    }
    annotate(results)
    table = pd.DataFrame({'NER Chunk': results['NER Chunk'], 'NER Label': results['NER Label']})
    table.index = table.index + 1  # 1-based row numbers for display
    st.dataframe(table)

elif task == 'Zero-Shot Classification':
    st.markdown(f"Document Classified as: **{output[0]['class'][0].result}**")

elif task == 'Sequence Classification':
    st.markdown(f"Classified as : **{output[0]['class'][0].result}**")

elif task == "Question Answering":
    answer = "".join(output[0][0])
    st.markdown(f"Answer: **{answer}**")
311
+
312