abdullahmubeen10 commited on
Commit
ef1c428
·
verified ·
1 Parent(s): 1faadae

Upload 7 files

Browse files
.streamlit/config.toml ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
# Streamlit theme configuration — picked up automatically from .streamlit/config.toml.
[theme]
base="light"
primaryColor="#29B4E8"
Demo.py ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
import sparknlp
import os
import pandas as pd

from sparknlp.base import *
from sparknlp.annotator import *
from pyspark.ml import Pipeline
from sparknlp.pretrained import PretrainedPipeline

# Page configuration — st.set_page_config must be the first Streamlit
# command executed in the script, before any other st.* call.
st.set_page_config(
    layout="wide",
    page_title="Spark NLP Demos App",
    initial_sidebar_state="auto"
)

# CSS for styling the main title and the grey "section" cards used below.
st.markdown("""
<style>
.main-title {
    font-size: 36px;
    color: #4A90E2;
    font-weight: bold;
    text-align: center;
}
.section p, .section ul {
    color: #666666;
}
</style>
""", unsafe_allow_html=True)
32
+
33
@st.cache_resource
def init_spark():
    """Start (or reuse) the single cached Spark NLP session for this app."""
    spark_session = sparknlp.start()
    return spark_session
36
+
37
@st.cache_resource
def create_pipeline(model_name="pos_anc"):
    """Assemble the document -> token -> pos tagging pipeline.

    Parameters
    ----------
    model_name : str
        Name of the pretrained ``PerceptronModel`` to load. Defaults to
        ``"pos_anc"`` (the sidebar's only current option). Previously the
        name was hard-coded, so the sidebar selection was silently ignored;
        parameterizing it is backward-compatible and lets callers pass the
        selected model. ``st.cache_resource`` caches one pipeline per
        distinct ``model_name``.

    Returns
    -------
    pyspark.ml.Pipeline
        Unfitted pipeline ready to be passed to ``fit_data``.
    """
    # Stage 1: raw text -> "document" annotation
    document_assembler = DocumentAssembler() \
        .setInputCol("text") \
        .setOutputCol("document")

    # Stage 2: tokenization
    tokenizer = Tokenizer() \
        .setInputCols(["document"]) \
        .setOutputCol("token")

    # Stage 3: pretrained averaged-perceptron POS tagger (English)
    postagger = PerceptronModel.pretrained(model_name, "en") \
        .setInputCols(["document", "token"]) \
        .setOutputCol("pos")

    return Pipeline(stages=[document_assembler, tokenizer, postagger])
53
+
54
def fit_data(pipeline, data):
    """Fit *pipeline* on a placeholder frame and annotate *data*.

    Relies on the module-level ``spark`` session created in the main
    script before this function is called. Returns the list produced by
    ``LightPipeline.fullAnnotate``.
    """
    placeholder_df = spark.createDataFrame([['']]).toDF('text')
    light_model = LightPipeline(pipeline.fit(placeholder_df))
    return light_model.fullAnnotate(data)
60
+
61
# Set up the page layout
st.markdown('<div class="main-title">State-of-the-Art Part-of-Speech Tagging with Spark NLP</div>', unsafe_allow_html=True)

# Sidebar content: pretrained model selector (single option for now).
model_name = st.sidebar.selectbox(
    "Choose the pretrained model",
    ['pos_anc'],
    help="For more info about the models visit: https://sparknlp.org/models"
)

# Reference notebook link in sidebar.
# Fixed: the Colab badge previously pointed at the SpanBertCoref
# (coreference resolution) notebook; link the POS/grammar demo notebook
# from the Spark NLP workshop instead.
link = """
<a href="https://github.com/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/streamlit_notebooks/GRAMMAR_EN.ipynb">
    <img src="https://colab.research.google.com/assets/colab-badge.svg" style="zoom: 1.3" alt="Open In Colab"/>
</a>
"""
st.sidebar.markdown('Reference notebook:')
st.sidebar.markdown(link, unsafe_allow_html=True)

# Example sentences the user can pick from.
examples = [
    "Alice went to the market. She bought some fresh vegetables there. The tomatoes she purchased were particularly ripe.",
    "Dr. Smith is a renowned surgeon. He has performed over a thousand successful operations. His colleagues respect him a lot.",
    "The company announced a new product launch. It is expected to revolutionize the industry. The CEO was very excited about it.",
    "Jennifer enjoys hiking. She goes to the mountains every weekend. Her favorite spot is the Blue Ridge Mountains.",
    "The team won the championship. They celebrated their victory with a huge party. Their coach praised their hard work and dedication.",
    "Michael is studying computer science. He finds artificial intelligence fascinating. His dream is to work at a leading tech company.",
    "Tom is a skilled guitarist. He plays in a local band. His performances are always energetic and captivating."
]

selected_text = st.selectbox("Select an example", examples)
custom_input = st.text_input("Try it with your own Sentence!")

# A non-empty custom sentence takes precedence over the selected example.
text_to_analyze = custom_input if custom_input else selected_text

st.subheader('Full example text')
st.write(text_to_analyze)

# Initialize Spark, build the pipeline, and annotate the chosen text.
spark = init_spark()
pipeline = create_pipeline()
output = fit_data(pipeline, text_to_analyze)

# Display the annotations as a table: one row per token with its POS tag.
st.subheader("Processed output:")

token_annotations = output[0]['token']
pos_annotations = output[0]['pos']
results = {
    'Token': [t.result for t in token_annotations],
    'Begin': [p.begin for p in pos_annotations],
    'End': [p.end for p in pos_annotations],
    'POS': [p.result for p in pos_annotations]
}

df = pd.DataFrame(results)
df.index += 1  # 1-based row numbering for display
st.dataframe(df)
Dockerfile ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Download base image ubuntu 18.04
FROM ubuntu:18.04

# Set environment variables for the unprivileged application user
ENV NB_USER jovyan
ENV NB_UID 1000
ENV HOME /home/${NB_USER}

# Install required system packages (build tools, Java 8 for Spark, etc.)
RUN apt-get update && apt-get install -y \
    tar \
    wget \
    bash \
    rsync \
    gcc \
    libfreetype6-dev \
    libhdf5-serial-dev \
    libpng-dev \
    libzmq3-dev \
    python3 \
    python3-dev \
    python3-pip \
    unzip \
    pkg-config \
    software-properties-common \
    graphviz \
    openjdk-8-jdk \
    ant \
    ca-certificates-java \
    && apt-get clean \
    && update-ca-certificates -f;

# Install Python 3.8 from the deadsnakes PPA.
# Fix: python3.8-distutils is required for pip to work under the deadsnakes
# python3.8 build on Ubuntu 18.04 — without it `python3.8 -m pip install`
# fails with "No module named 'distutils.util'".
RUN add-apt-repository ppa:deadsnakes/ppa \
    && apt-get update \
    && apt-get install -y python3.8 python3.8-distutils python3-pip \
    && apt-get clean;

# Set up JAVA_HOME (required by Spark / PySpark)
ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64/
RUN mkdir -p ${HOME} \
    && echo "export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/" >> ${HOME}/.bashrc \
    && chown -R ${NB_UID}:${NB_UID} ${HOME}

# Create a new user named "jovyan" with user ID 1000
RUN useradd -m -u ${NB_UID} ${NB_USER}

# Switch to the "jovyan" user
USER ${NB_USER}

# Set home and path variables for the user
ENV HOME=/home/${NB_USER} \
    PATH=/home/${NB_USER}/.local/bin:$PATH

# Set the working directory to the user's home directory
WORKDIR ${HOME}

# Upgrade pip and install Python dependencies
RUN python3.8 -m pip install --upgrade pip
COPY requirements.txt /tmp/requirements.txt
RUN python3.8 -m pip install -r /tmp/requirements.txt

# Copy the application code into the container at /home/jovyan
COPY --chown=${NB_USER}:${NB_USER} . ${HOME}

# Expose port for Streamlit
EXPOSE 7860

# Define the entry point for the container
ENTRYPOINT ["streamlit", "run", "Demo.py", "--server.port=7860", "--server.address=0.0.0.0"]
images/POS-Tagging.png ADDED
images/johnsnowlabs-output.png ADDED
pages/Workflow & Model Overview.py ADDED
@@ -0,0 +1,233 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st

# Custom CSS for better styling of titles, section cards, and links
# used throughout this overview page.
st.markdown("""
<style>
.main-title {
    font-size: 36px;
    color: #4A90E2;
    font-weight: bold;
    text-align: center;
}
.sub-title {
    font-size: 24px;
    color: #4A90E2;
    margin-top: 20px;
}
.section {
    background-color: #f9f9f9;
    padding: 15px;
    border-radius: 10px;
    margin-top: 20px;
}
.section h2 {
    font-size: 22px;
    color: #4A90E2;
}
.section p, .section ul {
    color: #666666;
}
.link {
    color: #4A90E2;
    text-decoration: none;
}
</style>
""", unsafe_allow_html=True)
36
+
37
+ # Introduction
38
+ st.markdown('<div class="main-title">Part-of-Speech Tagging with Spark NLP</div>', unsafe_allow_html=True)
39
+
40
+ st.markdown("""
41
+ <div class="section">
42
+ <p>Welcome to the Spark NLP Part-of-Speech Tagging Demo App! Part-of-Speech (POS) tagging is an essential task in Natural Language Processing (NLP) that involves identifying the grammatical roles of words within a text, such as nouns, verbs, adjectives, and more. This app demonstrates how to use the PerceptronModel annotator to perform POS tagging in text data using Spark NLP.</p>
43
+ </div>
44
+ """, unsafe_allow_html=True)
45
+
46
+ st.image('images/POS-Tagging.png', caption="Dependency parsing with POS tags", use_column_width='auto')
47
+
48
+ # About POS Tagging
49
+ st.markdown('<div class="sub-title">About Part-of-Speech Tagging</div>', unsafe_allow_html=True)
50
+ st.markdown("""
51
+ <div class="section">
52
+ <p>Part-of-Speech (POS) tagging involves assigning each word in a sentence its grammatical role, such as subject, verb, or adjective. This technique helps improve many NLP tasks, including Named Entity Recognition (NER), Word Sense Disambiguation (WSD), Question Answering (QA), and Dependency Parsing (DP).</p>
53
+ <p>For instance, knowing that a word is an adjective increases the likelihood that one of the neighboring words is a noun. The context can also alter the meaning of words significantly:</p>
54
+ <ul>
55
+ <li><i>What is your address?</i> (noun)</li>
56
+ <li><i>I will address this issue today.</i> (verb)</li>
57
+ </ul>
58
+ <p>POS tags can be categorized using schemas like the Universal Dependencies schema or the Penn Treebank POS tags. Each schema provides a set of tags for different grammatical roles.</p>
59
+ </div>
60
+ """, unsafe_allow_html=True)
61
+
62
+ # Using PerceptronModel for POS Tagging
63
+ st.markdown('<div class="sub-title">Using PerceptronModel for POS Tagging in Spark NLP</div>', unsafe_allow_html=True)
64
+ st.markdown("""
65
+ <div class="section">
66
+ <p>The PerceptronModel annotator in Spark NLP allows users to perform POS tagging with high accuracy using pretrained models. This annotator can identify and label the grammatical roles of words in text data, providing valuable insights for various applications.</p>
67
+ <p>The PerceptronModel annotator in Spark NLP offers:</p>
68
+ <ul>
69
+ <li>Accurate POS tagging using pretrained models</li>
70
+ <li>Identification and labeling of grammatical roles</li>
71
+ <li>Efficient processing of large text datasets</li>
72
+ <li>Integration with other Spark NLP components for comprehensive NLP pipelines</li>
73
+ </ul>
74
+ </div>
75
+ """, unsafe_allow_html=True)
76
+
77
+ st.markdown('<h2 class="sub-title">Example Usage in Python</h2>', unsafe_allow_html=True)
78
+ st.markdown('<p>Here’s how you can implement POS tagging using the PerceptronModel annotator in Spark NLP:</p>', unsafe_allow_html=True)
79
+
80
+ # Setup Instructions
81
+ st.markdown('<div class="sub-title">Setup</div>', unsafe_allow_html=True)
82
+ st.markdown('<p>To install Spark NLP in Python, use your favorite package manager (conda, pip, etc.). For example:</p>', unsafe_allow_html=True)
83
+ st.code("""
84
+ pip install spark-nlp
85
+ pip install pyspark
86
+ """, language="bash")
87
+
88
+ st.markdown("<p>Then, import Spark NLP and start a Spark session:</p>", unsafe_allow_html=True)
89
+ st.code("""
90
+ import sparknlp
91
+
92
+ # Start Spark Session
93
+ spark = sparknlp.start()
94
+ """, language='python')
95
+
96
+ # POS Tagging Example
97
+ st.markdown('<div class="sub-title">Example Usage: POS Tagging with PerceptronModel</div>', unsafe_allow_html=True)
98
+ st.code('''
99
+ from sparknlp.base import DocumentAssembler
100
+ from sparknlp.annotator import Tokenizer, PerceptronModel
101
+ from pyspark.ml import Pipeline
102
+ import pyspark.sql.functions as F
103
+
104
+ # Stage 1: Transforms raw texts to document annotation
105
+ document_assembler = DocumentAssembler() \\
106
+ .setInputCol("text") \\
107
+ .setOutputCol("document")
108
+
109
+ # Stage 2: Tokenization
110
+ tokenizer = Tokenizer() \\
111
+ .setInputCols(["document"]) \\
112
+ .setOutputCol("token")
113
+
114
+ # Stage 3: Perceptron model for POS Tagger
115
+ # Pretrained model pos_anc for texts in English
116
+ postagger = PerceptronModel.pretrained("pos_anc", "en") \\
117
+ .setInputCols(["document", "token"]) \\
118
+ .setOutputCol("pos")
119
+
120
+ # Define the pipeline
121
+ pipeline = Pipeline(stages=[document_assembler, tokenizer, postagger])
122
+
123
+ # Create the dataframe
124
+ data = spark.createDataFrame([["Unions representing workers at Turner Newall say they are 'disappointed' after talks with stricken parent firm Federal Mogul"]]).toDF("text")
125
+
126
+ # Fit the dataframe to the pipeline to get the model
127
+ model = pipeline.fit(data)
128
+
129
+ # Transform the data to get predictions
130
+ result = model.transform(data)
131
+
132
+ # Display the POS tags
133
+ result.select(
134
+ F.explode(
135
+ F.arrays_zip(result.token.result, result.token.begin, result.token.end, result.pos.result)
136
+ ).alias("cols")
137
+ ).select(
138
+ F.expr("cols['0']").alias("token"),
139
+ F.expr("cols['1']").alias("begin"),
140
+ F.expr("cols['2']").alias("end"),
141
+ F.expr("cols['3']").alias("pos"),
142
+ ).show(truncate=False)
143
+ ''', language='python')
144
+
145
+ st.text("""
146
+ +------------+-----+---+---+
147
+ |token |begin|end|pos|
148
+ +------------+-----+---+---+
149
+ |Unions |0 |5 |NNP|
150
+ |representing|7 |18 |VBG|
151
+ |workers |20 |26 |NNS|
152
+ |at |28 |29 |IN |
153
+ |Turner |31 |36 |NNP|
154
+ |Newall |38 |43 |NNP|
155
+ |say |45 |47 |VBP|
156
+ |they |49 |52 |PRP|
157
+ |are |54 |56 |VBP|
158
+ |' |58 |58 |POS|
159
+ |disappointed|59 |70 |JJ |
160
+ |' |71 |71 |POS|
161
+ |after |73 |77 |IN |
162
+ |talks |79 |83 |NNS|
163
+ |with |85 |88 |IN |
164
+ |stricken |90 |97 |NN |
165
+ |parent |99 |104|NN |
166
+ |firm |106 |109|NN |
167
+ |Federal |111 |117|NNP|
168
+ |Mogul |119 |123|NNP|
169
+ +------------+-----+---+---+
170
+ """)
171
+
172
+ st.markdown("""
173
+ <p>The code snippet demonstrates how to set up a pipeline in Spark NLP to perform POS tagging on text data using the PerceptronModel annotator. The resulting DataFrame contains the tokens and their corresponding POS tags.</p>
174
+ """, unsafe_allow_html=True)
175
+
176
+ # One-liner Alternative
177
+ st.markdown('<div class="sub-title">One-liner Alternative</div>', unsafe_allow_html=True)
178
+ st.markdown("""
179
+ <div class="section">
180
+ <p>In October 2022, John Snow Labs released the open-source <code>johnsnowlabs</code> library that contains all the company products, open-source and licensed, under one common library. This simplified the workflow, especially for users working with more than one of the libraries (e.g., Spark NLP + Healthcare NLP). This new library is a wrapper on all of John Snow Lab’s libraries and can be installed with pip:</p>
181
+ <p><code>pip install johnsnowlabs</code></p>
182
+ <p>To run POS tagging with one line of code, we can simply:</p>
183
+ </div>
184
+ """, unsafe_allow_html=True)
185
+ st.code("""
186
+ # Import the NLP module which contains Spark NLP and NLU libraries
187
+ from johnsnowlabs import nlp
188
+
189
+ example_sentence = "Unions representing workers at Turner Newall say they are 'disappointed' after talks with stricken parent firm Federal Mogul"
190
+
191
+ # Returns a pandas DataFrame, we select the desired columns
192
+ nlp.load("pos").predict(example_sentence)[['token','pos']]
193
+ """, language='python')
194
+
195
+ st.image('images/johnsnowlabs-output.png', use_column_width='auto')
196
+
197
+ # Summary
198
+ st.markdown('<div class="sub-title">Summary</div>', unsafe_allow_html=True)
199
+ st.markdown("""
200
+ <div class="section">
201
+ <p>In this demo app, we showcased how to perform Part-of-Speech tagging using the PerceptronModel annotator in Spark NLP. POS tagging is a crucial step in many NLP applications, helping to understand the grammatical structure and context of the text. With Spark NLP, you can efficiently process and analyze large volumes of text data, leveraging powerful pretrained models for accurate and reliable results.</p>
202
+ <p>We hope you found this demo helpful and encourage you to explore more features and capabilities of Spark NLP for your NLP projects!</p>
203
+ </div>
204
+ """, unsafe_allow_html=True)
205
+
206
+ # References and Additional Information
207
+ st.markdown('<div class="sub-title">For additional information, please check the following references.</div>', unsafe_allow_html=True)
208
+
209
+ st.markdown("""
210
+ <div class="section">
211
+ <ul>
212
+ <li><a href="https://nlp.johnsnowlabs.com/docs/en/annotators" target="_blank" rel="noopener">Spark NLP documentation page</a> for all available annotators</li>
213
+ <li>Python API documentation for <a href="https://nlp.johnsnowlabs.com/api/python/reference/autosummary/sparknlp/annotator/pos/perceptron/index.html#sparknlp.annotator.pos.perceptron.PerceptronModel" target="_blank" rel="noopener">PerceptronModel</a> and <a href="https://nlp.johnsnowlabs.com/api/python/reference/autosummary/sparknlp/annotator/dependency/dependency_parser/index.html#sparknlp.annotator.dependency.dependency_parser.DependencyParserModel" target="_blank" rel="noopener">Dependency Parser</a></li>
214
+ <li>Scala API documentation for <a href="https://nlp.johnsnowlabs.com/api/com/johnsnowlabs/nlp/annotators/pos/perceptron/PerceptronModel.html" target="_blank" rel="noopener">PerceptronModel</a> and <a href="https://nlp.johnsnowlabs.com/api/com/johnsnowlabs/nlp/annotators/parser/dep/DependencyParserModel.html" target="_blank" rel="noopener">DependencyParserModel</a></li>
215
+ <li>For extended examples of usage of Spark NLP annotators, check the <a href="https://github.com/JohnSnowLabs/spark-nlp-workshop" target="_blank" rel="noopener">Spark NLP Workshop repository</a>.</li>
216
+ <li>Minsky, M.L. and Papert, S.A. (1969) Perceptrons. MIT Press, Cambridge.</li>
217
+ </ul>
218
+ </div>
219
+ """, unsafe_allow_html=True)
220
+
221
+ st.markdown('<div class="sub-title">Community & Support</div>', unsafe_allow_html=True)
222
+ st.markdown("""
223
+ <div class="section">
224
+ <ul>
225
+ <li><a class="link" href="https://sparknlp.org/" target="_blank">Official Website</a>: Documentation and examples</li>
226
+ <li><a class="link" href="https://join.slack.com/t/spark-nlp/shared_invite/zt-198dipu77-L3UWNe_AJ8xqDk0ivmih5Q" target="_blank">Slack</a>: Live discussion with the community and team</li>
227
+ <li><a class="link" href="https://github.com/JohnSnowLabs/spark-nlp" target="_blank">GitHub</a>: Bug reports, feature requests, and contributions</li>
228
+ <li><a class="link" href="https://medium.com/spark-nlp" target="_blank">Medium</a>: Spark NLP articles</li>
229
+ <li><a class="link" href="https://www.youtube.com/channel/UCmFOjlpYEhxf_wJUDuz6xxQ/videos" target="_blank">YouTube</a>: Video tutorials</li>
230
+ </ul>
231
+ </div>
232
+ """, unsafe_allow_html=True)
233
+
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
# Python dependencies for the Spark NLP POS-tagging Streamlit demo.
# NOTE(review): versions are unpinned — consider pinning (e.g. spark-nlp==X.Y.Z,
# pyspark==3.x) for reproducible Docker builds.
streamlit
pandas
numpy
spark-nlp
pyspark