prithush committed on
Commit
5aa6b7f
1 Parent(s): 7a5bb5c

Uploaded Project files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ airline_sentiment.csv filter=lfs diff=lfs merge=lfs -text
37
+ nlp_model/variables/variables.data-00000-of-00001 filter=lfs diff=lfs merge=lfs -text
acronym.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"aka":"also known as","asap":"as soon as possible","brb":"be right back","btw":"by the way","dob":"date of birth","faq":"frequently asked questions","fyi":"for your information","idk":"i don't know","idc":"i don't care","iirc":"if i recall correctly","imo":"in my opinion","irl":"in real life","lmk":"let me know","lol":"laugh out loud","ngl":"not gonna lie","noyb":"none of your business","nvm":"never mind","ofc":"of course","omg":"oh my god","pfa":"please find attached","rofl":"rolling on the floor laughing","stfu":"shut the fuck up","tba":"to be announced","tbc":"to be continued","tbd":"to be determined","tbh":"to be honest","ttyl":"talk to you later","wtf":"what the fuck","wth":"what the heck"}
airline_recommend.joblib ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6c79a26d312394e66dc29dba164097854cf47fa66eb3c2ffb6309493e80d71ae
3
+ size 8192271
airline_reviews_cleaned.csv ADDED
The diff for this file is too large to render. See raw diff
 
airline_sentiment.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a0246fb482ad199fe0820d169dc2db34fd5be3c310819aa6a0994f0c0822919b
3
+ size 46112842
app.py ADDED
@@ -0,0 +1,319 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# AeroAdvisor — Streamlit front-end that predicts whether a passenger would
# recommend an airline (tabular model) and the sentiment of their free-text
# review (TensorFlow NLP model).
import streamlit as st
import pandas as pd
import numpy as np
import joblib
import tensorflow as tf
import re
import string
import nltk
from nltk.tokenize import RegexpTokenizer
from nltk.stem import WordNetLemmatizer

# Cleaned reviews dataset; used below only to populate the airline selectbox.
df = pd.read_csv("airline_reviews_cleaned.csv")
# Streamlit requires set_page_config before any other st.* rendering call;
# it correctly precedes st.markdown below.
st.set_page_config(layout="wide", page_title="AeroAdvisor", page_icon="✈️")

# Page-wide CSS: full-page background image plus translucent header/expanders.
# NOTE(review): the f-string has no placeholders (hence the doubled {{ }});
# a plain string with single braces would render identically. The Discord CDN
# URL carries an expiry token ("ex=...") and may go stale — consider bundling
# the image with the app.
css = f"""
<style>
[data-testid="stAppViewContainer"] > .main {{
background-image: url("https://cdn.discordapp.com/attachments/1075699203046641687/1165351110828101673/PhotoReal_From_a_distance_the_plane_appears_as_a_tiny_speck_ag_1.jpg?ex=654688cb&is=653413cb&hm=096dcc994304b93afd210555607d563f725d74c1dddfd392176f11e15076bcfa&");
background-size: 120%;
background-position: top left;
background-repeat: repeat;
background-attachment: local;
}}

[data-testid="stHeader"] {{
background: rgba(0,0,0,0);
}}

[data-testid="stExpander"] {{
background: rgba(0,0,0,0.5);
border: 2px solid #000071;
border-radius: 10px;
}}
</style>
"""
st.markdown(css, unsafe_allow_html=True)
37
+
38
#-------------------------------- Function to clean reviews -------------------------------------#

# Ensure the NLTK data used by preprocess() is installed. WordNetLemmatizer
# needs the 'wordnet' corpus; the 'popular' collection bundles it. The
# canonical lookup API is nltk.data.find(). Probing "corpora/wordnet.zip"
# checks a resource the download actually installs — the original probed
# "corpora/popular.zip", which the 'popular' *collection* never creates, so
# the LookupError fired and the download re-ran on every script execution.
try:
    nltk.data.find("corpora/wordnet.zip")
except LookupError:
    nltk.download('popular')


# Acronym -> expansion table (e.g. "btw" -> "by the way"); loaded as a Series
# so preprocess() can test membership against its index.
acronyms_dict = pd.read_json('acronym.json', typ = 'series')

# Contraction -> expansion table (e.g. "can't" -> "cannot").
contractions_dict = pd.read_json('contractions.json', typ = 'series')

# Stopwords removed in preprocess(): single letters plus mojibake tokens that
# appear in the scraped review text.
alphabets = list(string.ascii_lowercase)
others = ["ã", "å", "ì", "û", "ûªm", "ûó", "ûò", "ìñ", "ûªre", "ûªve", "ûª", "ûªs", "ûówe", "ï", "ûï"]
stops = list(set(alphabets + others))

# Tokenizer: runs of word characters and apostrophes. Raw string avoids the
# invalid "\w" escape warning the original non-raw literal produced.
regexp = RegexpTokenizer(r"[\w']+")
62
+
63
# Preprocessing
def preprocess(text):
    """Clean one raw review string for the sentiment model.

    Steps (order matters): lowercase/strip -> drop HTML tags -> drop emoji ->
    drop URLs -> drop @usernames -> drop punctuation/digits (keeping "'" for
    contraction lookup and "-", which becomes a space) -> expand acronyms ->
    expand contractions -> drop remaining apostrophes -> lemmatise ->
    remove stopwords -> keep ASCII letters/spaces only -> drop words with a
    character repeated 3+ times in a row.

    Relies on the module-level `regexp` tokenizer, `acronyms_dict`,
    `contractions_dict` and `stops`.
    """
    text = text.lower()  # lowercase
    text = text.strip()  # trim surrounding whitespace

    # Remove html tags.
    text = re.sub(r'<.*?>', '', text)

    # Remove emoji / pictograph code points.
    emoji_pattern = re.compile("["
                               u"\U0001F600-\U0001F64F"  # emoticons
                               u"\U0001F300-\U0001F5FF"  # symbols & pictographs
                               u"\U0001F680-\U0001F6FF"  # transport & map symbols
                               u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
                               u"\U00002702-\U000027B0"
                               u"\U000024C2-\U0001F251"
                               "]+", flags = re.UNICODE)
    text = emoji_pattern.sub('', text)

    # Remove urls. Raw string: the original non-raw "\S" is an invalid escape.
    text = re.sub(r"https?://\S+|www\.\S+", "", text)

    # Remove twitter @usernames.
    text = re.sub(r'@[\w_]+', "", text)

    # Remove punctuation and digits, keeping "'" (needed to match contraction
    # keys like "can't") and "-" (turned into a space just below).
    punct_str = string.punctuation + string.digits
    punct_str = punct_str.replace("'", "").replace("-", "")
    text = text.translate(str.maketrans('', '', punct_str))
    text = text.replace("-", " ")

    # Expand acronyms, then contractions, token by token. Shared helper
    # replaces the original's two duplicated loops; extend() replaces the
    # repeated list re-concatenation.
    def _expand(source, mapping):
        words = []
        for word in regexp.tokenize(source):
            if word in mapping.index:
                words.extend(mapping[word].split())
            else:
                words.append(word)
        return ' '.join(words)

    text = _expand(text, acronyms_dict)
    text = _expand(text, contractions_dict)

    # Drop remaining apostrophes now that contractions are expanded.
    text = text.translate(str.maketrans('', '', string.punctuation))

    # Lemmatise each token.
    lemmatizer = WordNetLemmatizer()
    text = " ".join(lemmatizer.lemmatize(word) for word in regexp.tokenize(text))

    # Stopword removal (single letters + mojibake tokens).
    text = ' '.join(word for word in regexp.tokenize(text) if word not in stops)

    # Keep ASCII letters and spaces only. Renamed locals: the original used
    # `filter` and `chr`, shadowing Python builtins.
    allowed = string.ascii_letters + " "
    text = "".join(ch for ch in text if ch in allowed)

    # Drop words where any character repeats 3+ times consecutively ("soooo").
    text = re.sub(r'\b\w*?(.)\1{2,}\w*\b', "", text).strip()

    return text
140
+
141
#-------------------------------- Container 1 for Heading -------------------------------------#
# Centred page heading: a 2/3/2 column split where the outer columns are
# empty padding.
container_1 = st.container()
with container_1:
    pad_left, title_col, pad_right = st.columns(spec = [2,3,2], gap = 'medium')
    with pad_left:
        st.empty()
    with title_col:
        st.title("Welcome Aboard")
        st.write("## Tell us about your Experience :airplane:")
    with pad_right:
        st.empty()
152
+
153
#-------------------------------- Container 2 for main_content --------------------------------#
# Main input area: trip details on the left, service ratings on the right,
# with narrow empty padding columns on either side.
container_2 = st.container()
with container_2:
    pad_l, trip_col, rating_col, pad_r = st.columns(spec = [1,3,3,1], gap = 'medium')

    with pad_l:
        st.empty()

    with trip_col:
        trip_expander = st.expander(label = "Your Trip Info", expanded = True)
        with trip_expander:
            # Selectboxes start unselected (index=None) and show a placeholder.
            airline = st.selectbox(
                label = "Enter your Airline",
                options = tuple(sorted(df['airline'].unique())),
                index = None,
                placeholder = "Choose an option...")
            traveller_type = st.selectbox(
                label = "Enter your trip type",
                options = ("Business", "Solo Leisure", "Couple Leisure", "Family Leisure"),
                index = None,
                placeholder = "Choose an option...")
            cabin = st.selectbox(
                label = "Enter your seat class",
                options = ("Economy Class", "Premium Economy", "Business Class", "First Class"),
                index = None,
                placeholder = "Choose an option...")
            type_of_flight = st.radio(
                label = "Enter your flight type",
                options = ("Direct", 'Indirect'),
                index = 0)
            frequency = st.radio(
                label = "How often do you fly?",
                options = ('Often', 'Occasionally', 'Rarely'),
                index = 1)

    with rating_col:
        ratings_expander = st.expander(label = "Your Ratings", expanded = True)
        with ratings_expander:
            # Six 1-5 sliders, all defaulting to 3, created in a fixed order so
            # Streamlit's widget identities stay stable across reruns.
            seat_comfort, cabin_service, food_bev, entertainment, ground_service, value_for_money = (
                st.slider(label = question, min_value = 1, max_value = 5, value = 3)
                for question in (
                    "How comfortable are you with your seat?",
                    "Please Rate your Cabin Service",
                    "Please rate the quality of food/beverages",
                    "Please rate the Entertainment Service",
                    "Please rate the Ground Service",
                    "Value for Money Rating",
                )
            )

    with pad_r:
        st.empty()
243
+
244
#-------------------------------- Container 3 for Final Rating Slider --------------------------------#
# Overall-experience slider (1-10, default 7) and the free-text review box,
# centred between two empty padding columns.
container_3 = st.container()
with container_3:
    pad_l, centre_col, pad_r = st.columns(spec = [1,3,1], gap = 'medium')
    with pad_l:
        st.empty()
    with centre_col:
        overall = st.slider(
            label = "How was your overall experience with the Airline?",
            min_value = 1,
            max_value = 10,
            value = 7)

        review = st.text_area("Enter your review")
    with pad_r:
        st.empty()
261
+
262
+
263
#-----------------------------------------------------------------------------------------------------#
# Single-row DataFrame assembled from the user's widget inputs, in the column
# order the tabular model expects. Built via the same list-of-lists constructor
# as before so per-column dtype inference is unchanged.
_feature_columns = ['airline', 'traveller_type', 'cabin', 'type_of_flight', 'frequency',
                    'seat_comfort', 'cabin_service', 'food_bev', 'entertainment',
                    'ground_service', 'value_for_money', 'overall']
_feature_values = [airline, traveller_type, cabin, type_of_flight, frequency,
                   seat_comfort, cabin_service, food_bev, entertainment,
                   ground_service, value_for_money, overall]
temp_df = pd.DataFrame(data = [_feature_values], columns = _feature_columns)
273
+
274
# Load the tabular recommendation pipeline from its joblib dump.
model = joblib.load('airline_recommend.joblib')

# Cache the TensorFlow sentiment model in Streamlit's resource cache so it is
# loaded from disk once per server process instead of on every script rerun.
@st.cache_resource
def cache_model(model_name):
    # Local name kept distinct from the module-level `model` defined above.
    loaded = tf.keras.models.load_model(model_name)
    return loaded

# Load (or fetch from cache) the NLP sentiment model.
nlp_model = cache_model("nlp_model")
285
+
286
#-------------------------------- Container 4 for Final Predictions --------------------------------#

# Prediction panel: on button press, validate the review, run both models and
# show one of four messages depending on (recommendation, review sentiment).
container_4 = st.container()
with container_4:
    pad_l, result_col, pad_r = st.columns(spec = [1.2,2,1.5], gap = 'medium')
    with pad_l:
        st.empty()

    with result_col:
        if st.button('Get Prediction'):
            # Validate the free-text review *before* running any model — the
            # original ran the tabular prediction first and discarded it
            # whenever the review was empty. strip() also rejects
            # whitespace-only input.
            if not review.strip():
                st.warning("Please enter your review")
                st.stop()

            # Tabular model: 'yes'/'no' recommendation from structured inputs.
            # (The original also called model.predict_proba here but never
            # used the result — removed.)
            y_pred = model.predict(temp_df)

            # NLP model: positive-sentiment probability for the cleaned review,
            # thresholded at 0.5.
            clean_review = preprocess(review)
            review_pred_proba = nlp_model.predict([clean_review])
            review_pred = 1 if review_pred_proba[0][0] > 0.5 else 0

            recommended = y_pred[0] == 'yes'
            positive_review = review_pred == 1

            # Plain boolean `and` replaces the original's bitwise `&`.
            if recommended and positive_review:
                st.success("Thank you for your positive feedback! \nWe're delighted to hear that you had a great experience with our service.")
                st.balloons()
            elif recommended:
                st.warning("We appreciate your positive rating, but we're sorry to hear about your concerns in the review. \nPlease share more details so we can address them and enhance your experience.")
            elif positive_review:
                st.error("We're sorry to hear about your negative rating, but we're glad to see your positive comments in the review. \nWe'd like to learn more about your concerns to ensure we address any issues and enhance your satisfaction.")
            else:
                st.error("We apologize for falling short of your expectations. \nYour feedback is valuable, and we're committed to improving. \nPlease provide specific details about your experience for us to better understand and address the issues.")

    with pad_r:
        st.empty()
contractions.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"'aight":"alright","ain't":"are not","amn't":"am not","arencha":"are not you","aren't":"are not","'bout":"about","can't":"cannot","cap'n":"captain","'cause":"because","'cept":"except","could've":"could have","couldn't":"could not","couldn't've":"could not have","dammit":"damn it","daren't":"dare not","daresn't":"dare not","dasn't":"dare not","didn't":"did not","doesn't":"does not","doin'":"doing","don't":"do not","dunno":"do not know","d'ye":"do you","e'en":"even","e'er":"ever","'em":"them","everybody's":"everybody is","everyone's":"everyone is","fo'c'sle":"forecastle","finna":"fixing to","'gainst":"against","g'day":"good day","gimme":"give me","giv'n":"given","gonna":"going to","gon't":"go not","gotcha":"got you","gotta":"got to","gtg":"got to go","hadn't":"had not","had've":"had have","hasn't":"has not","haven't":"have not","he'd":"he had","he'll":"he shall","helluva":"hell of a","he's":"he is","here's":"here is","he've":"he have","how'd":"how would","howdy":"how do you do","how'll":"how will","how're":"how are","how's":"how is","i'd":"i had","i'd've":"i would have","i'll":"i shall","i'm":"i am","imma":"i am about to","i'm'a":"i am about to","i'm'o":"i am going to","innit":"is it not","ion":"i do not","i've":"i have","isn't":"is not","it'd":"it would","it'll":"it shall","it's":"it is","iunno":"i do not know","kinda":"kind of","let's":"let us","li'l":"little","ma'am":"madam","mayn't":"may not","may've":"may have","methinks":"me thinks","mightn't":"might not","might've":"might have","mustn't":"must not","mustn't've":"must not have","must've":"must have","'neath":"beneath","needn't":"need not","nal":"and all","ne'er":"never","o'clock":"of the clock","o'er":"over","ol'":"old","oughtn't":"ought not","'round":"around","'s":"is","shalln't":"shall not","shan't":"shall not","she'd":"she had","she'll":"she shall","she's":"she is","should've":"should have","shouldn't":"should not","shouldn't've":"should not have","somebody's":"somebody is","someone's":"someone 
is","something's":"something is","so're":"so are","so's":"so is","so've":"so have","that'll":"that shall","that're":"that are","that's":"that is","that'd":"that would","there'd":"there had","there'll":"there shall","there're":"there are","there's":"there is","these're":"these are","these've":"these have","they'd":"they had","they'll":"they shall","they're":"they are","they've":"they have","this's":"this is","those're":"those are","those've":"those have","'thout":"without","'til":"until","'tis":"it is","to've":"to have","'twas":"it was","'tween":"between","'twhere":"it were","wanna":"want to","wasn't":"was not","we'd":"we had","we'd've":"we would have","we'll":"we shall","we're":"we are","we've":"we have","weren't":"were not","whatcha":"what are you","what'd":"what did","what'll":"what shall","what're":"what are","what's":"what is","what've":"what have","when's":"when is","where'd":"where did","where'll":"where shall","where're":"where are","where's":"where is","where've":"where have","which'd":"which had","which'll":"which shall","which're":"which are","which's":"which is","which've":"which have","who'd":"who would","who'd've":"who would have","who'll":"who shall","who're":"who are","who's":"who is","who've":"who have","why'd":"why did","why're":"why are","why's":"why is","willn't":"will not","won't":"will not","wonnot":"will not","would've":"would have","wouldn't":"would not","wouldn't've":"would not have","y'all":"you all","y'all'd've":"you all would have","y'all'd'n't've":"you all would not have","y'all're":"you all are","y'all'ren't":"you all are not","y'at":"you at","yes'm":"yes madam","yessir":"yes sir","you'd":"you had","you'll":"you shall","you're":"you are","you've":"you have","aight":"alright","aint":"are not","amnt":"am not","arent":"are not","cant":"cannot","cause":"because","couldve":"could have","couldnt":"could not","couldntve":"could not have","darent":"dare not","daresnt":"dare not","dasnt":"dare not","didnt":"did not","doesnt":"does 
not","doin":"doing","dont":"do not","eer":"ever","everybodys":"everybody is","everyones":"everyone is","gday":"good day","givn":"given","gont":"go not","hadnt":"had not","hadve":"had have","hasnt":"has not","havent":"have not","hed":"he had","hell":"he shall","hes":"he is","heve":"he have","howd":"how did","howll":"how will","howre":"how are","hows":"how is","idve":"i would have","ill":"i shall","im":"i am","ima":"i am about to","imo":"i am going to","ive":"i have","isnt":"is not","itd":"it would","itll":"it shall","its":"it is","lets":"let us","lil":"little","maam":"madam","maynt":"may not","mayve":"may have","mightnt":"might not","mightve":"might have","mustnt":"must not","mustntve":"must not have","mustve":"must have","neednt":"need not","neer":"never","oclock":"of the clock","oer":"over","ol":"old","oughtnt":"ought not","shallnt":"shall not","shant":"shall not","shed":"she had","shell":"she shall","shes":"she is","shouldve":"should have","shouldnt":"should not","shouldntve":"should not have","somebodys":"somebody is","someones":"someone is","somethings":"something is","thatll":"that shall","thatre":"that are","thatd":"that would","thered":"there had","therell":"there shall","therere":"there are","theres":"there is","thesere":"these are","theseve":"these have","theyd":"they had","theyll":"they shall","theyre":"they are","theyve":"they have","thiss":"this is","thosere":"those are","thoseve":"those have","tis":"it is","tove":"to have","twas":"it was","wasnt":"was not","wed":"we had","wedve":"we would have","were":"we are","weve":"we have","werent":"were not","whatd":"what did","whatll":"what shall","whatre":"what are","whats":"what is","whatve":"what have","whens":"when is","whered":"where did","wherell":"where shall","wherere":"where are","wheres":"where is","whereve":"where have","whichd":"which had","whichll":"which shall","whichre":"which are","whichs":"which is","whichve":"which have","whod":"who would","whodve":"who would have","wholl":"who 
shall","whore":"who are","whos":"who is","whove":"who have","whyd":"why did","whyre":"why are","whys":"why is","wont":"will not","wouldve":"would have","wouldnt":"would not","wouldntve":"would not have","yall":"you all","yalldve":"you all would have","yallre":"you all are","youd":"you had","youll":"you shall","youre":"you are","youve":"you have","'re":"are","thats":"that is"}
nlp_model/fingerprint.pb ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4ff0637eaea9c0888a9a1a47f51eb29379df82b783d4a6d32e02374bc8b9e368
3
+ size 57
nlp_model/keras_metadata.pb ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5316ddb63ad7ec3c3f14a8ebc6e92857230dabfdba85d3b3bbf5bc1fb71ce7d6
3
+ size 21115
nlp_model/saved_model.pb ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0da8aec7aca4c5d0faf1ed830243e6fee7c6a38cc6d92e13fea82aeeb7e652e4
3
+ size 10039014
nlp_model/variables/variables.data-00000-of-00001 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:06d21a9640a08890dad97cfa91d3560a1f520ebc177af67f1d9a09499ea3c7cf
3
+ size 1029457957
nlp_model/variables/variables.index ADDED
Binary file (14.7 kB). View file
 
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ streamlit==1.28.0
2
+ Pillow == 9.4.0
3
+ nltk == 3.8.1
4
+ numpy == 1.24.3
5
+ pandas == 2.0.3
6
+ tensorflow == 2.14.0
7
+ regex == 2022.7.9
8
+ joblib