vrajeshbhatt committed on
Commit
7b0d52e
1 Parent(s): 04cf5c2

Upload 15 files

README.md CHANGED
Binary files a/README.md and b/README.md differ
 
accuracy.txt ADDED
@@ -0,0 +1,7 @@
+ Epoch 50/50
+ 10400/10400 [==============================] - 15s 1ms/step - loss: 0.3548 - category_loss: 0.1281 - priority_loss: 0.2267 - category_accuracy: 0.9525 - priority_accuracy: 0.8862 - val_loss: 2.2296 - val_category_loss: 1.0585 - val_priority_loss: 1.1711 - val_category_accuracy: 0.8338 - val_priority_accuracy: 0.7346
+ ------------------------------------------------------------------------------
+ Assign
+
+ Epoch 50/50
+ 10400/10400 [==============================] - 15s 1ms/step - loss: 1.0819 - accuracy: 0.6577 - val_loss: 2.9107 - val_accuracy: 0.3846
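accuracy.txt captures only the console line of the final epoch for each model; the wide train/validation gaps it shows (0.95 vs 0.83 category accuracy, 0.66 vs 0.38 assign accuracy) are easier to track with a per-epoch log. A minimal sketch, assuming the same Keras 2.3.1 setup as app.py (the CSV filename is hypothetical):

# Illustrative: persist every epoch's metrics instead of copying console output.
from keras.callbacks import CSVLogger

history_logger = CSVLogger('training_history.csv', append=True)
# pass callbacks=[history_logger] to model.fit(...) alongside the existing callback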
app.py ADDED
@@ -0,0 +1,433 @@
+ """
+ Project: Ticket Sampling
+
+ """
+
+ #Import all required modules
+ import os
+ from flask import Flask, request, render_template, make_response
+ from werkzeug.utils import secure_filename
+ from keras.models import model_from_json
+ from tensorflow import keras
+ import pandas as pd
+ from sklearn.feature_extraction.text import TfidfVectorizer
+ from nltk.corpus import stopwords
+ from nltk.stem import WordNetLemmatizer
+ from nltk.tokenize import word_tokenize
+ import string
+ import numpy as np
+ from sklearn.preprocessing import OneHotEncoder
+ from sklearn.model_selection import train_test_split
+ import pickle
+ from keras.optimizers import Adam
+ from keras.models import Model, Sequential
+ from keras.layers import Dense, Input, Dropout
+
+ #Initialize the Flask application
+ app = Flask(__name__)
+
+ #Store paths for the upload, templates and retrain directories
+ uploads_dir = os.path.join(app.root_path, 'uploads')
+ templates_dir = os.path.join(app.root_path, 'templates')
+ retrain_dir = os.path.join(app.root_path, 'retrain')
+ os.makedirs(uploads_dir, exist_ok=True)
+
+ #Load the original project names from the pickled encoder
+ enc_project_original = pickle.load(open(os.path.join(retrain_dir, "enc_project_original.pickle"), "rb"))
+ project_original = list(np.concatenate(enc_project_original.categories_).flat)
+
+ #Loss list and test metrics for model evaluation
+ loss_list = ['categorical_crossentropy', 'categorical_crossentropy']
+ test_metrics = {'category': 'accuracy', 'priority': 'accuracy'}
+
+ #The model is currently trained for 50 epochs with a 0.0001 learning rate and a batch size of 10
+ EPOCHS = 50
+
+ #Home page of the application
+ @app.route('/')
+ def home():
+     return render_template('prediction.html', original_project=project_original)
+
+ #Text pre-processing helper: tokenize, strip punctuation and stop words, lemmatize
+ def data_preparation(desc):
+     tokens = []
+     stop_words = set(stopwords.words('english'))
+     lemmatizer = WordNetLemmatizer()
+     table = str.maketrans('', '', string.punctuation)
+
+     for i in desc:
+         token = word_tokenize(i)
+         words = pd.Series(token).str.lower()
+         words = [w.translate(table) for w in words]
+         words = [w for w in words if w not in stop_words]
+         words = pd.Series(words).replace('n', '')
+         words = [w for w in words if w.isalpha()]
+         words = [lemmatizer.lemmatize(word) for word in words]
+         text = ' '.join(words)
+         tokens.append(text)
+     return np.array(tokens)
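For reference, a rough round trip through this helper, assuming the NLTK punkt, stopwords and wordnet data have been downloaded (input and output are illustrative):

# data_preparation(["The server is not responding!!"])
# -> roughly array(['server responding']): punctuation and stop words are
#    stripped, non-alphabetic tokens are dropped, and the rest is lemmatized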
+
+ #Project name preprocessing for training
+ def project_preparation(project):
+     proj = []
+     table = str.maketrans('', '', string.punctuation)
+     if not project.empty:
+         for i in project:
+             words = pd.Series(i).str.lower()
+             words = [w.translate(table) for w in words]
+             words = [w.replace(" ", "") for w in words]
+             words = [w.strip() for w in words]
+             txt = ' '.join(words)
+             proj.append(txt)
+     return pd.DataFrame(proj)
+
+ #Project name preprocessing for prediction
+ def pred_project_preparation(project):
+     project = project.lower()
+     table = str.maketrans('', '', string.punctuation)
+     project = project.translate(table)
+     project = project.replace(" ", "")
+     project = project.strip()
+     return np.array(project)
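Similarly, a hypothetical project-name round trip:

# pred_project_preparation("Data Platform")   # -> array('dataplatform', dtype='<U12')
# the .reshape(-1, 1) in predict() below then gives the (1, 1) shape the encoder expects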
+
+ #Route for the model retrain template
+ @app.route("/temp_retrain")
+ def temp_retrain():
+     return render_template('model_retrain.html')
+
+ #Route for the data preview of the working model
+ @app.route('/retrain_data_preview', methods=['GET', 'POST'])
+ def retrain_data_preview():
+     with open(os.path.join(retrain_dir, 'file_name.txt'), 'r') as f:
+         return render_template('retrain_data_preview.html', name=f.read())
+
+ #Route for the HTML data table template (newly loaded file)
+ @app.route('/data')
+ def data():
+     return render_template('data.html')
+
+ #Route for the HTML data table template
+ @app.route('/retrain_data')
+ def retrain_data():
+     return render_template('retrain_data.html')
+
+ #Prediction
+ @app.route("/predict", methods=['POST'])
+ def predict():
+     #Load the models and their dependencies
+     with open(os.path.join(retrain_dir, 'cat_prior_model.json'), 'r') as json_file:
+         multi_model = model_from_json(json_file.read())
+     multi_model.load_weights(os.path.join(retrain_dir, 'cat_prior_model.h5'))
+
+     with open(os.path.join(retrain_dir, 'assign_model.json'), 'r') as json_file1:
+         assign_model = model_from_json(json_file1.read())
+     assign_model.load_weights(os.path.join(retrain_dir, 'assign_model.h5'))
+
+     vectorizer = pickle.load(open(os.path.join(retrain_dir, "vectorizer.pickle"), "rb"))
+     enc_project = pickle.load(open(os.path.join(retrain_dir, "enc_project.pickle"), "rb"))
+     enc_category = pickle.load(open(os.path.join(retrain_dir, "enc_category.pickle"), "rb"))
+     enc_priority = pickle.load(open(os.path.join(retrain_dir, "enc_priority.pickle"), "rb"))
+     enc_assign = pickle.load(open(os.path.join(retrain_dir, "enc_assign.pickle"), "rb"))
+
+     #Read the project and description from the form
+     project = request.form['select2-single-box project']
+     print(project)
+     text = request.form['desc']
+     print(text)
+     #Pre-process and encode the inputs
+     prep_project = pred_project_preparation(project)
+     vect_project = enc_project.transform(prep_project.reshape(-1, 1)).toarray()
+     prep_text = data_preparation([text])
+     vect_desc = vectorizer.transform(prep_text).toarray()
+     df = np.concatenate([vect_project, vect_desc], axis=1)
+     #Predict category and priority
+     pred = multi_model.predict(df)
+
+     #Decode the prediction results
+     category = enc_category.inverse_transform(pred[0])
+     priority = enc_priority.inverse_transform(pred[1])
+
+     #Prepare the input for assignment prediction and decode the result
+     df1 = np.concatenate([vect_project, pred[0], pred[1], vect_desc], axis=1)
+     pred1 = assign_model.predict(df1)
+     ass = enc_assign.inverse_transform(pred1)
+     print('\n', category[0][0], '\n', priority[0][0], '\n', ass[0][0])
+     return render_template('prediction.html', project=project, original_project=project_original, text=text, category=category[0][0], priority=priority[0][0], assign=ass[0][0], count=int(1))
+
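The two concatenations above agree with the input widths recorded in the bundled model JSONs: cat_prior_model.json declares a 1760-wide input (one-hot project plus TF-IDF description), and assign_model.json declares 1771 = 1760 + 6 category outputs + 5 priority outputs. Illustrative shapes for a single request:

# df.shape    -> (1, 1760)   one-hot project | TF-IDF description
# pred[0]     -> (1, 6)      category probabilities
# pred[1]     -> (1, 5)      priority probabilities
# df1.shape   -> (1, 1771)   since 1760 + 6 + 5 == 1771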
+ #Callback that reports training progress after each epoch
+ class LossAndErrorPrintingCallback(keras.callbacks.Callback):
+     def on_epoch_end(self, epoch, logs=None):
+         global EPOCHS, progress_count
+         progress_count = int(((epoch + 1) / EPOCHS) * 100)
+         response = make_response(render_template('model_retrain.html'))
+         print('----------------------------------------' + str(progress_count) + '-------------------------------')
+         return response
+
+ #Get the number of rows in the chosen file
+ def show_rows(data):
+     row = data.shape[0]
+     return render_template('model_retrain.html', rows=row)
+
+ #Retrain the models
+ @app.route("/get_data", methods=['GET', 'POST'])
+ def get_data():
+     if request.method == 'POST':
+         global data, desc, project, category, priority, vectorizer, vect_desc, file_path, multi_model, assign_model, project_original
+         global enc_project_original, enc_project, enc_category, enc_priority, enc_assign
+         global trans_category, trans_priority, trans_assign, trans_project, profile
+         #Path of the uploaded file
+         path = os.path.join(uploads_dir, profile.filename)
+         print(path)
+         #Check for a CSV or Excel file
+         if path:
+             if profile.filename.endswith('.csv'):
+                 data = pd.read_csv(path)
+             elif profile.filename.endswith(('.xlsx', '.xls')):
+                 data = pd.read_excel(path)
+             else:
+                 pass
+
+         #Initialize the encoders
+         enc_project_original = OneHotEncoder()
+         enc_project = OneHotEncoder()
+         enc_category = OneHotEncoder()
+         enc_priority = OneHotEncoder()
+         enc_assign = OneHotEncoder()
+         #Get the columns from the selected file
+         desc = data['desc']
+         project = data['project']
+         category = pd.DataFrame(data['category'])
+         priority = pd.DataFrame(data['priority'])
+         assign = pd.DataFrame(data['assign to'])
+         #Pre-processing
+         prep_data = data_preparation(desc)
+         vectorizer = TfidfVectorizer()
+         vect_desc = vectorizer.fit_transform(prep_data).toarray()
+         prep_project = project_preparation(project)
+         #One-hot encoding
+         trans_project_original = enc_project_original.fit_transform(pd.DataFrame(project))
+         #Update the project list shown on the prediction page
+         project_original = list(np.concatenate(enc_project_original.categories_).flat)
+         trans_project = enc_project.fit_transform(prep_project).toarray()
+         trans_category = enc_category.fit_transform(category).toarray()
+         trans_priority = enc_priority.fit_transform(priority).toarray()
+         trans_assign = enc_assign.fit_transform(assign).toarray()
+         #Prepare the lists of projects, categories, priorities and assignees
+         d_x_cat_pri = {}
+         d_y_cat_pri = {}
+         d_x_ass = {}
+         d_y_ass = {}
+         pr = list(np.concatenate(enc_project.categories_).flat)
+         cate = list(np.concatenate(enc_category.categories_).flat)
+         prior = list(np.concatenate(enc_priority.categories_).flat)
+         assign = list(np.concatenate(enc_assign.categories_).flat)
+
+         #Map each label to its encoded column
+         x = list(enumerate(pr))
+         y = list(enumerate(cate))
+         z = list(enumerate(prior))
+         ass = list(enumerate(assign))
+         for i, j in x:
+             d_x_cat_pri[j] = trans_project[:, i]
+         for i, j in y:
+             d_y_cat_pri[j] = trans_category[:, i]
+         for i, j in z:
+             d_y_cat_pri[j] = trans_priority[:, i]
+
+         for i, j in x:
+             d_x_ass[j] = trans_project[:, i]
+         for i, j in y:
+             d_x_ass[j] = trans_category[:, i]
+         for i, j in z:
+             d_x_ass[j] = trans_priority[:, i]
+         for i, j in ass:
+             d_y_ass[j] = trans_assign[:, i]
+         #DataFrames for category and priority prediction
+         d_x_cat_pri = pd.DataFrame(data=d_x_cat_pri)
+         X_cat_pri = pd.concat([d_x_cat_pri, pd.DataFrame(vect_desc)], axis=1)
+         Y_cat_pri = pd.DataFrame(data=d_y_cat_pri)
+         #Split the data into train and test sets
+         X_train_duo, X_test_duo, Y_train_duo, Y_test_duo = train_test_split(X_cat_pri, Y_cat_pri, test_size=0.20, random_state=10)
+
+         #DataFrames for assignment prediction
+         d_x_ass = pd.DataFrame(data=d_x_ass)
+         X_ass = pd.concat([d_x_ass, pd.DataFrame(vect_desc)], axis=1)
+         Y_ass = pd.DataFrame(data=d_y_ass)
+         #Split the data into train and test sets
+         X_train_ass, X_test_ass, Y_train_ass, Y_test_ass = train_test_split(X_ass, Y_ass, test_size=0.20, random_state=10)
+
+         # ---------------------Train--------------------------
+         category_train = Y_train_duo[cate]
+         category_nodes = category_train.shape[1]
+         category_train = category_train.values
+
+         priority_train = Y_train_duo[prior]
+         priority_nodes = priority_train.shape[1]
+         priority_train = priority_train.values
+
+         # ---------------------Test--------------------------
+         category_test = Y_test_duo[cate].values
+         priority_test = Y_test_duo[prior].values
+
+         #-------------------------Multi model (category & priority)-------------------------
+         #The empty Sequential simply passes the input through; it appears as "sequential_1" in the saved JSON
+         multi_model = Sequential()
+         duo_model_input = Input(shape=(X_train_duo.shape[1],))
+         x = multi_model(duo_model_input)
+         x = Dense(512, activation='relu')(x)
+         x = Dropout(0.3)(x)
+         x = Dense(256, activation='relu')(x)
+         x = Dropout(0.3)(x)
+
+         y1 = Dense(128, activation='relu')(x)
+         y1 = Dropout(0.3)(y1)
+         y1 = Dense(64, activation='relu')(y1)
+         y1 = Dropout(0.3)(y1)
+
+         y2 = Dense(128, activation='relu')(x)
+         y2 = Dropout(0.3)(y2)
+         y2 = Dense(64, activation='relu')(y2)
+         y2 = Dropout(0.3)(y2)
+
+         y1 = Dense(category_nodes, activation='softmax', name='category')(y1)
+         y2 = Dense(priority_nodes, activation='softmax', name='priority')(y2)
+
+         multi_model = Model(inputs=duo_model_input, outputs=[y1, y2])
+         multi_model.compile(loss=loss_list, optimizer=Adam(lr=0.0001), metrics=test_metrics)
+
+         multi_model.fit(x=X_train_duo, y=[category_train, priority_train], batch_size=10, epochs=EPOCHS,
+                         validation_data=(X_test_duo, [category_test, priority_test]), callbacks=[LossAndErrorPrintingCallback()])
+
+         #-------------------------Assign model-------------------------
+         ass_model = Sequential()
+         ass_model_input = Input(shape=(X_train_ass.shape[1],))
+         x = ass_model(ass_model_input)
+
+         x = Dense(512, activation='relu')(x)
+         x = Dropout(0.3)(x)
+         x = Dense(256, activation='relu')(x)
+         x = Dropout(0.3)(x)
+         x = Dense(128, activation='relu')(x)
+         x = Dropout(0.3)(x)
+         x = Dense(64, activation='relu')(x)
+         x = Dropout(0.3)(x)
+
+         y1 = Dense(trans_assign.shape[1], activation='softmax', name='assign')(x)
+
+         assign_model = Model(inputs=ass_model_input, outputs=[y1])
+         assign_model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001), metrics=['accuracy'])
+
+         assign_model.fit(x=X_train_ass, y=Y_train_ass, batch_size=10, epochs=EPOCHS,
+                          validation_data=(X_test_ass, Y_test_ass), callbacks=[LossAndErrorPrintingCallback()])
+         #Store the models and their dependencies
+         store()
+         return render_template('model_retrain.html', complete=1, result='Your Model is Trained and Stored Successfully')
+
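Because compile() receives two losses and no loss_weights, Keras minimizes their unweighted sum, which the final epoch in accuracy.txt confirms (0.1281 + 0.2267 = 0.3548). If one head should count for more, a sketch (the weights are illustrative, not part of this commit):

# multi_model.compile(loss=loss_list, optimizer=Adam(lr=0.0001),
#                     metrics=test_metrics, loss_weights=[2.0, 1.0])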
+ #Upload the selected file
+ @app.route('/uploader', methods=['GET', 'POST'])
+ def uploader():
+     global html_table_data
+     if request.method == 'POST':
+         global profile
+         profile = request.files['file']
+         path = os.path.join(uploads_dir, profile.filename)
+         #Check for a CSV or Excel file
+         if profile.filename.endswith('.csv'):
+             #Save and read the selected file
+             profile.save(os.path.join(uploads_dir, secure_filename(profile.filename)))
+             data = pd.read_csv(os.path.join(uploads_dir, secure_filename(profile.filename)))
+         elif profile.filename.endswith(('.xlsx', '.xls')):
+             #Save and read the selected file
+             profile.save(os.path.join(uploads_dir, secure_filename(profile.filename)))
+             data = pd.read_excel(os.path.join(uploads_dir, secure_filename(profile.filename)))
+         else:
+             return render_template('model_retrain.html', msg=1)
+
+         if data is not None:
+             #Check the file structure (are all required columns present?)
+             if ('project' not in data) or ('desc' not in data) or ('category' not in data) or ('priority' not in data) or ('assign to' not in data):
+                 return render_template('model_retrain.html', msg=1)
+             else:
+                 #CSS for the HTML table
+                 css = "{{ url_for('static',filename='css/df_style.css') }}"
+                 pd.set_option('colheader_justify', 'center')
+                 #HTML table for the file data
+                 html_string = '''
+                 <head>
+                 <link rel="stylesheet" type="text/css" href="{css}">
+                 </head>
+                 <body>
+                 {table}
+                 </body>
+                 '''
+                 html_table_data = html_string.format(table=data.to_html(classes='mystyle'), css=css)
+                 #Write the table out as an HTML template
+                 with open(os.path.join(templates_dir, "data.html"), 'w') as f:
+                     f.write(html_table_data)
+
+         return render_template('model_retrain.html', upload=1, f_name=profile.filename, row_data=data.shape[0])
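A more compact equivalent of the chained column check above, sketched with set arithmetic (same required columns):

# required = {'project', 'desc', 'category', 'priority', 'assign to'}
# if required - set(data.columns):
#     return render_template('model_retrain.html', msg=1)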
+
+ #Store the models and their dependencies
+ def store():
+     global html_table_data, profile
+     pickle.dump(vectorizer, open(os.path.join(retrain_dir, "vectorizer.pickle"), "wb"))
+     pickle.dump(enc_project_original, open(os.path.join(retrain_dir, "enc_project_original.pickle"), "wb"))
+     pickle.dump(enc_project, open(os.path.join(retrain_dir, "enc_project.pickle"), "wb"))
+     pickle.dump(enc_category, open(os.path.join(retrain_dir, "enc_category.pickle"), "wb"))
+     pickle.dump(enc_priority, open(os.path.join(retrain_dir, "enc_priority.pickle"), "wb"))
+     pickle.dump(enc_assign, open(os.path.join(retrain_dir, "enc_assign.pickle"), "wb"))
+
+     multi_model_json = multi_model.to_json()
+     with open(os.path.join(retrain_dir, "cat_prior_model.json"), "w") as json_file:
+         json_file.write(multi_model_json)
+     multi_model.save_weights(os.path.join(retrain_dir, "cat_prior_model.h5"))
+
+     assign_model_json = assign_model.to_json()
+     with open(os.path.join(retrain_dir, "assign_model.json"), "w") as json_file:
+         json_file.write(assign_model_json)
+     assign_model.save_weights(os.path.join(retrain_dir, "assign_model.h5"))
+     #Store the name of the chosen file for later use
+     with open(os.path.join(retrain_dir, "file_name.txt"), 'w') as f:
+         f.write(profile.filename)
+     #Write the table out as an HTML template
+     with open(os.path.join(templates_dir, "retrain_data.html"), 'w') as f:
+         f.write(html_table_data)
+
+ #Clear the Jinja template cache before each request
+ def before_request():
+     app.jinja_env.cache = {}
+
+ if __name__ == '__main__':
+     app.before_request(before_request)
+     app.run(host='127.0.0.1')
assign_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:827a4f1edf6a7ff1a833c42552df87662b6b60e7532a97d943c7a975f81b5e4f
+ size 4349728
assign_model.json ADDED
@@ -0,0 +1 @@
+ {"class_name": "Model", "config": {"name": "model_1", "layers": [{"name": "input_1", "class_name": "InputLayer", "config": {"batch_input_shape": [null, 1771], "dtype": "float32", "sparse": false, "name": "input_1"}, "inbound_nodes": []}, {"name": "sequential_1", "class_name": "Sequential", "config": {"name": "sequential_1", "layers": [], "build_input_shape": [null, 1771]}, "inbound_nodes": [[["input_1", 0, 0, {}]]]}, {"name": "dense_1", "class_name": "Dense", "config": {"name": "dense_1", "trainable": true, "dtype": "float32", "units": 512, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "inbound_nodes": [[["sequential_1", 1, 0, {}]]]}, {"name": "dropout_1", "class_name": "Dropout", "config": {"name": "dropout_1", "trainable": true, "dtype": "float32", "rate": 0.3, "noise_shape": null, "seed": null}, "inbound_nodes": [[["dense_1", 0, 0, {}]]]}, {"name": "dense_2", "class_name": "Dense", "config": {"name": "dense_2", "trainable": true, "dtype": "float32", "units": 256, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "inbound_nodes": [[["dropout_1", 0, 0, {}]]]}, {"name": "dropout_2", "class_name": "Dropout", "config": {"name": "dropout_2", "trainable": true, "dtype": "float32", "rate": 0.3, "noise_shape": null, "seed": null}, "inbound_nodes": [[["dense_2", 0, 0, {}]]]}, {"name": "dense_3", "class_name": "Dense", "config": {"name": "dense_3", "trainable": true, "dtype": "float32", "units": 128, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "inbound_nodes": [[["dropout_2", 0, 0, {}]]]}, {"name": "dropout_3", "class_name": "Dropout", "config": {"name": "dropout_3", "trainable": true, "dtype": "float32", "rate": 0.3, "noise_shape": null, "seed": null}, "inbound_nodes": [[["dense_3", 0, 0, {}]]]}, {"name": "dense_4", "class_name": "Dense", "config": {"name": "dense_4", "trainable": true, "dtype": "float32", "units": 64, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "inbound_nodes": [[["dropout_3", 0, 0, {}]]]}, {"name": "dropout_4", "class_name": "Dropout", "config": {"name": "dropout_4", "trainable": true, "dtype": "float32", "rate": 0.3, "noise_shape": null, "seed": null}, "inbound_nodes": [[["dense_4", 0, 0, {}]]]}, {"name": "category", "class_name": "Dense", "config": {"name": 
"category", "trainable": true, "dtype": "float32", "units": 22, "activation": "softmax", "use_bias": true, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "inbound_nodes": [[["dropout_4", 0, 0, {}]]]}], "input_layers": [["input_1", 0, 0]], "output_layers": [["category", 0, 0]]}, "keras_version": "2.3.1", "backend": "tensorflow"}
cat_prior_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:03d4ce3b84c1c2d5765868fd983ff808310f6b4afb56796962543efd5f5ae6a3
+ size 4500048
cat_prior_model.json ADDED
@@ -0,0 +1 @@
+ {"class_name": "Model", "config": {"name": "model_1", "layers": [{"name": "input_1", "class_name": "InputLayer", "config": {"batch_input_shape": [null, 1760], "dtype": "float32", "sparse": false, "name": "input_1"}, "inbound_nodes": []}, {"name": "sequential_1", "class_name": "Sequential", "config": {"name": "sequential_1", "layers": [], "build_input_shape": [null, 1760]}, "inbound_nodes": [[["input_1", 0, 0, {}]]]}, {"name": "dense_1", "class_name": "Dense", "config": {"name": "dense_1", "trainable": true, "dtype": "float32", "units": 512, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "inbound_nodes": [[["sequential_1", 1, 0, {}]]]}, {"name": "dropout_1", "class_name": "Dropout", "config": {"name": "dropout_1", "trainable": true, "dtype": "float32", "rate": 0.3, "noise_shape": null, "seed": null}, "inbound_nodes": [[["dense_1", 0, 0, {}]]]}, {"name": "dense_2", "class_name": "Dense", "config": {"name": "dense_2", "trainable": true, "dtype": "float32", "units": 256, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "inbound_nodes": [[["dropout_1", 0, 0, {}]]]}, {"name": "dropout_2", "class_name": "Dropout", "config": {"name": "dropout_2", "trainable": true, "dtype": "float32", "rate": 0.3, "noise_shape": null, "seed": null}, "inbound_nodes": [[["dense_2", 0, 0, {}]]]}, {"name": "dense_3", "class_name": "Dense", "config": {"name": "dense_3", "trainable": true, "dtype": "float32", "units": 128, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "inbound_nodes": [[["dropout_2", 0, 0, {}]]]}, {"name": "dense_5", "class_name": "Dense", "config": {"name": "dense_5", "trainable": true, "dtype": "float32", "units": 128, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "inbound_nodes": [[["dropout_2", 0, 0, {}]]]}, {"name": "dropout_3", "class_name": "Dropout", "config": {"name": "dropout_3", "trainable": true, "dtype": "float32", "rate": 0.3, "noise_shape": null, "seed": null}, "inbound_nodes": [[["dense_3", 0, 0, {}]]]}, {"name": "dropout_5", "class_name": "Dropout", "config": {"name": "dropout_5", "trainable": true, "dtype": "float32", "rate": 0.3, "noise_shape": null, "seed": null}, "inbound_nodes": [[["dense_5", 0, 0, {}]]]}, {"name": "dense_4", "class_name": "Dense", "config": {"name": "dense_4", 
"trainable": true, "dtype": "float32", "units": 64, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "inbound_nodes": [[["dropout_3", 0, 0, {}]]]}, {"name": "dense_6", "class_name": "Dense", "config": {"name": "dense_6", "trainable": true, "dtype": "float32", "units": 64, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "inbound_nodes": [[["dropout_5", 0, 0, {}]]]}, {"name": "dropout_4", "class_name": "Dropout", "config": {"name": "dropout_4", "trainable": true, "dtype": "float32", "rate": 0.3, "noise_shape": null, "seed": null}, "inbound_nodes": [[["dense_4", 0, 0, {}]]]}, {"name": "dropout_6", "class_name": "Dropout", "config": {"name": "dropout_6", "trainable": true, "dtype": "float32", "rate": 0.3, "noise_shape": null, "seed": null}, "inbound_nodes": [[["dense_6", 0, 0, {}]]]}, {"name": "category", "class_name": "Dense", "config": {"name": "category", "trainable": true, "dtype": "float32", "units": 6, "activation": "softmax", "use_bias": true, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "inbound_nodes": [[["dropout_4", 0, 0, {}]]]}, {"name": "priority", "class_name": "Dense", "config": {"name": "priority", "trainable": true, "dtype": "float32", "units": 5, "activation": "softmax", "use_bias": true, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "inbound_nodes": [[["dropout_6", 0, 0, {}]]]}], "input_layers": [["input_1", 0, 0]], "output_layers": [["category", 0, 0], ["priority", 0, 0]]}, "keras_version": "2.3.1", "backend": "tensorflow"}
enc_assign.pickle ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:23f09c1dc57b875821320ad54ed644be7d81c86bea9aa44bae03c640a4d225f6
+ size 697
enc_category.pickle ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:33cd227dbe547af3560f32adab71d804373b1274150d3529d798711cb4ba0d49
+ size 486
enc_priority.pickle ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:06ca7dc97bb56ae7d02bcb579cd2840858f3c1f38c2b1b3a7a7b9b7e8c30fcb9
+ size 462
enc_project.pickle ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a84d05726bbee168243ae2328842828e0f3cd28d07de05596b07300b0ab94a2c
+ size 485
enc_project_original.pickle ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:30e33bb48a43668dda2dc09b82efe706b28f0c131a32f9d55f6eec871bcece47
+ size 503
pyvenv.cfg ADDED
@@ -0,0 +1,3 @@
+ home = C:\Users\vrajb\AppData\Local\Programs\Python\Python39
+ include-system-site-packages = false
+ version = 3.9.13
requirements.txt ADDED
@@ -0,0 +1,58 @@
+ absl-py==0.12.0
+ astor==0.8.1
+ astunparse==1.6.3
+ cached-property==1.5.2
+ cachetools==4.2.1
+ certifi==2020.12.5
+ chardet==4.0.0
+ click==7.1.2
+ Flask==1.1.2
+ flatbuffers==1.12
+ gast==0.2.2
+ google-auth==1.28.0
+ google-auth-oauthlib==0.4.4
+ google-pasta==0.2.0
+ grpcio==1.36.1
+ h5py==2.10.0
+ idna==2.10
+ importlib-metadata==3.10.0
+ itsdangerous==1.1.0
+ Jinja2==2.11.3
+ joblib==1.0.1
+ Keras==2.3.1
+ Keras-Applications==1.0.8
+ Keras-Preprocessing==1.1.2
+ Markdown==3.3.4
+ MarkupSafe==1.1.1
+ nltk==3.5
+ numpy==1.17.2
+ oauthlib==3.1.0
+ opt-einsum==3.3.0
+ pandas==1.0.1
+ protobuf==3.15.7
+ pyasn1==0.4.8
+ pyasn1-modules==0.2.8
+ python-dateutil==2.8.1
+ pytz==2021.1
+ PyYAML==5.4.1
+ regex==2021.3.17
+ requests==2.25.1
+ requests-oauthlib==1.3.0
+ rsa==4.7.2
+ scikit-learn==0.23.2
+ scipy==1.4.1
+ six==1.15.0
+ sklearn==0.0
+ tensorboard==1.15.0
+ tensorboard-plugin-wit==1.8.0
+ tensorflow==1.15.0
+ tensorflow-estimator==1.15.1
+ termcolor==1.1.0
+ threadpoolctl==2.1.0
+ tqdm==4.59.0
+ typing-extensions==3.7.4.3
+ urllib3==1.26.4
+ Werkzeug==1.0.1
+ wrapt==1.12.1
+ xlrd==1.2.0
+ zipp==3.4.1
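Note that pyvenv.cfg above records Python 3.9.13, while tensorflow==1.15.0 ships wheels only for Python 3.7 and earlier, so the environment likely cannot be reinstalled exactly as pinned. A fail-fast guard (illustrative) makes the mismatch explicit:

import sys

# tensorflow 1.15.x has no wheels for Python 3.8+
assert sys.version_info < (3, 8), "tensorflow==1.15.0 needs Python 3.7 or earlier"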
vectorizer.pickle ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bbea419072f0a60cbb40467e9ec8bbe36a76ad1eb5a9b82d78aab858d3f3a713
+ size 62136