Seetha committed on
Commit
f6ec4be
1 Parent(s): b8cf00c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -1
app.py CHANGED
@@ -71,8 +71,12 @@ from huggingface_hub import HfFileSystem
71
  from tensorflow.keras.models import Sequential, model_from_json
72
  import tensorflow_datasets as tfds
73
  import tensorflow as tf
 
 
 
74
 
75
  tfds.disable_progress_bar()
 
76
 
77
  # dataset = load_dataset('Seetha/Visualization', streaming=True)
78
  # df = pd.DataFrame.from_dict(dataset['train'])
@@ -164,13 +168,21 @@ def main():
164
 
165
  # pipeline_test_output = loaded_vectorizer.transform(class_list)
166
  # predicted = loaded_model.predict(pipeline_test_output)
 
 
 
167
  json_file = open('model.json', 'r')
168
  loaded_model_json = json_file.read()
169
  json_file.close()
170
  loaded_model = model_from_json(loaded_model_json)
171
  # load weights into new model
172
  loaded_model.load_weights("model.h5")
173
-
 
 
 
 
 
174
  pred1 = predicted
175
  level0 = []
176
  count =0
 
71
  from tensorflow.keras.models import Sequential, model_from_json
72
  import tensorflow_datasets as tfds
73
  import tensorflow as tf
74
+ from tensorflow.keras.preprocessing.sequence import pad_sequences
75
+ import spacy
76
+ nlp = spacy.load('en_core_web_lg')
77
 
78
  tfds.disable_progress_bar()
79
+ MAX_SEQUENCE_LENGTH = 500
80
 
81
  # dataset = load_dataset('Seetha/Visualization', streaming=True)
82
  # df = pd.DataFrame.from_dict(dataset['train'])
 
168
 
169
  # pipeline_test_output = loaded_vectorizer.transform(class_list)
170
  # predicted = loaded_model.predict(pipeline_test_output)
171
+ text_embedding = np.zeros((len(word_index) + 1, 300))
172
+ for word, i in word_index.items():
173
+ text_embedding[i] = nlp(word).vector
174
  json_file = open('model.json', 'r')
175
  loaded_model_json = json_file.read()
176
  json_file.close()
177
  loaded_model = model_from_json(loaded_model_json)
178
  # load weights into new model
179
  loaded_model.load_weights("model.h5")
180
+
181
+ loss = tf.keras.losses.CategoricalCrossentropy() #from_logits=True
182
+ loaded_model.compile(loss=loss,optimizer=tf.keras.optimizers.Adam(1e-4))
183
+
184
+ predictions = loaded_model.predict(pad_sequences(tokenizer.texts_to_sequences(class_list),maxlen=MAX_SEQUENCE_LENGTH))
185
+ predicted = np.argmax(predictions,axis=1)
186
  pred1 = predicted
187
  level0 = []
188
  count =0