ierhon committed on
Commit
59c9077
1 Parent(s): 769f501

fix cache ignore

Browse files
Files changed (1) hide show
  1. app.py +3 -2
app.py CHANGED
@@ -26,8 +26,9 @@ def train(data: str, message: str):
26
  tokenizer.fit_on_texts(list(dset.keys()))
27
 
28
  vocab_size = len(tokenizer.word_index) + 1
 
29
  if hash_str(data)+".keras" in os.listdir("cache"):
30
- model = load_model(hash_str(data)+".keras")
31
  else:
32
  input_layer = Input(shape=(inp_len,))
33
  emb_layer = Embedding(input_dim=vocab_size, output_dim=emb_size, input_length=inp_len)(input_layer)
@@ -67,7 +68,7 @@ def train(data: str, message: str):
67
  model.compile(loss="sparse_categorical_crossentropy", metrics=["accuracy",])
68
 
69
  model.fit(X, y, epochs=32, batch_size=8, workers=4, use_multiprocessing=True)
70
- model.save("cache/{data_hash}.keras")
71
  tokens = tokenizer.texts_to_sequences([message,])[0]
72
  prediction = model.predict(np.array([(list(tokens)+[0,]*inp_len)[:inp_len],]))[0]
73
  max_o = 0
 
26
  tokenizer.fit_on_texts(list(dset.keys()))
27
 
28
  vocab_size = len(tokenizer.word_index) + 1
29
+ data_hash = hash_str(data)+".keras"
30
  if hash_str(data)+".keras" in os.listdir("cache"):
31
+ model = load_model(data_hash)
32
  else:
33
  input_layer = Input(shape=(inp_len,))
34
  emb_layer = Embedding(input_dim=vocab_size, output_dim=emb_size, input_length=inp_len)(input_layer)
 
68
  model.compile(loss="sparse_categorical_crossentropy", metrics=["accuracy",])
69
 
70
  model.fit(X, y, epochs=32, batch_size=8, workers=4, use_multiprocessing=True)
71
+ model.save(f"cache/{data_hash}.keras")
72
  tokens = tokenizer.texts_to_sequences([message,])[0]
73
  prediction = model.predict(np.array([(list(tokens)+[0,]*inp_len)[:inp_len],]))[0]
74
  max_o = 0