himanishprak23 Kumarkishalaya committed on
Commit
f18bb84
1 Parent(s): fb302fa

Update app.py (#3)

Browse files

- Update app.py (4af16022d9721e838c4d9d0ee922fb58eec5e09f)


Co-authored-by: Kumar Kishalaya <Kumarkishalaya@users.noreply.huggingface.co>

Files changed (1) hide show
  1. app.py +12 -1
app.py CHANGED
@@ -2,6 +2,17 @@ import gradio as gr
2
  from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
3
  from tensorflow.keras.models import load_model
4
  import pickle
 
 
 
 
 
 
 
 
 
 
 
5
 
6
  # Define the model repository and tokenizer checkpoint
7
  model_checkpoint = "himanishprak23/neural_machine_translation"
@@ -15,7 +26,7 @@ tokenizer_nmt = AutoTokenizer.from_pretrained(tokenizer_checkpoint)
15
  model_nmt = TFAutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)
16
 
17
  # Loading models, tokenizer & variables for trained LSTM translation model.
18
- model_lstm = load_model('seq2seq_model.h5')
19
  with open('eng_tokenizer.pkl', 'rb') as file:
20
  eng_tokenizer = pickle.load(file)
21
  with open('hin_tokenizer.pkl', 'rb') as file:
 
2
  from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
3
  from tensorflow.keras.models import load_model
4
  import pickle
5
+ import json
6
+ import keras
7
+
8
+ custom_objects = {
9
+ 'LSTM': keras.layers.LSTM,
10
+ }
11
+ # Load model
12
+ model_lstm = load_model('seq2seq_model.h5', custom_objects=custom_objects, compile=False)
13
+
14
+ # Recompile the model
15
+ model_lstm.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
16
 
17
  # Define the model repository and tokenizer checkpoint
18
  model_checkpoint = "himanishprak23/neural_machine_translation"
 
26
  model_nmt = TFAutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)
27
 
28
  # Loading models, tokenizer & variables for trained LSTM translation model.
29
+ model_lstm = model_lstm
30
  with open('eng_tokenizer.pkl', 'rb') as file:
31
  eng_tokenizer = pickle.load(file)
32
  with open('hin_tokenizer.pkl', 'rb') as file: