Bajiyo committed
Commit 96273c1 · verified · 1 Parent(s): ff2bbdf

Update app.py

Files changed (1): app.py +17 -10
app.py CHANGED
@@ -1,20 +1,27 @@
 import gradio as gr
- from huggingface_hub import from_pretrained_keras
-
- # Load the model from Hugging Face Hub
- model = from_pretrained_keras("Bajiyo/ml-en-transliteration")
- # Load the saved model and tokenizers
 import json
 from keras.preprocessing.sequence import pad_sequences
+ from huggingface_hub import cached_download, from_pretrained_keras
+
+ # Load the model from Hugging Face Hub (assuming the model identifier is "Bajiyo/ml-en-transliteration")
+ model = from_pretrained_keras("Bajiyo/ml-en-transliteration")
+
+ # Define URLs for tokenizer files on Hugging Face Hub (replace with actual model identifier if different)
+ source_tokenizer_url = f"https://huggingface.co/Bajiyo/ml-en-transliteration/resolve/main/source_tokenizer.json"
+ target_tokenizer_url = f"https://huggingface.co/Bajiyo/ml-en-transliteration/resolve/main/target_tokenizer.json"
+
+ # Download tokenizer files using cached_download (avoids redundant downloads)
+ source_tokenizer_path = cached_download(source_tokenizer_url)
+ target_tokenizer_path = cached_download(target_tokenizer_url)
+
+ # Load tokenizers from downloaded files
+ from keras.preprocessing.text import tokenizer_from_json

- # Load tokenizer configurations from local files (assuming they are saved locally)
- source_tokenizer_path = "Bajiyo/ml-en-transliteration/source_tokenizer.json"  # Replace with actual path
 with open(source_tokenizer_path, "r") as f:
-     source_tokenizer_config = json.load(f)
+     source_tokenizer = tokenizer_from_json(json.load(f))

- target_tokenizer_path = "Bajiyo/ml-en-transliteration/target_tokenizer.json"  # Replace with actual path
 with open(target_tokenizer_path, "r") as f:
-     target_tokenizer_config = json.load(f)
+     target_tokenizer = tokenizer_from_json(json.load(f))

 # Reconstruct tokenizers
 from keras.preprocessing.text import tokenizer_from_json
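
A note on the hub API used above: cached_download has since been deprecated and removed in newer releases of huggingface_hub. A minimal sketch of the same download-and-load step using hf_hub_download, which resolves files by repo id and filename instead of a raw resolve URL. The repo id and filenames are taken from the diff; whether the JSON files hold a raw tokenizer.to_json() string or a re-encoded dict is an assumption flagged in the comments:

from huggingface_hub import hf_hub_download
from keras.preprocessing.text import tokenizer_from_json

# Fetch the tokenizer configs from the model repo into the local HF cache
source_tokenizer_path = hf_hub_download(
    repo_id="Bajiyo/ml-en-transliteration", filename="source_tokenizer.json"
)
target_tokenizer_path = hf_hub_download(
    repo_id="Bajiyo/ml-en-transliteration", filename="target_tokenizer.json"
)

# tokenizer_from_json expects a JSON *string*; this assumes the files were
# written with tokenizer.to_json() directly. If they were written with
# json.dump(tokenizer.to_json(), f), use json.load(f) as in the diff above.
with open(source_tokenizer_path, "r") as f:
    source_tokenizer = tokenizer_from_json(f.read())
with open(target_tokenizer_path, "r") as f:
    target_tokenizer = tokenizer_from_json(f.read())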
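
The hunk imports pad_sequences, but the preprocessing and inference wiring sit outside it. For orientation only, a hypothetical sketch of how these pieces typically fit together in a character-level transliteration app; max_seq_length, the transliterate helper, and the assumption that the model emits a per-timestep softmax over the target vocabulary are illustrative guesses, not taken from the repo:

import numpy as np
from keras.preprocessing.sequence import pad_sequences

max_seq_length = 50  # hypothetical; must match the length used at training time

def transliterate(text):
    # Encode the Malayalam input as an integer sequence and pad to fixed length
    seq = source_tokenizer.texts_to_sequences([text])
    padded = pad_sequences(seq, maxlen=max_seq_length, padding="post")
    # Assumed output shape: (1, max_seq_length, target_vocab_size)
    preds = model.predict(padded)
    ids = np.argmax(preds[0], axis=-1)
    # Map predicted ids back to characters, skipping the padding id 0
    return "".join(target_tokenizer.index_word.get(int(i), "") for i in ids if i != 0)

Since app.py already imports gradio, such a function would plug straight into a simple interface, e.g. gr.Interface(fn=transliterate, inputs="text", outputs="text").launch().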