cointegrated committed
Commit f515ff9 · 1 parent: aa11d91

force download tkn

Files changed (1)
  1. app.py +7 -5
app.py CHANGED
```diff
@@ -1,9 +1,8 @@
 import gradio as gr
 import torch
 from transformers import NllbTokenizer, AutoModelForSeq2SeqLM
-
-
 MODEL_URL = 'slone/nllb-rus-tyv-v1'
+# tokenizer = NllbTokenizer.from_pretrained(MODEL_URL, force_download=True)
 
 
 lang_to_code = {
@@ -78,9 +77,12 @@ def translate_wrapper(text, src, trg, correct=None):
 
 
 article = """
-Please wait until I publish all the details.
+This is a NLLB-200-600M model fine-tuned for translation between Russian and Tyvan (Tuvan) languages,
+using the data from https://tyvan.ru/.
+
+**More details will be published soon!**
 
-Please translate one sentence at a time; the model is not working adequately with multiple sentences!
+__Please translate one sentence at a time; the model is not working adequately with multiple sentences!__
 """
 
 
@@ -101,7 +103,7 @@ if __name__ == '__main__':
     model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_URL)
     if torch.cuda.is_available():
         model.cuda()
-    tokenizer = NllbTokenizer.from_pretrained(MODEL_URL)
+    tokenizer = NllbTokenizer.from_pretrained(MODEL_URL, force_download=True)
     fix_tokenizer(tokenizer)
 
     interface.launch()
```
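
For context, a minimal sketch of how the model and tokenizer loaded above could be used for a single translation. The `translate` helper and the language codes `rus_Cyrl` and `tyv_Cyrl` are assumptions for illustration; the Space's actual `translate_wrapper`, `fix_tokenizer`, and `lang_to_code` mapping are not shown in this diff.

```python
# Usage sketch only; assumes standard NLLB-style generation and that the
# added Tyvan language token is 'tyv_Cyrl' (not confirmed by this diff).
import torch
from transformers import AutoModelForSeq2SeqLM, NllbTokenizer

MODEL_URL = 'slone/nllb-rus-tyv-v1'
# The commit adds force_download=True here to bypass a locally cached tokenizer.
tokenizer = NllbTokenizer.from_pretrained(MODEL_URL, force_download=True)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_URL)
if torch.cuda.is_available():
    model.cuda()

def translate(text, src_lang='rus_Cyrl', tgt_lang='tyv_Cyrl'):
    # The NLLB tokenizer prepends the source language token when src_lang is set.
    tokenizer.src_lang = src_lang
    inputs = tokenizer(text, return_tensors='pt').to(model.device)
    # Force the decoder to start with the target language token.
    generated = model.generate(
        **inputs,
        forced_bos_token_id=tokenizer.convert_tokens_to_ids(tgt_lang),
        max_new_tokens=128,
    )
    return tokenizer.batch_decode(generated, skip_special_tokens=True)[0]

print(translate('Привет, как дела?'))  # one sentence at a time, per the article note
```

`force_download=True` re-downloads the tokenizer files instead of reusing the local cache, presumably because the tokenizer files in the repo had changed since they were first cached.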