Svngoku committed on
Commit
ac9b3b3
1 Parent(s): f4f48e3

Update the `app.py`

Browse files
Files changed (1) hide show
  1. app.py +32 -6
app.py CHANGED
@@ -8,13 +8,38 @@ from flores200_codes import flores_codes
8
 
9
  def load_models():
10
  # build model and tokenizer
11
- model_name_dict = {
12
- #'nllb-distilled-1.3B': 'facebook/nllb-200-distilled-1.3B',
13
- 'nllb-distilled-600M': 'facebook/nllb-200-distilled-600M',
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
  #'nllb-1.3B': 'facebook/nllb-200-1.3B',
15
  #'nllb-distilled-1.3B': 'facebook/nllb-200-distilled-1.3B',
16
  #'nllb-3.3B': 'facebook/nllb-200-3.3B',
17
- # 'nllb-distilled-600M': 'facebook/nllb-200-distilled-600M',
18
  }
19
 
20
  model_dict = {}
@@ -31,7 +56,7 @@ def load_models():
31
 
32
  def translation(source, target, text):
33
  if len(model_dict) == 2:
34
- model_name = 'nllb-distilled-1.3B'
35
 
36
  start_time = time.time()
37
  source = flores_codes[source]
@@ -53,6 +78,7 @@ def translation(source, target, text):
53
  return result
54
 
55
 
 
56
  if __name__ == '__main__':
57
  print('\tinit models')
58
 
@@ -70,7 +96,7 @@ if __name__ == '__main__':
70
 
71
  outputs = gr.outputs.JSON()
72
 
73
- title = "NLLB distilled 600M demo endpoint"
74
 
75
  demo_status = "Demo is running on CPU"
76
  description = f"Details: https://github.com/facebookresearch/fairseq/tree/nllb. {demo_status}"
 
8
 
9
  def load_models():
10
  # build model and tokenizer
11
+ model_name_dict = {'nllb-distilled-600M': 'facebook/nllb-200-distilled-600M',
12
+ #'nllb-1.3B': 'facebook/nllb-200-1.3B',
13
+ #'nllb-distilled-1.3B': 'facebook/nllb-200-distilled-1.3B',
14
+ #'nllb-3.3B': 'facebook/nllb-200-3.3B',
15
+ }
16
+
17
+ model_dict = {}
18
+
19
+ for call_name, real_name in model_name_dict.items():
20
+ print('\tLoading model: %s' % call_name)
21
+ model = AutoModelForSeq2SeqLM.from_pretrained(real_name)
22
+ tokenizer = AutoTokenizer.from_pretrained(real_name)
23
+ model_dict[call_name+'_model'] = model
24
+ model_dict[call_name+'_tokenizer'] = tokenizer
25
+
26
+ return model_dict
27
+
28
+
29
+ import os
30
+ import torch
31
+ import gradio as gr
32
+ import time
33
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
34
+ from flores200_codes import flores_codes
35
+
36
+
37
+ def load_models():
38
+ # build model and tokenizer
39
+ model_name_dict = {'nllb-distilled-600M': 'facebook/nllb-200-distilled-600M',
40
  #'nllb-1.3B': 'facebook/nllb-200-1.3B',
41
  #'nllb-distilled-1.3B': 'facebook/nllb-200-distilled-1.3B',
42
  #'nllb-3.3B': 'facebook/nllb-200-3.3B',
 
43
  }
44
 
45
  model_dict = {}
 
56
 
57
  def translation(source, target, text):
58
  if len(model_dict) == 2:
59
+ model_name = 'nllb-distilled-600M'
60
 
61
  start_time = time.time()
62
  source = flores_codes[source]
 
78
  return result
79
 
80
 
81
+
82
  if __name__ == '__main__':
83
  print('\tinit models')
84
 
 
96
 
97
  outputs = gr.outputs.JSON()
98
 
99
+ title = "NLLB distilled 600M demo"
100
 
101
  demo_status = "Demo is running on CPU"
102
  description = f"Details: https://github.com/facebookresearch/fairseq/tree/nllb. {demo_status}"