Commit 8bbc0a2 · Gabriela Nicole Gonzalez Saez committed
1 Parent(s): 9e6f123
en-sw
app.py
CHANGED
@@ -24,36 +24,42 @@ from bertviz_gradio import head_view_mod
 model_es = "Helsinki-NLP/opus-mt-en-es"
 model_fr = "Helsinki-NLP/opus-mt-en-fr"
 model_zh = "Helsinki-NLP/opus-mt-en-zh"
+model_sw = "Helsinki-NLP/opus-mt-en-sw"
 
 tokenizer_es = AutoTokenizer.from_pretrained(model_es)
 tokenizer_fr = AutoTokenizer.from_pretrained(model_fr)
 tokenizer_zh = AutoTokenizer.from_pretrained(model_zh)
+tokenizer_sw = AutoTokenizer.from_pretrained(model_sw)
 
 model_tr_es = MarianMTModel.from_pretrained(model_es)
 model_tr_fr = MarianMTModel.from_pretrained(model_fr)
 model_tr_zh = MarianMTModel.from_pretrained(model_zh)
+model_tr_sw = MarianMTModel.from_pretrained(model_sw)
 
 model_es = inseq.load_model("Helsinki-NLP/opus-mt-en-es", "input_x_gradient")
 model_fr = inseq.load_model("Helsinki-NLP/opus-mt-en-fr", "input_x_gradient")
 model_zh = inseq.load_model("Helsinki-NLP/opus-mt-en-zh", "input_x_gradient")
-
+model_sw = inseq.load_model("Helsinki-NLP/opus-mt-en-sw", "input_x_gradient")
 
 dict_models = {
     'en-es': model_es,
     'en-fr': model_fr,
     'en-zh': model_zh,
+    'en-sw': model_sw,
 }
 
 dict_models_tr = {
     'en-es': model_tr_es,
     'en-fr': model_tr_fr,
     'en-zh': model_tr_zh,
+    'en-sw': model_tr_sw,
 }
 
 dict_tokenizer_tr = {
     'en-es': tokenizer_es,
     'en-fr': tokenizer_fr,
     'en-zh': tokenizer_zh,
+    'en-sw': tokenizer_sw,
 }
 
 saliency_examples = [
@@ -152,7 +158,7 @@ with gr.Blocks(js="plotsjs_bertviz.js") as demo:
     out_text2 = gr.Textbox(visible=False)
     var2 = gr.JSON(visible=False)
     btn = gr.Button("Create sentence.")
-    radio_c = gr.Radio(choices=['en-zh', 'en-es', 'en-fr'], value="en-zh", label= '', container=False)
+    radio_c = gr.Radio(choices=['en-zh', 'en-es', 'en-fr', 'en-sw'], value="en-zh", label= '', container=False)
 
 
     with gr.Column(scale=4):
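
For context (not part of this commit), here is a minimal sketch of how the newly registered 'en-sw' entries would typically be exercised elsewhere in app.py. Only the dictionaries, tokenizers, and MarianMTModel objects come from the diff above; the helper name translate_example, the example sentence, and the generation defaults are illustrative assumptions.

# Minimal usage sketch (assumption, not in this commit): translate a sentence
# with the newly added en-sw pair via the dictionaries defined in the diff above.
def translate_example(ltr: str, sentence: str) -> str:
    tokenizer = dict_tokenizer_tr[ltr]                  # e.g. the opus-mt-en-sw tokenizer
    model = dict_models_tr[ltr]                         # the matching MarianMTModel
    inputs = tokenizer(sentence, return_tensors="pt")   # input_ids + attention_mask
    output_ids = model.generate(**inputs)               # decode with the model's default settings
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)

print(translate_example("en-sw", "The house is blue."))

Similarly, the inseq models in dict_models could back a saliency view for the new pair. attribute() and show() are the standard inseq entry points, but the exact call site used by this Space is an assumption.

# Sketch (assumption): input-x-gradient attribution with the en-sw inseq model.
attribution = dict_models["en-sw"].attribute(input_texts="The house is blue.")
attribution.show()  # renders token-level saliency for source and generated target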