Commit 78c3520 (0 parents), committed by simeon-m and maxbittker

Duplicate from maxbittker/english-tokipona-round-trip-translator


Co-authored-by: max bittker <maxbittker@users.noreply.huggingface.co>

Files changed (5)
  1. .gitattributes +34 -0
  2. .gitignore +2 -0
  3. README.md +13 -0
  4. app.py +122 -0
  5. requirements.txt +4 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,2 @@
+ .venv
+ __pycache__
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: English / toki pona Translator
+ emoji: 💬
+ colorFrom: indigo
+ colorTo: purple
+ sdk: gradio
+ sdk_version: 3.15.0
+ app_file: app.py
+ pinned: false
+ duplicated_from: maxbittker/english-tokipona-round-trip-translator
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,122 @@
+ import gradio as gr
+ import torch
+ from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
+
+ model = AutoModelForSeq2SeqLM.from_pretrained("Jayyydyyy/m2m100_418m_tokipona")
+ tokenizer = AutoTokenizer.from_pretrained("facebook/m2m100_418M")
+ device = "cuda:0" if torch.cuda.is_available() else "cpu"
+ model.to(device)
+
+ # The fine-tune reuses M2M100's "tl" language code for toki pona.
+ LANG_CODES = {"English": "en", "toki pona": "tl"}
+
+
+ def translate(text):
+     """
+     Round-trip the text: English -> toki pona -> English.
+     """
+     # src = LANG_CODES.get(src_lang)
+     # tgt = LANG_CODES.get(tgt_lang)
+
+     # First hop: English -> toki pona.
+     tokenizer.src_lang = "en"
+     tokenizer.tgt_lang = "tl"
+     ins = tokenizer(text, return_tensors="pt").to(device)
+
+     gen_args = {
+         "return_dict_in_generate": True,
+         "output_scores": True,
+         "output_hidden_states": True,
+         "length_penalty": 0.0,  # don't encourage longer or shorter output
+         "num_return_sequences": 1,
+         "num_beams": 1,
+         "forced_bos_token_id": tokenizer.lang_code_to_id["tl"],
+     }
+
+     outs = model.generate(**{**ins, **gen_args})
+     output = tokenizer.batch_decode(outs.sequences, skip_special_tokens=True)
+     text2 = "\n".join(output)
+
+     # Second hop: toki pona -> English.
+     tokenizer.src_lang = "tl"
+     tokenizer.tgt_lang = "en"
+     ins = tokenizer(text2, return_tensors="pt").to(device)
+
+     gen_args = {
+         "return_dict_in_generate": True,
+         "output_scores": True,
+         "output_hidden_states": True,
+         "length_penalty": 0.0,  # don't encourage longer or shorter output
+         "num_return_sequences": 1,
+         "num_beams": 1,
+         "forced_bos_token_id": tokenizer.lang_code_to_id["en"],
+     }
+
+     outs2 = model.generate(**{**ins, **gen_args})
+     output2 = tokenizer.batch_decode(outs2.sequences, skip_special_tokens=True)
+
+     return "\n".join(output2)
+
+
+ with gr.Blocks() as app:
+     markdown = """
+     # An English / toki pona Neural Machine Translation App!
+
+     ### toki a! 💬
+
+     This is an English to toki pona / toki pona to English neural machine translation app.
+
+     Input your text and hit "Translate!"; the app round-trips it through toki pona and back to English.
+
+     ### Grammar Regularization
+     An interesting quirk of training a many-to-many translation model is that pseudo-grammar correction
+     can be achieved by translating *from* **language A** *to* **language A**.
+
+     Remember, this can ***approximate*** grammaticality, but it isn't always the best.
+
+     For example, translating "mi li toki e toki pona" from toki pona to toki pona results in:
+     - ['mi toki e toki pona.', 'mi toki pona.', 'mi toki e toki pona']
+     - (Thus, the ungrammatical "li" is dropped.)
+
+     ### Model and Data
+     This app uses a fine-tuned version of Facebook/Meta AI's M2M100 418M-parameter model.
+
+     By leveraging the pretrained weights of the massively multilingual M2M100 model,
+     we can jumpstart transfer learning to accomplish machine translation for toki pona!
+
+     The model was fine-tuned on the English/toki pona bitexts found at [https://tatoeba.org/](https://tatoeba.org/).
+
+     ### This app is a work in progress, and obviously not all translations will be perfect.
+     In addition to parameter count and the hyperparameters used during training,
+     the *quality of the data* found on Tatoeba directly influences the performance of projects like this!
+
+     If you wish to contribute, please add high-quality and diverse translations to Tatoeba!
+     """
+
+     with gr.Row():
+         gr.Markdown(markdown)
+         with gr.Column():
+             input_text = gr.components.Textbox(
+                 label="Input Text",
+                 value="Raccoons are fascinating creatures, but I prefer opossums.",
+             )
+             # source_lang = gr.components.Dropdown(label="Source Language", value="English", choices=list(LANG_CODES.keys()))
+             # target_lang = gr.components.Dropdown(label="Target Language", value="toki pona", choices=list(LANG_CODES.keys()))
+             # return_seqs = gr.Slider(label="Number of return sequences", value=3, minimum=1, maximum=12, step=1)
+
+             inputs = [input_text]
+             outputs = gr.Textbox()
+
+             translate_btn = gr.Button("Translate! | o ante toki!")
+             translate_btn.click(translate, inputs=inputs, outputs=outputs, api_name="translate")
+
+             # Each example row must match the single remaining input component.
+             gr.Examples(
+                 [
+                     ["Hello! How are you?"],
+                     ["toki a! ilo pi ante toki ni li pona!"],
+                     ["mi li toki e toki pona"],
+                 ],
+                 inputs=inputs,
+             )
+
+ app.launch()
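For reference, a minimal sketch (not part of this commit) of the round-trip and grammar-regularization calls the app performs, assuming the same two checkpoints load locally; the helper name `hop` is illustrative, not app code:

```python
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model = AutoModelForSeq2SeqLM.from_pretrained("Jayyydyyy/m2m100_418m_tokipona")
tokenizer = AutoTokenizer.from_pretrained("facebook/m2m100_418M")
device = "cuda:0" if torch.cuda.is_available() else "cpu"
model.to(device)


def hop(text, src, tgt):
    """One translation hop between M2M100 language codes (illustrative helper)."""
    tokenizer.src_lang = src
    ins = tokenizer(text, return_tensors="pt").to(device)
    outs = model.generate(
        **ins,
        forced_bos_token_id=tokenizer.lang_code_to_id[tgt],
        num_beams=1,
        length_penalty=0.0,  # same setting as the app: no length bias
    )
    return tokenizer.batch_decode(outs, skip_special_tokens=True)[0]


# Round trip, as the app's button does: English -> toki pona -> English.
tp = hop("Raccoons are fascinating creatures.", "en", "tl")
print(tp, "->", hop(tp, "tl", "en"))

# Grammar regularization described in the app's markdown: translating
# toki pona -> toki pona drops the ungrammatical "li".
print(hop("mi li toki e toki pona", "tl", "tl"))
```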
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ torch
+ transformers
+ gradio
+ sentencepiece
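Because the click handler registers `api_name="translate"`, a running copy of this Space should also answer HTTP calls. A hedged sketch, assuming gradio 3.x's `POST /run/{api_name}` convention; the URL below is a placeholder, not the Space's actual address:

```python
import requests

SPACE_URL = "https://example-space.hf.space"  # hypothetical host for this Space

resp = requests.post(
    f"{SPACE_URL}/run/translate",
    json={"data": ["Raccoons are fascinating creatures."]},
    timeout=60,
)
resp.raise_for_status()
print(resp.json()["data"][0])  # the round-tripped English text
```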