Steven Zhang committed
Commit: ffd9653
Parent: 435f2f4

chinese update

Files changed (3)
  1. .gitignore +5 -1
  2. TestTranslationChinese/translation_model.py +252 -0
  3. app.py +14 -6
.gitignore CHANGED
@@ -129,4 +129,8 @@ dmypy.json
 .pyre/
 
 # my own ckpts from gdown
-EngToSpanishckpts/
+EngToSpanishckpts/
+cmn.txt
+re-model.h5
+TestTranslationChinese/cmn.txt
+TestTranslationChinese/re-model.h5
TestTranslationChinese/translation_model.py ADDED
@@ -0,0 +1,252 @@
+# -*- coding: utf-8 -*-
+"""Translation Model.ipynb
+
+Automatically generated by Colaboratory.
+
+Original file is located at
+    https://colab.research.google.com/drive/1njNMtQLmXo_zVtAKxA91dZJh_bxlLume
+"""
+
+import pathlib
+import random
+import string
+import h5py
+import re
+import numpy as np
+import tensorflow as tf
+from tensorflow import keras
+from tensorflow.keras import layers
+from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
+
+import gdown
+
+url = "https://drive.google.com/uc?id=1FOC2x5HlgcFTMgnGhPjvLWWlEqVTLQno"
+gdown.download(url, quiet=False)
+
+with open('cmn.txt', encoding="utf-8") as f:
+    lines = f.read().split("\n")[:-1]
+text_pairs = []
+for line in lines:
+    eng, cmn, o1 = line.split("\t")
+    text_pairs.append((eng, cmn))
+
+random.shuffle(text_pairs)
+num_val_samples = int(0.15 * len(text_pairs))
+num_train_samples = len(text_pairs) - 2 * num_val_samples
+train_pairs = text_pairs[:num_train_samples]
+val_pairs = text_pairs[num_train_samples : num_train_samples + num_val_samples]
+test_pairs = text_pairs[num_train_samples + num_val_samples :]
+
+strip_chars = string.punctuation + "¿"
+strip_chars = strip_chars.replace("[", "")
+strip_chars = strip_chars.replace("]", "")
+
+vocab_size = 15000
+sequence_length = 20
+batch_size = 64
+
+eng_vectorization = TextVectorization(
+    max_tokens=vocab_size,
+    output_mode="int",
+    output_sequence_length=sequence_length,
+)
+cmn_vectorization = TextVectorization(
+    max_tokens=vocab_size,
+    output_mode="int",
+    split='character',
+    output_sequence_length=sequence_length + 1,
+    standardize='strip_punctuation',
+)
+train_eng_texts = [pair[0] for pair in train_pairs]
+train_cmn_texts = [pair[1] for pair in train_pairs]
+eng_vectorization.adapt(train_eng_texts)
+cmn_vectorization.adapt(train_cmn_texts)
+
+def format_dataset(eng, cmn):
+    eng = eng_vectorization(eng)
+    cmn = cmn_vectorization(cmn)
+    return (
+        {
+            "encoder_inputs": eng,
+            "decoder_inputs": cmn[:, :-1],
+        },
+        cmn[:, 1:],
+    )
+
+
+def make_dataset(pairs):
+    eng_texts, cmn_texts = zip(*pairs)
+    eng_texts = list(eng_texts)
+    cmn_texts = list(cmn_texts)
+    dataset = tf.data.Dataset.from_tensor_slices((eng_texts, cmn_texts))
+    dataset = dataset.batch(batch_size)
+    dataset = dataset.map(format_dataset)
+    return dataset.shuffle(2048).prefetch(16).cache()
+
+
+train_ds = make_dataset(train_pairs)
+val_ds = make_dataset(val_pairs)
+
+class TransformerEncoder(layers.Layer):
+    def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):
+        super(TransformerEncoder, self).__init__(**kwargs)
+        self.embed_dim = embed_dim
+        self.dense_dim = dense_dim
+        self.num_heads = num_heads
+        self.attention = layers.MultiHeadAttention(
+            num_heads=num_heads, key_dim=embed_dim
+        )
+        self.dense_proj = keras.Sequential(
+            [
+                layers.Dense(dense_dim, activation="relu"),
+                layers.Dense(embed_dim),
+            ]
+        )
+        self.layernorm_1 = layers.LayerNormalization()
+        self.layernorm_2 = layers.LayerNormalization()
+        self.supports_masking = True
+
+    def call(self, inputs, mask=None):
+        if mask is not None:
+            padding_mask = tf.cast(mask[:, tf.newaxis, tf.newaxis, :], dtype="int32")
+        attention_output = self.attention(
+            query=inputs, value=inputs, key=inputs, attention_mask=padding_mask
+        )
+        proj_input = self.layernorm_1(inputs + attention_output)
+        proj_output = self.dense_proj(proj_input)
+        return self.layernorm_2(proj_input + proj_output)
+
+    def get_config(self):
+        config = super().get_config()
+        config.update({
+            "embed_dim": self.embed_dim,
+            "dense_dim": self.dense_dim,
+            "num_heads": self.num_heads,
+        })
+        return config
+
+
+class PositionalEmbedding(layers.Layer):
+    def __init__(self, sequence_length, vocab_size, embed_dim, **kwargs):
+        super(PositionalEmbedding, self).__init__(**kwargs)
+        self.token_embeddings = layers.Embedding(
+            input_dim=vocab_size, output_dim=embed_dim
+        )
+        self.position_embeddings = layers.Embedding(
+            input_dim=sequence_length, output_dim=embed_dim
+        )
+        self.sequence_length = sequence_length
+        self.vocab_size = vocab_size
+        self.embed_dim = embed_dim
+
+    def call(self, inputs):
+        length = tf.shape(inputs)[-1]
+        positions = tf.range(start=0, limit=length, delta=1)
+        embedded_tokens = self.token_embeddings(inputs)
+        embedded_positions = self.position_embeddings(positions)
+        return embedded_tokens + embedded_positions
+
+    def compute_mask(self, inputs, mask=None):
+        return tf.math.not_equal(inputs, 0)
+
+    def get_config(self):
+        config = super().get_config()
+        config.update({
+            "sequence_length": self.sequence_length,
+            "vocab_size": self.vocab_size,
+            "embed_dim": self.embed_dim,
+        })
+        return config
+
+
+class TransformerDecoder(layers.Layer):
+    def __init__(self, embed_dim, latent_dim, num_heads, **kwargs):
+        super(TransformerDecoder, self).__init__(**kwargs)
+        self.embed_dim = embed_dim
+        self.latent_dim = latent_dim
+        self.num_heads = num_heads
+        self.attention_1 = layers.MultiHeadAttention(
+            num_heads=num_heads, key_dim=embed_dim
+        )
+        self.attention_2 = layers.MultiHeadAttention(
+            num_heads=num_heads, key_dim=embed_dim
+        )
+        self.dense_proj = keras.Sequential(
+            [
+                layers.Dense(latent_dim, activation="relu"),
+                layers.Dense(embed_dim),
+            ]
+        )
+        self.layernorm_1 = layers.LayerNormalization()
+        self.layernorm_2 = layers.LayerNormalization()
+        self.layernorm_3 = layers.LayerNormalization()
+        self.supports_masking = True
+
+    def call(self, inputs, encoder_outputs, mask=None):
+        causal_mask = self.get_causal_attention_mask(inputs)
+        if mask is not None:
+            padding_mask = tf.cast(mask[:, tf.newaxis, :], dtype="int32")
+            padding_mask = tf.minimum(padding_mask, causal_mask)
+
+        attention_output_1 = self.attention_1(
+            query=inputs, value=inputs, key=inputs, attention_mask=causal_mask
+        )
+        out_1 = self.layernorm_1(inputs + attention_output_1)
+
+        attention_output_2 = self.attention_2(
+            query=out_1,
+            value=encoder_outputs,
+            key=encoder_outputs,
+            attention_mask=padding_mask,
+        )
+        out_2 = self.layernorm_2(out_1 + attention_output_2)
+
+        proj_output = self.dense_proj(out_2)
+        return self.layernorm_3(out_2 + proj_output)
+
+    def get_causal_attention_mask(self, inputs):
+        input_shape = tf.shape(inputs)
+        batch_size, sequence_length = input_shape[0], input_shape[1]
+        i = tf.range(sequence_length)[:, tf.newaxis]
+        j = tf.range(sequence_length)
+        mask = tf.cast(i >= j, dtype="int32")
+        mask = tf.reshape(mask, (1, input_shape[1], input_shape[1]))
+        mult = tf.concat(
+            [tf.expand_dims(batch_size, -1), tf.constant([1, 1], dtype=tf.int32)],
+            axis=0,
+        )
+        return tf.tile(mask, mult)
+
+    def get_config(self):
+        config = super().get_config()
+        config.update({
+            "embed_dim": self.embed_dim,
+            "latent_dim": self.latent_dim,
+            "num_heads": self.num_heads,
+        })
+        return config
+
+
+url = "https://drive.google.com/uc?id=1a4eTAL4sLUi42P28Veihrv-fVPwFymTa"
+gdown.download(url, quiet=False)
+
+custom_objects = {"TransformerEncoder": TransformerEncoder, "PositionalEmbedding": PositionalEmbedding, "TransformerDecoder": TransformerDecoder}
+with keras.utils.custom_object_scope(custom_objects):
+    transformer = tf.keras.models.load_model('re-model.h5')
+
+cmn_vocab = cmn_vectorization.get_vocabulary()
+cmn_index_lookup = dict(zip(range(len(cmn_vocab)), cmn_vocab))
+max_decoded_sentence_length = 20
+
+
+def decode_sequence_chinese(input_sentence):
+    tokenized_input_sentence = eng_vectorization([input_sentence])
+    decoded_sentence = "[start]"
+    for i in range(max_decoded_sentence_length):
+        tokenized_target_sentence = cmn_vectorization([decoded_sentence])[:, :-1]
+        predictions = transformer([tokenized_input_sentence, tokenized_target_sentence])
+
+        sampled_token_index = np.argmax(predictions[0, i, :])
+        sampled_token = cmn_index_lookup[sampled_token_index]
+        decoded_sentence += " " + sampled_token
+
+        if sampled_token == "[end]":
+            break
+    return decoded_sentence
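
For reference, a minimal usage sketch of the new module (not part of this commit, and assuming the gdown downloads of cmn.txt and re-model.h5 succeed when the module is imported). The decoded string carries a leading "[start] " prefix of 8 characters, which is what the [8:] slice in app.py removes:

# Hypothetical usage sketch -- importing the module triggers the downloads,
# builds and adapts the vectorizers, and loads re-model.h5 before any call.
from TestTranslationChinese.translation_model import decode_sequence_chinese

decoded = decode_sequence_chinese("How are you?")
print(decoded)      # e.g. "[start] 你 好 吗 [end]" (illustrative; space-separated character tokens)
print(decoded[8:])  # strip the 8-character "[start] " prefix, as app.py does
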
app.py CHANGED
@@ -2,20 +2,28 @@
 import streamlit as st
 from Autocorrect.autocorrectreal import edit
 from TestTranslation.translation import *
-
+from TestTranslationChinese.translation_model import decode_sequence_chinese
 
 
 st.title("Translation model test")
 
 option = st.selectbox("Select input type:", ("text input", "audio input"))
+option2 = st.selectbox("Select translation language:", ("Spanish", "Chinese"))
 if option == "text input":
     input_sentence = st.text_input("Enter input sentence:")
     if input_sentence is not None and len(input_sentence) > 0:
-        edited = edit(input_sentence)
-        st.write("Autocorrected sentence: " + edited)
-        translated = decode_sequence(edited)[8:-5]
-        st.write(translated)
-        input_sentence = None
+        if option2 == "Spanish":
+            edited = edit(input_sentence)
+            st.write("Autocorrected sentence: " + edited)
+            translated = decode_sequence(edited)[8:-5]
+            st.write(translated)
+            input_sentence = None
+        else:
+            edited = edit(input_sentence)
+            st.write("Autocorrected sentence: " + edited)
+            translated = decode_sequence_chinese(edited)[8:]
+            st.write(translated)
+            input_sentence = None
 else:
     wav_sentence = st.file_uploader("Upload a wav file:")
     st.button("Submit wav file")
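
The two text-input branches differ only in which decoder is called and how its output is sliced; a possible follow-up, sketched here only (not part of this commit), would hoist the shared autocorrect step out of the per-language branches:

# Sketch only -- behaviour matches the committed version, with the shared
# autocorrect/display steps hoisted above the language check.
if option == "text input":
    input_sentence = st.text_input("Enter input sentence:")
    if input_sentence:  # equivalent to the "is not None and len(...) > 0" check
        edited = edit(input_sentence)
        st.write("Autocorrected sentence: " + edited)
        if option2 == "Spanish":
            translated = decode_sequence(edited)[8:-5]
        else:
            translated = decode_sequence_chinese(edited)[8:]
        st.write(translated)
        input_sentence = None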