reputation committed on
Commit 2189a68
Parent: 8c433a0

Upload 7 files

dataset/.ipynb_checkpoints/data-checkpoint.csv ADDED
The diff for this file is too large to render. See raw diff
 
dataset/data.csv ADDED
The diff for this file is too large to render. See raw diff
 
dataset/english.txt ADDED
The diff for this file is too large to render. See raw diff
 
dataset/spanish.txt ADDED
The diff for this file is too large to render. See raw diff
 
englishTOspanish.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:21fc375bda44d14de08b0859f23ceb00ef36c6f2ca46846834582281d9c47f06
+ size 30598354
transformer.py ADDED
@@ -0,0 +1,303 @@
+ import numpy as np
+ import torch
+ import math
+ from torch import nn
+ import torch.nn.functional as F
+
+ def get_device():
+     return torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
+
+ def scaled_dot_product(q, k, v, mask=None):
+     d_k = q.size()[-1]
+     scaled = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(d_k)
+     if mask is not None:
+         scaled = scaled.permute(1, 0, 2, 3) + mask
+         scaled = scaled.permute(1, 0, 2, 3)
+     attention = F.softmax(scaled, dim=-1)
+     values = torch.matmul(attention, v)
+     return values, attention
+
+ class PositionalEncoding(nn.Module):
+     def __init__(self, d_model, max_sequence_length):
+         super().__init__()
+         self.max_sequence_length = max_sequence_length
+         self.d_model = d_model
+
+     def forward(self):
+         even_i = torch.arange(0, self.d_model, 2).float()
+         denominator = torch.pow(10000, even_i/self.d_model)
+         position = (torch.arange(self.max_sequence_length)
+                           .reshape(self.max_sequence_length, 1))
+         even_PE = torch.sin(position / denominator)
+         odd_PE = torch.cos(position / denominator)
+         stacked = torch.stack([even_PE, odd_PE], dim=2)
+         PE = torch.flatten(stacked, start_dim=1, end_dim=2)
+         return PE
+
+ class SentenceEmbedding(nn.Module):
+     def __init__(self, max_sequence_length, d_model, language_to_index, START_TOKEN, END_TOKEN, PADDING_TOKEN):
+         super().__init__()
+         self.vocab_size = len(language_to_index)
+         self.max_sequence_length = max_sequence_length
+         self.embedding = nn.Embedding(self.vocab_size, d_model)
+         self.language_to_index = language_to_index
+         self.position_encoder = PositionalEncoding(d_model, max_sequence_length)
+         self.dropout = nn.Dropout(p=0.1)
+         self.START_TOKEN = START_TOKEN
+         self.END_TOKEN = END_TOKEN
+         self.PADDING_TOKEN = PADDING_TOKEN
+
+     def batch_tokenize(self, batch, start_token, end_token):
+
+         def tokenize(sentence, start_token, end_token):
+             sentence_word_indicies = [self.language_to_index[token] for token in list(sentence)]
+             if start_token:
+                 sentence_word_indicies.insert(0, self.language_to_index[self.START_TOKEN])
+             if end_token:
+                 sentence_word_indicies.append(self.language_to_index[self.END_TOKEN])
+             for _ in range(len(sentence_word_indicies), self.max_sequence_length):
+                 sentence_word_indicies.append(self.language_to_index[self.PADDING_TOKEN])
+             return torch.tensor(sentence_word_indicies)
+
+         tokenized = []
+         for sentence_num in range(len(batch)):
+             tokenized.append( tokenize(batch[sentence_num], start_token, end_token) )
+         tokenized = torch.stack(tokenized)
+         return tokenized.to(get_device())
+
+     def forward(self, x, start_token, end_token):
+         x = self.batch_tokenize(x, start_token, end_token)
+         x = self.embedding(x)
+         pos = self.position_encoder().to(get_device())
+         x = self.dropout(x + pos)
+         return x
+
+
+ class MultiHeadAttention(nn.Module):
+     def __init__(self, d_model, num_heads):
+         super().__init__()
+         self.d_model = d_model
+         self.num_heads = num_heads
+         self.head_dim = d_model // num_heads
+         self.qkv_layer = nn.Linear(d_model, 3 * d_model)
+         self.linear_layer = nn.Linear(d_model, d_model)
+
+     def forward(self, x, mask):
+         batch_size, sequence_length, d_model = x.size()
+         qkv = self.qkv_layer(x)
+         qkv = qkv.reshape(batch_size, sequence_length, self.num_heads, 3 * self.head_dim)
+         qkv = qkv.permute(0, 2, 1, 3)
+         q, k, v = qkv.chunk(3, dim=-1)
+         values, attention = scaled_dot_product(q, k, v, mask)
+         values = values.permute(0, 2, 1, 3).reshape(batch_size, sequence_length, self.num_heads * self.head_dim)
+         out = self.linear_layer(values)
+         return out
+
+
+ class LayerNormalization(nn.Module):
+     def __init__(self, parameters_shape, eps=1e-5):
+         super().__init__()
+         self.parameters_shape = parameters_shape
+         self.eps = eps
+         self.gamma = nn.Parameter(torch.ones(parameters_shape))
+         self.beta = nn.Parameter(torch.zeros(parameters_shape))
+
+     def forward(self, inputs):
+         dims = [-(i + 1) for i in range(len(self.parameters_shape))]
+         mean = inputs.mean(dim=dims, keepdim=True)
+         var = ((inputs - mean) ** 2).mean(dim=dims, keepdim=True)
+         std = (var + self.eps).sqrt()
+         y = (inputs - mean) / std
+         out = self.gamma * y + self.beta
+         return out
+
+
+ class PositionwiseFeedForward(nn.Module):
+     def __init__(self, d_model, hidden, drop_prob=0.1):
+         super(PositionwiseFeedForward, self).__init__()
+         self.linear1 = nn.Linear(d_model, hidden)
+         self.linear2 = nn.Linear(hidden, d_model)
+         self.relu = nn.ReLU()
+         self.dropout = nn.Dropout(p=drop_prob)
+
+     def forward(self, x):
+         x = self.linear1(x)
+         x = self.relu(x)
+         x = self.dropout(x)
+         x = self.linear2(x)
+         return x
+
+
+ class EncoderLayer(nn.Module):
+     def __init__(self, d_model, ffn_hidden, num_heads, drop_prob):
+         super(EncoderLayer, self).__init__()
+         self.attention = MultiHeadAttention(d_model=d_model, num_heads=num_heads)
+         self.norm1 = LayerNormalization(parameters_shape=[d_model])
+         self.dropout1 = nn.Dropout(p=drop_prob)
+         self.ffn = PositionwiseFeedForward(d_model=d_model, hidden=ffn_hidden, drop_prob=drop_prob)
+         self.norm2 = LayerNormalization(parameters_shape=[d_model])
+         self.dropout2 = nn.Dropout(p=drop_prob)
+
+     def forward(self, x, self_attention_mask):
+         residual_x = x.clone()
+         x = self.attention(x, mask=self_attention_mask)
+         x = self.dropout1(x)
+         x = self.norm1(x + residual_x)
+         residual_x = x.clone()
+         x = self.ffn(x)
+         x = self.dropout2(x)
+         x = self.norm2(x + residual_x)
+         return x
+
+ class SequentialEncoder(nn.Sequential):
+     def forward(self, *inputs):
+         x, self_attention_mask = inputs
+         for module in self._modules.values():
+             x = module(x, self_attention_mask)
+         return x
+
+ class Encoder(nn.Module):
+     def __init__(self,
+                  d_model,
+                  ffn_hidden,
+                  num_heads,
+                  drop_prob,
+                  num_layers,
+                  max_sequence_length,
+                  language_to_index,
+                  START_TOKEN,
+                  END_TOKEN,
+                  PADDING_TOKEN):
+         super().__init__()
+         self.sentence_embedding = SentenceEmbedding(max_sequence_length, d_model, language_to_index, START_TOKEN, END_TOKEN, PADDING_TOKEN)
+         self.layers = SequentialEncoder(*[EncoderLayer(d_model, ffn_hidden, num_heads, drop_prob)
+                                           for _ in range(num_layers)])
+
+     def forward(self, x, self_attention_mask, start_token, end_token):
+         x = self.sentence_embedding(x, start_token, end_token)
+         x = self.layers(x, self_attention_mask)
+         return x
+
+
+ class MultiHeadCrossAttention(nn.Module):
+     def __init__(self, d_model, num_heads):
+         super().__init__()
+         self.d_model = d_model
+         self.num_heads = num_heads
+         self.head_dim = d_model // num_heads
+         self.kv_layer = nn.Linear(d_model, 2 * d_model)
+         self.q_layer = nn.Linear(d_model, d_model)
+         self.linear_layer = nn.Linear(d_model, d_model)
+
+     def forward(self, x, y, mask):
+         batch_size, sequence_length, d_model = x.size()
+         kv = self.kv_layer(x)
+         q = self.q_layer(y)
+         kv = kv.reshape(batch_size, sequence_length, self.num_heads, 2 * self.head_dim)
+         q = q.reshape(batch_size, sequence_length, self.num_heads, self.head_dim)
+         kv = kv.permute(0, 2, 1, 3)
+         q = q.permute(0, 2, 1, 3)
+         k, v = kv.chunk(2, dim=-1)
+         values, attention = scaled_dot_product(q, k, v, mask)
+         values = values.permute(0, 2, 1, 3).reshape(batch_size, sequence_length, d_model)
+         out = self.linear_layer(values)
+         return out
+
+
+ class DecoderLayer(nn.Module):
+     def __init__(self, d_model, ffn_hidden, num_heads, drop_prob):
+         super(DecoderLayer, self).__init__()
+         self.self_attention = MultiHeadAttention(d_model=d_model, num_heads=num_heads)
+         self.layer_norm1 = LayerNormalization(parameters_shape=[d_model])
+         self.dropout1 = nn.Dropout(p=drop_prob)
+
+         self.encoder_decoder_attention = MultiHeadCrossAttention(d_model=d_model, num_heads=num_heads)
+         self.layer_norm2 = LayerNormalization(parameters_shape=[d_model])
+         self.dropout2 = nn.Dropout(p=drop_prob)
+
+         self.ffn = PositionwiseFeedForward(d_model=d_model, hidden=ffn_hidden, drop_prob=drop_prob)
+         self.layer_norm3 = LayerNormalization(parameters_shape=[d_model])
+         self.dropout3 = nn.Dropout(p=drop_prob)
+
+     def forward(self, x, y, self_attention_mask, cross_attention_mask):
+         _y = y.clone()
+         y = self.self_attention(y, mask=self_attention_mask)
+         y = self.dropout1(y)
+         y = self.layer_norm1(y + _y)
+
+         _y = y.clone()
+         y = self.encoder_decoder_attention(x, y, mask=cross_attention_mask)
+         y = self.dropout2(y)
+         y = self.layer_norm2(y + _y)
+
+         _y = y.clone()
+         y = self.ffn(y)
+         y = self.dropout3(y)
+         y = self.layer_norm3(y + _y)
+         return y
+
+
+ class SequentialDecoder(nn.Sequential):
+     def forward(self, *inputs):
+         x, y, self_attention_mask, cross_attention_mask = inputs
+         for module in self._modules.values():
+             y = module(x, y, self_attention_mask, cross_attention_mask)
+         return y
+
+ class Decoder(nn.Module):
+     def __init__(self,
+                  d_model,
+                  ffn_hidden,
+                  num_heads,
+                  drop_prob,
+                  num_layers,
+                  max_sequence_length,
+                  language_to_index,
+                  START_TOKEN,
+                  END_TOKEN,
+                  PADDING_TOKEN):
+         super().__init__()
+         self.sentence_embedding = SentenceEmbedding(max_sequence_length, d_model, language_to_index, START_TOKEN, END_TOKEN, PADDING_TOKEN)
+         self.layers = SequentialDecoder(*[DecoderLayer(d_model, ffn_hidden, num_heads, drop_prob) for _ in range(num_layers)])
+
+     def forward(self, x, y, self_attention_mask, cross_attention_mask, start_token, end_token):
+         y = self.sentence_embedding(y, start_token, end_token)
+         y = self.layers(x, y, self_attention_mask, cross_attention_mask)
+         return y
+
+
+ class Transformer(nn.Module):
+     def __init__(self,
+                  d_model,
+                  ffn_hidden,
+                  num_heads,
+                  drop_prob,
+                  num_layers,
+                  max_sequence_length,
+                  kn_vocab_size,
+                  english_to_index,
+                  kannada_to_index,
+                  START_TOKEN,
+                  END_TOKEN,
+                  PADDING_TOKEN
+                  ):
+         super().__init__()
+         self.encoder = Encoder(d_model, ffn_hidden, num_heads, drop_prob, num_layers, max_sequence_length, english_to_index, START_TOKEN, END_TOKEN, PADDING_TOKEN)
+         self.decoder = Decoder(d_model, ffn_hidden, num_heads, drop_prob, num_layers, max_sequence_length, kannada_to_index, START_TOKEN, END_TOKEN, PADDING_TOKEN)
+         self.linear = nn.Linear(d_model, kn_vocab_size)
+         self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
+
+     def forward(self,
+                 x,
+                 y,
+                 encoder_self_attention_mask=None,
+                 decoder_self_attention_mask=None,
+                 decoder_cross_attention_mask=None,
+                 enc_start_token=False,
+                 enc_end_token=False,
+                 dec_start_token=False,
+                 dec_end_token=False):
+         x = self.encoder(x, encoder_self_attention_mask, start_token=enc_start_token, end_token=enc_end_token)
+         out = self.decoder(x, y, decoder_self_attention_mask, decoder_cross_attention_mask, start_token=dec_start_token, end_token=dec_end_token)
+         out = self.linear(out)
+         return out
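
Usage note: below is a minimal sketch of how the Transformer class added in transformer.py might be instantiated and called. The hyperparameters, toy character vocabularies, special-token strings, and batch contents are illustrative assumptions, not values taken from this commit; the actual settings presumably live in transformer_train.ipynb, and the trained weights in englishTOspanish.pt.

import torch
from transformer import Transformer, get_device

# Illustrative assumptions only -- not read from this commit.
START_TOKEN, END_TOKEN, PADDING_TOKEN = '<START>', '<END>', '<PAD>'

# SentenceEmbedding tokenizes with list(sentence), i.e. character level,
# so each vocabulary maps characters (plus the special tokens) to indices.
chars = [START_TOKEN, ' ', 'a', 'b', 'c', END_TOKEN, PADDING_TOKEN]
english_to_index = {ch: i for i, ch in enumerate(chars)}
spanish_to_index = {ch: i for i, ch in enumerate(chars)}

model = Transformer(
    d_model=512, ffn_hidden=2048, num_heads=8, drop_prob=0.1,
    num_layers=1, max_sequence_length=200,
    kn_vocab_size=len(spanish_to_index),   # decoder-side vocabulary size
    english_to_index=english_to_index,
    kannada_to_index=spanish_to_index,     # parameter name kept from transformer.py
    START_TOKEN=START_TOKEN, END_TOKEN=END_TOKEN, PADDING_TOKEN=PADDING_TOKEN,
).to(get_device())

# Raw strings go in; batch_tokenize handles indexing and padding.
eng_batch = ['abc ab', 'cab']
spa_batch = ['bca ba', 'ab c']

# Masks are omitted here; scaled_dot_product adds a mask that must broadcast to
# (num_heads, batch, seq_len, seq_len) after its permute, e.g. one of shape
# (batch, seq_len, seq_len).
logits = model(eng_batch, spa_batch, dec_start_token=True, dec_end_token=True)
print(logits.shape)  # (batch, max_sequence_length, kn_vocab_size)

# The committed checkpoint could then be restored, assuming it was saved as a
# state_dict (the serialization format is not visible from the LFS pointer):
# model.load_state_dict(torch.load('englishTOspanish.pt', map_location=get_device()))

With masks left at None the attention is unmasked; a padding/look-ahead mask built per batch, as the training notebook presumably does, plugs into the same forward signature.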
transformer_train.ipynb ADDED
The diff for this file is too large to render. See raw diff