bu1 committed
Commit 6ce1a2a · verified · 1 Parent(s): d725bef

Upload model

config.json CHANGED
@@ -2,6 +2,10 @@
   "architectures": [
     "transformerModel"
   ],
+  "auto_map": {
+    "AutoConfig": "configuration_transformer.transformerConfig",
+    "AutoModel": "modeling_transformer.transformerModel"
+  },
   "batch_size": 64,
   "dropout": 0.1,
   "ffn_num_hiddens": 64,
@@ -18,6 +22,8 @@
   "num_layers": 2,
   "num_steps": 10,
   "query_size": 32,
+  "src_vocab_len": 184,
+  "tgt_vocab": 201,
   "torch_dtype": "float32",
   "transformers_version": "4.45.2",
   "value_size": 32
configuration_transformer.py ADDED
@@ -0,0 +1,59 @@
+ from transformers import PretrainedConfig
+
+
+ class transformerConfig(PretrainedConfig):
+     model_type = "custom_transformer"
+
+     def __init__(
+         self,
+         src_vocab_len: int = 184,
+         tgt_vocab: int = 201,
+         num_hiddens: int = 32,
+         num_layers: int = 2,
+         dropout: float = 0.1,
+         batch_size: int = 64,
+         num_steps: int = 10,
+         lr: float = 0.005,
+         num_epochs: int = 200,
+         ffn_num_input: int = 32,
+         ffn_num_hiddens: int = 64,
+         num_heads: int = 4,
+         key_size: int = 32,
+         query_size: int = 32,
+         value_size: int = 32,
+         norm_shape: list = [32],
+         **kwargs,
+     ):
+         self.src_vocab_len = src_vocab_len
+         self.tgt_vocab = tgt_vocab
+         self.num_hiddens = num_hiddens
+         self.num_layers = num_layers
+         self.dropout = dropout
+         self.batch_size = batch_size
+         self.num_steps = num_steps
+         self.lr = lr
+         self.num_epochs = num_epochs
+         self.ffn_num_input = ffn_num_input
+         self.ffn_num_hiddens = ffn_num_hiddens
+         self.num_heads = num_heads
+         self.key_size = key_size
+         self.query_size = query_size
+         self.value_size = value_size
+         self.norm_shape = norm_shape
+
+         super().__init__(**kwargs)
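
For reference, `auto_map` entries like the ones added to config.json are normally produced by registering the custom classes before saving; a hedged sketch, assuming both modules are importable locally (the output directory name is made up):

from configuration_transformer import transformerConfig
from modeling_transformer import transformerModel

# Registering for the auto API makes save_pretrained / push_to_hub copy the two
# Python files into the checkpoint and write the "auto_map" entries in config.json.
transformerConfig.register_for_auto_class()
transformerModel.register_for_auto_class("AutoModel")

config = transformerConfig()                # defaults: src_vocab_len=184, tgt_vocab=201, ...
model = transformerModel(config)
model.save_pretrained("local_checkpoint")   # hypothetical output directory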
model.safetensors CHANGED
@@ -1,3 +1,3 @@
   version https://git-lfs.github.com/spec/v1
-  oid sha256:bde2659df97ac0e0dd046dc237522f336e5b3270b8622be9efb2672f69ebaa26
+  oid sha256:b762859c138e7e8ccc36f5d3fee30bfacfba5417f6843a51a7087e607bd31173
   size 250204
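
Only the LFS pointer changes here; the weights file keeps the same byte size. If needed, a local download can be checked against the new pointer, since the LFS oid is simply the SHA-256 of the file contents (the path below assumes the file sits in the working directory):

import hashlib

# Hash the downloaded weights and compare against the oid in the pointer above.
with open("model.safetensors", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
print(digest == "b762859c138e7e8ccc36f5d3fee30bfacfba5417f6843a51a7087e607bd31173")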
modeling_transformer.py ADDED
@@ -0,0 +1,355 @@
+ from transformers import PreTrainedModel
+
+ import math
+ import torch
+ from torch import nn
+
+
+ class PositionWiseFFN(nn.Module):
+     """Position-wise feed-forward network"""
+     def __init__(self, ffn_num_input, ffn_num_hiddens, ffn_num_outputs,
+                  **kwargs):
+         super(PositionWiseFFN, self).__init__(**kwargs)
+         self.dense1 = nn.Linear(ffn_num_input, ffn_num_hiddens)
+         self.relu = nn.ReLU()
+         self.dense2 = nn.Linear(ffn_num_hiddens, ffn_num_outputs)
+
+     def forward(self, X):
+         return self.dense2(self.relu(self.dense1(X)))
+
+
+ def transpose_qkv(X, num_heads):
+     """Reshape the input for parallel computation of multiple attention heads.
+
+     Defined in :numref:`sec_multihead-attention`"""
+     # Input X shape: (batch_size, no. of queries or key-value pairs, num_hiddens)
+     # Output X shape: (batch_size, no. of queries or key-value pairs, num_heads,
+     # num_hiddens / num_heads)
+     X = X.reshape(X.shape[0], X.shape[1], num_heads, -1)
+
+     # Output X shape: (batch_size, num_heads, no. of queries or key-value pairs,
+     # num_hiddens / num_heads)
+     X = X.permute(0, 2, 1, 3)
+
+     # Final output shape: (batch_size * num_heads, no. of queries or key-value
+     # pairs, num_hiddens / num_heads)
+     return X.reshape(-1, X.shape[2], X.shape[3])
+
+
+ def transpose_output(X, num_heads):
+     """Reverse the operation of transpose_qkv.
+
+     Defined in :numref:`sec_multihead-attention`"""
+     X = X.reshape(-1, num_heads, X.shape[1], X.shape[2])
+     X = X.permute(0, 2, 1, 3)
+     return X.reshape(X.shape[0], X.shape[1], -1)
+
+
+ def sequence_mask(X, valid_len, value=0):
+     """Mask irrelevant entries in sequences.
+
+     Defined in :numref:`sec_seq2seq_decoder`"""
+     maxlen = X.size(1)
+     mask = torch.arange((maxlen), dtype=torch.float32,
+                         device=X.device)[None, :] < valid_len[:, None]
+     X[~mask] = value
+     return X
+
+
+ def masked_softmax(X, valid_lens):
+     """Perform softmax by masking elements on the last axis.
+
+     Defined in :numref:`sec_attention-scoring-functions`"""
+     # X: 3D tensor, valid_lens: 1D or 2D tensor
+     if valid_lens is None:
+         return nn.functional.softmax(X, dim=-1)
+     else:
+         shape = X.shape
+         if valid_lens.dim() == 1:
+             valid_lens = torch.repeat_interleave(valid_lens, shape[1])
+         else:
+             valid_lens = valid_lens.reshape(-1)
+         # Masked elements on the last axis are replaced with a very large
+         # negative value so that their softmax output is 0
+         X = sequence_mask(X.reshape(-1, shape[-1]), valid_lens,
+                           value=-1e6)
+         return nn.functional.softmax(X.reshape(shape), dim=-1)
+
+
+ class DotProductAttention(nn.Module):
+     """Scaled dot-product attention.
+
+     Defined in :numref:`subsec_additive-attention`"""
+     def __init__(self, dropout, **kwargs):
+         super(DotProductAttention, self).__init__(**kwargs)
+         self.dropout = nn.Dropout(dropout)
+
+     # Shape of queries: (batch_size, no. of queries, d)
+     # Shape of keys: (batch_size, no. of key-value pairs, d)
+     # Shape of values: (batch_size, no. of key-value pairs, value dimension)
+     # Shape of valid_lens: (batch_size,) or (batch_size, no. of queries)
+     def forward(self, queries, keys, values, valid_lens=None):
+         d = queries.shape[-1]
+         # Swap the last two dimensions of keys with transpose(1, 2)
+         scores = torch.bmm(queries, keys.transpose(1, 2)) / math.sqrt(d)
+         self.attention_weights = masked_softmax(scores, valid_lens)
+         return torch.bmm(self.dropout(self.attention_weights), values)
+
+
+ class MultiHeadAttention(nn.Module):
+     """Multi-head attention.
+
+     Defined in :numref:`sec_multihead-attention`"""
+     def __init__(self, key_size, query_size, value_size, num_hiddens,
+                  num_heads, dropout, bias=False, **kwargs):
+         super(MultiHeadAttention, self).__init__(**kwargs)
+         self.num_heads = num_heads
+         self.attention = DotProductAttention(dropout)
+         self.W_q = nn.Linear(query_size, num_hiddens, bias=bias)
+         self.W_k = nn.Linear(key_size, num_hiddens, bias=bias)
+         self.W_v = nn.Linear(value_size, num_hiddens, bias=bias)
+         self.W_o = nn.Linear(num_hiddens, num_hiddens, bias=bias)
+
+     def forward(self, queries, keys, values, valid_lens):
+         # Shape of queries, keys, values:
+         # (batch_size, no. of queries or key-value pairs, num_hiddens)
+         # Shape of valid_lens: (batch_size,) or (batch_size, no. of queries)
+         # After transposing, shape of queries, keys, values:
+         # (batch_size * num_heads, no. of queries or key-value pairs,
+         # num_hiddens / num_heads)
+         queries = transpose_qkv(self.W_q(queries), self.num_heads)
+         keys = transpose_qkv(self.W_k(keys), self.num_heads)
+         values = transpose_qkv(self.W_v(values), self.num_heads)
+
+         if valid_lens is not None:
+             # On axis 0, copy the first item (scalar or vector) num_heads times,
+             # then copy the next item, and so on
+             valid_lens = torch.repeat_interleave(
+                 valid_lens, repeats=self.num_heads, dim=0)
+
+         # Shape of output: (batch_size * num_heads, no. of queries,
+         # num_hiddens / num_heads)
+         output = self.attention(queries, keys, values, valid_lens)
+
+         # Shape of output_concat: (batch_size, no. of queries, num_hiddens)
+         output_concat = transpose_output(output, self.num_heads)
+         return self.W_o(output_concat)
+
+
+ class AddNorm(nn.Module):
+     """Residual connection followed by layer normalization"""
+     def __init__(self, normalized_shape, dropout, **kwargs):
+         super(AddNorm, self).__init__(**kwargs)
+         self.dropout = nn.Dropout(dropout)
+         self.ln = nn.LayerNorm(normalized_shape)
+
+     def forward(self, X, Y):
+         return self.ln(self.dropout(Y) + X)
+
+
+ # Encoder: no layer in the Transformer encoder changes the shape of its input
+ class EncoderBlock(nn.Module):
+     """Transformer encoder block"""
+     def __init__(self, key_size, query_size, value_size, num_hiddens,
+                  norm_shape, ffn_num_input, ffn_num_hiddens, num_heads,
+                  dropout, use_bias=False, **kwargs):
+         super(EncoderBlock, self).__init__(**kwargs)
+         self.attention = MultiHeadAttention(
+             key_size, query_size, value_size, num_hiddens, num_heads, dropout,
+             use_bias)
+         self.addnorm1 = AddNorm(norm_shape, dropout)
+         self.ffn = PositionWiseFFN(
+             ffn_num_input, ffn_num_hiddens, num_hiddens)
+         self.addnorm2 = AddNorm(norm_shape, dropout)
+
+     def forward(self, X, valid_lens):
+         Y = self.addnorm1(X, self.attention(X, X, X, valid_lens))
+         return self.addnorm2(Y, self.ffn(Y))
+
+
+ class PositionalEncoding(nn.Module):
+     """Positional encoding.
+
+     Defined in :numref:`sec_self-attention-and-positional-encoding`"""
+     def __init__(self, num_hiddens, dropout, max_len=1000):
+         super(PositionalEncoding, self).__init__()
+         self.dropout = nn.Dropout(dropout)
+         # Create a long enough P
+         self.P = torch.zeros((1, max_len, num_hiddens))
+         X = torch.arange(max_len, dtype=torch.float32).reshape(
+             -1, 1) / torch.pow(10000, torch.arange(
+             0, num_hiddens, 2, dtype=torch.float32) / num_hiddens)
+         self.P[:, :, 0::2] = torch.sin(X)
+         self.P[:, :, 1::2] = torch.cos(X)
+
+     def forward(self, X):
+         X = X + self.P[:, :X.shape[1], :].to(X.device)
+         return self.dropout(X)
+
+
+ class Encoder(nn.Module):
+     """Base encoder interface for the encoder-decoder architecture"""
+     def __init__(self, **kwargs):
+         super(Encoder, self).__init__(**kwargs)
+
+     def forward(self, X, *args):
+         raise NotImplementedError
+
+
+ ### Decoder
+ class DecoderBlock(nn.Module):
+     """The i-th block in the decoder"""
+     def __init__(self, key_size, query_size, value_size, num_hiddens,
+                  norm_shape, ffn_num_input, ffn_num_hiddens, num_heads,
+                  dropout, i, **kwargs):
+         super(DecoderBlock, self).__init__(**kwargs)
+         self.i = i
+         self.attention1 = MultiHeadAttention(
+             key_size, query_size, value_size, num_hiddens, num_heads, dropout)
+         self.addnorm1 = AddNorm(norm_shape, dropout)
+         self.attention2 = MultiHeadAttention(
+             key_size, query_size, value_size, num_hiddens, num_heads, dropout)
+         self.addnorm2 = AddNorm(norm_shape, dropout)
+         self.ffn = PositionWiseFFN(ffn_num_input, ffn_num_hiddens,
+                                    num_hiddens)
+         self.addnorm3 = AddNorm(norm_shape, dropout)
+
+     def forward(self, X, state):
+         enc_outputs, enc_valid_lens = state[0], state[1]
+         # During training, all tokens of the output sequence are processed at
+         # the same time, so state[2][self.i] is initialized to None.
+         # During prediction, the output sequence is decoded token by token, so
+         # state[2][self.i] holds the representations decoded by the i-th block
+         # up to the current time step.
+         if state[2][self.i] is None:
+             key_values = X
+         else:
+             key_values = torch.cat((state[2][self.i], X), dim=1)
+         state[2][self.i] = key_values
+         if self.training:
+             batch_size, num_steps, _ = X.shape
+             # Shape of dec_valid_lens: (batch_size, num_steps),
+             # where every row is [1, 2, ..., num_steps]
+             dec_valid_lens = torch.arange(
+                 1, num_steps + 1, device=X.device).repeat(batch_size, 1)
+         else:
+             dec_valid_lens = None
+
+         # Self-attention
+         X2 = self.attention1(X, key_values, key_values, dec_valid_lens)
+         Y = self.addnorm1(X, X2)
+         # Encoder-decoder attention.
+         # Shape of enc_outputs: (batch_size, num_steps, num_hiddens)
+         Y2 = self.attention2(Y, enc_outputs, enc_outputs, enc_valid_lens)
+         Z = self.addnorm2(Y, Y2)
+         return self.addnorm3(Z, self.ffn(Z)), state
+
+
+ class Decoder(nn.Module):
+     """Base decoder interface for the encoder-decoder architecture.
+
+     Defined in :numref:`sec_encoder-decoder`"""
+     def __init__(self, **kwargs):
+         super(Decoder, self).__init__(**kwargs)
+
+     def init_state(self, enc_outputs, *args):
+         raise NotImplementedError
+
+     def forward(self, X, state):
+         raise NotImplementedError
+
+
+ class AttentionDecoder(Decoder):
+     """Base interface for decoders with an attention mechanism.
+
+     Defined in :numref:`sec_seq2seq_attention`"""
+     def __init__(self, **kwargs):
+         super(AttentionDecoder, self).__init__(**kwargs)
+
+     @property
+     def attention_weights(self):
+         raise NotImplementedError
+
+
+ class TransformerEncoder(Encoder):
+     """Transformer encoder"""
+     def __init__(self, vocab_size, key_size, query_size, value_size,
+                  num_hiddens, norm_shape, ffn_num_input, ffn_num_hiddens,
+                  num_heads, num_layers, dropout, use_bias=False, **kwargs):
+         super(TransformerEncoder, self).__init__(**kwargs)
+         self.num_hiddens = num_hiddens
+         self.embedding = nn.Embedding(vocab_size, num_hiddens)
+         self.pos_encoding = PositionalEncoding(num_hiddens, dropout)
+         self.blks = nn.Sequential()
+         for i in range(num_layers):
+             self.blks.add_module("block" + str(i),
+                 EncoderBlock(key_size, query_size, value_size, num_hiddens,
+                              norm_shape, ffn_num_input, ffn_num_hiddens,
+                              num_heads, dropout, use_bias))
+
+     def forward(self, X, valid_lens, *args):
+         # Because positional encoding values lie between -1 and 1, the
+         # embeddings are scaled by the square root of the embedding dimension
+         # before being added to the positional encoding.
+         X = self.pos_encoding(self.embedding(X) * math.sqrt(self.num_hiddens))
+         self.attention_weights = [None] * len(self.blks)
+         for i, blk in enumerate(self.blks):
+             X = blk(X, valid_lens)
+             self.attention_weights[
+                 i] = blk.attention.attention.attention_weights
+         return X
+
+
+ class TransformerDecoder(AttentionDecoder):
+     def __init__(self, vocab_size, key_size, query_size, value_size,
+                  num_hiddens, norm_shape, ffn_num_input, ffn_num_hiddens,
+                  num_heads, num_layers, dropout, **kwargs):
+         super(TransformerDecoder, self).__init__(**kwargs)
+         self.num_hiddens = num_hiddens
+         self.num_layers = num_layers
+         self.embedding = nn.Embedding(vocab_size, num_hiddens)
+         self.pos_encoding = PositionalEncoding(num_hiddens, dropout)
+         self.blks = nn.Sequential()
+         for i in range(num_layers):
+             self.blks.add_module("block" + str(i),
+                 DecoderBlock(key_size, query_size, value_size, num_hiddens,
+                              norm_shape, ffn_num_input, ffn_num_hiddens,
+                              num_heads, dropout, i))
+         self.dense = nn.Linear(num_hiddens, vocab_size)
+
+     def init_state(self, enc_outputs, enc_valid_lens, *args):
+         return [enc_outputs, enc_valid_lens, [None] * self.num_layers]
+
+     def forward(self, X, state):
+         X = self.pos_encoding(self.embedding(X) * math.sqrt(self.num_hiddens))
+         self._attention_weights = [[None] * len(self.blks) for _ in range(2)]
+         for i, blk in enumerate(self.blks):
+             X, state = blk(X, state)
+             # Decoder self-attention weights
+             self._attention_weights[0][
+                 i] = blk.attention1.attention.attention_weights
+             # Encoder-decoder attention weights
+             self._attention_weights[1][
+                 i] = blk.attention2.attention.attention_weights
+         return self.dense(X), state
+
+     @property
+     def attention_weights(self):
+         return self._attention_weights
+
+
+ class transformerModel(PreTrainedModel):
+     """Base class for the encoder-decoder architecture.
+     Defined in :numref:`sec_encoder-decoder`"""
+     def __init__(self, config):
+         super().__init__(config)
+         self.encoder = TransformerEncoder(
+             config.src_vocab_len, config.key_size, config.query_size, config.value_size, config.num_hiddens,
+             config.norm_shape, config.ffn_num_input, config.ffn_num_hiddens, config.num_heads,
+             config.num_layers, config.dropout)
+
+         self.decoder = TransformerDecoder(
+             config.tgt_vocab, config.key_size, config.query_size, config.value_size, config.num_hiddens,
+             config.norm_shape, config.ffn_num_input, config.ffn_num_hiddens, config.num_heads,
+             config.num_layers, config.dropout)
+
+     def forward(self, enc_X, dec_X, *args):
+         enc_outputs = self.encoder(enc_X, *args)
+         dec_state = self.decoder.init_state(enc_outputs, *args)
+         return self.decoder(dec_X, dec_state)
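
As an end-to-end sanity check of the uploaded model, a minimal forward pass with dummy token ids; batch size, step count, and vocabulary sizes follow config.json, the repo id is again a placeholder, and the valid-length tensor is illustrative:

import torch
from transformers import AutoModel

model = AutoModel.from_pretrained("bu1/<repo-name>", trust_remote_code=True)  # placeholder repo id
model.eval()

batch_size, num_steps = 4, 10                            # num_steps matches config.json
enc_X = torch.randint(0, 184, (batch_size, num_steps))   # source ids < src_vocab_len
dec_X = torch.randint(0, 201, (batch_size, num_steps))   # target ids < tgt_vocab
valid_lens = torch.full((batch_size,), num_steps)        # every source sequence is full length

with torch.no_grad():
    logits, state = model(enc_X, dec_X, valid_lens)
print(logits.shape)  # torch.Size([4, 10, 201]): per-step scores over the target vocabulary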