Hwijung committed on
Commit
88545ca
1 Parent(s): ac19132

Upload 9 files

config.json ADDED
@@ -0,0 +1,42 @@
1
+ {
2
+ "activation_function": "silu",
3
+ "architectures": [
4
+ "MidmLMHeadModel"
5
+ ],
6
+ "attn_pdrop": 0.0,
7
+ "auto_map": {
8
+ "AutoConfig": "configuration_midm.MidmBitextConfig",
9
+ "AutoModelForCausalLM": "modeling_midm.MidmLMHeadModel"
10
+ },
11
+ "bos_token_id": 2,
12
+ "embd_pdrop": 0.0,
13
+ "eos_token_id": 3,
14
+ "initializer_range": 0.02,
15
+ "layer_norm_epsilon": 1e-05,
16
+ "model_type": "midm-bitext-S",
17
+ "n_embd": 4096,
18
+ "n_head": 32,
19
+ "n_inner": 10880,
20
+ "n_layer": 32,
21
+ "n_positions": 8192,
22
+ "normalization_type": "layernorm1p",
23
+ "pad_token_id": 1,
24
+ "reorder_and_upcast_attn": false,
25
+ "resid_pdrop": 0.0,
26
+ "rotary_percentage": 0.5,
27
+ "scale_attn_by_inverse_layer_idx": false,
28
+ "scale_attn_weights": true,
29
+ "scale_qk_by_inverse_layer_idx": true,
30
+ "summary_activation": null,
31
+ "summary_first_dropout": 0.1,
32
+ "summary_proj_to_labels": true,
33
+ "summary_type": "cls_index",
34
+ "summary_use_proj": true,
35
+ "tie_word_embeddings": false,
36
+ "torch_dtype": "bfloat16",
37
+ "transformers_version": "4.20.0",
38
+ "use_absolute_position_embedding": false,
39
+ "use_cache": true,
40
+ "use_rotary_position_embedding": true,
41
+ "vocab_size": 72192
42
+ }
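Because `auto_map` points at the custom `configuration_midm.MidmBitextConfig` and `modeling_midm.MidmLMHeadModel` classes uploaded in this commit, the checkpoint is meant to be loaded with `trust_remote_code=True`. A minimal loading sketch (the repository id is a placeholder, and the AutoTokenizer mapping assumes the accompanying tokenizer_config.json wires it to Midm_bitext_Tokenizer):

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

repo_id = "<this-repository>"  # placeholder; substitute the actual Hub path

config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    config=config,
    trust_remote_code=True,
    torch_dtype="auto",  # the config above declares bfloat16 weights
)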
configuration_midm.py ADDED
@@ -0,0 +1,21 @@
1
+ from transformers.models.gpt2.configuration_gpt2 import GPT2Config
2
+
3
+ class MidmBitextConfig(GPT2Config):
4
+ model_type = "midm-bitext-S"
5
+
6
+ def __init__(
7
+ self,
8
+ use_absolute_position_embedding: bool = True,
9
+ use_rotary_position_embedding: bool = False,
10
+ rotary_percentage: float = 1.0,
11
+ normalization_type: str = 'layernorm',
12
+ scale_qk_by_inverse_layer_idx: bool = False,
13
+ *args,
14
+ **kwargs
15
+ ):
16
+ super().__init__(*args, **kwargs)
17
+ self.use_absolute_position_embedding = use_absolute_position_embedding
18
+ self.use_rotary_position_embedding = use_rotary_position_embedding
19
+ self.rotary_percentage = rotary_percentage
20
+ self.normalization_type = normalization_type
21
+ self.scale_qk_by_inverse_layer_idx = scale_qk_by_inverse_layer_idx
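MidmBitextConfig only adds five fields on top of GPT2Config, so the values in config.json above map directly onto the constructor. A small sketch of instantiating it by hand (values copied from config.json; this is illustrative only, not required when loading from the Hub):

from configuration_midm import MidmBitextConfig

config = MidmBitextConfig(
    n_embd=4096,
    n_head=32,
    n_layer=32,
    n_positions=8192,
    n_inner=10880,
    vocab_size=72192,
    activation_function="silu",
    use_absolute_position_embedding=False,
    use_rotary_position_embedding=True,
    rotary_percentage=0.5,
    normalization_type="layernorm1p",
    scale_qk_by_inverse_layer_idx=True,
)
assert config.model_type == "midm-bitext-S"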
generation_config.json ADDED
@@ -0,0 +1,7 @@
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 2,
4
+ "eos_token_id": 3,
5
+ "pad_token_id": 1,
6
+ "transformers_version": "4.34.0"
7
+ }
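The ids above (pad=1, bos=2, eos=3) are what `generate` falls back to when no overrides are passed. A hedged sketch of greedy decoding with the `model` and `tokenizer` objects loaded earlier (the prompt is an arbitrary example; any instruction formatting the model expects is not shown here):

import torch

prompt = "Midm is a language model that"  # arbitrary example prompt
inputs = tokenizer(prompt, return_tensors="pt")
with torch.no_grad():
    output_ids = model.generate(
        **inputs,
        max_new_tokens=64,
        pad_token_id=1,   # matches generation_config.json
        eos_token_id=3,
    )
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))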
midm_bitext_tokenization.py ADDED
@@ -0,0 +1,310 @@
1
+ # coding=utf-8
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ """ Tokenization class for model Midm_bitext_tonkenizer."""
14
+ import os
15
+ import re
16
+ import warnings
17
+ from shutil import copyfile
18
+ from typing import Any, Dict, List, Optional, Tuple
19
+
20
+ import sentencepiece as spm
21
+
22
+ from transformers.tokenization_utils import PreTrainedTokenizer
23
+ from transformers.utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ VOCAB_FILES_NAMES = {"vocab_file": "midm_bitext_tokenizer.model"}
29
+
30
+ PRETRAINED_VOCAB_FILES_MAP = {}
31
+
32
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {}
33
+
34
+
35
+ class Midm_bitext_Tokenizer(PreTrainedTokenizer):
36
+ """
37
+ Construct a Midm bitext tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
38
+
39
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
40
+ this superclass for more information regarding those methods.
41
+
42
+ Args:
43
+ vocab_file (`str`):
44
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
45
+ contains the vocabulary necessary to instantiate a tokenizer.
46
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
47
+ The end of sequence token.
48
+
49
+ <Tip>
50
+
51
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
52
+ The token used is the `sep_token`.
53
+
54
+ </Tip>
55
+
56
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
57
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
58
+ token instead.
59
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
60
+ The token used for padding, for example when batching sequences of different lengths.
61
+ extra_ids (`int`, *optional*, defaults to 100):
62
+ Add a number of extra ids added to the end of the vocabulary for use as sentinels. These tokens are
63
+ accessible as "<extra_id_{%d}>" where "{%d}" is a number between 0 and extra_ids-1. Extra tokens are
64
+ indexed from the end of the vocabulary up to the beginning.
65
+ additional_special_tokens (`List[str]`, *optional*):
66
+ Additional special tokens used by the tokenizer.
67
+ sp_model_kwargs (`dict`, *optional*):
68
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
69
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
70
+ to set:
71
+
72
+ - `enable_sampling`: Enable subword regularization.
73
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
74
+
75
+ - `nbest_size = {0,1}`: No sampling is performed.
76
+ - `nbest_size > 1`: samples from the nbest_size results.
77
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
78
+ using forward-filtering-and-backward-sampling algorithm.
79
+
80
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
81
+ BPE-dropout.
82
+
83
+ Attributes:
84
+ sp_model (`SentencePieceProcessor`):
85
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
86
+ """
87
+
88
+ vocab_files_names = VOCAB_FILES_NAMES
89
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
90
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
91
+ model_input_names = ["input_ids", "attention_mask"]
92
+
93
+ def __init__(
94
+ self,
95
+ vocab_file,
96
+ eos_token="</s>",
97
+ unk_token="<unk>",
98
+ pad_token="<pad>",
99
+ extra_ids=100,
100
+ additional_special_tokens=None,
101
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
102
+ **kwargs
103
+ ) -> None:
104
+ # Add extra_ids to the special token list
105
+ if extra_ids > 0 and additional_special_tokens is None:
106
+ additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
107
+ elif extra_ids > 0 and additional_special_tokens is not None:
108
+ # Check that we have the right number of extra_id special tokens
109
+ extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
110
+ if extra_tokens != extra_ids:
111
+ raise ValueError(
112
+ f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are provided to Midm_bitext_Tonkenizer. "
113
+ "In this case the additional_special_tokens must include the extra_ids tokens"
114
+ )
115
+
116
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
117
+
118
+ # custom special tokens
119
+ # convert \n, \t in input text -> <[!newline]>, <[!tab]>
120
+ self.newline_token = "<[!newline]>"
121
+ self.tab_token = "<[!tab]>"
122
+
123
+ self.vocab_file = vocab_file
124
+ self._extra_ids = extra_ids
125
+ ##JHS
126
+ #print(**self.sp_model_kwargs)
127
+ #import pdb
128
+ #pdb.set_trace()
129
+
130
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
131
+ self.sp_model.Load(vocab_file)
132
+ super().__init__(
133
+ eos_token=eos_token,
134
+ unk_token=unk_token,
135
+ pad_token=pad_token,
136
+ extra_ids=extra_ids,
137
+ additional_special_tokens=additional_special_tokens,
138
+ sp_model_kwargs=self.sp_model_kwargs,
139
+ **kwargs,
140
+ )
141
+
142
+
143
+
144
+ @property
145
+ def vocab_size(self):
146
+ return self.sp_model.get_piece_size() + self._extra_ids
147
+
148
+ def get_vocab(self):
149
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
150
+ vocab.update(self.added_tokens_encoder)
151
+ return vocab
152
+
153
+ def get_special_tokens_mask(
154
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
155
+ ) -> List[int]:
156
+ """
157
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
158
+ special tokens using the tokenizer `prepare_for_model` method.
159
+
160
+ Args:
161
+ token_ids_0 (`List[int]`):
162
+ List of IDs.
163
+ token_ids_1 (`List[int]`, *optional*):
164
+ Optional second list of IDs for sequence pairs.
165
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
166
+ Whether or not the token list is already formatted with special tokens for the model.
167
+
168
+ Returns:
169
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
170
+ """
171
+ if already_has_special_tokens:
172
+ return super().get_special_tokens_mask(
173
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
174
+ )
175
+
176
+ # normal case: some special tokens
177
+ if token_ids_1 is None:
178
+ return ([0] * len(token_ids_0)) + [1]
179
+ return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
180
+
181
+ def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
182
+ """Do not add eos again if user already added it."""
183
+ if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
184
+ warnings.warn(
185
+ f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated eos tokens being added."
186
+ )
187
+ return token_ids
188
+ else:
189
+ return token_ids + [self.eos_token_id]
190
+
191
+ def create_token_type_ids_from_sequences(
192
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
193
+ ) -> List[int]:
194
+ """
195
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. Midm does not make
196
+ use of token type ids, therefore a list of zeros is returned.
197
+
198
+ Args:
199
+ token_ids_0 (`List[int]`):
200
+ List of IDs.
201
+ token_ids_1 (`List[int]`, *optional*):
202
+ Optional second list of IDs for sequence pairs.
203
+
204
+ Returns:
205
+ `List[int]`: List of zeros.
206
+ """
207
+ eos = [self.eos_token_id]
208
+
209
+ if token_ids_1 is None:
210
+ return len(token_ids_0 + eos) * [0]
211
+ return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
212
+
213
+ def build_inputs_with_special_tokens(
214
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
215
+ ) -> List[int]:
216
+ """
217
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
218
+ adding special tokens. A sequence has the following format:
219
+
220
+ - single sequence: `X </s>`
221
+ - pair of sequences: `A </s> B </s>`
222
+
223
+ Args:
224
+ token_ids_0 (`List[int]`):
225
+ List of IDs to which the special tokens will be added.
226
+ token_ids_1 (`List[int]`, *optional*):
227
+ Optional second list of IDs for sequence pairs.
228
+
229
+ Returns:
230
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
231
+ """
232
+ token_ids_0 = self._add_eos_if_not_present(token_ids_0)
233
+ if token_ids_1 is None:
234
+ return token_ids_0
235
+ else:
236
+ token_ids_1 = self._add_eos_if_not_present(token_ids_1)
237
+ return token_ids_0 + token_ids_1
238
+
239
+ def __getstate__(self):
240
+ state = self.__dict__.copy()
241
+ state["sp_model"] = None
242
+ return state
243
+
244
+ def __setstate__(self, d):
245
+ self.__dict__ = d
246
+
247
+ # for backward compatibility
248
+ if not hasattr(self, "sp_model_kwargs"):
249
+ self.sp_model_kwargs = {}
250
+
251
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
252
+ self.sp_model.Load(self.vocab_file)
253
+
254
+ def _tokenize(self, text: str) -> List[str]:
255
+ """Take as input a string and return a list of strings (tokens) for words/sub-words"""
256
+ text = text.replace("\n", self.newline_token)
257
+ text = text.replace("\t", self.tab_token)
258
+
259
+ return self.sp_model.encode(text, out_type=str)
260
+
261
+ def _convert_token_to_id(self, token):
262
+ """Converts a token (str) in an id using the vocab."""
263
+ if token.startswith("<extra_id_"):
264
+ match = re.match(r"<extra_id_(\d+)>", token)
265
+ num = int(match.group(1))
266
+ return self.vocab_size - num - 1
267
+ return self.sp_model.piece_to_id(token)
268
+
269
+ def _convert_id_to_token(self, index):
270
+ """Converts an index (integer) in a token (str) using the vocab."""
271
+ if index < self.sp_model.get_piece_size():
272
+ token = self.sp_model.IdToPiece(index)
273
+ else:
274
+ token = f"<extra_id_{self.vocab_size - 1 - index}>"
275
+ return token
276
+
277
+ def convert_tokens_to_string(self, tokens):
278
+ """Converts a sequence of tokens (string) in a single string."""
279
+ current_sub_tokens = []
280
+ out_string = ""
281
+ for token in tokens:
282
+ # make sure that special tokens are not decoded using sentencepiece model
283
+ if token in self.all_special_tokens:
284
+ out_string += self.sp_model.decode_pieces(current_sub_tokens) + token + " "
285
+ current_sub_tokens = []
286
+ else:
287
+ current_sub_tokens.append(token)
288
+ out_string += self.sp_model.decode_pieces(current_sub_tokens)
289
+
290
+ out_string = out_string.replace(self.newline_token, "\n")
291
+ out_string = out_string.replace(self.tab_token, "\t")
292
+
293
+ return out_string.strip()
294
+
295
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
296
+ if not os.path.isdir(save_directory):
297
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
298
+ return
299
+ out_vocab_file = os.path.join(
300
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
301
+ )
302
+
303
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
304
+ copyfile(self.vocab_file, out_vocab_file)
305
+ elif not os.path.isfile(self.vocab_file):
306
+ with open(out_vocab_file, "wb") as fi:
307
+ content_spiece_model = self.sp_model.serialized_model_proto()
308
+ fi.write(content_spiece_model)
309
+
310
+ return (out_vocab_file,)
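One behavior worth noting in the tokenizer above: `_tokenize` rewrites newlines and tabs into the custom `<[!newline]>` / `<[!tab]>` pieces before SentencePiece sees the text, and `convert_tokens_to_string` maps them back on decode. A rough round-trip sketch (assumes the `midm_bitext_tokenizer.model` file below has been downloaded locally):

from midm_bitext_tokenization import Midm_bitext_Tokenizer

tok = Midm_bitext_Tokenizer("midm_bitext_tokenizer.model")

text = "first line\n\tsecond line"
pieces = tok.tokenize(text)                 # pieces include <[!newline]> and <[!tab]>
ids = tok(text)["input_ids"]                # eos id appended by build_inputs_with_special_tokens
decoded = tok.decode(ids, skip_special_tokens=True)
print(pieces)
print(decoded)                              # newline and tab restored in the decoded text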
midm_bitext_tokenizer.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:98789fa1bf89a1f9692889fb4a0029d3d096a9109cebf4f6bce1a255f2701378
3
+ size 1457356
modeling_midm.py ADDED
@@ -0,0 +1,1464 @@
1
+ # coding=utf-8
2
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch Midm model."""
16
+
17
+ import math
18
+ import os
19
+ from dataclasses import dataclass
20
+ from typing import Optional, Tuple
21
+
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from packaging import version
25
+ from torch import nn
26
+ from torch.nn import CrossEntropyLoss, MSELoss
27
+ from types import SimpleNamespace
28
+ from .rotary_position_embedding import RotaryEmbedding, apply_rotary_pos_emb
29
+
30
+ if version.parse(torch.__version__) >= version.parse("1.6"):
31
+ is_amp_available = True
32
+ from torch.cuda.amp import autocast
33
+ else:
34
+ is_amp_available = False
35
+
36
+ from transformers.activations import ACT2FN
37
+ from transformers.file_utils import (
38
+ ModelOutput,
39
+ add_code_sample_docstrings,
40
+ add_start_docstrings,
41
+ add_start_docstrings_to_model_forward,
42
+ replace_return_docstrings,
43
+ )
44
+ from transformers.modeling_outputs import (
45
+ BaseModelOutputWithPastAndCrossAttentions,
46
+ CausalLMOutputWithCrossAttentions,
47
+ SequenceClassifierOutputWithPast,
48
+ TokenClassifierOutput,
49
+ )
50
+ from transformers.modeling_utils import (
51
+ Conv1D,
52
+ PreTrainedModel,
53
+ SequenceSummary,
54
+ find_pruneable_heads_and_indices,
55
+ prune_conv1d_layer,
56
+ )
57
+ from transformers.utils import logging
58
+ from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
59
+ from .configuration_midm import MidmBitextConfig
60
+
61
+
62
+ logger = logging.get_logger(__name__)
63
+
64
+ _CHECKPOINT_FOR_DOC = "Midm"
65
+ _CONFIG_FOR_DOC = "MidmBitextConfig"
66
+ _TOKENIZER_FOR_DOC = "Midm_bitext_Tokenizer"
67
+
68
+ MIDM_PRETRAINED_MODEL_ARCHIVE_LIST = [
69
+ "Midm-bitext-S",
70
+ ]
71
+
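+ # layernorm1p: standard LayerNorm with the learnable gain stored as an offset from 1
+ # (the effective weight is module.weight + 1), the "layernorm1p" variant used in
+ # Megatron/NeMo-style models.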
72
+ def layernorm1p(module, input):
73
+ return torch.nn.functional.layer_norm(
74
+ input, module.normalized_shape, module.weight + 1, module.bias, module.eps)
75
+
76
+ class MidmAttention(nn.Module):
77
+ def __init__(self, config, is_cross_attention=False, layer_idx=None):
78
+ super().__init__()
79
+
80
+ max_positions = config.max_position_embeddings
81
+ self.register_buffer(
82
+ "bias",
83
+ torch.tril(torch.ones((max_positions, max_positions), dtype=torch.uint8)).view(
84
+ 1, 1, max_positions, max_positions
85
+ ),
86
+ )
87
+ self.register_buffer("masked_bias", torch.tensor(-1e4))
88
+
89
+ self.embed_dim = config.hidden_size
90
+ self.num_heads = config.num_attention_heads
91
+ self.head_dim = self.embed_dim // self.num_heads
92
+ self.split_size = self.embed_dim
93
+ if self.head_dim * self.num_heads != self.embed_dim:
94
+ raise ValueError(
95
+ f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})."
96
+ )
97
+
98
+ self.scale_attn_weights = config.scale_attn_weights
99
+ self.is_cross_attention = is_cross_attention
100
+
101
+ # Layer-wise attention scaling, reordering, and upcasting
102
+ self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx
103
+ self.layer_idx = layer_idx
104
+ self.reorder_and_upcast_attn = config.reorder_and_upcast_attn
105
+ self.scale_qk_by_inverse_layer_idx = config.scale_qk_by_inverse_layer_idx
106
+ assert self.scale_attn_by_inverse_layer_idx != self.scale_qk_by_inverse_layer_idx
107
+
108
+ if self.is_cross_attention:
109
+ self.c_attn = nn.Linear(self.embed_dim, 2 * self.embed_dim)
110
+ nn.init.normal_(self.c_attn.weight, std=0.02)
111
+ nn.init.zeros_(self.c_attn.bias)
112
+ self.q_attn = nn.Linear(self.embed_dim, self.embed_dim)
113
+ nn.init.normal_(self.q_attn.weight, std=0.02)
114
+ nn.init.zeros_(self.q_attn.bias)
115
+ else:
116
+ self.c_attn = nn.Linear(self.embed_dim, 3 * self.embed_dim)
117
+ nn.init.normal_(self.c_attn.weight, std=0.02)
118
+ nn.init.zeros_(self.c_attn.bias)
119
+ self.c_proj = nn.Linear(self.embed_dim, self.embed_dim)
120
+ nn.init.normal_(self.c_proj.weight, std=0.02)
121
+ nn.init.zeros_(self.c_proj.bias)
122
+
123
+ self.attn_dropout = nn.Dropout(config.attn_pdrop)
124
+ self.resid_dropout = nn.Dropout(config.resid_pdrop)
125
+
126
+ self.pruned_heads = set()
127
+
128
+ def prune_heads(self, heads):
129
+ if len(heads) == 0:
130
+ return
131
+ heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, self.head_dim, self.pruned_heads)
132
+ index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
133
+
134
+ # Prune conv1d layers
135
+ self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
136
+ self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
137
+
138
+ # Update hyper params
139
+ self.split_size = (self.split_size // self.num_heads) * (self.num_heads - len(heads))
140
+ self.num_heads = self.num_heads - len(heads)
141
+ self.pruned_heads = self.pruned_heads.union(heads)
142
+
143
+ def _attn(self, query, key, value, attention_mask=None, head_mask=None):
144
+ attn_weights = torch.matmul(query, key.transpose(-1, -2))
145
+
146
+ if self.scale_attn_weights:
147
+ attn_weights = attn_weights / (float(value.size(-1)) ** 0.5)
148
+
149
+ # Layer-wise attention scaling
150
+ if self.scale_attn_by_inverse_layer_idx or self.scale_qk_by_inverse_layer_idx:
151
+ attn_weights = attn_weights / float(self.layer_idx + 1)
152
+
153
+ if not self.is_cross_attention:
154
+ # if only "normal" attention layer implements causal mask
155
+ query_length, key_length = query.size(-2), key.size(-2)
156
+ causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length].bool()
157
+ attn_weights = torch.where(causal_mask, attn_weights, self.masked_bias.to(attn_weights.dtype))
158
+
159
+ if attention_mask is not None:
160
+ # Apply the attention mask
161
+ attn_weights = attn_weights + attention_mask
162
+
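+ # undo the earlier 1/(layer_idx + 1) scaling now that the masks have been applied,
+ # so the softmax sees the original query-key magnitudes; presumably this keeps the
+ # intermediate logits in a safer numeric range for lower-precision training.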
163
+ if self.scale_qk_by_inverse_layer_idx:
164
+ attn_weights = attn_weights * float(self.layer_idx + 1)
165
+
166
+ attn_weights = nn.Softmax(dim=-1)(attn_weights)
167
+
168
+ # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
169
+ attn_weights = attn_weights.type(value.dtype)
170
+ attn_weights = self.attn_dropout(attn_weights)
171
+
172
+ # Mask heads if we want to
173
+ if head_mask is not None:
174
+ attn_weights = attn_weights * head_mask
175
+
176
+ attn_output = torch.matmul(attn_weights, value)
177
+
178
+ return attn_output, attn_weights
179
+
180
+ def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, head_mask=None):
181
+ # Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM)
182
+ bsz, num_heads, q_seq_len, dk = query.size()
183
+ _, _, k_seq_len, _ = key.size()
184
+
185
+ # Preallocate attn_weights for `baddbmm`
186
+ attn_weights = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device)
187
+
188
+ # Compute Scale Factor
189
+ scale_factor = 1.0
190
+ if self.scale_attn_weights:
191
+ scale_factor /= float(value.size(-1)) ** 0.5
192
+
193
+ if self.scale_attn_by_inverse_layer_idx:
194
+ scale_factor /= float(self.layer_idx + 1)
195
+
196
+ # Upcast (turn off autocast) and reorder (Scale K by 1 / root(dk))
197
+ if is_amp_available:
198
+ with autocast(enabled=False):
199
+ q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len)
200
+ attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor)
201
+ attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)
202
+ else:
203
+ q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len)
204
+ attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor)
205
+ attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)
206
+
207
+ if not self.is_cross_attention:
208
+ # if only "normal" attention layer implements causal mask
209
+ query_length, key_length = query.size(-2), key.size(-2)
210
+ causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length].bool()
211
+ attn_weights = torch.where(causal_mask, attn_weights, self.masked_bias.to(attn_weights.dtype))
212
+
213
+ if attention_mask is not None:
214
+ # Apply the attention mask
215
+ attn_weights = attn_weights + attention_mask
216
+
217
+ attn_weights = nn.Softmax(dim=-1)(attn_weights)
218
+
219
+ # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
220
+ if attn_weights.dtype != torch.float32:
221
+ raise RuntimeError("Error with upcasting, attn_weights does not have dtype torch.float32")
222
+ attn_weights = attn_weights.type(value.dtype)
223
+ attn_weights = self.attn_dropout(attn_weights)
224
+
225
+ # Mask heads if we want to
226
+ if head_mask is not None:
227
+ attn_weights = attn_weights * head_mask
228
+
229
+ attn_output = torch.matmul(attn_weights, value)
230
+
231
+ return attn_output, attn_weights
232
+
233
+ def _split_heads(self, tensor, num_heads, attn_head_size):
234
+ """
235
+ Splits hidden_size dim into attn_head_size and num_heads
236
+ """
237
+ new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
238
+ tensor = tensor.view(*new_shape)
239
+ return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
240
+
241
+ def _merge_heads(self, tensor, num_heads, attn_head_size):
242
+ """
243
+ Merges attn_head_size dim and num_attn_heads dim into hidden_size
244
+ """
245
+ tensor = tensor.permute(0, 2, 1, 3).contiguous()
246
+ new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
247
+ return tensor.view(new_shape)
248
+
249
+ def forward(
250
+ self,
251
+ hidden_states,
252
+ layer_past=None,
253
+ attention_mask=None,
254
+ head_mask=None,
255
+ encoder_hidden_states=None,
256
+ encoder_attention_mask=None,
257
+ use_cache=False,
258
+ output_attentions=False,
259
+ rotary_pos_emb=None,
260
+ ):
261
+ if encoder_hidden_states is not None:
262
+ if not hasattr(self, "q_attn"):
263
+ raise ValueError(
264
+ "If class is used as cross attention, the weights `q_attn` have to be defined. "
265
+ "Please make sure to instantiate class with `MidmAttention(..., is_cross_attention=True)`."
266
+ )
267
+
268
+ query = self.q_attn(hidden_states)
269
+ key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)
270
+ attention_mask = encoder_attention_mask
271
+ else:
272
+ query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)
273
+
274
+ query = self._split_heads(query, self.num_heads, self.head_dim)
275
+ key = self._split_heads(key, self.num_heads, self.head_dim)
276
+ value = self._split_heads(value, self.num_heads, self.head_dim)
277
+
278
+ if layer_past is not None:
279
+ past_key, past_value = layer_past
280
+ key = torch.cat((past_key, key), dim=-2)
281
+ value = torch.cat((past_value, value), dim=-2)
282
+
283
+ if use_cache is True:
284
+ present = (key, value)
285
+ else:
286
+ present = None
287
+
288
+ if rotary_pos_emb is not None:
289
+ query = apply_rotary_pos_emb(query, rotary_pos_emb)
290
+ key = apply_rotary_pos_emb(key, rotary_pos_emb)
291
+
292
+ if self.reorder_and_upcast_attn:
293
+ attn_output, attn_weights = self._upcast_and_reordered_attn(query, key, value, attention_mask, head_mask)
294
+ else:
295
+ attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
296
+
297
+ attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
298
+ attn_output = self.c_proj(attn_output)
299
+ attn_output = self.resid_dropout(attn_output)
300
+
301
+ outputs = (attn_output, present)
302
+ if output_attentions:
303
+ outputs += (attn_weights,)
304
+
305
+ return outputs # a, present, (attentions)
306
+
307
+
308
+ class MidmMLP(nn.Module):
309
+ def __init__(self, intermediate_size, config):
310
+ super().__init__()
311
+ embed_dim = config.hidden_size
312
+ self.kt_glu = config.activation_function in ['silu']
313
+ if self.kt_glu:
314
+ self.c_fc = nn.Linear(embed_dim, intermediate_size * 2)
315
+ else:
316
+ self.c_fc = nn.Linear(embed_dim, intermediate_size)
317
+ nn.init.normal_(self.c_fc.weight, std=0.02)
318
+ nn.init.zeros_(self.c_fc.bias)
319
+ self.c_proj = nn.Linear(intermediate_size, embed_dim)
320
+ nn.init.normal_(self.c_proj.weight, std=0.02)
321
+ nn.init.zeros_(self.c_proj.bias)
322
+
323
+ if config.activation_function == 'silu':
324
+ self.act = torch.nn.functional.silu
325
+ else:
326
+ self.act = ACT2FN[config.activation_function]
327
+ self.dropout = nn.Dropout(config.resid_pdrop)
328
+
329
+ def forward(self, hidden_states):
330
+ hidden_states = self.c_fc(hidden_states)
331
+ if self.kt_glu:
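+ # SwiGLU-style gating: c_fc produced twice the intermediate width, so half of it
+ # is passed through SiLU and gates the other half elementwise.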
332
+ hidden_states1, hidden_states2 = torch.chunk(hidden_states, 2, dim=-1)
333
+ hidden_states = self.act(hidden_states1) * hidden_states2
334
+ else:
335
+ hidden_states = self.act(hidden_states)
336
+ hidden_states = self.c_proj(hidden_states)
337
+ hidden_states = self.dropout(hidden_states)
338
+ return hidden_states
339
+
340
+
341
+ class MidmBlock(nn.Module):
342
+ def __init__(self, config, layer_idx=None):
343
+ super().__init__()
344
+ hidden_size = config.hidden_size
345
+ inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
346
+
347
+ self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
348
+ self.attn = MidmAttention(config, layer_idx=layer_idx)
349
+ self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
350
+ self.use_layernorm1p = config.normalization_type == 'layernorm1p'
351
+
352
+ if config.add_cross_attention:
353
+ self.crossattention = MidmAttention(config, is_cross_attention=True)
354
+ self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
355
+
356
+ self.mlp = MidmMLP(inner_dim, config)
357
+
358
+ def forward(
359
+ self,
360
+ hidden_states,
361
+ layer_past=None,
362
+ attention_mask=None,
363
+ head_mask=None,
364
+ encoder_hidden_states=None,
365
+ encoder_attention_mask=None,
366
+ use_cache=False,
367
+ output_attentions=False,
368
+ rotary_pos_emb=None,
369
+ ):
370
+ residual = hidden_states
371
+ if self.use_layernorm1p:
372
+ hidden_states = layernorm1p(self.ln_1, hidden_states)
373
+ else:
374
+ hidden_states = self.ln_1(hidden_states)
375
+ attn_outputs = self.attn(
376
+ hidden_states,
377
+ layer_past=layer_past,
378
+ attention_mask=attention_mask,
379
+ head_mask=head_mask,
380
+ use_cache=use_cache,
381
+ output_attentions=output_attentions,
382
+ rotary_pos_emb=rotary_pos_emb,
383
+ )
384
+ attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
385
+ outputs = attn_outputs[1:]
386
+ # residual connection
387
+ hidden_states = attn_output + residual
388
+
389
+ if encoder_hidden_states is not None:
390
+ # add one self-attention block for cross-attention
391
+ if not hasattr(self, "crossattention"):
392
+ raise ValueError(
393
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with "
394
+ "cross-attention layers by setting `config.add_cross_attention=True`"
395
+ )
396
+ residual = hidden_states
397
+ if self.use_layernorm1p:
398
+ hidden_states = layernorm1p(self.ln_cross_attn, hidden_states)
399
+ else:
400
+ hidden_states = self.ln_cross_attn(hidden_states)
401
+ cross_attn_outputs = self.crossattention(
402
+ hidden_states,
403
+ attention_mask=attention_mask,
404
+ head_mask=head_mask,
405
+ encoder_hidden_states=encoder_hidden_states,
406
+ encoder_attention_mask=encoder_attention_mask,
407
+ output_attentions=output_attentions,
408
+ )
409
+ attn_output = cross_attn_outputs[0]
410
+ # residual connection
411
+ hidden_states = residual + attn_output
412
+ outputs = outputs + cross_attn_outputs[2:] # add cross attentions if we output attention weights
413
+
414
+ residual = hidden_states
415
+ if self.use_layernorm1p:
416
+ hidden_states = layernorm1p(self.ln_2, hidden_states)
417
+ else:
418
+ hidden_states = self.ln_2(hidden_states)
419
+ feed_forward_hidden_states = self.mlp(hidden_states)
420
+ # residual connection
421
+ hidden_states = residual + feed_forward_hidden_states
422
+
423
+ if use_cache:
424
+ outputs = (hidden_states,) + outputs
425
+ else:
426
+ outputs = (hidden_states,) + outputs[1:]
427
+
428
+ return outputs # hidden_states, present, (attentions, cross_attentions)
429
+
430
+
431
+ class MidmPreTrainedModel(PreTrainedModel):
432
+ """
433
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
434
+ models.
435
+ """
436
+
437
+ config_class = MidmBitextConfig
438
+ base_model_prefix = "transformer"
439
+ is_parallelizable = True
440
+ supports_gradient_checkpointing = True
441
+ _no_split_modules = ["MidmBlock"]
442
+
443
+ def __init__(self, *inputs, **kwargs):
444
+ super().__init__(*inputs, **kwargs)
445
+
446
+ def _init_weights(self, module):
447
+ """Initialize the weights."""
448
+ if isinstance(module, (nn.Linear, Conv1D)):
449
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
450
+ if module.bias is not None:
451
+ module.bias.data.zero_()
452
+ elif isinstance(module, nn.Embedding):
453
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
454
+ if module.padding_idx is not None:
455
+ module.weight.data[module.padding_idx].zero_()
456
+ elif isinstance(module, nn.LayerNorm):
457
+ module.bias.data.zero_()
458
+ module.weight.data.fill_(1.0)
459
+
460
+ for name, p in module.named_parameters():
461
+ if "c_proj" in name and "weight" in name:
462
+ # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
463
+ p.data.normal_(mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.n_layer)))
464
+
465
+ def _set_gradient_checkpointing(self, module, value=False):
466
+ if isinstance(module, MidmModel):
467
+ module.gradient_checkpointing = value
468
+
469
+
470
+ @dataclass
471
+ class MidmDoubleHeadsModelOutput(ModelOutput):
472
+ loss: Optional[torch.FloatTensor] = None
473
+ mc_loss: Optional[torch.FloatTensor] = None
474
+ logits: torch.FloatTensor = None
475
+ mc_logits: torch.FloatTensor = None
476
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
477
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
478
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
479
+
480
+
481
+ MIDM_START_DOCSTRING = r"""
482
+
483
+ This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
484
+ methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
485
+ pruning heads etc.)
486
+
487
+ This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
488
+ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
489
+ general usage and behavior.
490
+
491
+ Parameters:
492
+ config (:class:`~transformers.MidmBitextConfig`): Model configuration class with all the parameters of the model.
493
+ Initializing with a config file does not load the weights associated with the model, only the
494
+ configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
495
+ weights.
496
+ """
497
+
498
+ MIDM_INPUTS_DOCSTRING = r"""
499
+ Args:
500
+ input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, input_ids_length)`):
501
+ :obj:`input_ids_length` = ``sequence_length`` if :obj:`past_key_values` is ``None`` else
502
+ ``past_key_values[0][0].shape[-2]`` (``sequence_length`` of input past key value states). Indices of input
503
+ sequence tokens in the vocabulary.
504
+
505
+ If :obj:`past_key_values` is used, only ``input_ids`` that do not have their past calculated should be
506
+ passed as ``input_ids``.
507
+
508
+ Indices can be obtained using :class:`~transformers.Midm_bitext_Tokenizer`. See
509
+ :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
510
+ details.
511
+
512
+ `What are input IDs? <../glossary.html#input-ids>`__
513
+ past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers`):
514
+ Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
515
+ :obj:`past_key_values` output below). Can be used to speed up sequential decoding. The ``input_ids`` which
516
+ have their past given to this model should not be passed as ``input_ids`` as they have already been
517
+ computed.
518
+ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
519
+ Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
520
+
521
+ - 1 for tokens that are **not masked**,
522
+ - 0 for tokens that are **masked**.
523
+
524
+ `What are attention masks? <../glossary.html#attention-mask>`__
525
+ token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, input_ids_length)`, `optional`):
526
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
527
+ 1]``:
528
+
529
+ - 0 corresponds to a `sentence A` token,
530
+ - 1 corresponds to a `sentence B` token.
531
+
532
+ `What are token type IDs? <../glossary.html#token-type-ids>`_
533
+ position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
534
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
535
+ config.max_position_embeddings - 1]``.
536
+
537
+ `What are position IDs? <../glossary.html#position-ids>`_
538
+ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
539
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
540
+
541
+ - 1 indicates the head is **not masked**,
542
+ - 0 indicates the head is **masked**.
543
+
544
+ inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
545
+ Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
546
+ This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
547
+ vectors than the model's internal embedding lookup matrix.
548
+
549
+ If :obj:`past_key_values` is used, optionally only the last :obj:`inputs_embeds` have to be input (see
550
+ :obj:`past_key_values`).
551
+ use_cache (:obj:`bool`, `optional`):
552
+ If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
553
+ decoding (see :obj:`past_key_values`).
554
+ output_attentions (:obj:`bool`, `optional`):
555
+ Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
556
+ tensors for more detail.
557
+ output_hidden_states (:obj:`bool`, `optional`):
558
+ Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
559
+ more detail.
560
+ return_dict (:obj:`bool`, `optional`):
561
+ Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
562
+ """
563
+ PARALLELIZE_DOCSTRING = r"""
564
+ This is an experimental feature and is subject to change at a moment's notice.
565
+
566
+ Uses a device map to distribute attention modules of the model across several devices. If no device map is given,
567
+ it will evenly distribute blocks across all devices.
568
+
569
+ Args:
570
+ device_map (:obj:`Dict[int, list]`, optional, defaults to None):
571
+ A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always
572
+ automatically mapped to the first device (for esoteric reasons). That means that the first device should
573
+ have fewer attention modules mapped to it than other devices. For reference, the Midm models have the
574
+ following number of attention modules:
575
+
576
+ - midm-bitext-S: 32
577
+
578
+ Example::
579
+
580
+ # Here is an example of a device map on a machine with 4 GPUs using midm-bitext-S, which has a total of 32 attention modules:
581
+ model = MidmLMHeadModel.from_pretrained('midm-bitext-S')
582
+ device_map = {0: [0, 1, 2, 3, 4, 5, 6, 7, 8],
583
+ 1: [9, 10, 11, 12, 13, 14, 15, 16],
584
+ 2: [17, 18, 19, 20, 21, 22, 23, 24],
585
+ 3: [25, 26, 27, 28, 29, 30, 31]}
586
+ model.parallelize(device_map)
587
+ """
588
+ DEPARALLELIZE_DOCSTRING = r"""
589
+ Moves the model to cpu from a model parallel state.
590
+
591
+ Example::
592
+
593
+ # On a 4 GPU machine with midm-bitext-S:
594
+ model = MidmLMHeadModel.from_pretrained('midm-bitext-S')
595
+ device_map = {0: [0, 1, 2, 3, 4, 5, 6, 7, 8],
596
+ 1: [9, 10, 11, 12, 13, 14, 15, 16],
597
+ 2: [17, 18, 19, 20, 21, 22, 23, 24],
598
+ 3: [25, 26, 27, 28, 29, 30, 31]}
599
+ model.parallelize(device_map) # Splits the model across several devices
600
+ model.deparallelize() # Puts the model back on CPU and frees memory by calling torch.cuda.empty_cache()
601
+ """
602
+
603
+
604
+ @add_start_docstrings(
605
+ "The bare Midm Model transformer outputting raw hidden-states without any specific head on top.",
606
+ MIDM_START_DOCSTRING,
607
+ )
608
+ class MidmModel(MidmPreTrainedModel):
609
+ _keys_to_ignore_on_load_missing = ["attn.masked_bias"]
610
+
611
+ def __init__(self, config):
612
+ super().__init__(config)
613
+
614
+ self.embed_dim = config.hidden_size
615
+
616
+ self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
617
+ self.use_absolute_position_embedding = config.use_absolute_position_embedding
618
+ if self.use_absolute_position_embedding:
619
+ self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
620
+
621
+ self.use_rotary_position_embedding = config.use_rotary_position_embedding
622
+ if self.use_rotary_position_embedding:
623
+ rotary_dim = config.hidden_size // config.num_attention_heads
624
+ assert 0 < config.rotary_percentage <= 1
625
+ if config.rotary_percentage < 1:
626
+ rotary_dim = int(rotary_dim * config.rotary_percentage)
627
+ self.rotary_pos_emb = RotaryEmbedding(
628
+ rotary_dim,
629
+ seq_len_interpolation_factor=None,
630
+ pretrained_max_position_embeddings=config.max_position_embeddings)
631
+
632
+ self.drop = nn.Dropout(config.embd_pdrop)
633
+ self.h = nn.ModuleList([MidmBlock(config, layer_idx=i) for i in range(config.num_hidden_layers)])
634
+ self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
635
+ self.use_layernorm1p = config.normalization_type == 'layernorm1p'
636
+
637
+ self.init_weights()
638
+
639
+ # Model parallel
640
+ self.model_parallel = False
641
+ self.device_map = None
642
+ self.gradient_checkpointing = False
643
+
644
+ @add_start_docstrings(PARALLELIZE_DOCSTRING)
645
+ def parallelize(self, device_map=None):
646
+ # Check validity of device_map
647
+ self.device_map = (
648
+ get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map
649
+ )
650
+ assert_device_map(self.device_map, len(self.h))
651
+ self.model_parallel = True
652
+ self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys()))
653
+ self.last_device = "cuda:" + str(max(self.device_map.keys()))
654
+ self.wte = self.wte.to(self.first_device)
655
+ if self.use_absolute_position_embedding:
656
+ self.wpe = self.wpe.to(self.first_device)
657
+ # Load onto devices
658
+ for k, v in self.device_map.items():
659
+ for block in v:
660
+ cuda_device = "cuda:" + str(k)
661
+ self.h[block] = self.h[block].to(cuda_device)
662
+ # ln_f to last
663
+ self.ln_f = self.ln_f.to(self.last_device)
664
+
665
+ @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
666
+ def deparallelize(self):
667
+ self.model_parallel = False
668
+ self.device_map = None
669
+ self.first_device = "cpu"
670
+ self.last_device = "cpu"
671
+ self.wte = self.wte.to("cpu")
672
+ if self.use_absolute_position_embedding:
673
+ self.wpe = self.wpe.to("cpu")
674
+ for index in range(len(self.h)):
675
+ self.h[index] = self.h[index].to("cpu")
676
+ self.ln_f = self.ln_f.to("cpu")
677
+ torch.cuda.empty_cache()
678
+
679
+ def get_input_embeddings(self):
680
+ return self.wte
681
+
682
+ def set_input_embeddings(self, new_embeddings):
683
+ self.wte = new_embeddings
684
+
685
+ def _prune_heads(self, heads_to_prune):
686
+ """
687
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
688
+ """
689
+ for layer, heads in heads_to_prune.items():
690
+ self.h[layer].attn.prune_heads(heads)
691
+
692
+ @add_start_docstrings_to_model_forward(MIDM_INPUTS_DOCSTRING)
693
+ @add_code_sample_docstrings(
694
+ processor_class=_TOKENIZER_FOR_DOC,
695
+ checkpoint=_CHECKPOINT_FOR_DOC,
696
+ output_type=BaseModelOutputWithPastAndCrossAttentions,
697
+ config_class=_CONFIG_FOR_DOC,
698
+ )
699
+ def forward(
700
+ self,
701
+ input_ids=None,
702
+ past_key_values=None,
703
+ attention_mask=None,
704
+ token_type_ids=None,
705
+ position_ids=None,
706
+ head_mask=None,
707
+ inputs_embeds=None,
708
+ encoder_hidden_states=None,
709
+ encoder_attention_mask=None,
710
+ use_cache=None,
711
+ output_attentions=None,
712
+ output_hidden_states=None,
713
+ return_dict=None,
714
+ ):
715
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
716
+ output_hidden_states = (
717
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
718
+ )
719
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
720
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
721
+
722
+ if input_ids is not None and inputs_embeds is not None:
723
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
724
+ elif input_ids is not None:
725
+ input_shape = input_ids.size()
726
+ input_ids = input_ids.view(-1, input_shape[-1])
727
+ batch_size = input_ids.shape[0]
728
+ elif inputs_embeds is not None:
729
+ input_shape = inputs_embeds.size()[:-1]
730
+ batch_size = inputs_embeds.shape[0]
731
+ else:
732
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
733
+
734
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
735
+
736
+ if token_type_ids is not None:
737
+ token_type_ids = token_type_ids.view(-1, input_shape[-1])
738
+ if position_ids is not None:
739
+ position_ids = position_ids.view(-1, input_shape[-1])
740
+
741
+ if past_key_values is None:
742
+ past_length = 0
743
+ past_key_values = tuple([None] * len(self.h))
744
+ else:
745
+ past_length = past_key_values[0][0].size(-2)
746
+ if position_ids is None:
747
+ position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
748
+ position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
749
+
750
+ # MidmAttention mask.
751
+ if attention_mask is not None:
752
+ if batch_size <= 0:
753
+ raise ValueError("batch_size has to be defined and > 0")
754
+ attention_mask = attention_mask.view(batch_size, -1)
755
+ # We create a 3D attention mask from a 2D tensor mask.
756
+ # Sizes are [batch_size, 1, 1, to_seq_length]
757
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
758
+ # this attention mask is simpler than the triangular masking of causal attention
759
+ # used in KT Midm, we just need to prepare the broadcast dimension here.
760
+ attention_mask = attention_mask[:, None, None, :]
761
+
762
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
763
+ # masked positions, this operation will create a tensor which is 0.0 for
764
+ # positions we want to attend and -10000.0 for masked positions.
765
+ # Since we are adding it to the raw scores before the softmax, this is
766
+ # effectively the same as removing these entirely.
767
+ attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
768
+ attention_mask = (1.0 - attention_mask) * -10000.0
769
+
770
+ # If a 2D or 3D attention mask is provided for the cross-attention
771
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
772
+ if self.config.add_cross_attention and encoder_hidden_states is not None:
773
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
774
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
775
+ if encoder_attention_mask is None:
776
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
777
+ encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
778
+ else:
779
+ encoder_attention_mask = None
780
+
781
+ rotary_pos_emb = None
782
+ if self.use_rotary_position_embedding:
783
+ rotary_pos_emb = self.rotary_pos_emb(past_length + input_shape[-1])
784
+
785
+ # Prepare head mask if needed
786
+ # 1.0 in head_mask indicate we keep the head
787
+ # attention_probs has shape bsz x n_heads x N x N
788
+ # head_mask has shape n_layer x batch x n_heads x N x N
789
+ head_mask = self.get_head_mask(head_mask, self.config.n_layer)
790
+
791
+ if inputs_embeds is None:
792
+ inputs_embeds = self.wte(input_ids)
793
+ if self.use_absolute_position_embedding:
794
+ position_embeds = self.wpe(position_ids)
795
+ hidden_states = inputs_embeds + position_embeds
796
+ else:
797
+ hidden_states = inputs_embeds
798
+
799
+ if token_type_ids is not None:
800
+ token_type_embeds = self.wte(token_type_ids)
801
+ hidden_states = hidden_states + token_type_embeds
802
+
803
+ hidden_states = self.drop(hidden_states)
804
+
805
+ output_shape = input_shape + (hidden_states.size(-1),)
806
+
807
+ presents = () if use_cache else None
808
+ all_self_attentions = () if output_attentions else None
809
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
810
+ all_hidden_states = () if output_hidden_states else None
811
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
812
+
813
+ # Model parallel
814
+ if self.model_parallel:
815
+ torch.cuda.set_device(hidden_states.device)
816
+ # Ensure layer_past is on same device as hidden_states (might not be correct)
817
+ if layer_past is not None:
818
+ layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)
819
+ # Ensure that attention_mask is always on the same device as hidden_states
820
+ if attention_mask is not None:
821
+ attention_mask = attention_mask.to(hidden_states.device)
822
+ if isinstance(head_mask, torch.Tensor):
823
+ head_mask = head_mask.to(hidden_states.device)
824
+ if output_hidden_states:
825
+ all_hidden_states = all_hidden_states + (hidden_states,)
826
+
827
+ if self.gradient_checkpointing and self.training:
828
+
829
+ if use_cache:
830
+ logger.warning(
831
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
832
+ )
833
+ use_cache = False
834
+
835
+ def create_custom_forward(module):
836
+ def custom_forward(*inputs):
837
+ # None for past_key_value
838
+ return module(*inputs, use_cache, output_attentions)
839
+
840
+ return custom_forward
841
+
842
+ outputs = torch.utils.checkpoint.checkpoint(
843
+ create_custom_forward(block),
844
+ hidden_states,
845
+ None,
846
+ attention_mask,
847
+ head_mask[i],
848
+ encoder_hidden_states,
849
+ encoder_attention_mask,
850
+ rotary_pos_emb=rotary_pos_emb,
851
+ )
852
+ else:
853
+ outputs = block(
854
+ hidden_states,
855
+ layer_past=layer_past,
856
+ attention_mask=attention_mask,
857
+ head_mask=head_mask[i],
858
+ encoder_hidden_states=encoder_hidden_states,
859
+ encoder_attention_mask=encoder_attention_mask,
860
+ use_cache=use_cache,
861
+ output_attentions=output_attentions,
862
+ rotary_pos_emb=rotary_pos_emb,
863
+ )
864
+
865
+ hidden_states = outputs[0]
866
+ if use_cache is True:
867
+ presents = presents + (outputs[1],)
868
+
869
+ if output_attentions:
870
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
871
+ if self.config.add_cross_attention:
872
+ all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)
873
+
874
+ # Model Parallel: If it's the last layer for that device, put things on the next device
875
+ if self.model_parallel:
876
+ for k, v in self.device_map.items():
877
+ if i == v[-1] and "cuda:" + str(k) != self.last_device:
878
+ hidden_states = hidden_states.to("cuda:" + str(k + 1))
879
+
880
+ if self.use_layernorm1p:
881
+ hidden_states = layernorm1p(self.ln_f, hidden_states)
882
+ else:
883
+ hidden_states = self.ln_f(hidden_states)
884
+
885
+ hidden_states = hidden_states.view(*output_shape)
886
+ # Add last hidden state
887
+ if output_hidden_states:
888
+ all_hidden_states = all_hidden_states + (hidden_states,)
889
+
890
+ if not return_dict:
891
+ return tuple(
892
+ v
893
+ for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions]
894
+ if v is not None
895
+ )
896
+
897
+ return BaseModelOutputWithPastAndCrossAttentions(
898
+ last_hidden_state=hidden_states,
899
+ past_key_values=presents,
900
+ hidden_states=all_hidden_states,
901
+ attentions=all_self_attentions,
902
+ cross_attentions=all_cross_attentions,
903
+ )
904
+
905
+
906
+ @add_start_docstrings(
907
+ """
908
+ The Midm Model transformer with a language modeling head on top (linear layer with weights tied to the input
909
+ embeddings).
910
+ """,
911
+ MIDM_START_DOCSTRING,
912
+ )
913
+ class MidmLMHeadModel(MidmPreTrainedModel):
914
+ _keys_to_ignore_on_load_missing = [r"attn.masked_bias", r"attn.bias", r"lm_head.weight"]
915
+
916
+ def __init__(self, config):
917
+ super().__init__(config)
918
+ self.transformer = MidmModel(config)
919
+ self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
920
+
921
+ self.init_weights()
922
+
923
+ # Model parallel
924
+ self.model_parallel = False
925
+ self.device_map = None
926
+
927
+ @add_start_docstrings(PARALLELIZE_DOCSTRING)
928
+ def parallelize(self, device_map=None):
929
+ self.device_map = (
930
+ get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))
931
+ if device_map is None
932
+ else device_map
933
+ )
934
+ assert_device_map(self.device_map, len(self.transformer.h))
935
+ self.transformer.parallelize(self.device_map)
936
+ self.lm_head = self.lm_head.to(self.transformer.first_device)
937
+ self.model_parallel = True
938
+
939
+ @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
940
+ def deparallelize(self):
941
+ self.transformer.deparallelize()
942
+ self.transformer = self.transformer.to("cpu")
943
+ self.lm_head = self.lm_head.to("cpu")
944
+ self.model_parallel = False
945
+ torch.cuda.empty_cache()
946
+
947
+ def get_output_embeddings(self):
948
+ return self.lm_head
949
+
950
+ def set_output_embeddings(self, new_embeddings):
951
+ self.lm_head = new_embeddings
952
+
953
+ def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
954
+ token_type_ids = kwargs.get("token_type_ids", None)
955
+ # only use the last token of input_ids if past is defined in kwargs
956
+ if past:
957
+ input_ids = input_ids[:, -1].unsqueeze(-1)
958
+ if token_type_ids is not None:
959
+ token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
960
+
961
+ attention_mask = kwargs.get("attention_mask", None)
962
+ position_ids = kwargs.get("position_ids", None)
963
+
964
+ if attention_mask is not None and position_ids is None:
965
+ # create position_ids on the fly for batch generation
966
+ position_ids = attention_mask.long().cumsum(-1) - 1
967
+ position_ids.masked_fill_(attention_mask == 0, 1)
968
+ if past:
969
+ position_ids = position_ids[:, -1].unsqueeze(-1)
970
+ else:
971
+ position_ids = None
972
+ return {
973
+ "input_ids": input_ids,
974
+ "past_key_values": past,
975
+ "use_cache": kwargs.get("use_cache"),
976
+ "position_ids": position_ids,
977
+ "attention_mask": attention_mask,
978
+ "token_type_ids": token_type_ids,
979
+ }
980
+
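The on-the-fly position ids above are a cumulative sum over the attention mask, shifted by one, with padded positions clamped to 1. A small worked example (illustrative values only):

import torch

attention_mask = torch.tensor([[0, 0, 1, 1, 1]])      # one left-padded row
position_ids = attention_mask.long().cumsum(-1) - 1   # tensor([[-1, -1, 0, 1, 2]])
position_ids.masked_fill_(attention_mask == 0, 1)     # tensor([[ 1,  1, 0, 1, 2]])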
981
+ @add_start_docstrings_to_model_forward(MIDM_INPUTS_DOCSTRING)
982
+ @add_code_sample_docstrings(
983
+ processor_class=_TOKENIZER_FOR_DOC,
984
+ checkpoint=_CHECKPOINT_FOR_DOC,
985
+ output_type=CausalLMOutputWithCrossAttentions,
986
+ config_class=_CONFIG_FOR_DOC,
987
+ )
988
+ def forward(
989
+ self,
990
+ input_ids=None,
991
+ past_key_values=None,
992
+ attention_mask=None,
993
+ token_type_ids=None,
994
+ position_ids=None,
995
+ head_mask=None,
996
+ inputs_embeds=None,
997
+ encoder_hidden_states=None,
998
+ encoder_attention_mask=None,
999
+ labels=None,
1000
+ use_cache=None,
1001
+ output_attentions=None,
1002
+ output_hidden_states=None,
1003
+ return_dict=None,
1004
+ ):
1005
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1006
+
1007
+ transformer_outputs = self.transformer(
1008
+ input_ids,
1009
+ past_key_values=past_key_values,
1010
+ attention_mask=attention_mask,
1011
+ token_type_ids=token_type_ids,
1012
+ position_ids=position_ids,
1013
+ head_mask=head_mask,
1014
+ inputs_embeds=inputs_embeds,
1015
+ encoder_hidden_states=encoder_hidden_states,
1016
+ encoder_attention_mask=encoder_attention_mask,
1017
+ use_cache=use_cache,
1018
+ output_attentions=output_attentions,
1019
+ output_hidden_states=output_hidden_states,
1020
+ return_dict=return_dict,
1021
+ )
1022
+ hidden_states = transformer_outputs[0]
1023
+
1024
+ # Set device for model parallelism
1025
+ if self.model_parallel:
1026
+ torch.cuda.set_device(self.transformer.first_device)
1027
+ hidden_states = hidden_states.to(self.lm_head.weight.device)
1028
+
1029
+ lm_logits = self.lm_head(hidden_states)
1030
+
1031
+ loss = None
1032
+ if labels is not None:
1033
+ # Shift so that tokens < n predict n
1034
+ shift_logits = lm_logits[..., :-1, :].contiguous()
1035
+ shift_labels = labels[..., 1:].contiguous()
1036
+ # Flatten the tokens
1037
+ loss_fct = CrossEntropyLoss()
1038
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
1039
+
1040
+ if not return_dict:
1041
+ output = (lm_logits,) + transformer_outputs[1:]
1042
+ return ((loss,) + output) if loss is not None else output
1043
+
1044
+ return CausalLMOutputWithCrossAttentions(
1045
+ loss=loss,
1046
+ logits=lm_logits,
1047
+ past_key_values=transformer_outputs.past_key_values,
1048
+ hidden_states=transformer_outputs.hidden_states,
1049
+ attentions=transformer_outputs.attentions,
1050
+ cross_attentions=transformer_outputs.cross_attentions,
1051
+ )
1052
+
1053
+ @staticmethod
1054
+ def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]:
1055
+ """
1056
+ This function is used to re-order the :obj:`past_key_values` cache if
1057
+ :meth:`~transformers.PreTrainedModel.beam_search` or :meth:`~transformers.PreTrainedModel.beam_sample` is
1058
+ called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step.
1059
+ """
1060
+ return tuple(
1061
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
1062
+ for layer_past in past
1063
+ )
1064
+
1065
+
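As a usage sketch for the LM head model: the repository id below is a placeholder, and loading assumes the repository's `auto_map` entries (shown in config.json) together with `trust_remote_code=True`; it is not an official snippet from this upload.

from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "your-org/midm-bitext-S"   # hypothetical repository id
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(repo, trust_remote_code=True)

inputs = tokenizer("Hello, Mi:dm.", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32, use_cache=True)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))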
1066
+ @add_start_docstrings(
1067
+ """
1068
+ The Midm Model transformer with a language modeling head and a multiple-choice classification head on top, e.g. for
1069
+ RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the
1070
+ input embeddings; the classification head takes as input the hidden state at a specified classification token index in
1071
+ the input sequence.
1072
+ """,
1073
+ MIDM_START_DOCSTRING,
1074
+ )
1075
+ class MidmDoubleHeadsModel(MidmPreTrainedModel):
1076
+ _keys_to_ignore_on_load_missing = [r"attn.masked_bias", r"attn.bias", r"lm_head.weight"]
1077
+
1078
+ def __init__(self, config):
1079
+ super().__init__(config)
1080
+ config.num_labels = 1
1081
+ self.transformer = MidmModel(config)
1082
+ self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
1083
+ self.multiple_choice_head = SequenceSummary(config)
1084
+
1085
+ self.init_weights()
1086
+
1087
+ # Model parallel
1088
+ self.model_parallel = False
1089
+ self.device_map = None
1090
+
1091
+ @add_start_docstrings(PARALLELIZE_DOCSTRING)
1092
+ def parallelize(self, device_map=None):
1093
+ self.device_map = (
1094
+ get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))
1095
+ if device_map is None
1096
+ else device_map
1097
+ )
1098
+ assert_device_map(self.device_map, len(self.transformer.h))
1099
+ self.transformer.parallelize(self.device_map)
1100
+ self.lm_head = self.lm_head.to(self.transformer.first_device)
1101
+ self.multiple_choice_head = self.multiple_choice_head.to(self.transformer.first_device)
1102
+ self.model_parallel = True
1103
+
1104
+ @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
1105
+ def deparallelize(self):
1106
+ self.transformer.deparallelize()
1107
+ self.transformer = self.transformer.to("cpu")
1108
+ self.lm_head = self.lm_head.to("cpu")
1109
+ self.multiple_choice_head = self.multiple_choice_head.to("cpu")
1110
+ self.model_parallel = False
1111
+ torch.cuda.empty_cache()
1112
+
1113
+ def get_output_embeddings(self):
1114
+ return self.lm_head
1115
+
1116
+ def set_output_embeddings(self, new_embeddings):
1117
+ self.lm_head = new_embeddings
1118
+
1119
+ def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
1120
+ token_type_ids = kwargs.get("token_type_ids", None)
1121
+ # only use the last token of input_ids if past is defined in kwargs
1122
+ if past:
1123
+ input_ids = input_ids[:, -1].unsqueeze(-1)
1124
+ if token_type_ids is not None:
1125
+ token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
1126
+
1127
+ attention_mask = kwargs.get("attention_mask", None)
1128
+ position_ids = kwargs.get("position_ids", None)
1129
+
1130
+ if attention_mask is not None and position_ids is None:
1131
+ # create position_ids on the fly for batch generation
1132
+ position_ids = attention_mask.long().cumsum(-1) - 1
1133
+ position_ids.masked_fill_(attention_mask == 0, 1)
1134
+ if past:
1135
+ position_ids = position_ids[:, -1].unsqueeze(-1)
1136
+ else:
1137
+ position_ids = None
1138
+
1139
+ return {
1140
+ "input_ids": input_ids,
1141
+ "past_key_values": past,
1142
+ "use_cache": kwargs.get("use_cache"),
1143
+ "position_ids": position_ids,
1144
+ "attention_mask": attention_mask,
1145
+ "token_type_ids": token_type_ids,
1146
+ }
1147
+
1148
+ @add_start_docstrings_to_model_forward(MIDM_INPUTS_DOCSTRING)
1149
+ def forward(
1150
+ self,
1151
+ input_ids=None,
1152
+ past_key_values=None,
1153
+ attention_mask=None,
1154
+ token_type_ids=None,
1155
+ position_ids=None,
1156
+ head_mask=None,
1157
+ inputs_embeds=None,
1158
+ mc_token_ids=None,
1159
+ labels=None,
1160
+ mc_labels=None,
1161
+ use_cache=None,
1162
+ output_attentions=None,
1163
+ output_hidden_states=None,
1164
+ return_dict=None,
1165
+ **kwargs,
1166
+ ):
1167
+
1168
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1169
+
1170
+ transformer_outputs = self.transformer(
1171
+ input_ids,
1172
+ past_key_values=past_key_values,
1173
+ attention_mask=attention_mask,
1174
+ token_type_ids=token_type_ids,
1175
+ position_ids=position_ids,
1176
+ head_mask=head_mask,
1177
+ inputs_embeds=inputs_embeds,
1178
+ use_cache=use_cache,
1179
+ output_attentions=output_attentions,
1180
+ output_hidden_states=output_hidden_states,
1181
+ return_dict=return_dict,
1182
+ )
1183
+
1184
+ hidden_states = transformer_outputs[0]
1185
+
1186
+ # Set device for model parallelism
1187
+ if self.model_parallel:
1188
+ torch.cuda.set_device(self.transformer.first_device)
1189
+ hidden_states = hidden_states.to(self.lm_head.weight.device)
1190
+
1191
+ lm_logits = self.lm_head(hidden_states)
1192
+ mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)
1193
+
1194
+ mc_loss = None
1195
+ if mc_labels is not None:
1196
+ loss_fct = CrossEntropyLoss()
1197
+ mc_loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1))
1198
+ lm_loss = None
1199
+ if labels is not None:
1200
+ shift_logits = lm_logits[..., :-1, :].contiguous()
1201
+ shift_labels = labels[..., 1:].contiguous()
1202
+ loss_fct = CrossEntropyLoss()
1203
+ lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
1204
+
1205
+ if not return_dict:
1206
+ output = (lm_logits, mc_logits) + transformer_outputs[1:]
1207
+ if mc_loss is not None:
1208
+ output = (mc_loss,) + output
1209
+ return ((lm_loss,) + output) if lm_loss is not None else output
1210
+
1211
+ return MidmDoubleHeadsModelOutput(
1212
+ loss=lm_loss,
1213
+ mc_loss=mc_loss,
1214
+ logits=lm_logits,
1215
+ mc_logits=mc_logits,
1216
+ past_key_values=transformer_outputs.past_key_values,
1217
+ hidden_states=transformer_outputs.hidden_states,
1218
+ attentions=transformer_outputs.attentions,
1219
+ )
1220
+
1221
+ @staticmethod
1222
+ def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]:
1223
+ return tuple(
1224
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
1225
+ for layer_past in past
1226
+ )
1227
+
1228
+
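A hedged sketch of calling the double-heads model: the tiny config values, module import paths, and tensor shapes below are assumptions for illustration only, not settings from this checkpoint.

import torch
from configuration_midm import MidmBitextConfig   # assumes the repo files are importable
from modeling_midm import MidmDoubleHeadsModel

# Small illustrative config so the example runs quickly on CPU.
config = MidmBitextConfig(n_layer=2, n_head=2, n_embd=64, n_inner=128, vocab_size=100)
model = MidmDoubleHeadsModel(config)

input_ids = torch.randint(0, 100, (1, 2, 5))   # (batch, num_choices, seq_len)
mc_token_ids = torch.tensor([[4, 4]])          # classification token index per choice
outputs = model(input_ids=input_ids, mc_token_ids=mc_token_ids)
print(outputs.logits.shape, outputs.mc_logits.shape)   # (1, 2, 5, 100) and (1, 2)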
1229
+ @add_start_docstrings(
1230
+ """
1231
+ The Midm Model transformer with a sequence classification head on top (linear layer).
1232
+
1233
+ :class:`~transformers.MidmForSequenceClassification` uses the last token in order to do the classification, as
1234
+ other causal models do.
1235
+
1236
+ Since it does classification on the last token, it needs to know the position of the last token. If a
1237
+ :obj:`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each
1238
+ row. If no :obj:`pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot
1239
+ guess the padding tokens when :obj:`inputs_embeds` are passed instead of :obj:`input_ids`, it does the same (take
1240
+ the last value in each row of the batch).
1241
+ """,
1242
+ MIDM_START_DOCSTRING,
1243
+ )
1244
+ class MidmForSequenceClassification(MidmPreTrainedModel):
1245
+ _keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.masked_bias", r"lm_head\.weight"]
1246
+
1247
+ def __init__(self, config):
1248
+ super().__init__(config)
1249
+ self.num_labels = config.num_labels
1250
+ self.transformer = MidmModel(config)
1251
+ self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
1252
+
1253
+ self.init_weights()
1254
+
1255
+ # Model parallel
1256
+ self.model_parallel = False
1257
+ self.device_map = None
1258
+
1259
+ @add_start_docstrings_to_model_forward(MIDM_INPUTS_DOCSTRING)
1260
+ def forward(
1261
+ self,
1262
+ input_ids=None,
1263
+ past_key_values=None,
1264
+ attention_mask=None,
1265
+ token_type_ids=None,
1266
+ position_ids=None,
1267
+ head_mask=None,
1268
+ inputs_embeds=None,
1269
+ labels=None,
1270
+ use_cache=None,
1271
+ output_attentions=None,
1272
+ output_hidden_states=None,
1273
+ return_dict=None,
1274
+ ):
1275
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1276
+
1277
+ transformer_outputs = self.transformer(
1278
+ input_ids,
1279
+ past_key_values=past_key_values,
1280
+ attention_mask=attention_mask,
1281
+ token_type_ids=token_type_ids,
1282
+ position_ids=position_ids,
1283
+ head_mask=head_mask,
1284
+ inputs_embeds=inputs_embeds,
1285
+ use_cache=use_cache,
1286
+ output_attentions=output_attentions,
1287
+ output_hidden_states=output_hidden_states,
1288
+ return_dict=return_dict,
1289
+ )
1290
+ hidden_states = transformer_outputs[0]
1291
+ logits = self.score(hidden_states)
1292
+
1293
+ if input_ids is not None:
1294
+ batch_size, sequence_length = input_ids.shape[:2]
1295
+ else:
1296
+ batch_size, sequence_length = inputs_embeds.shape[:2]
1297
+
1298
+ assert (
1299
+ self.config.pad_token_id is not None or batch_size == 1
1300
+ ), "Cannot handle batch sizes > 1 if no padding token is defined."
1301
+ if self.config.pad_token_id is None:
1302
+ sequence_lengths = -1
1303
+ else:
1304
+ if input_ids is not None:
1305
+ sequence_lengths = torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1
1306
+ else:
1307
+ sequence_lengths = -1
1308
+ logger.warning(
1309
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
1310
+ f"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
1311
+ )
1312
+
1313
+ pooled_logits = logits[range(batch_size), sequence_lengths]
1314
+
1315
+ loss = None
1316
+ if labels is not None:
1317
+ if self.num_labels == 1:
1318
+ # We are doing regression
1319
+ loss_fct = MSELoss()
1320
+ loss = loss_fct(pooled_logits.view(-1), labels.to(self.dtype).view(-1))
1321
+ else:
1322
+ loss_fct = CrossEntropyLoss()
1323
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1324
+
1325
+ if not return_dict:
1326
+ output = (pooled_logits,) + transformer_outputs[1:]
1327
+ return ((loss,) + output) if loss is not None else output
1328
+
1329
+ return SequenceClassifierOutputWithPast(
1330
+ loss=loss,
1331
+ logits=pooled_logits,
1332
+ past_key_values=transformer_outputs.past_key_values,
1333
+ hidden_states=transformer_outputs.hidden_states,
1334
+ attentions=transformer_outputs.attentions,
1335
+ )
1336
+
1337
+
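The last-non-padding-token pooling described in the docstring reduces to indexing each row at count(non-pad) - 1. A worked example with right padding and a pad_token_id of 1 (matching config.json):

import torch

pad_token_id = 1
input_ids = torch.tensor([[5, 6, 7, 1, 1],
                          [5, 6, 7, 8, 9]])
sequence_lengths = torch.ne(input_ids, pad_token_id).sum(-1) - 1   # tensor([2, 4])

logits = torch.randn(2, 5, 3)                  # (batch, seq_len, num_labels)
pooled = logits[range(2), sequence_lengths]    # last non-pad position of each row
print(pooled.shape)                            # torch.Size([2, 3])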
1338
+ @add_start_docstrings(
1339
+ """
1340
+ Midm Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1341
+ Named-Entity-Recognition (NER) tasks.
1342
+ """,
1343
+ MIDM_START_DOCSTRING,
1344
+ )
1345
+ class MidmForTokenClassification(MidmPreTrainedModel):
1346
+ def __init__(self, config):
1347
+ super().__init__(config)
1348
+ self.num_labels = config.num_labels
1349
+
1350
+ self.transformer = MidmModel(config)
1351
+ if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
1352
+ classifier_dropout = config.classifier_dropout
1353
+ elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None:
1354
+ classifier_dropout = config.hidden_dropout
1355
+ else:
1356
+ classifier_dropout = 0.1
1357
+ self.dropout = nn.Dropout(classifier_dropout)
1358
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1359
+
1360
+ self.init_weights()
1361
+
1362
+ # Model parallel
1363
+ self.model_parallel = False
1364
+ self.device_map = None
1365
+
1366
+ @add_start_docstrings_to_model_forward(MIDM_INPUTS_DOCSTRING)
1367
+ def forward(
1368
+ self,
1369
+ input_ids=None,
1370
+ past_key_values=None,
1371
+ attention_mask=None,
1372
+ token_type_ids=None,
1373
+ position_ids=None,
1374
+ head_mask=None,
1375
+ inputs_embeds=None,
1376
+ labels=None,
1377
+ use_cache=None,
1378
+ output_attentions=None,
1379
+ output_hidden_states=None,
1380
+ return_dict=None,
1381
+ ):
1382
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1383
+
1384
+ transformer_outputs = self.transformer(
1385
+ input_ids,
1386
+ past_key_values=past_key_values,
1387
+ attention_mask=attention_mask,
1388
+ token_type_ids=token_type_ids,
1389
+ position_ids=position_ids,
1390
+ head_mask=head_mask,
1391
+ inputs_embeds=inputs_embeds,
1392
+ use_cache=use_cache,
1393
+ output_attentions=output_attentions,
1394
+ output_hidden_states=output_hidden_states,
1395
+ return_dict=return_dict,
1396
+ )
1397
+
1398
+ hidden_states = transformer_outputs[0]
1399
+ hidden_states = self.dropout(hidden_states)
1400
+ logits = self.classifier(hidden_states)
1401
+
1402
+ loss = None
1403
+ if labels is not None:
1404
+ loss_fct = CrossEntropyLoss()
1405
+ # Only keep active parts of the loss
1406
+ if attention_mask is not None:
1407
+ active_loss = attention_mask.view(-1) == 1
1408
+ active_logits = logits.view(-1, self.num_labels)
1409
+ active_labels = torch.where(
1410
+ active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
1411
+ )
1412
+ loss = loss_fct(active_logits, active_labels)
1413
+ else:
1414
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1415
+
1416
+ if not return_dict:
1417
+ output = (logits,) + transformer_outputs[2:]
1418
+ return ((loss,) + output) if loss is not None else output
1419
+
1420
+ return TokenClassifierOutput(
1421
+ loss=loss,
1422
+ logits=logits,
1423
+ hidden_states=transformer_outputs.hidden_states,
1424
+ attentions=transformer_outputs.attentions,
1425
+ )
1426
+
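The token-classification loss above drops padded positions by rewriting their labels to ignore_index. A self-contained illustration of that masking step:

import torch
from torch.nn import CrossEntropyLoss

num_labels = 3
logits = torch.randn(2, 4, num_labels)
labels = torch.tensor([[0, 1, 2, 2],
                       [1, 0, 2, 1]])
attention_mask = torch.tensor([[1, 1, 1, 0],
                               [1, 1, 0, 0]])

loss_fct = CrossEntropyLoss()
active = attention_mask.view(-1) == 1
active_labels = torch.where(
    active, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(logits.view(-1, num_labels), active_labels)   # padded positions ignored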
1427
+ def get_submodule(module, target: str) -> "Module":
1428
+ if target == "":
1429
+ return module
1430
+
1431
+ atoms: List[str] = target.split(".")
1432
+ mod: torch.nn.Module = module
1433
+
1434
+ for item in atoms:
1435
+
1436
+ if not hasattr(mod, item):
1437
+ raise AttributeError(mod._get_name() + " has no "
1438
+ "attribute `" + item + "`")
1439
+
1440
+ mod = getattr(mod, item)
1441
+
1442
+ if not isinstance(mod, torch.nn.Module):
1443
+ raise AttributeError("`" + item + "` is not "
1444
+ "an nn.Module")
1445
+
1446
+ return mod
1447
+
1448
+
1449
+ def get_parameter(module, target: str) -> "Parameter":
1450
+ module_path, _, param_name = target.rpartition(".")
1451
+
1452
+ mod: torch.nn.Module = get_submodule(module, module_path)
1453
+
1454
+ if not hasattr(mod, param_name):
1455
+ raise AttributeError(mod._get_name() + " has no attribute `"
1456
+ + param_name + "`")
1457
+
1458
+ param: torch.nn.Parameter = getattr(mod, param_name)
1459
+
1460
+ if not isinstance(param, torch.nn.Parameter):
1461
+ raise AttributeError("`" + param_name + "` is not an "
1462
+ "nn.Parameter")
1463
+
1464
+ return param
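These two helpers mirror `torch.nn.Module.get_submodule` / `get_parameter` (available in newer PyTorch releases) by walking a dotted attribute path. A short usage example:

import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))

block = get_submodule(model, "2")            # the second Linear layer
weight = get_parameter(model, "2.weight")    # its weight Parameter, shape (2, 8)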
pytorch_model.bin.index.json ADDED
@@ -0,0 +1,460 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 16185393280
4
+ },
5
+ "weight_map": {
6
+ "lm_head.weight": "pytorch_model-00002-of-00002.bin",
7
+ "transformer.h.0.attn.bias": "pytorch_model-00001-of-00002.bin",
8
+ "transformer.h.0.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
9
+ "transformer.h.0.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
10
+ "transformer.h.0.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
11
+ "transformer.h.0.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
12
+ "transformer.h.0.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
13
+ "transformer.h.0.ln_1.bias": "pytorch_model-00001-of-00002.bin",
14
+ "transformer.h.0.ln_1.weight": "pytorch_model-00001-of-00002.bin",
15
+ "transformer.h.0.ln_2.bias": "pytorch_model-00001-of-00002.bin",
16
+ "transformer.h.0.ln_2.weight": "pytorch_model-00001-of-00002.bin",
17
+ "transformer.h.0.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
18
+ "transformer.h.0.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
19
+ "transformer.h.0.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
20
+ "transformer.h.0.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
21
+ "transformer.h.1.attn.bias": "pytorch_model-00001-of-00002.bin",
22
+ "transformer.h.1.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
23
+ "transformer.h.1.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
24
+ "transformer.h.1.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
25
+ "transformer.h.1.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
26
+ "transformer.h.1.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
27
+ "transformer.h.1.ln_1.bias": "pytorch_model-00001-of-00002.bin",
28
+ "transformer.h.1.ln_1.weight": "pytorch_model-00001-of-00002.bin",
29
+ "transformer.h.1.ln_2.bias": "pytorch_model-00001-of-00002.bin",
30
+ "transformer.h.1.ln_2.weight": "pytorch_model-00001-of-00002.bin",
31
+ "transformer.h.1.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
32
+ "transformer.h.1.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
33
+ "transformer.h.1.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
34
+ "transformer.h.1.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
35
+ "transformer.h.10.attn.bias": "pytorch_model-00001-of-00002.bin",
36
+ "transformer.h.10.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
37
+ "transformer.h.10.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
38
+ "transformer.h.10.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
39
+ "transformer.h.10.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
40
+ "transformer.h.10.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
41
+ "transformer.h.10.ln_1.bias": "pytorch_model-00001-of-00002.bin",
42
+ "transformer.h.10.ln_1.weight": "pytorch_model-00001-of-00002.bin",
43
+ "transformer.h.10.ln_2.bias": "pytorch_model-00001-of-00002.bin",
44
+ "transformer.h.10.ln_2.weight": "pytorch_model-00001-of-00002.bin",
45
+ "transformer.h.10.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
46
+ "transformer.h.10.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
47
+ "transformer.h.10.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
48
+ "transformer.h.10.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
49
+ "transformer.h.11.attn.bias": "pytorch_model-00001-of-00002.bin",
50
+ "transformer.h.11.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
51
+ "transformer.h.11.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
52
+ "transformer.h.11.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
53
+ "transformer.h.11.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
54
+ "transformer.h.11.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
55
+ "transformer.h.11.ln_1.bias": "pytorch_model-00001-of-00002.bin",
56
+ "transformer.h.11.ln_1.weight": "pytorch_model-00001-of-00002.bin",
57
+ "transformer.h.11.ln_2.bias": "pytorch_model-00001-of-00002.bin",
58
+ "transformer.h.11.ln_2.weight": "pytorch_model-00001-of-00002.bin",
59
+ "transformer.h.11.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
60
+ "transformer.h.11.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
61
+ "transformer.h.11.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
62
+ "transformer.h.11.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
63
+ "transformer.h.12.attn.bias": "pytorch_model-00001-of-00002.bin",
64
+ "transformer.h.12.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
65
+ "transformer.h.12.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
66
+ "transformer.h.12.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
67
+ "transformer.h.12.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
68
+ "transformer.h.12.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
69
+ "transformer.h.12.ln_1.bias": "pytorch_model-00001-of-00002.bin",
70
+ "transformer.h.12.ln_1.weight": "pytorch_model-00001-of-00002.bin",
71
+ "transformer.h.12.ln_2.bias": "pytorch_model-00001-of-00002.bin",
72
+ "transformer.h.12.ln_2.weight": "pytorch_model-00001-of-00002.bin",
73
+ "transformer.h.12.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
74
+ "transformer.h.12.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
75
+ "transformer.h.12.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
76
+ "transformer.h.12.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
77
+ "transformer.h.13.attn.bias": "pytorch_model-00001-of-00002.bin",
78
+ "transformer.h.13.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
79
+ "transformer.h.13.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
80
+ "transformer.h.13.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
81
+ "transformer.h.13.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
82
+ "transformer.h.13.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
83
+ "transformer.h.13.ln_1.bias": "pytorch_model-00001-of-00002.bin",
84
+ "transformer.h.13.ln_1.weight": "pytorch_model-00001-of-00002.bin",
85
+ "transformer.h.13.ln_2.bias": "pytorch_model-00001-of-00002.bin",
86
+ "transformer.h.13.ln_2.weight": "pytorch_model-00001-of-00002.bin",
87
+ "transformer.h.13.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
88
+ "transformer.h.13.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
89
+ "transformer.h.13.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
90
+ "transformer.h.13.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
91
+ "transformer.h.14.attn.bias": "pytorch_model-00001-of-00002.bin",
92
+ "transformer.h.14.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
93
+ "transformer.h.14.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
94
+ "transformer.h.14.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
95
+ "transformer.h.14.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
96
+ "transformer.h.14.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
97
+ "transformer.h.14.ln_1.bias": "pytorch_model-00001-of-00002.bin",
98
+ "transformer.h.14.ln_1.weight": "pytorch_model-00001-of-00002.bin",
99
+ "transformer.h.14.ln_2.bias": "pytorch_model-00001-of-00002.bin",
100
+ "transformer.h.14.ln_2.weight": "pytorch_model-00001-of-00002.bin",
101
+ "transformer.h.14.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
102
+ "transformer.h.14.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
103
+ "transformer.h.14.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
104
+ "transformer.h.14.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
105
+ "transformer.h.15.attn.bias": "pytorch_model-00001-of-00002.bin",
106
+ "transformer.h.15.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
107
+ "transformer.h.15.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
108
+ "transformer.h.15.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
109
+ "transformer.h.15.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
110
+ "transformer.h.15.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
111
+ "transformer.h.15.ln_1.bias": "pytorch_model-00001-of-00002.bin",
112
+ "transformer.h.15.ln_1.weight": "pytorch_model-00001-of-00002.bin",
113
+ "transformer.h.15.ln_2.bias": "pytorch_model-00001-of-00002.bin",
114
+ "transformer.h.15.ln_2.weight": "pytorch_model-00001-of-00002.bin",
115
+ "transformer.h.15.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
116
+ "transformer.h.15.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
117
+ "transformer.h.15.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
118
+ "transformer.h.15.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
119
+ "transformer.h.16.attn.bias": "pytorch_model-00001-of-00002.bin",
120
+ "transformer.h.16.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
121
+ "transformer.h.16.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
122
+ "transformer.h.16.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
123
+ "transformer.h.16.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
124
+ "transformer.h.16.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
125
+ "transformer.h.16.ln_1.bias": "pytorch_model-00001-of-00002.bin",
126
+ "transformer.h.16.ln_1.weight": "pytorch_model-00001-of-00002.bin",
127
+ "transformer.h.16.ln_2.bias": "pytorch_model-00001-of-00002.bin",
128
+ "transformer.h.16.ln_2.weight": "pytorch_model-00001-of-00002.bin",
129
+ "transformer.h.16.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
130
+ "transformer.h.16.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
131
+ "transformer.h.16.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
132
+ "transformer.h.16.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
133
+ "transformer.h.17.attn.bias": "pytorch_model-00001-of-00002.bin",
134
+ "transformer.h.17.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
135
+ "transformer.h.17.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
136
+ "transformer.h.17.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
137
+ "transformer.h.17.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
138
+ "transformer.h.17.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
139
+ "transformer.h.17.ln_1.bias": "pytorch_model-00001-of-00002.bin",
140
+ "transformer.h.17.ln_1.weight": "pytorch_model-00001-of-00002.bin",
141
+ "transformer.h.17.ln_2.bias": "pytorch_model-00001-of-00002.bin",
142
+ "transformer.h.17.ln_2.weight": "pytorch_model-00001-of-00002.bin",
143
+ "transformer.h.17.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
144
+ "transformer.h.17.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
145
+ "transformer.h.17.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
146
+ "transformer.h.17.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
147
+ "transformer.h.18.attn.bias": "pytorch_model-00001-of-00002.bin",
148
+ "transformer.h.18.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
149
+ "transformer.h.18.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
150
+ "transformer.h.18.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
151
+ "transformer.h.18.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
152
+ "transformer.h.18.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
153
+ "transformer.h.18.ln_1.bias": "pytorch_model-00001-of-00002.bin",
154
+ "transformer.h.18.ln_1.weight": "pytorch_model-00001-of-00002.bin",
155
+ "transformer.h.18.ln_2.bias": "pytorch_model-00001-of-00002.bin",
156
+ "transformer.h.18.ln_2.weight": "pytorch_model-00001-of-00002.bin",
157
+ "transformer.h.18.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
158
+ "transformer.h.18.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
159
+ "transformer.h.18.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
160
+ "transformer.h.18.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
161
+ "transformer.h.19.attn.bias": "pytorch_model-00001-of-00002.bin",
162
+ "transformer.h.19.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
163
+ "transformer.h.19.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
164
+ "transformer.h.19.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
165
+ "transformer.h.19.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
166
+ "transformer.h.19.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
167
+ "transformer.h.19.ln_1.bias": "pytorch_model-00001-of-00002.bin",
168
+ "transformer.h.19.ln_1.weight": "pytorch_model-00001-of-00002.bin",
169
+ "transformer.h.19.ln_2.bias": "pytorch_model-00001-of-00002.bin",
170
+ "transformer.h.19.ln_2.weight": "pytorch_model-00001-of-00002.bin",
171
+ "transformer.h.19.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
172
+ "transformer.h.19.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
173
+ "transformer.h.19.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
174
+ "transformer.h.19.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
175
+ "transformer.h.2.attn.bias": "pytorch_model-00001-of-00002.bin",
176
+ "transformer.h.2.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
177
+ "transformer.h.2.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
178
+ "transformer.h.2.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
179
+ "transformer.h.2.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
180
+ "transformer.h.2.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
181
+ "transformer.h.2.ln_1.bias": "pytorch_model-00001-of-00002.bin",
182
+ "transformer.h.2.ln_1.weight": "pytorch_model-00001-of-00002.bin",
183
+ "transformer.h.2.ln_2.bias": "pytorch_model-00001-of-00002.bin",
184
+ "transformer.h.2.ln_2.weight": "pytorch_model-00001-of-00002.bin",
185
+ "transformer.h.2.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
186
+ "transformer.h.2.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
187
+ "transformer.h.2.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
188
+ "transformer.h.2.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
189
+ "transformer.h.20.attn.bias": "pytorch_model-00002-of-00002.bin",
190
+ "transformer.h.20.attn.c_attn.bias": "pytorch_model-00002-of-00002.bin",
191
+ "transformer.h.20.attn.c_attn.weight": "pytorch_model-00002-of-00002.bin",
192
+ "transformer.h.20.attn.c_proj.bias": "pytorch_model-00002-of-00002.bin",
193
+ "transformer.h.20.attn.c_proj.weight": "pytorch_model-00002-of-00002.bin",
194
+ "transformer.h.20.attn.masked_bias": "pytorch_model-00002-of-00002.bin",
195
+ "transformer.h.20.ln_1.bias": "pytorch_model-00001-of-00002.bin",
196
+ "transformer.h.20.ln_1.weight": "pytorch_model-00001-of-00002.bin",
197
+ "transformer.h.20.ln_2.bias": "pytorch_model-00002-of-00002.bin",
198
+ "transformer.h.20.ln_2.weight": "pytorch_model-00002-of-00002.bin",
199
+ "transformer.h.20.mlp.c_fc.bias": "pytorch_model-00002-of-00002.bin",
200
+ "transformer.h.20.mlp.c_fc.weight": "pytorch_model-00002-of-00002.bin",
201
+ "transformer.h.20.mlp.c_proj.bias": "pytorch_model-00002-of-00002.bin",
202
+ "transformer.h.20.mlp.c_proj.weight": "pytorch_model-00002-of-00002.bin",
203
+ "transformer.h.21.attn.bias": "pytorch_model-00002-of-00002.bin",
204
+ "transformer.h.21.attn.c_attn.bias": "pytorch_model-00002-of-00002.bin",
205
+ "transformer.h.21.attn.c_attn.weight": "pytorch_model-00002-of-00002.bin",
206
+ "transformer.h.21.attn.c_proj.bias": "pytorch_model-00002-of-00002.bin",
207
+ "transformer.h.21.attn.c_proj.weight": "pytorch_model-00002-of-00002.bin",
208
+ "transformer.h.21.attn.masked_bias": "pytorch_model-00002-of-00002.bin",
209
+ "transformer.h.21.ln_1.bias": "pytorch_model-00002-of-00002.bin",
210
+ "transformer.h.21.ln_1.weight": "pytorch_model-00002-of-00002.bin",
211
+ "transformer.h.21.ln_2.bias": "pytorch_model-00002-of-00002.bin",
212
+ "transformer.h.21.ln_2.weight": "pytorch_model-00002-of-00002.bin",
213
+ "transformer.h.21.mlp.c_fc.bias": "pytorch_model-00002-of-00002.bin",
214
+ "transformer.h.21.mlp.c_fc.weight": "pytorch_model-00002-of-00002.bin",
215
+ "transformer.h.21.mlp.c_proj.bias": "pytorch_model-00002-of-00002.bin",
216
+ "transformer.h.21.mlp.c_proj.weight": "pytorch_model-00002-of-00002.bin",
217
+ "transformer.h.22.attn.bias": "pytorch_model-00002-of-00002.bin",
218
+ "transformer.h.22.attn.c_attn.bias": "pytorch_model-00002-of-00002.bin",
219
+ "transformer.h.22.attn.c_attn.weight": "pytorch_model-00002-of-00002.bin",
220
+ "transformer.h.22.attn.c_proj.bias": "pytorch_model-00002-of-00002.bin",
221
+ "transformer.h.22.attn.c_proj.weight": "pytorch_model-00002-of-00002.bin",
222
+ "transformer.h.22.attn.masked_bias": "pytorch_model-00002-of-00002.bin",
223
+ "transformer.h.22.ln_1.bias": "pytorch_model-00002-of-00002.bin",
224
+ "transformer.h.22.ln_1.weight": "pytorch_model-00002-of-00002.bin",
225
+ "transformer.h.22.ln_2.bias": "pytorch_model-00002-of-00002.bin",
226
+ "transformer.h.22.ln_2.weight": "pytorch_model-00002-of-00002.bin",
227
+ "transformer.h.22.mlp.c_fc.bias": "pytorch_model-00002-of-00002.bin",
228
+ "transformer.h.22.mlp.c_fc.weight": "pytorch_model-00002-of-00002.bin",
229
+ "transformer.h.22.mlp.c_proj.bias": "pytorch_model-00002-of-00002.bin",
230
+ "transformer.h.22.mlp.c_proj.weight": "pytorch_model-00002-of-00002.bin",
231
+ "transformer.h.23.attn.bias": "pytorch_model-00002-of-00002.bin",
232
+ "transformer.h.23.attn.c_attn.bias": "pytorch_model-00002-of-00002.bin",
233
+ "transformer.h.23.attn.c_attn.weight": "pytorch_model-00002-of-00002.bin",
234
+ "transformer.h.23.attn.c_proj.bias": "pytorch_model-00002-of-00002.bin",
235
+ "transformer.h.23.attn.c_proj.weight": "pytorch_model-00002-of-00002.bin",
236
+ "transformer.h.23.attn.masked_bias": "pytorch_model-00002-of-00002.bin",
237
+ "transformer.h.23.ln_1.bias": "pytorch_model-00002-of-00002.bin",
238
+ "transformer.h.23.ln_1.weight": "pytorch_model-00002-of-00002.bin",
239
+ "transformer.h.23.ln_2.bias": "pytorch_model-00002-of-00002.bin",
240
+ "transformer.h.23.ln_2.weight": "pytorch_model-00002-of-00002.bin",
241
+ "transformer.h.23.mlp.c_fc.bias": "pytorch_model-00002-of-00002.bin",
242
+ "transformer.h.23.mlp.c_fc.weight": "pytorch_model-00002-of-00002.bin",
243
+ "transformer.h.23.mlp.c_proj.bias": "pytorch_model-00002-of-00002.bin",
244
+ "transformer.h.23.mlp.c_proj.weight": "pytorch_model-00002-of-00002.bin",
245
+ "transformer.h.24.attn.bias": "pytorch_model-00002-of-00002.bin",
246
+ "transformer.h.24.attn.c_attn.bias": "pytorch_model-00002-of-00002.bin",
247
+ "transformer.h.24.attn.c_attn.weight": "pytorch_model-00002-of-00002.bin",
248
+ "transformer.h.24.attn.c_proj.bias": "pytorch_model-00002-of-00002.bin",
249
+ "transformer.h.24.attn.c_proj.weight": "pytorch_model-00002-of-00002.bin",
250
+ "transformer.h.24.attn.masked_bias": "pytorch_model-00002-of-00002.bin",
251
+ "transformer.h.24.ln_1.bias": "pytorch_model-00002-of-00002.bin",
252
+ "transformer.h.24.ln_1.weight": "pytorch_model-00002-of-00002.bin",
253
+ "transformer.h.24.ln_2.bias": "pytorch_model-00002-of-00002.bin",
254
+ "transformer.h.24.ln_2.weight": "pytorch_model-00002-of-00002.bin",
255
+ "transformer.h.24.mlp.c_fc.bias": "pytorch_model-00002-of-00002.bin",
256
+ "transformer.h.24.mlp.c_fc.weight": "pytorch_model-00002-of-00002.bin",
257
+ "transformer.h.24.mlp.c_proj.bias": "pytorch_model-00002-of-00002.bin",
258
+ "transformer.h.24.mlp.c_proj.weight": "pytorch_model-00002-of-00002.bin",
259
+ "transformer.h.25.attn.bias": "pytorch_model-00002-of-00002.bin",
260
+ "transformer.h.25.attn.c_attn.bias": "pytorch_model-00002-of-00002.bin",
261
+ "transformer.h.25.attn.c_attn.weight": "pytorch_model-00002-of-00002.bin",
262
+ "transformer.h.25.attn.c_proj.bias": "pytorch_model-00002-of-00002.bin",
263
+ "transformer.h.25.attn.c_proj.weight": "pytorch_model-00002-of-00002.bin",
264
+ "transformer.h.25.attn.masked_bias": "pytorch_model-00002-of-00002.bin",
265
+ "transformer.h.25.ln_1.bias": "pytorch_model-00002-of-00002.bin",
266
+ "transformer.h.25.ln_1.weight": "pytorch_model-00002-of-00002.bin",
267
+ "transformer.h.25.ln_2.bias": "pytorch_model-00002-of-00002.bin",
268
+ "transformer.h.25.ln_2.weight": "pytorch_model-00002-of-00002.bin",
269
+ "transformer.h.25.mlp.c_fc.bias": "pytorch_model-00002-of-00002.bin",
270
+ "transformer.h.25.mlp.c_fc.weight": "pytorch_model-00002-of-00002.bin",
271
+ "transformer.h.25.mlp.c_proj.bias": "pytorch_model-00002-of-00002.bin",
272
+ "transformer.h.25.mlp.c_proj.weight": "pytorch_model-00002-of-00002.bin",
273
+ "transformer.h.26.attn.bias": "pytorch_model-00002-of-00002.bin",
274
+ "transformer.h.26.attn.c_attn.bias": "pytorch_model-00002-of-00002.bin",
275
+ "transformer.h.26.attn.c_attn.weight": "pytorch_model-00002-of-00002.bin",
276
+ "transformer.h.26.attn.c_proj.bias": "pytorch_model-00002-of-00002.bin",
277
+ "transformer.h.26.attn.c_proj.weight": "pytorch_model-00002-of-00002.bin",
278
+ "transformer.h.26.attn.masked_bias": "pytorch_model-00002-of-00002.bin",
279
+ "transformer.h.26.ln_1.bias": "pytorch_model-00002-of-00002.bin",
280
+ "transformer.h.26.ln_1.weight": "pytorch_model-00002-of-00002.bin",
281
+ "transformer.h.26.ln_2.bias": "pytorch_model-00002-of-00002.bin",
282
+ "transformer.h.26.ln_2.weight": "pytorch_model-00002-of-00002.bin",
283
+ "transformer.h.26.mlp.c_fc.bias": "pytorch_model-00002-of-00002.bin",
284
+ "transformer.h.26.mlp.c_fc.weight": "pytorch_model-00002-of-00002.bin",
285
+ "transformer.h.26.mlp.c_proj.bias": "pytorch_model-00002-of-00002.bin",
286
+ "transformer.h.26.mlp.c_proj.weight": "pytorch_model-00002-of-00002.bin",
287
+ "transformer.h.27.attn.bias": "pytorch_model-00002-of-00002.bin",
288
+ "transformer.h.27.attn.c_attn.bias": "pytorch_model-00002-of-00002.bin",
289
+ "transformer.h.27.attn.c_attn.weight": "pytorch_model-00002-of-00002.bin",
290
+ "transformer.h.27.attn.c_proj.bias": "pytorch_model-00002-of-00002.bin",
291
+ "transformer.h.27.attn.c_proj.weight": "pytorch_model-00002-of-00002.bin",
292
+ "transformer.h.27.attn.masked_bias": "pytorch_model-00002-of-00002.bin",
293
+ "transformer.h.27.ln_1.bias": "pytorch_model-00002-of-00002.bin",
294
+ "transformer.h.27.ln_1.weight": "pytorch_model-00002-of-00002.bin",
295
+ "transformer.h.27.ln_2.bias": "pytorch_model-00002-of-00002.bin",
296
+ "transformer.h.27.ln_2.weight": "pytorch_model-00002-of-00002.bin",
297
+ "transformer.h.27.mlp.c_fc.bias": "pytorch_model-00002-of-00002.bin",
298
+ "transformer.h.27.mlp.c_fc.weight": "pytorch_model-00002-of-00002.bin",
299
+ "transformer.h.27.mlp.c_proj.bias": "pytorch_model-00002-of-00002.bin",
300
+ "transformer.h.27.mlp.c_proj.weight": "pytorch_model-00002-of-00002.bin",
301
+ "transformer.h.28.attn.bias": "pytorch_model-00002-of-00002.bin",
302
+ "transformer.h.28.attn.c_attn.bias": "pytorch_model-00002-of-00002.bin",
303
+ "transformer.h.28.attn.c_attn.weight": "pytorch_model-00002-of-00002.bin",
304
+ "transformer.h.28.attn.c_proj.bias": "pytorch_model-00002-of-00002.bin",
305
+ "transformer.h.28.attn.c_proj.weight": "pytorch_model-00002-of-00002.bin",
306
+ "transformer.h.28.attn.masked_bias": "pytorch_model-00002-of-00002.bin",
307
+ "transformer.h.28.ln_1.bias": "pytorch_model-00002-of-00002.bin",
308
+ "transformer.h.28.ln_1.weight": "pytorch_model-00002-of-00002.bin",
309
+ "transformer.h.28.ln_2.bias": "pytorch_model-00002-of-00002.bin",
310
+ "transformer.h.28.ln_2.weight": "pytorch_model-00002-of-00002.bin",
311
+ "transformer.h.28.mlp.c_fc.bias": "pytorch_model-00002-of-00002.bin",
312
+ "transformer.h.28.mlp.c_fc.weight": "pytorch_model-00002-of-00002.bin",
313
+ "transformer.h.28.mlp.c_proj.bias": "pytorch_model-00002-of-00002.bin",
314
+ "transformer.h.28.mlp.c_proj.weight": "pytorch_model-00002-of-00002.bin",
315
+ "transformer.h.29.attn.bias": "pytorch_model-00002-of-00002.bin",
316
+ "transformer.h.29.attn.c_attn.bias": "pytorch_model-00002-of-00002.bin",
317
+ "transformer.h.29.attn.c_attn.weight": "pytorch_model-00002-of-00002.bin",
318
+ "transformer.h.29.attn.c_proj.bias": "pytorch_model-00002-of-00002.bin",
319
+ "transformer.h.29.attn.c_proj.weight": "pytorch_model-00002-of-00002.bin",
320
+ "transformer.h.29.attn.masked_bias": "pytorch_model-00002-of-00002.bin",
321
+ "transformer.h.29.ln_1.bias": "pytorch_model-00002-of-00002.bin",
322
+ "transformer.h.29.ln_1.weight": "pytorch_model-00002-of-00002.bin",
323
+ "transformer.h.29.ln_2.bias": "pytorch_model-00002-of-00002.bin",
324
+ "transformer.h.29.ln_2.weight": "pytorch_model-00002-of-00002.bin",
325
+ "transformer.h.29.mlp.c_fc.bias": "pytorch_model-00002-of-00002.bin",
326
+ "transformer.h.29.mlp.c_fc.weight": "pytorch_model-00002-of-00002.bin",
327
+ "transformer.h.29.mlp.c_proj.bias": "pytorch_model-00002-of-00002.bin",
328
+ "transformer.h.29.mlp.c_proj.weight": "pytorch_model-00002-of-00002.bin",
329
+ "transformer.h.3.attn.bias": "pytorch_model-00001-of-00002.bin",
330
+ "transformer.h.3.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
331
+ "transformer.h.3.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
332
+ "transformer.h.3.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
333
+ "transformer.h.3.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
334
+ "transformer.h.3.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
335
+ "transformer.h.3.ln_1.bias": "pytorch_model-00001-of-00002.bin",
336
+ "transformer.h.3.ln_1.weight": "pytorch_model-00001-of-00002.bin",
337
+ "transformer.h.3.ln_2.bias": "pytorch_model-00001-of-00002.bin",
338
+ "transformer.h.3.ln_2.weight": "pytorch_model-00001-of-00002.bin",
339
+ "transformer.h.3.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
340
+ "transformer.h.3.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
341
+ "transformer.h.3.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
342
+ "transformer.h.3.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
343
+ "transformer.h.30.attn.bias": "pytorch_model-00002-of-00002.bin",
344
+ "transformer.h.30.attn.c_attn.bias": "pytorch_model-00002-of-00002.bin",
345
+ "transformer.h.30.attn.c_attn.weight": "pytorch_model-00002-of-00002.bin",
346
+ "transformer.h.30.attn.c_proj.bias": "pytorch_model-00002-of-00002.bin",
347
+ "transformer.h.30.attn.c_proj.weight": "pytorch_model-00002-of-00002.bin",
348
+ "transformer.h.30.attn.masked_bias": "pytorch_model-00002-of-00002.bin",
349
+ "transformer.h.30.ln_1.bias": "pytorch_model-00002-of-00002.bin",
350
+ "transformer.h.30.ln_1.weight": "pytorch_model-00002-of-00002.bin",
351
+ "transformer.h.30.ln_2.bias": "pytorch_model-00002-of-00002.bin",
352
+ "transformer.h.30.ln_2.weight": "pytorch_model-00002-of-00002.bin",
353
+ "transformer.h.30.mlp.c_fc.bias": "pytorch_model-00002-of-00002.bin",
354
+ "transformer.h.30.mlp.c_fc.weight": "pytorch_model-00002-of-00002.bin",
355
+ "transformer.h.30.mlp.c_proj.bias": "pytorch_model-00002-of-00002.bin",
356
+ "transformer.h.30.mlp.c_proj.weight": "pytorch_model-00002-of-00002.bin",
357
+ "transformer.h.31.attn.bias": "pytorch_model-00002-of-00002.bin",
358
+ "transformer.h.31.attn.c_attn.bias": "pytorch_model-00002-of-00002.bin",
359
+ "transformer.h.31.attn.c_attn.weight": "pytorch_model-00002-of-00002.bin",
360
+ "transformer.h.31.attn.c_proj.bias": "pytorch_model-00002-of-00002.bin",
361
+ "transformer.h.31.attn.c_proj.weight": "pytorch_model-00002-of-00002.bin",
362
+ "transformer.h.31.attn.masked_bias": "pytorch_model-00002-of-00002.bin",
363
+ "transformer.h.31.ln_1.bias": "pytorch_model-00002-of-00002.bin",
364
+ "transformer.h.31.ln_1.weight": "pytorch_model-00002-of-00002.bin",
365
+ "transformer.h.31.ln_2.bias": "pytorch_model-00002-of-00002.bin",
366
+ "transformer.h.31.ln_2.weight": "pytorch_model-00002-of-00002.bin",
367
+ "transformer.h.31.mlp.c_fc.bias": "pytorch_model-00002-of-00002.bin",
368
+ "transformer.h.31.mlp.c_fc.weight": "pytorch_model-00002-of-00002.bin",
369
+ "transformer.h.31.mlp.c_proj.bias": "pytorch_model-00002-of-00002.bin",
370
+ "transformer.h.31.mlp.c_proj.weight": "pytorch_model-00002-of-00002.bin",
371
+ "transformer.h.4.attn.bias": "pytorch_model-00001-of-00002.bin",
372
+ "transformer.h.4.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
373
+ "transformer.h.4.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
374
+ "transformer.h.4.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
375
+ "transformer.h.4.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
376
+ "transformer.h.4.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
377
+ "transformer.h.4.ln_1.bias": "pytorch_model-00001-of-00002.bin",
378
+ "transformer.h.4.ln_1.weight": "pytorch_model-00001-of-00002.bin",
379
+ "transformer.h.4.ln_2.bias": "pytorch_model-00001-of-00002.bin",
380
+ "transformer.h.4.ln_2.weight": "pytorch_model-00001-of-00002.bin",
381
+ "transformer.h.4.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
382
+ "transformer.h.4.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
383
+ "transformer.h.4.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
384
+ "transformer.h.4.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
385
+ "transformer.h.5.attn.bias": "pytorch_model-00001-of-00002.bin",
386
+ "transformer.h.5.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
387
+ "transformer.h.5.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
388
+ "transformer.h.5.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
389
+ "transformer.h.5.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
390
+ "transformer.h.5.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
391
+ "transformer.h.5.ln_1.bias": "pytorch_model-00001-of-00002.bin",
392
+ "transformer.h.5.ln_1.weight": "pytorch_model-00001-of-00002.bin",
393
+ "transformer.h.5.ln_2.bias": "pytorch_model-00001-of-00002.bin",
394
+ "transformer.h.5.ln_2.weight": "pytorch_model-00001-of-00002.bin",
395
+ "transformer.h.5.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
396
+ "transformer.h.5.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
397
+ "transformer.h.5.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
398
+ "transformer.h.5.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
399
+ "transformer.h.6.attn.bias": "pytorch_model-00001-of-00002.bin",
400
+ "transformer.h.6.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
401
+ "transformer.h.6.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
402
+ "transformer.h.6.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
403
+ "transformer.h.6.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
404
+ "transformer.h.6.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
405
+ "transformer.h.6.ln_1.bias": "pytorch_model-00001-of-00002.bin",
406
+ "transformer.h.6.ln_1.weight": "pytorch_model-00001-of-00002.bin",
407
+ "transformer.h.6.ln_2.bias": "pytorch_model-00001-of-00002.bin",
408
+ "transformer.h.6.ln_2.weight": "pytorch_model-00001-of-00002.bin",
409
+ "transformer.h.6.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
410
+ "transformer.h.6.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
411
+ "transformer.h.6.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
412
+ "transformer.h.6.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
413
+ "transformer.h.7.attn.bias": "pytorch_model-00001-of-00002.bin",
414
+ "transformer.h.7.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
415
+ "transformer.h.7.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
416
+ "transformer.h.7.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
417
+ "transformer.h.7.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
418
+ "transformer.h.7.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
419
+ "transformer.h.7.ln_1.bias": "pytorch_model-00001-of-00002.bin",
420
+ "transformer.h.7.ln_1.weight": "pytorch_model-00001-of-00002.bin",
421
+ "transformer.h.7.ln_2.bias": "pytorch_model-00001-of-00002.bin",
422
+ "transformer.h.7.ln_2.weight": "pytorch_model-00001-of-00002.bin",
423
+ "transformer.h.7.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
424
+ "transformer.h.7.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
425
+ "transformer.h.7.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
426
+ "transformer.h.7.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
427
+ "transformer.h.8.attn.bias": "pytorch_model-00001-of-00002.bin",
428
+ "transformer.h.8.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
429
+ "transformer.h.8.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
430
+ "transformer.h.8.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
431
+ "transformer.h.8.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
432
+ "transformer.h.8.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
433
+ "transformer.h.8.ln_1.bias": "pytorch_model-00001-of-00002.bin",
434
+ "transformer.h.8.ln_1.weight": "pytorch_model-00001-of-00002.bin",
435
+ "transformer.h.8.ln_2.bias": "pytorch_model-00001-of-00002.bin",
436
+ "transformer.h.8.ln_2.weight": "pytorch_model-00001-of-00002.bin",
437
+ "transformer.h.8.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
438
+ "transformer.h.8.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
439
+ "transformer.h.8.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
440
+ "transformer.h.8.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
441
+ "transformer.h.9.attn.bias": "pytorch_model-00001-of-00002.bin",
442
+ "transformer.h.9.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
443
+ "transformer.h.9.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
444
+ "transformer.h.9.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
445
+ "transformer.h.9.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
446
+ "transformer.h.9.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
447
+ "transformer.h.9.ln_1.bias": "pytorch_model-00001-of-00002.bin",
448
+ "transformer.h.9.ln_1.weight": "pytorch_model-00001-of-00002.bin",
449
+ "transformer.h.9.ln_2.bias": "pytorch_model-00001-of-00002.bin",
450
+ "transformer.h.9.ln_2.weight": "pytorch_model-00001-of-00002.bin",
451
+ "transformer.h.9.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
452
+ "transformer.h.9.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
453
+ "transformer.h.9.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
454
+ "transformer.h.9.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
455
+ "transformer.ln_f.bias": "pytorch_model-00002-of-00002.bin",
456
+ "transformer.ln_f.weight": "pytorch_model-00002-of-00002.bin",
457
+ "transformer.rotary_pos_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
458
+ "transformer.wte.weight": "pytorch_model-00001-of-00002.bin"
459
+ }
460
+ }
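The index file maps every parameter name to the shard file that stores it, and `metadata.total_size` records the combined byte size; `from_pretrained` uses this map to load the two shards. A minimal sketch of resolving a single tensor by hand (file names taken from the map above):

import json
import torch

with open("pytorch_model.bin.index.json") as f:
    index = json.load(f)

name = "transformer.h.20.ln_1.weight"
shard = index["weight_map"][name]                  # "pytorch_model-00001-of-00002.bin"
state_dict = torch.load(shard, map_location="cpu")
tensor = state_dict[name]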
rotary_position_embedding.py ADDED
@@ -0,0 +1,97 @@
1
+ # coding=utf-8
2
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import torch
17
+ from einops import rearrange
18
+ from torch import einsum, nn
19
+
20
+ __all__ = ['RotaryEmbedding', 'apply_rotary_pos_emb']
21
+
22
+
23
+ class RotaryEmbedding(nn.Module):
24
+ """
25
+ Implements Rotary Position Embedding from https://arxiv.org/abs/2104.09864.
26
+ """
27
+
28
+ def __init__(
29
+ self, dim: int, seq_len_interpolation_factor: int = None, pretrained_max_position_embeddings: int = None
30
+ ):
31
+ """
32
+ Args:
33
+
34
+ dim (int): rotary embedding dimension
35
+ seq_len_interpolation_factor (int): if not None, discrete positions will be interpolated
36
+ by this factor via the trick in https://arxiv.org/abs/2306.15595.
37
+ pretrained_max_position_embeddings (int): pre-trained max_position_embeddings before position interpolation.
38
+ """
39
+ super().__init__()
40
+ self.seq_len_interpolation_factor = seq_len_interpolation_factor
41
+ inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
42
+ self.register_buffer('inv_freq', inv_freq)
43
+ self.pretrained_max_position_embeddings = pretrained_max_position_embeddings
44
+
45
+ def forward(self, max_seq_len, offset=0):
46
+ seq = torch.arange(max_seq_len, device=self.inv_freq.device) + offset
47
+ seq = seq.type_as(self.inv_freq)
48
+
49
+ if self.pretrained_max_position_embeddings is not None and self.seq_len_interpolation_factor is not None:
50
+ if max_seq_len > self.pretrained_max_position_embeddings * self.seq_len_interpolation_factor:
51
+ # dynamic linear scaling (length > position we have learned)
52
+ seq *= 1 / (max_seq_len / self.pretrained_max_position_embeddings)
53
+ else:
54
+ # fixed linear scaling
55
+ seq *= 1 / self.seq_len_interpolation_factor
56
+
57
+ freqs = einsum('i , j -> i j', seq, self.inv_freq)
58
+ # first part even vector components, second part odd vector components,
59
+ # 2 * dim in dimension size
60
+ emb = torch.cat((freqs, freqs), dim=-1)
61
+ # emb [seq_length, .., dim]
62
+ return rearrange(emb, 'n d -> n 1 1 d')
63
+
64
+
65
+ def _rotate_half(x):
66
+ """
67
+ change sign so the last dimension
68
+ [A, B, C, D] -> [-C, -D, A, B]
69
+ """
70
+ x = rearrange(x, '... (j d) -> ... j d', j=2)
71
+ x1, x2 = x.unbind(dim=-2)
72
+ return torch.cat((-x2, x1), dim=-1)
73
+
74
+
75
+ def apply_rotary_pos_emb(t, freqs):
76
+ """
77
+ input tensor t is of shape [seq_length, ..., dim]
78
+ rotary positional embeding tensor freqs is of shape [seq_length, ..., dim]
79
+ check https://kexue.fm/archives/8265 for detailed formulas
80
+ """
81
+ # Changes from the original RoPE implementation
82
+ # 1. The original NeMo implementation assumes the input tensor of shape
83
+ # [seq_length, ..., dim], but the HF layout is [..., seq_length, dim].
84
+ # Thus freqs needs to be viewed as [..., seq_length, dim].
85
+ freqs = freqs.permute(1, 2, 0, 3)
86
+ # 2. Support for queries which past tokens are truncated
87
+ assert freqs.shape[-2] >= t.shape[-2]
88
+ if freqs.shape[-2] != t.shape[-2]:
89
+ freqs = freqs[:, :, -t.shape[-2]:, :]
90
+
91
+ rot_dim = freqs.shape[-1]
92
+ # ideally t_pass is empty so rotary pos embedding is applied to all tensor t
93
+ t, t_pass = t[..., :rot_dim], t[..., rot_dim:]
94
+ # first part is cosine component
95
+ # second part is sine component, need to change signs with _rotate_half method
96
+ t = (t * freqs.cos()) + (_rotate_half(t) * freqs.sin())
97
+ return torch.cat((t, t_pass), dim=-1)
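
A short usage sketch for the two exports above, not part of this commit: the tensor sizes are illustrative only and assume the HF [batch, heads, seq, head_dim] layout that apply_rotary_pos_emb expects.

# Hypothetical example: rotate only the first half of each 128-dim head.
import torch

head_dim, n_heads, seq_len = 128, 32, 16
rope = RotaryEmbedding(dim=head_dim // 2)        # rotary embedding over 64 channels
freqs = rope(seq_len)                            # shape [seq_len, 1, 1, 64]

q = torch.randn(1, n_heads, seq_len, head_dim)   # [batch, heads, seq, head_dim]
q_rot = apply_rotary_pos_emb(q, freqs)           # first 64 channels rotated, rest passed through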
tokenizer_config.json ADDED
@@ -0,0 +1,15 @@
+ {
+ "eos_token": "</s>",
+ "unk_token": "<unk>",
+ "pad_token": "<pad>",
+ "extra_ids": 0,
+ "additional_special_tokens": null,
+ "sp_model_kwargs": {},
+ "tokenizer_class": "Midm_bitext_Tokenizer",
+ "auto_map": {
+ "AutoTokenizer": [
+ "midm_bitext_tokenization.Midm_bitext_Tokenizer",
+ null
+ ]
+ }
+ }
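
The auto_map entry above lets AutoTokenizer dispatch to the custom Midm_bitext_Tokenizer class shipped in midm_bitext_tokenization.py. A minimal loading sketch follows, not part of this commit: the local path is a placeholder, and trust_remote_code=True is assumed to be required so the custom class can be imported.

# Hypothetical example; "./midm-bitext-S" is a placeholder path.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "./midm-bitext-S",
    trust_remote_code=True,  # loads midm_bitext_tokenization.Midm_bitext_Tokenizer
)
print(tokenizer("hello world").input_ids)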