AlumiK committed on
Commit
96b7ded
1 Parent(s): c7b742d

push model

README.md CHANGED
@@ -1,3 +1,20 @@
- ---
- license: gpl-3.0
- ---
+ ---
+ language:
+ - en
+ - zh
+ license: gpl-3.0
+ pipeline_tag: text-generation
+ ---
+ # LingLong (玲珑): A Small-Scale Chinese Pretrained Language Model
+
+ LingLong (玲珑) is a Chinese pretrained language model trained by the College of Software at Nankai University.
+ It is built on the GPT-3 architecture and trained on a large collection of private datasets.
+ With only 317 million parameters, LingLong is much smaller than the original GPT-3, yet it delivers strong results
+ on a variety of NLP tasks.
+ In Chinese, "玲珑" means "exquisite" or "delicate," which captures the small yet capable nature of this model, so we
+ named it LingLong in honor of its precision and finesse.
+
+ Although its performance is not on par with large pretrained language models that have hundreds of billions of
+ parameters, its low parameter count makes it accessible to researchers with limited computing resources.
+ This makes LingLong a good foundation for follow-up research such as fine-tuning, letting researchers study
+ pretrained language models and language processing without excessive computational resources.
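A minimal usage sketch (not part of the committed files): since the architecture, configuration, and tokenizer ship as custom code in this repository, loading through the `transformers` auto classes requires `trust_remote_code=True`. The repository id below is taken from `_name_or_path` in `config.json`; the prompt text is only an illustration.

```python
# Minimal usage sketch: load the custom LingLong classes via the auto_map entries.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = 'AlumiK/LingLong-317M'  # from "_name_or_path" in config.json
tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)

inputs = tokenizer('中国有很多著名的大学，其中', return_tensors='pt')
outputs = model.generate(**inputs, max_new_tokens=50, do_sample=True, top_k=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```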
config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "_name_or_path": "AlumiK/LingLong-317M",
+   "activation_function": "gelu_new",
+   "architectures": [
+     "LingLongForCausalLM"
+   ],
+   "attn_c": 8,
+   "attn_mode": "sparse",
+   "attn_pdrop": 0.1,
+   "attn_stride": 128,
+   "auto_map": {
+     "AutoConfig": "configuration_linglong.LingLongConfig",
+     "AutoModelForCausalLM": "modeling_linglong.LingLongForCausalLM"
+   },
+   "backward": false,
+   "bos_token_id": 10,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 8,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-08,
+   "model_type": "linglong",
+   "n_embd": 1024,
+   "n_head": 16,
+   "n_inner": null,
+   "n_layer": 24,
+   "n_position": 1024,
+   "pad_token_id": 0,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "tokenizer_class": "LingLongTokenizer",
+   "torch_dtype": "float32",
+   "transformers_version": "4.40.2",
+   "use_cache": true,
+   "use_pinyin": false,
+   "vocab_size": 13312
+ }
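As a sketch of how these values are consumed (assuming the repository id above and that remote code is trusted), the `auto_map` block is what routes the generic auto classes to the `LingLong*` implementations defined in this repository:

```python
# Sketch: resolve the custom config class through auto_map and inspect a few fields.
from transformers import AutoConfig

config = AutoConfig.from_pretrained('AlumiK/LingLong-317M', trust_remote_code=True)
print(type(config).__name__)                          # LingLongConfig (from configuration_linglong.py)
print(config.n_layer, config.n_head, config.n_embd)   # 24 16 1024
print(config.attn_mode, config.attn_stride, config.attn_c)  # sparse 128 8
```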
configuration_linglong.py ADDED
@@ -0,0 +1,68 @@
+ from transformers import PretrainedConfig
+
+
+ class LingLongConfig(PretrainedConfig):
+     model_type = 'linglong'
+
+     def __init__(
+         self,
+         vocab_size: int = 13312,
+         n_position: int = 1024,
+         n_embd: int = 1024,
+         n_layer: int = 24,
+         n_head: int = 16,
+         n_inner: int | None = None,
+         activation_function: str = 'gelu_new',
+         resid_pdrop: float = 0.1,
+         embd_pdrop: float = 0.1,
+         attn_pdrop: float = 0.1,
+         layer_norm_epsilon: float = 1e-8,
+         initializer_range: float = 0.02,
+         scale_attn_weights: bool = True,
+         use_cache: bool = True,
+         bos_token_id: int = 10,
+         eos_token_id: int = 8,
+         pad_token_id: int = 0,
+         scale_attn_by_inverse_layer_idx: bool = False,
+         reorder_and_upcast_attn: bool = False,
+         attn_mode: str = 'sparse',
+         attn_stride: int | None = 128,
+         attn_c: int | None = 8,
+         use_pinyin: bool = False,
+         backward: bool = False,
+         tokenizer_class: str | None = 'LingLongTokenizer',
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.n_position = n_position
+         self.n_embd = n_embd
+         self.n_layer = n_layer
+         self.n_head = n_head
+         self.n_inner = n_inner
+         self.activation_function = activation_function
+         self.resid_pdrop = resid_pdrop
+         self.embd_pdrop = embd_pdrop
+         self.attn_pdrop = attn_pdrop
+         self.layer_norm_epsilon = layer_norm_epsilon
+         self.initializer_range = initializer_range
+         self.scale_attn_weights = scale_attn_weights
+         self.use_cache = use_cache
+         self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
+         self.reorder_and_upcast_attn = reorder_and_upcast_attn
+         self.attn_mode = attn_mode
+         self.attn_stride = attn_stride
+         self.attn_c = attn_c
+         self.use_pinyin = use_pinyin
+         self.backward = backward
+
+         self.bos_token_id = bos_token_id
+         self.eos_token_id = eos_token_id
+         self.pad_token_id = pad_token_id
+
+         super().__init__(
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             pad_token_id=pad_token_id,
+             tokenizer_class=tokenizer_class,
+             **kwargs,
+         )
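For reference, the class above can also be instantiated directly. The sketch below builds a reduced configuration by overriding a few defaults; the override values are arbitrary examples, not a released checkpoint, and the local import assumes `configuration_linglong.py` is on the Python path.

```python
# Hypothetical example: build a smaller LingLong configuration for quick experiments.
from configuration_linglong import LingLongConfig  # assumes this file is importable locally

small_config = LingLongConfig(
    n_layer=6,          # fewer transformer blocks than the released 24-layer model
    n_embd=512,
    n_head=8,
    attn_mode='dense',  # any value other than 'sparse' keeps the full causal mask on every layer
)
print(small_config.vocab_size)  # 13312 (default kept from the class definition)
```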
generation_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "bos_token_id": 10,
+   "do_sample": true,
+   "eos_token_id": 8,
+   "max_length": 1024,
+   "pad_token_id": 8,
+   "top_k": 20,
+   "transformers_version": "4.40.2"
+ }
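These defaults (sampling with `top_k=20`, up to 1024 tokens) are what `model.generate` picks up automatically. A small sketch, assuming the repository id from `config.json`, showing how to inspect them without loading the full model:

```python
# Sketch: read the stored generation defaults on their own.
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained('AlumiK/LingLong-317M')
print(gen_config.do_sample, gen_config.top_k, gen_config.max_length)  # True 20 1024
```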
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a1efac094e2ae53dee933e4435ff7ee4f6576747aae2e26201d6d408d6606b73
+ size 1267996160
modeling_linglong.py ADDED
@@ -0,0 +1,622 @@
+ import math
+ import torch
+ import torch.utils.checkpoint
+
+ from torch import nn
+ from torch.cuda.amp import autocast
+ from transformers.utils import logging
+ from transformers.activations import ACT2FN
+ from transformers.pytorch_utils import Conv1D
+ from transformers.modeling_utils import PreTrainedModel
+ from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutput
+
+ from .configuration_linglong import LingLongConfig
+
+ logger = logging.get_logger(__name__)
+
+
+ class LingLongAttention(nn.Module):
+
+     def __init__(self, config, layer_idx=None):
+         super().__init__()
+
+         n_position = config.n_position
+         self.register_buffer(
+             'bias',
+             torch.tril(torch.ones((n_position, n_position), dtype=torch.bool)).view(
+                 1, 1, n_position, n_position
+             ),
+             persistent=False,
+         )
+         self.register_buffer('masked_bias', torch.tensor(-1e4), persistent=False)
+
+         self.n_embd = config.n_embd
+         self.n_head = config.n_head
+         self.head_dim = self.n_embd // self.n_head
+         self.split_size = self.n_embd
+         if self.head_dim * self.n_head != self.n_embd:
+             raise ValueError(
+                 f'`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.n_embd} and `num_heads`:'
+                 f' {self.n_head}).'
+             )
+
+         self.scale_attn_weights = config.scale_attn_weights
+
+         # Layer-wise attention scaling, reordering, and upcasting
+         self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx
+         self.layer_idx = layer_idx
+         self.reorder_and_upcast_attn = config.reorder_and_upcast_attn
+
+         self.c_attn = Conv1D(3 * self.n_embd, self.n_embd)
+         self.c_proj = Conv1D(self.n_embd, self.n_embd)
+
+         self.attn_dropout = nn.Dropout(config.attn_pdrop)
+         self.resid_dropout = nn.Dropout(config.resid_pdrop)
+
+         # LingLong sparse attention.
+         self.mode = config.attn_mode
+         self.stride = config.attn_stride
+         self.c = config.attn_c
+         self.causal_mask = None
+
+     def _causal_mask(self, query_length, key_length):
+         return self.bias[:, :, key_length - query_length: key_length, :key_length]
+
+     def _sparse_causal_mask(self, query_length, key_length):
+         layout = torch.zeros([key_length, key_length], dtype=torch.bool, device=self.bias.device)
+         for idx in range(self.c):
+             layout[:, (self.stride - 1 - idx)::self.stride] = 1
+         for q_idx in range(key_length):
+             row = q_idx // self.stride
+             layout[q_idx, row * self.stride:(row + 1) * self.stride] = 1
+             # Any query cannot attend to keys above it.
+             layout[q_idx, q_idx + 1:] = 0
+         return layout[(key_length - query_length):].view(1, 1, query_length, key_length)
+
+     def _attn(self, query, key, value, attention_mask=None):
+         attn_weights = torch.matmul(query, key.transpose(-1, -2))
+
+         if self.scale_attn_weights:
+             attn_weights = attn_weights / torch.full(
+                 [], value.size(-1) ** 0.5, dtype=attn_weights.dtype, device=attn_weights.device
+             )
+
+         # Layer-wise attention scaling
+         if self.scale_attn_by_inverse_layer_idx:
+             attn_weights = attn_weights / float(self.layer_idx + 1)
+
+         # if only "normal" attention layer implements causal mask
+         query_length, key_length = query.size(-2), key.size(-2)
+         if self.causal_mask is None or self.causal_mask.size() != torch.Size([1, 1, query_length, key_length]):
+             if self.mode == 'sparse' and self.layer_idx % 2 != 0:
+                 self.causal_mask = self._sparse_causal_mask(query_length, key_length)
+             else:
+                 self.causal_mask = self._causal_mask(query_length, key_length)
+         mask_value = torch.finfo(attn_weights.dtype).min
+         # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
+         # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
+         mask_value = torch.full([], mask_value, dtype=attn_weights.dtype, device=attn_weights.device)
+         attn_weights = torch.where(self.causal_mask, attn_weights.to(attn_weights.dtype), mask_value)
+
+         if attention_mask is not None:
+             # Apply the attention mask
+             attn_weights = attn_weights + attention_mask
+
+         attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+         # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
+         attn_weights = attn_weights.type(value.dtype)
+         attn_weights = self.attn_dropout(attn_weights)
+
+         attn_output = torch.matmul(attn_weights, value)
+
+         return attn_output, attn_weights
+
+     def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None):
+         # Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM)
+         bsz, num_heads, q_seq_len, dk = query.size()
+         _, _, k_seq_len, _ = key.size()
+
+         # Preallocate attn_weights for `baddbmm`
+         attn_weights = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device)
+
+         # Compute Scale Factor
+         scale_factor = 1.0
+         if self.scale_attn_weights:
+             scale_factor /= float(value.size(-1)) ** 0.5
+
+         if self.scale_attn_by_inverse_layer_idx:
+             scale_factor /= float(self.layer_idx + 1)
+
+         # Upcast (turn off autocast) and reorder (Scale K by 1 / root(dk))
+         with autocast(enabled=False):
+             q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len)
+             attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor)
+             attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)
+
+         # if only "normal" attention layer implements causal mask
+         query_length, key_length = query.size(-2), key.size(-2)
+         if self.causal_mask is None or self.causal_mask.size() != torch.Size([1, 1, query_length, key_length]):
+             if self.mode == 'sparse' and self.layer_idx % 2 != 0:
+                 self.causal_mask = self._sparse_causal_mask(query_length, key_length)
+             else:
+                 self.causal_mask = self._causal_mask(query_length, key_length)
+         mask_value = torch.finfo(attn_weights.dtype).min
+         # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
+         # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
+         mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
+         attn_weights = torch.where(self.causal_mask, attn_weights, mask_value)
+
+         if attention_mask is not None:
+             # Apply the attention mask
+             attn_weights = attn_weights + attention_mask
+
+         attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+         # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op if otherwise
+         if attn_weights.dtype != torch.float32:
+             raise RuntimeError('Error with upcasting, attn_weights does not have dtype torch.float32.')
+         attn_weights = attn_weights.type(value.dtype)
+         attn_weights = self.attn_dropout(attn_weights)
+
+         attn_output = torch.matmul(attn_weights, value)
+
+         return attn_output, attn_weights
+
+     @staticmethod
+     def _split_heads(tensor, num_heads, attn_head_size):
+         """
+         Splits hidden_size dim into attn_head_size and num_heads
+         """
+         new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
+         tensor = tensor.view(new_shape)
+         return tensor.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)
+
+     @staticmethod
+     def _merge_heads(tensor, num_heads, attn_head_size):
+         """
+         Merges attn_head_size dim and num_attn_heads dim into hidden_size
+         """
+         tensor = tensor.permute(0, 2, 1, 3).contiguous()
+         new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
+         return tensor.view(new_shape)
+
+     def forward(
+         self,
+         hidden_states,
+         layer_past=None,
+         attention_mask=None,
+         use_cache=False,
+         output_attentions=False,
+     ):
+         query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)
+         query = self._split_heads(query, self.n_head, self.head_dim)
+         key = self._split_heads(key, self.n_head, self.head_dim)
+         value = self._split_heads(value, self.n_head, self.head_dim)
+
+         if layer_past is not None:
+             past_key, past_value = layer_past
+             key = torch.cat((past_key, key), dim=-2)
+             value = torch.cat((past_value, value), dim=-2)
+
+         if use_cache is True:
+             present = (key, value)
+         else:
+             present = None
+
+         if self.reorder_and_upcast_attn:
+             attn_output, attn_weights = self._upcast_and_reordered_attn(query, key, value, attention_mask)
+         else:
+             attn_output, attn_weights = self._attn(query, key, value, attention_mask)
+
+         attn_output = self._merge_heads(attn_output, self.n_head, self.head_dim)
+         attn_output = self.c_proj(attn_output)
+         attn_output = self.resid_dropout(attn_output)
+
+         outputs = (attn_output, present)
+         if output_attentions:
+             outputs += (attn_weights,)
+
+         return outputs  # a, present, (attentions)
+
+
+ class LingLongMLP(nn.Module):
+
+     def __init__(self, intermediate_size, config):
+         super().__init__()
+         n_embd = config.n_embd
+         self.c_fc = Conv1D(intermediate_size, n_embd)
+         self.c_proj = Conv1D(n_embd, intermediate_size)
+         self.act = ACT2FN[config.activation_function]
+         self.dropout = nn.Dropout(config.resid_pdrop)
+
+     def forward(self, hidden_states):
+         hidden_states = self.c_fc(hidden_states)
+         hidden_states = self.act(hidden_states)
+         hidden_states = self.c_proj(hidden_states)
+         hidden_states = self.dropout(hidden_states)
+         return hidden_states
+
+
+ class LingLongBlock(nn.Module):
+
+     def __init__(self, config, layer_idx=None):
+         super().__init__()
+         n_embd = config.n_embd
+         inner_dim = config.n_inner if config.n_inner is not None else 4 * n_embd
+
+         self.ln_1 = nn.LayerNorm(n_embd, eps=config.layer_norm_epsilon)
+         self.attn = LingLongAttention(config, layer_idx=layer_idx)
+         self.ln_2 = nn.LayerNorm(n_embd, eps=config.layer_norm_epsilon)
+
+         self.mlp = LingLongMLP(inner_dim, config)
+
+     def forward(
+         self,
+         hidden_states,
+         layer_past=None,
+         attention_mask=None,
+         use_cache=False,
+         output_attentions=False,
+     ):
+         residual = hidden_states
+         hidden_states = self.ln_1(hidden_states)
+         attn_outputs = self.attn(
+             hidden_states,
+             layer_past=layer_past,
+             attention_mask=attention_mask,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+         )
+         attn_output = attn_outputs[0]  # output_attn: a, present, (attentions)
+         outputs = attn_outputs[1:]
+         # residual connection
+         hidden_states = attn_output + residual
+
+         residual = hidden_states
+         hidden_states = self.ln_2(hidden_states)
+         feed_forward_hidden_states = self.mlp(hidden_states)
+         # residual connection
+         hidden_states = residual + feed_forward_hidden_states
+
+         if use_cache:
+             outputs = (hidden_states,) + outputs
+         else:
+             outputs = (hidden_states,) + outputs[1:]
+
+         return outputs  # hidden_states, present, (attentions)
+
+
+ class LingLongPreTrainedModel(PreTrainedModel):
+     """
+     An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+     models.
+     """
+
+     config_class = LingLongConfig
+     base_model_prefix = 'transformer'
+     supports_gradient_checkpointing = True
+     _no_split_modules = ['LingLongBlock']
+     _skip_keys_device_placement = 'past_key_values'
+
+     def __init__(self, *inputs, **kwargs):
+         super().__init__(*inputs, **kwargs)
+
+     def _init_weights(self, module):
+         """Initialize the weights."""
+         if isinstance(module, (nn.Linear, Conv1D)):
+             module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+             if module.bias is not None:
+                 module.bias.data.zero_()
+         elif isinstance(module, nn.Embedding):
+             module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+             if module.padding_idx is not None:
+                 module.weight.data[module.padding_idx].zero_()
+         elif isinstance(module, nn.LayerNorm):
+             module.bias.data.zero_()
+             module.weight.data.fill_(1.0)
+
+         # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
+         # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
+         # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
+         # > -- GPT-2 :: https://openai.com/blog/better-language-models/
+         #
+         # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
+         for name, p in module.named_parameters():
+             if name == 'c_proj.weight':
+                 # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
+                 p.data.normal_(mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.n_layer)))
+
+
+ class LingLongModel(LingLongPreTrainedModel):
+
+     def __init__(self, config: LingLongConfig):
+         super().__init__(config)
+         self.n_embd = config.n_embd
+         self.wte = nn.Embedding(config.vocab_size, self.n_embd)
+         self.wpe = nn.Embedding(config.n_position, self.n_embd)
+         self.drop = nn.Dropout(config.embd_pdrop)
+         self.h = nn.ModuleList([LingLongBlock(config, layer_idx=i) for i in range(config.n_layer)])
+         self.ln_f = nn.LayerNorm(self.n_embd, eps=config.layer_norm_epsilon)
+
+         # Model parallel
+         self.gradient_checkpointing = False
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.wte
+
+     def set_input_embeddings(self, new_embeddings):
+         self.wte = new_embeddings
+
+     def forward(
+         self,
+         input_ids: torch.LongTensor | None = None,
+         past_key_values: tuple[tuple[torch.Tensor]] | None = None,
+         attention_mask: torch.FloatTensor | None = None,
+         position_ids: torch.LongTensor | None = None,
+         inputs_embeds: torch.FloatTensor | None = None,
+         use_cache: bool | None = None,
+         output_attentions: bool | None = None,
+         output_hidden_states: bool | None = None,
+         return_dict: bool | None = None,
+     ) -> tuple | BaseModelOutputWithPast:
+         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         use_cache = use_cache if use_cache is not None else self.config.use_cache
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         if input_ids is not None and inputs_embeds is not None:
+             raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time.')
+         elif input_ids is not None:
+             self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+             input_shape = input_ids.size()
+             input_ids = input_ids.view(-1, input_shape[-1])
+             batch_size = input_ids.shape[0]
+         elif inputs_embeds is not None:
+             input_shape = inputs_embeds.size()[:-1]
+             batch_size = inputs_embeds.shape[0]
+         else:
+             raise ValueError('You have to specify either input_ids or inputs_embeds.')
+
+         device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+         if past_key_values is None:
+             past_length = 0
+             past_key_values = tuple([None] * len(self.h))
+         else:
+             past_length = past_key_values[0][0].size(-2)
+         if position_ids is None:
+             position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
+             position_ids = position_ids.unsqueeze(0)
+
+         # LingLongAttention mask.
+         if attention_mask is not None:
+             if batch_size <= 0:
+                 raise ValueError('batch_size has to be defined and > 0.')
+             attention_mask = attention_mask.view(batch_size, -1)
+             # We create a 3D attention mask from a 2D tensor mask.
+             # Sizes are [batch_size, 1, 1, to_seq_length]
+             # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
+             # this attention mask is more simple than the triangular masking of causal attention
+             # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
+             attention_mask = attention_mask[:, None, None, :]
+
+             # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
+             # masked positions, this operation will create a tensor which is 0.0 for
+             # positions we want to attend and the dtype's smallest value for masked positions.
+             # Since we are adding it to the raw scores before the softmax, this is
+             # effectively the same as removing these entirely.
+             attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility
+             attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
+
+         if inputs_embeds is None:
+             inputs_embeds = self.wte(input_ids)
+         position_embeds = self.wpe(position_ids)
+         hidden_states = inputs_embeds + position_embeds
+
+         hidden_states = self.drop(hidden_states)
+
+         output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),)
+
+         if self.gradient_checkpointing and self.training:
+             if use_cache:
+                 # noinspection PyUnresolvedReferences
+                 logger.warning_once(
+                     '`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...'
+                 )
+                 use_cache = False
+
+         presents = () if use_cache else None
+         all_self_attentions = () if output_attentions else None
+         all_hidden_states = () if output_hidden_states else None
+         for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
+             if output_hidden_states:
+                 all_hidden_states = all_hidden_states + (hidden_states,)
+
+             if self.gradient_checkpointing and self.training:
+                 outputs = self._gradient_checkpointing_func(
+                     block.__call__,
+                     hidden_states,
+                     None,
+                     attention_mask,
+                     use_cache,
+                     output_attentions,
+                 )
+             else:
+                 outputs = block(
+                     hidden_states,
+                     layer_past=layer_past,
+                     attention_mask=attention_mask,
+                     use_cache=use_cache,
+                     output_attentions=output_attentions,
+                 )
+
+             hidden_states = outputs[0]
+             if use_cache is True:
+                 presents = presents + (outputs[1],)
+
+             if output_attentions:
+                 all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
+
+         hidden_states = self.ln_f(hidden_states)
+
+         hidden_states = hidden_states.view(output_shape)
+         # Add last hidden state
+         if output_hidden_states:
+             all_hidden_states = all_hidden_states + (hidden_states,)
+
+         if not return_dict:
+             return tuple(
+                 v
+                 for v in [hidden_states, presents, all_hidden_states, all_self_attentions]
+                 if v is not None
+             )
+
+         return BaseModelOutputWithPast(
+             last_hidden_state=hidden_states,
+             past_key_values=presents,
+             hidden_states=all_hidden_states,
+             attentions=all_self_attentions,
+         )
+
+
+ class LingLongForCausalLM(LingLongPreTrainedModel):
+     _tied_weights_keys = ['lm_head.weight']
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.transformer = LingLongModel(config)
+         self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_output_embeddings(self):
+         return self.lm_head
+
+     def set_output_embeddings(self, new_embeddings):
+         self.lm_head = new_embeddings
+
+     def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
+         # Omit tokens covered by past_key_values
+         if past_key_values:
+             past_length = past_key_values[0][0].shape[2]
+
+             # Some generation methods already pass only the last input ID
+             if input_ids.shape[1] > past_length:
+                 remove_prefix_length = past_length
+             else:
+                 # Default to old behavior: keep only final ID
+                 remove_prefix_length = input_ids.shape[1] - 1
+
+             input_ids = input_ids[:, remove_prefix_length:]
+
+         attention_mask = kwargs.get('attention_mask', None)
+         position_ids = kwargs.get('position_ids', None)
+
+         if attention_mask is not None and position_ids is None:
+             # create position_ids on the fly for batch generation
+             # noinspection PyUnresolvedReferences
+             position_ids = attention_mask.long().cumsum(-1) - 1
+             position_ids.masked_fill_(attention_mask == 0, 1)
+             if past_key_values:
+                 position_ids = position_ids[:, -input_ids.shape[1]:]
+         else:
+             position_ids = None
+
+         # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+         if inputs_embeds is not None and past_key_values is None:
+             model_inputs = {'inputs_embeds': inputs_embeds}
+         else:
+             model_inputs = {'input_ids': input_ids}
+
+         model_inputs.update(
+             {
+                 'past_key_values': past_key_values,
+                 'use_cache': kwargs.get('use_cache'),
+                 'position_ids': position_ids,
+                 'attention_mask': attention_mask,
+             }
+         )
+
+         return model_inputs
+
+     def forward(
+         self,
+         input_ids: torch.LongTensor | None = None,
+         past_key_values: tuple[tuple[torch.Tensor]] | None = None,
+         attention_mask: torch.FloatTensor | None = None,
+         position_ids: torch.LongTensor | None = None,
+         inputs_embeds: torch.FloatTensor | None = None,
+         labels: torch.LongTensor | None = None,
+         use_cache: bool | None = None,
+         output_attentions: bool | None = None,
+         output_hidden_states: bool | None = None,
+         return_dict: bool | None = None,
+     ) -> tuple | CausalLMOutput:
+         r"""
+         labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+             Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
+             `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
+             are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
+         """
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         transformer_outputs = self.transformer(
+             input_ids,
+             past_key_values=past_key_values,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             inputs_embeds=inputs_embeds,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+         hidden_states = transformer_outputs[0]
+
+         lm_logits = self.lm_head(hidden_states)
+
+         loss = None
+         if labels is not None:
+             # move labels to correct device to enable model parallelism
+             labels = labels.to(lm_logits.device)
+             # Shift so that tokens < n predict n
+             shift_logits = lm_logits[..., :-1, :].contiguous()
+             shift_labels = labels[..., 1:].contiguous()
+             # Flatten the tokens
+             loss_fct = nn.CrossEntropyLoss()
+             loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
+
+         if not return_dict:
+             output = (lm_logits,) + transformer_outputs[1:]
+             return ((loss,) + output) if loss is not None else output
+
+         return CausalLMOutput(
+             loss=loss,
+             logits=lm_logits,
+             hidden_states=transformer_outputs.hidden_states,
+             attentions=transformer_outputs.attentions,
+         )
+
+     @staticmethod
+     def _reorder_cache(
+         past_key_values: tuple[tuple[torch.Tensor]],
+         beam_idx: torch.Tensor,
+         **kwargs,
+     ) -> tuple[tuple[torch.Tensor]]:
+         """
+         This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
+         [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
+         beam_idx at every generation step.
+         """
+         # noinspection PyTypeChecker
+         return tuple(
+             tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
+             for layer_past in past_key_values
+         )
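The most LingLong-specific piece above is `_sparse_causal_mask`: odd-indexed layers restrict each query to its own block of `attn_stride` positions plus the last `attn_c` positions of every earlier block, while even-indexed layers keep the full causal mask. A standalone sketch of the same layout, mirroring the loop above with a small stride so the pattern is easy to inspect:

```python
# Sketch: reproduce the strided sparse causal layout used on odd-indexed layers.
import torch


def sparse_causal_layout(seq_len: int, stride: int, c: int) -> torch.Tensor:
    layout = torch.zeros(seq_len, seq_len, dtype=torch.bool)
    # Every query may attend to the last `c` positions of each stride-sized block...
    for idx in range(c):
        layout[:, (stride - 1 - idx)::stride] = True
    for q_idx in range(seq_len):
        row = q_idx // stride
        # ...plus its own block, but never to positions after itself (causality).
        layout[q_idx, row * stride:(row + 1) * stride] = True
        layout[q_idx, q_idx + 1:] = False
    return layout


mask = sparse_causal_layout(seq_len=16, stride=4, c=2)
print(mask.int())  # 1 = attend, 0 = masked; compare with _sparse_causal_mask above
```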
special_tokens_map.json ADDED
@@ -0,0 +1,123 @@
+ {
+   "additional_special_tokens": [
+     {
+       "content": "<unused1>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<unused2>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<unused3>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<unused4>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<unused5>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<unused6>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<unused7>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<unused8>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<unused9>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<unused10>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     }
+   ],
+   "bos_token": {
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "cls_token": {
+     "content": "<cls>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "<sep>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenization_linglong_fast.py ADDED
@@ -0,0 +1,106 @@
+ import string
+
+ from tokenizers import (
+     Tokenizer as HFTokenizer,
+     normalizers,
+     pre_tokenizers,
+     models,
+     decoders,
+ )
+ from transformers.tokenization_utils_fast import PreTrainedTokenizerFast
+
+
+ class LingLongTokenizerFast(PreTrainedTokenizerFast):
+     vocab_files_names = {'vocab_file': 'tokenizer.txt', 'tokenizer_file': 'tokenizer.json'}
+     model_input_names = ['input_ids', 'attention_mask']
+
+     class CustomDecoder:
+
+         @staticmethod
+         def decode_chain(tokens: list[str]) -> list[str]:
+             new_tokens = []
+             for token in tokens:
+                 if token.startswith('##'):
+                     new_tokens.append(token[2:])
+                 else:
+                     new_tokens.append(' ' + token)
+
+             # Remove whitespaces between Chinese characters.
+             # TODO: This will remove whitespaces between some English words as well. Need fix.
+             alphabet_set = set(list(string.ascii_letters))
+             for i in range(len(new_tokens)):
+                 if new_tokens[i][0] == ' ':
+                     if new_tokens[i][1] not in alphabet_set or i == 0:
+                         new_tokens[i] = new_tokens[i][1:]
+             return new_tokens
+
+     def __init__(
+         self,
+         vocab_file: str | None = None,
+         tokenizer_file: str | None = None,
+         do_lower_case: bool = True,
+         do_basic_tokenize: bool = True,
+         unk_token: str = '<unk>',
+         sep_token: str = '<sep>',
+         pad_token: str = '<pad>',
+         cls_token: str = '<cls>',
+         mask_token: str = '<mask>',
+         bos_token: str = '<|startoftext|>',
+         eos_token: str = '<|endoftext|>',
+         tokenize_chinese_chars: bool = True,
+         strip_accents: bool | None = None,
+         **kwargs,
+     ):
+         backend_tokenizer = None
+         if tokenizer_file is None:
+             backend_tokenizer = HFTokenizer(
+                 models.WordPiece.from_file(
+                     vocab=vocab_file,
+                     unk_token=unk_token,
+                     max_input_chars_per_word=100,
+                 ),
+             )
+             backend_tokenizer.add_special_tokens(
+                 [unk_token, sep_token, pad_token, cls_token, mask_token, bos_token, eos_token],
+             )
+             normalizer_sequence = [normalizers.Replace('\n', sep_token)]
+             if do_basic_tokenize:
+                 normalizer_sequence.append(
+                     normalizers.BertNormalizer(
+                         handle_chinese_chars=tokenize_chinese_chars,
+                         strip_accents=strip_accents,
+                         lowercase=do_lower_case,
+                     ),
+                 )
+             backend_tokenizer.normalizer = normalizers.Sequence(normalizer_sequence)
+             backend_tokenizer.pre_tokenizer = pre_tokenizers.Sequence([
+                 pre_tokenizers.Digits(individual_digits=True),
+                 pre_tokenizers.Punctuation(),
+                 pre_tokenizers.WhitespaceSplit(),
+             ])
+         super().__init__(
+             tokenizer_file=tokenizer_file,
+             tokenizer_object=backend_tokenizer,
+             unk_token=unk_token,
+             sep_token=sep_token,
+             pad_token=pad_token,
+             cls_token=cls_token,
+             mask_token=mask_token,
+             bos_token=bos_token,
+             eos_token=eos_token,
+             do_lower_case=do_lower_case,
+             do_basic_tokenize=do_basic_tokenize,
+             tokenize_chinese_chars=tokenize_chinese_chars,
+             strip_accents=strip_accents,
+             **kwargs,
+         )
+         self._tokenizer.decoder = decoders.Decoder.custom(self.CustomDecoder())
+         self.add_special_tokens({'additional_special_tokens': [f'<unused{i}>' for i in range(1, 11)]})
+
+     def save_vocabulary(self, save_directory: str, filename_prefix: str | None = None) -> tuple[str]:
+         files = self.backend_tokenizer.model.save(save_directory, name=filename_prefix)
+         return tuple(files)
+
+     def save_pretrained(self, *args, **kwargs) -> tuple[str]:
+         self._tokenizer.decoder = decoders.WordPiece()
+         return super().save_pretrained(*args, **kwargs)
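A quick round-trip sketch for the tokenizer above (assuming the repository id from `config.json`): newlines are normalized to the `<sep>` token, and the custom decoder strips the spaces that plain WordPiece decoding would otherwise leave between Chinese characters.

```python
# Sketch: encode/decode round trip with the custom fast tokenizer.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('AlumiK/LingLong-317M', trust_remote_code=True)
ids = tokenizer('南开大学\nLingLong 模型')['input_ids']
print(ids)                    # token ids; the newline is mapped to the <sep> token
print(tokenizer.decode(ids))  # Chinese characters are rejoined without extra spaces
```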
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,172 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "7": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "8": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "9": {
+       "content": "<sep>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "10": {
+       "content": "<|startoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "13224": {
+       "content": "<unused1>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "13225": {
+       "content": "<unused2>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "13226": {
+       "content": "<unused3>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "13227": {
+       "content": "<unused4>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "13228": {
+       "content": "<unused5>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "13229": {
+       "content": "<unused6>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "13230": {
+       "content": "<unused7>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "13231": {
+       "content": "<unused8>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "13232": {
+       "content": "<unused9>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "13233": {
+       "content": "<unused10>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "13310": {
+       "content": "<cls>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "13311": {
+       "content": "<mask>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<unused1>",
+     "<unused2>",
+     "<unused3>",
+     "<unused4>",
+     "<unused5>",
+     "<unused6>",
+     "<unused7>",
+     "<unused8>",
+     "<unused9>",
+     "<unused10>"
+   ],
+   "auto_map": {
+     "AutoTokenizer": [
+       null,
+       "tokenization_linglong_fast.LingLongTokenizerFast"
+     ]
+   },
+   "bos_token": "<|startoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "<cls>",
+   "do_basic_tokenize": true,
+   "do_lower_case": true,
+   "eos_token": "<|endoftext|>",
+   "mask_token": "<mask>",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<pad>",
+   "sep_token": "<sep>",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "LingLongTokenizer",
+   "unk_token": "<unk>"
+ }