PhelixZhen committed
Commit 2e8e2e3
1 Parent(s): 6137bf4

Upload 25 files

config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "_name_or_path": "/mnt/m/work/gen1/message1",
+   "add_bias_linear": false,
+   "add_qkv_bias": true,
+   "apply_query_key_layer_scaling": true,
+   "apply_residual_connection_post_layernorm": false,
+   "architectures": [
+     "ChatGLMForConditionalGeneration"
+   ],
+   "attention_dropout": 0.0,
+   "attention_softmax_in_fp32": true,
+   "auto_map": {
+     "AutoConfig": "configuration_chatglm.ChatGLMConfig",
+     "AutoModel": "modeling_chatglm.ChatGLMForConditionalGeneration",
+     "AutoModelForSeq2SeqLM": "modeling_chatglm.ChatGLMForConditionalGeneration"
+   },
+   "bias_dropout_fusion": true,
+   "eos_token_id": 2,
+   "ffn_hidden_size": 13696,
+   "fp32_residual_connection": false,
+   "hidden_dropout": 0.0,
+   "hidden_size": 4096,
+   "kv_channels": 128,
+   "layernorm_epsilon": 1e-05,
+   "model_type": "chatglm",
+   "multi_query_attention": true,
+   "multi_query_group_num": 2,
+   "num_attention_heads": 32,
+   "num_layers": 28,
+   "original_rope": true,
+   "pad_token_id": 0,
+   "padded_vocab_size": 65024,
+   "post_layer_norm": true,
+   "pre_seq_len": null,
+   "prefix_projection": false,
+   "quantization_bit": 0,
+   "rmsnorm": true,
+   "seq_length": 32768,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float16",
+   "transformers_version": "4.32.0.dev0",
+   "use_cache": true,
+   "vocab_size": 65024
+ }
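
Note: the auto_map above lets the checkpoint be loaded through the transformers Auto classes with trust_remote_code=True. A minimal sketch, assuming the tokenizer files from this same upload are present; the local path is a placeholder, not part of the commit:

from transformers import AutoConfig, AutoModel, AutoTokenizer

# Hypothetical local path: point it at the directory containing these uploaded files.
model_path = "/path/to/this/checkpoint"

config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
# torch_dtype in config.json is float16; call .cuda() as well if a GPU is available.
model = AutoModel.from_pretrained(model_path, trust_remote_code=True).half().eval()
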
configuration_chatglm.py ADDED
@@ -0,0 +1,60 @@
+ from transformers import PretrainedConfig
+
+
+ class ChatGLMConfig(PretrainedConfig):
+     model_type = "chatglm"
+     def __init__(
+         self,
+         num_layers=28,
+         padded_vocab_size=65024,
+         hidden_size=4096,
+         ffn_hidden_size=13696,
+         kv_channels=128,
+         num_attention_heads=32,
+         seq_length=2048,
+         hidden_dropout=0.0,
+         attention_dropout=0.0,
+         layernorm_epsilon=1e-5,
+         rmsnorm=True,
+         apply_residual_connection_post_layernorm=False,
+         post_layer_norm=True,
+         add_bias_linear=False,
+         add_qkv_bias=False,
+         interleaved_qkv=False,
+         bias_dropout_fusion=True,
+         multi_query_attention=False,
+         multi_query_group_num=1,
+         apply_query_key_layer_scaling=True,
+         attention_softmax_in_fp32=True,
+         fp32_residual_connection=False,
+         quantization_bit=0,
+         pre_seq_len=None,
+         prefix_projection=False,
+         **kwargs
+     ):
+         self.num_layers = num_layers
+         self.vocab_size = padded_vocab_size
+         self.padded_vocab_size = padded_vocab_size
+         self.hidden_size = hidden_size
+         self.ffn_hidden_size = ffn_hidden_size
+         self.kv_channels = kv_channels
+         self.num_attention_heads = num_attention_heads
+         self.seq_length = seq_length
+         self.hidden_dropout = hidden_dropout
+         self.attention_dropout = attention_dropout
+         self.layernorm_epsilon = layernorm_epsilon
+         self.rmsnorm = rmsnorm
+         self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
+         self.post_layer_norm = post_layer_norm
+         self.add_bias_linear = add_bias_linear
+         self.add_qkv_bias = add_qkv_bias
+         self.bias_dropout_fusion = bias_dropout_fusion
+         self.multi_query_attention = multi_query_attention
+         self.multi_query_group_num = multi_query_group_num
+         self.apply_query_key_layer_scaling = apply_query_key_layer_scaling
+         self.attention_softmax_in_fp32 = attention_softmax_in_fp32
+         self.fp32_residual_connection = fp32_residual_connection
+         self.quantization_bit = quantization_bit
+         self.pre_seq_len = pre_seq_len
+         self.prefix_projection = prefix_projection
+         super().__init__(**kwargs)
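
Note: the class defaults above (seq_length=2048, add_qkv_bias=False, multi_query_attention=False, multi_query_group_num=1) are overridden by config.json in this upload (32768, true, true, 2). A sketch of building the config explicitly with those overrides, assuming configuration_chatglm.py is importable from the working directory:

from configuration_chatglm import ChatGLMConfig

# Override values taken from config.json above; everything else keeps the class defaults.
config = ChatGLMConfig(
    seq_length=32768,
    add_qkv_bias=True,
    multi_query_attention=True,
    multi_query_group_num=2,
)
print(config.padded_vocab_size, config.ffn_hidden_size)  # 65024 13696
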
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "eos_token_id": 2,
+   "pad_token_id": 0,
+   "transformers_version": "4.32.0.dev0"
+ }
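
These defaults (stop on eos_token_id 2, pad with token 0) are what model.generate() uses when no explicit GenerationConfig is passed. A short sketch of a direct generate() call, assuming model and tokenizer were loaded as above; the sampling values are illustrative only:

inputs = tokenizer("Hello", return_tensors="pt").to(model.device)
outputs = model.generate(
    **inputs,
    max_new_tokens=128,   # illustrative value
    do_sample=True,
    top_p=0.8,
    temperature=0.8,
    eos_token_id=2,       # matches generation_config.json
    pad_token_id=0,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
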
modeling_chatglm.py ADDED
@@ -0,0 +1,1192 @@
1
+ """ PyTorch ChatGLM model. """
2
+
3
+ import math
4
+ import copy
5
+ import warnings
6
+ import re
7
+ import sys
8
+
9
+ import torch
10
+ import torch.utils.checkpoint
11
+ import torch.nn.functional as F
12
+ from torch import nn
13
+ from torch.nn import CrossEntropyLoss, LayerNorm
14
+ from torch.nn.utils import skip_init
15
+ from typing import Optional, Tuple, Union, List, Callable, Dict, Any
16
+
17
+ from transformers.modeling_outputs import (
18
+ BaseModelOutputWithPast,
19
+ CausalLMOutputWithPast,
20
+ )
21
+ from transformers.modeling_utils import PreTrainedModel
22
+ from transformers.utils import logging
23
+ from transformers.generation.logits_process import LogitsProcessor
24
+ from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationConfig, ModelOutput
25
+
26
+ from .configuration_chatglm import ChatGLMConfig
27
+
28
+ # flags required to enable jit fusion kernels
29
+
30
+ if sys.platform != 'darwin':
31
+ torch._C._jit_set_profiling_mode(False)
32
+ torch._C._jit_set_profiling_executor(False)
33
+ torch._C._jit_override_can_fuse_on_cpu(True)
34
+ torch._C._jit_override_can_fuse_on_gpu(True)
35
+
36
+ logger = logging.get_logger(__name__)
37
+
38
+ _CHECKPOINT_FOR_DOC = "THUDM/ChatGLM2-6B"
39
+ _CONFIG_FOR_DOC = "ChatGLM6BConfig"
40
+
41
+ CHATGLM_6B_PRETRAINED_MODEL_ARCHIVE_LIST = [
42
+ "THUDM/chatglm2-6b",
43
+ # See all ChatGLM models at https://huggingface.co/models?filter=chatglm
44
+ ]
45
+
46
+
47
+ def default_init(cls, *args, **kwargs):
48
+ return cls(*args, **kwargs)
49
+
50
+
51
+ class InvalidScoreLogitsProcessor(LogitsProcessor):
52
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
53
+ if torch.isnan(scores).any() or torch.isinf(scores).any():
54
+ scores.zero_()
55
+ scores[..., 5] = 5e4
56
+ return scores
57
+
58
+
59
+ class PrefixEncoder(torch.nn.Module):
60
+ """
61
+ The torch.nn model to encode the prefix
62
+ Input shape: (batch-size, prefix-length)
63
+ Output shape: (batch-size, prefix-length, 2*layers*hidden)
64
+ """
65
+
66
+ def __init__(self, config: ChatGLMConfig):
67
+ super().__init__()
68
+ self.prefix_projection = config.prefix_projection
69
+ if self.prefix_projection:
70
+ # Use a two-layer MLP to encode the prefix
71
+ self.embedding = torch.nn.Embedding(config.pre_seq_len, config.hidden_size)
72
+ self.trans = torch.nn.Sequential(
73
+ torch.nn.Linear(config.hidden_size, config.hidden_size),
74
+ torch.nn.Tanh(),
75
+ torch.nn.Linear(config.hidden_size, config.num_layers * config.hidden_size * 2)
76
+ )
77
+ else:
78
+ self.embedding = torch.nn.Embedding(config.pre_seq_len,
79
+ config.num_layers * config.kv_channels * config.multi_query_group_num * 2)
80
+
81
+ def forward(self, prefix: torch.Tensor):
82
+ if self.prefix_projection:
83
+ prefix_tokens = self.embedding(prefix)
84
+ past_key_values = self.trans(prefix_tokens)
85
+ else:
86
+ past_key_values = self.embedding(prefix)
87
+ return past_key_values
88
+
89
+
90
+ def split_tensor_along_last_dim(
91
+ tensor: torch.Tensor,
92
+ num_partitions: int,
93
+ contiguous_split_chunks: bool = False,
94
+ ) -> List[torch.Tensor]:
95
+ """Split a tensor along its last dimension.
96
+
97
+ Arguments:
98
+ tensor: input tensor.
99
+ num_partitions: number of partitions to split the tensor
100
+ contiguous_split_chunks: If True, make each chunk contiguous
101
+ in memory.
102
+
103
+ Returns:
104
+ A list of Tensors
105
+ """
106
+ # Get the size and dimension.
107
+ last_dim = tensor.dim() - 1
108
+ last_dim_size = tensor.size()[last_dim] // num_partitions
109
+ # Split.
110
+ tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
111
+ # Note: torch.split does not create contiguous tensors by default.
112
+ if contiguous_split_chunks:
113
+ return tuple(chunk.contiguous() for chunk in tensor_list)
114
+
115
+ return tensor_list
116
+
117
+
118
+ class RotaryEmbedding(nn.Module):
119
+ def __init__(self, dim, original_impl=False, device=None, dtype=None):
120
+ super().__init__()
121
+ inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, device=device).to(dtype=dtype) / dim))
122
+ self.register_buffer("inv_freq", inv_freq)
123
+ self.dim = dim
124
+ self.original_impl = original_impl
125
+
126
+ def forward_impl(
127
+ self, seq_len: int, n_elem: int, dtype: torch.dtype, device: torch.device, base: int = 10000
128
+ ):
129
+ """Enhanced Transformer with Rotary Position Embedding.
130
+
131
+ Derived from: https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/
132
+ transformers/rope/__init__.py. MIT License:
133
+ https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/license.
134
+ """
135
+ # $\Theta = {\theta_i = 10000^{\frac{2(i-1)}{d}}, i \in [1, 2, ..., \frac{d}{2}]}$
136
+ theta = 1.0 / (base ** (torch.arange(0, n_elem, 2, dtype=dtype, device=device) / n_elem))
137
+
138
+ # Create position indexes `[0, 1, ..., seq_len - 1]`
139
+ seq_idx = torch.arange(seq_len, dtype=dtype, device=device)
140
+
141
+ # Calculate the product of position index and $\theta_i$
142
+ idx_theta = torch.outer(seq_idx, theta).float()
143
+
144
+ cache = torch.stack([torch.cos(idx_theta), torch.sin(idx_theta)], dim=-1)
145
+
146
+ # this is to mimic the behaviour of complex32, else we will get different results
147
+ if dtype in (torch.float16, torch.bfloat16, torch.int8):
148
+ cache = cache.bfloat16() if dtype == torch.bfloat16 else cache.half()
149
+ return cache
150
+
151
+ def forward(self, max_seq_len, offset=0):
152
+ return self.forward_impl(
153
+ max_seq_len, self.dim, dtype=self.inv_freq.dtype, device=self.inv_freq.device
154
+ )
155
+
156
+
157
+ @torch.jit.script
158
+ def apply_rotary_pos_emb(x: torch.Tensor, rope_cache: torch.Tensor) -> torch.Tensor:
159
+ # x: [sq, b, np, hn]
160
+ sq, b, np, hn = x.size(0), x.size(1), x.size(2), x.size(3)
161
+ rot_dim = rope_cache.shape[-2] * 2
162
+ x, x_pass = x[..., :rot_dim], x[..., rot_dim:]
163
+ # truncate to support variable sizes
164
+ rope_cache = rope_cache[:sq]
165
+ xshaped = x.reshape(sq, -1, np, rot_dim // 2, 2)
166
+ rope_cache = rope_cache.view(sq, -1, 1, xshaped.size(3), 2)
167
+ x_out2 = torch.stack(
168
+ [
169
+ xshaped[..., 0] * rope_cache[..., 0] - xshaped[..., 1] * rope_cache[..., 1],
170
+ xshaped[..., 1] * rope_cache[..., 0] + xshaped[..., 0] * rope_cache[..., 1],
171
+ ],
172
+ -1,
173
+ )
174
+ x_out2 = x_out2.flatten(3)
175
+ return torch.cat((x_out2, x_pass), dim=-1)
176
+
177
+
178
+ class RMSNorm(torch.nn.Module):
179
+ def __init__(self, normalized_shape, eps=1e-5, device=None, dtype=None, **kwargs):
180
+ super().__init__()
181
+ self.weight = torch.nn.Parameter(torch.empty(normalized_shape, device=device, dtype=dtype))
182
+ self.eps = eps
183
+
184
+ def forward(self, hidden_states: torch.Tensor):
185
+ input_dtype = hidden_states.dtype
186
+ variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
187
+ hidden_states = hidden_states * torch.rsqrt(variance + self.eps)
188
+
189
+ return (self.weight * hidden_states).to(input_dtype)
190
+
191
+
192
+ class CoreAttention(torch.nn.Module):
193
+ def __init__(self, config: ChatGLMConfig, layer_number):
194
+ super(CoreAttention, self).__init__()
195
+
196
+ self.apply_query_key_layer_scaling = config.apply_query_key_layer_scaling
197
+ self.attention_softmax_in_fp32 = config.attention_softmax_in_fp32
198
+ if self.apply_query_key_layer_scaling:
199
+ self.attention_softmax_in_fp32 = True
200
+ self.layer_number = max(1, layer_number)
201
+
202
+ projection_size = config.kv_channels * config.num_attention_heads
203
+
204
+ # Per attention head and per partition values.
205
+ self.hidden_size_per_partition = projection_size
206
+ self.hidden_size_per_attention_head = projection_size // config.num_attention_heads
207
+ self.num_attention_heads_per_partition = config.num_attention_heads
208
+
209
+ coeff = None
210
+ self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)
211
+ if self.apply_query_key_layer_scaling:
212
+ coeff = self.layer_number
213
+ self.norm_factor *= coeff
214
+ self.coeff = coeff
215
+
216
+ self.attention_dropout = torch.nn.Dropout(config.attention_dropout)
217
+
218
+ def forward(self, query_layer, key_layer, value_layer, attention_mask):
219
+ pytorch_major_version = int(torch.__version__.split('.')[0])
220
+ if pytorch_major_version >= 2:
221
+ query_layer, key_layer, value_layer = [k.permute(1, 2, 0, 3) for k in [query_layer, key_layer, value_layer]]
222
+ if attention_mask is None and query_layer.shape[2] == key_layer.shape[2]:
223
+ context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer,
224
+ is_causal=True)
225
+ else:
226
+ if attention_mask is not None:
227
+ attention_mask = ~attention_mask
228
+ context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer,
229
+ attention_mask)
230
+ context_layer = context_layer.permute(2, 0, 1, 3)
231
+ new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)
232
+ context_layer = context_layer.reshape(*new_context_layer_shape)
233
+ else:
234
+ # Raw attention scores
235
+
236
+ # [b, np, sq, sk]
237
+ output_size = (query_layer.size(1), query_layer.size(2), query_layer.size(0), key_layer.size(0))
238
+
239
+ # [sq, b, np, hn] -> [sq, b * np, hn]
240
+ query_layer = query_layer.view(output_size[2], output_size[0] * output_size[1], -1)
241
+ # [sk, b, np, hn] -> [sk, b * np, hn]
242
+ key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1)
243
+
244
+ # preallocating input tensor: [b * np, sq, sk]
245
+ matmul_input_buffer = torch.empty(
246
+ output_size[0] * output_size[1], output_size[2], output_size[3], dtype=query_layer.dtype,
247
+ device=query_layer.device
248
+ )
249
+
250
+ # Raw attention scores. [b * np, sq, sk]
251
+ matmul_result = torch.baddbmm(
252
+ matmul_input_buffer,
253
+ query_layer.transpose(0, 1), # [b * np, sq, hn]
254
+ key_layer.transpose(0, 1).transpose(1, 2), # [b * np, hn, sk]
255
+ beta=0.0,
256
+ alpha=(1.0 / self.norm_factor),
257
+ )
258
+
259
+ # change view to [b, np, sq, sk]
260
+ attention_scores = matmul_result.view(*output_size)
261
+
262
+ # ===========================
263
+ # Attention probs and dropout
264
+ # ===========================
265
+
266
+ # attention scores and attention mask [b, np, sq, sk]
267
+ if self.attention_softmax_in_fp32:
268
+ attention_scores = attention_scores.float()
269
+ if self.coeff is not None:
270
+ attention_scores = attention_scores * self.coeff
271
+ if attention_mask is None and attention_scores.shape[2] == attention_scores.shape[3]:
272
+ attention_mask = torch.ones(output_size[0], 1, output_size[2], output_size[3],
273
+ device=attention_scores.device, dtype=torch.bool)
274
+ attention_mask.tril_()
275
+ attention_mask = ~attention_mask
276
+ if attention_mask is not None:
277
+ attention_scores = attention_scores.masked_fill(attention_mask, float("-inf"))
278
+ attention_probs = F.softmax(attention_scores, dim=-1)
279
+ attention_probs = attention_probs.type_as(value_layer)
280
+
281
+ # This is actually dropping out entire tokens to attend to, which might
282
+ # seem a bit unusual, but is taken from the original Transformer paper.
283
+ attention_probs = self.attention_dropout(attention_probs)
284
+ # =========================
285
+ # Context layer. [sq, b, hp]
286
+ # =========================
287
+
288
+ # value_layer -> context layer.
289
+ # [sk, b, np, hn] --> [b, np, sq, hn]
290
+
291
+ # context layer shape: [b, np, sq, hn]
292
+ output_size = (value_layer.size(1), value_layer.size(2), query_layer.size(0), value_layer.size(3))
293
+ # change view [sk, b * np, hn]
294
+ value_layer = value_layer.view(value_layer.size(0), output_size[0] * output_size[1], -1)
295
+ # change view [b * np, sq, sk]
296
+ attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1)
297
+ # matmul: [b * np, sq, hn]
298
+ context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1))
299
+ # change view [b, np, sq, hn]
300
+ context_layer = context_layer.view(*output_size)
301
+ # [b, np, sq, hn] --> [sq, b, np, hn]
302
+ context_layer = context_layer.permute(2, 0, 1, 3).contiguous()
303
+ # [sq, b, np, hn] --> [sq, b, hp]
304
+ new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)
305
+ context_layer = context_layer.view(*new_context_layer_shape)
306
+
307
+ return context_layer
308
+
309
+
310
+ class SelfAttention(torch.nn.Module):
311
+ """Parallel self-attention layer abstract class.
312
+
313
+ Self-attention layer takes input with size [s, b, h]
314
+ and returns output of the same size.
315
+ """
316
+
317
+ def __init__(self, config: ChatGLMConfig, layer_number, device=None):
318
+ super(SelfAttention, self).__init__()
319
+ self.layer_number = max(1, layer_number)
320
+
321
+ self.projection_size = config.kv_channels * config.num_attention_heads
322
+
323
+ # Per attention head and per partition values.
324
+ self.hidden_size_per_attention_head = self.projection_size // config.num_attention_heads
325
+ self.num_attention_heads_per_partition = config.num_attention_heads
326
+
327
+ self.multi_query_attention = config.multi_query_attention
328
+ self.qkv_hidden_size = 3 * self.projection_size
329
+ if self.multi_query_attention:
330
+ self.num_multi_query_groups_per_partition = config.multi_query_group_num
331
+ self.qkv_hidden_size = (
332
+ self.projection_size + 2 * self.hidden_size_per_attention_head * config.multi_query_group_num
333
+ )
334
+ self.query_key_value = nn.Linear(config.hidden_size, self.qkv_hidden_size,
335
+ bias=config.add_bias_linear or config.add_qkv_bias,
336
+ device=device, **_config_to_kwargs(config)
337
+ )
338
+
339
+ self.core_attention = CoreAttention(config, self.layer_number)
340
+
341
+ # Output.
342
+ self.dense = nn.Linear(self.projection_size, config.hidden_size, bias=config.add_bias_linear,
343
+ device=device, **_config_to_kwargs(config)
344
+ )
345
+
346
+ def _allocate_memory(self, inference_max_sequence_len, batch_size, device=None, dtype=None):
347
+ if self.multi_query_attention:
348
+ num_attention_heads = self.num_multi_query_groups_per_partition
349
+ else:
350
+ num_attention_heads = self.num_attention_heads_per_partition
351
+ return torch.empty(
352
+ inference_max_sequence_len,
353
+ batch_size,
354
+ num_attention_heads,
355
+ self.hidden_size_per_attention_head,
356
+ dtype=dtype,
357
+ device=device,
358
+ )
359
+
360
+ def forward(
361
+ self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True
362
+ ):
363
+ # hidden_states: [sq, b, h]
364
+
365
+ # =================================================
366
+ # Pre-allocate memory for key-values for inference.
367
+ # =================================================
368
+ # =====================
369
+ # Query, Key, and Value
370
+ # =====================
371
+
372
+ # Attention heads [sq, b, h] --> [sq, b, (np * 3 * hn)]
373
+ mixed_x_layer = self.query_key_value(hidden_states)
374
+
375
+ if self.multi_query_attention:
376
+ (query_layer, key_layer, value_layer) = mixed_x_layer.split(
377
+ [
378
+ self.num_attention_heads_per_partition * self.hidden_size_per_attention_head,
379
+ self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head,
380
+ self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head,
381
+ ],
382
+ dim=-1,
383
+ )
384
+ query_layer = query_layer.view(
385
+ query_layer.size()[:-1] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head)
386
+ )
387
+ key_layer = key_layer.view(
388
+ key_layer.size()[:-1] + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head)
389
+ )
390
+ value_layer = value_layer.view(
391
+ value_layer.size()[:-1]
392
+ + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head)
393
+ )
394
+ else:
395
+ new_tensor_shape = mixed_x_layer.size()[:-1] + \
396
+ (self.num_attention_heads_per_partition,
397
+ 3 * self.hidden_size_per_attention_head)
398
+ mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)
399
+
400
+ # [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn]
401
+ (query_layer, key_layer, value_layer) = split_tensor_along_last_dim(mixed_x_layer, 3)
402
+
403
+ # apply relative positional encoding (rotary embedding)
404
+ if rotary_pos_emb is not None:
405
+ query_layer = apply_rotary_pos_emb(query_layer, rotary_pos_emb)
406
+ key_layer = apply_rotary_pos_emb(key_layer, rotary_pos_emb)
407
+
408
+ # adjust key and value for inference
409
+ if kv_cache is not None:
410
+ cache_k, cache_v = kv_cache
411
+ key_layer = torch.cat((cache_k, key_layer), dim=0)
412
+ value_layer = torch.cat((cache_v, value_layer), dim=0)
413
+ if use_cache:
414
+ kv_cache = (key_layer, value_layer)
415
+ else:
416
+ kv_cache = None
417
+
418
+ if self.multi_query_attention:
419
+ key_layer = key_layer.unsqueeze(-2)
420
+ key_layer = key_layer.expand(
421
+ -1, -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1
422
+ )
423
+ key_layer = key_layer.contiguous().view(
424
+ key_layer.size()[:2] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head)
425
+ )
426
+ value_layer = value_layer.unsqueeze(-2)
427
+ value_layer = value_layer.expand(
428
+ -1, -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1
429
+ )
430
+ value_layer = value_layer.contiguous().view(
431
+ value_layer.size()[:2] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head)
432
+ )
433
+
434
+ # ==================================
435
+ # core attention computation
436
+ # ==================================
437
+
438
+ context_layer = self.core_attention(query_layer, key_layer, value_layer, attention_mask)
439
+
440
+ # =================
441
+ # Output. [sq, b, h]
442
+ # =================
443
+
444
+ output = self.dense(context_layer)
445
+
446
+ return output, kv_cache
447
+
448
+
449
+ def _config_to_kwargs(args):
450
+ common_kwargs = {
451
+ "dtype": args.torch_dtype,
452
+ }
453
+ return common_kwargs
454
+
455
+
456
+ class MLP(torch.nn.Module):
457
+ """MLP.
458
+
459
+ MLP will take the input with h hidden state, project it to 4*h
460
+ hidden dimension, perform nonlinear transformation, and project the
461
+ state back into h hidden dimension.
462
+ """
463
+
464
+ def __init__(self, config: ChatGLMConfig, device=None):
465
+ super(MLP, self).__init__()
466
+
467
+ self.add_bias = config.add_bias_linear
468
+
469
+ # Project to 4h. If using swiglu double the output width, see https://arxiv.org/pdf/2002.05202.pdf
470
+ self.dense_h_to_4h = nn.Linear(
471
+ config.hidden_size,
472
+ config.ffn_hidden_size * 2,
473
+ bias=self.add_bias,
474
+ device=device,
475
+ **_config_to_kwargs(config)
476
+ )
477
+
478
+ def swiglu(x):
479
+ x = torch.chunk(x, 2, dim=-1)
480
+ return F.silu(x[0]) * x[1]
481
+
482
+ self.activation_func = swiglu
483
+
484
+ # Project back to h.
485
+ self.dense_4h_to_h = nn.Linear(
486
+ config.ffn_hidden_size,
487
+ config.hidden_size,
488
+ bias=self.add_bias,
489
+ device=device,
490
+ **_config_to_kwargs(config)
491
+ )
492
+
493
+ def forward(self, hidden_states):
494
+ # [s, b, 4hp]
495
+ intermediate_parallel = self.dense_h_to_4h(hidden_states)
496
+ intermediate_parallel = self.activation_func(intermediate_parallel)
497
+ # [s, b, h]
498
+ output = self.dense_4h_to_h(intermediate_parallel)
499
+ return output
500
+
501
+
502
+ class GLMBlock(torch.nn.Module):
503
+ """A single transformer layer.
504
+
505
+ Transformer layer takes input with size [s, b, h] and returns an
506
+ output of the same size.
507
+ """
508
+
509
+ def __init__(self, config: ChatGLMConfig, layer_number, device=None):
510
+ super(GLMBlock, self).__init__()
511
+ self.layer_number = layer_number
512
+
513
+ self.apply_residual_connection_post_layernorm = config.apply_residual_connection_post_layernorm
514
+
515
+ self.fp32_residual_connection = config.fp32_residual_connection
516
+
517
+ LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm
518
+ # Layernorm on the input data.
519
+ self.input_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device,
520
+ dtype=config.torch_dtype)
521
+
522
+ # Self attention.
523
+ self.self_attention = SelfAttention(config, layer_number, device=device)
524
+ self.hidden_dropout = config.hidden_dropout
525
+
526
+ # Layernorm on the attention output
527
+ self.post_attention_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device,
528
+ dtype=config.torch_dtype)
529
+
530
+ # MLP
531
+ self.mlp = MLP(config, device=device)
532
+
533
+ def forward(
534
+ self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True,
535
+ ):
536
+ # hidden_states: [s, b, h]
537
+
538
+ # Layer norm at the beginning of the transformer layer.
539
+ layernorm_output = self.input_layernorm(hidden_states)
540
+ # Self attention.
541
+ attention_output, kv_cache = self.self_attention(
542
+ layernorm_output,
543
+ attention_mask,
544
+ rotary_pos_emb,
545
+ kv_cache=kv_cache,
546
+ use_cache=use_cache
547
+ )
548
+
549
+ # Residual connection.
550
+ if self.apply_residual_connection_post_layernorm:
551
+ residual = layernorm_output
552
+ else:
553
+ residual = hidden_states
554
+
555
+ layernorm_input = torch.nn.functional.dropout(attention_output, p=self.hidden_dropout, training=self.training)
556
+ layernorm_input = residual + layernorm_input
557
+
558
+ # Layer norm post the self attention.
559
+ layernorm_output = self.post_attention_layernorm(layernorm_input)
560
+
561
+ # MLP.
562
+ mlp_output = self.mlp(layernorm_output)
563
+
564
+ # Second residual connection.
565
+ if self.apply_residual_connection_post_layernorm:
566
+ residual = layernorm_output
567
+ else:
568
+ residual = layernorm_input
569
+
570
+ output = torch.nn.functional.dropout(mlp_output, p=self.hidden_dropout, training=self.training)
571
+ output = residual + output
572
+
573
+ return output, kv_cache
574
+
575
+
576
+ class GLMTransformer(torch.nn.Module):
577
+ """Transformer class."""
578
+
579
+ def __init__(self, config: ChatGLMConfig, device=None):
580
+ super(GLMTransformer, self).__init__()
581
+
582
+ self.fp32_residual_connection = config.fp32_residual_connection
583
+ self.post_layer_norm = config.post_layer_norm
584
+
585
+ # Number of layers.
586
+ self.num_layers = config.num_layers
587
+
588
+ # Transformer layers.
589
+ def build_layer(layer_number):
590
+ return GLMBlock(config, layer_number, device=device)
591
+
592
+ self.layers = torch.nn.ModuleList([build_layer(i + 1) for i in range(self.num_layers)])
593
+
594
+ if self.post_layer_norm:
595
+ LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm
596
+ # Final layer norm before output.
597
+ self.final_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device,
598
+ dtype=config.torch_dtype)
599
+
600
+ self.gradient_checkpointing = False
601
+
602
+ def _get_layer(self, layer_number):
603
+ return self.layers[layer_number]
604
+
605
+ def forward(
606
+ self, hidden_states, attention_mask, rotary_pos_emb, kv_caches=None,
607
+ use_cache: Optional[bool] = True,
608
+ output_hidden_states: Optional[bool] = False,
609
+ ):
610
+ if not kv_caches:
611
+ kv_caches = [None for _ in range(self.num_layers)]
612
+ presents = () if use_cache else None
613
+ if self.gradient_checkpointing and self.training:
614
+ if use_cache:
615
+ logger.warning_once(
616
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
617
+ )
618
+ use_cache = False
619
+
620
+ all_self_attentions = None
621
+ all_hidden_states = () if output_hidden_states else None
622
+ for index in range(self.num_layers):
623
+ if output_hidden_states:
624
+ all_hidden_states = all_hidden_states + (hidden_states,)
625
+
626
+ layer = self._get_layer(index)
627
+ if self.gradient_checkpointing and self.training:
628
+ layer_ret = torch.utils.checkpoint.checkpoint(
629
+ layer,
630
+ hidden_states,
631
+ attention_mask,
632
+ rotary_pos_emb,
633
+ kv_caches[index],
634
+ use_cache
635
+ )
636
+ else:
637
+ layer_ret = layer(
638
+ hidden_states,
639
+ attention_mask,
640
+ rotary_pos_emb,
641
+ kv_cache=kv_caches[index],
642
+ use_cache=use_cache
643
+ )
644
+ hidden_states, kv_cache = layer_ret
645
+ if use_cache:
646
+ presents = presents + (kv_cache,)
647
+
648
+ if output_hidden_states:
649
+ all_hidden_states = all_hidden_states + (hidden_states,)
650
+
651
+ # Final layer norm.
652
+ if self.post_layer_norm:
653
+ hidden_states = self.final_layernorm(hidden_states)
654
+
655
+ return hidden_states, presents, all_hidden_states, all_self_attentions
656
+
657
+
658
+ class ChatGLMPreTrainedModel(PreTrainedModel):
659
+ """
660
+ An abstract class to handle weights initialization and
661
+ a simple interface for downloading and loading pretrained models.
662
+ """
663
+
664
+ is_parallelizable = False
665
+ supports_gradient_checkpointing = True
666
+ config_class = ChatGLMConfig
667
+ base_model_prefix = "transformer"
668
+ _no_split_modules = ["GLMBlock"]
669
+
670
+ def _init_weights(self, module: nn.Module):
671
+ """Initialize the weights."""
672
+ return
673
+
674
+ def get_masks(self, input_ids, past_key_values, padding_mask=None):
675
+ batch_size, seq_length = input_ids.shape
676
+ full_attention_mask = torch.ones(batch_size, seq_length, seq_length, device=input_ids.device)
677
+ full_attention_mask.tril_()
678
+ past_length = 0
679
+ if past_key_values:
680
+ past_length = past_key_values[0][0].shape[0]
681
+ if past_length:
682
+ full_attention_mask = torch.cat((torch.ones(batch_size, seq_length, past_length,
683
+ device=input_ids.device), full_attention_mask), dim=-1)
684
+ if padding_mask is not None:
685
+ full_attention_mask = full_attention_mask * padding_mask.unsqueeze(1)
686
+ if not past_length and padding_mask is not None:
687
+ full_attention_mask -= padding_mask.unsqueeze(-1) - 1
688
+ full_attention_mask = (full_attention_mask < 0.5).bool()
689
+ full_attention_mask.unsqueeze_(1)
690
+ return full_attention_mask
691
+
692
+ def get_position_ids(self, input_ids, device):
693
+ batch_size, seq_length = input_ids.shape
694
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1)
695
+ return position_ids
696
+
697
+ def _set_gradient_checkpointing(self, module, value=False):
698
+ if isinstance(module, GLMTransformer):
699
+ module.gradient_checkpointing = value
700
+
701
+
702
+ class Embedding(torch.nn.Module):
703
+ """Language model embeddings."""
704
+
705
+ def __init__(self, config: ChatGLMConfig, device=None):
706
+ super(Embedding, self).__init__()
707
+
708
+ self.hidden_size = config.hidden_size
709
+ # Word embeddings (parallel).
710
+ self.word_embeddings = nn.Embedding(
711
+ config.padded_vocab_size,
712
+ self.hidden_size,
713
+ dtype=config.torch_dtype,
714
+ device=device
715
+ )
716
+ self.fp32_residual_connection = config.fp32_residual_connection
717
+
718
+ def forward(self, input_ids):
719
+ # Embeddings.
720
+ words_embeddings = self.word_embeddings(input_ids)
721
+ embeddings = words_embeddings
722
+ # Data format change to avoid explicit transposes: [b s h] --> [s b h].
723
+ embeddings = embeddings.transpose(0, 1).contiguous()
724
+ # If the input flag for fp32 residual connection is set, convert for float.
725
+ if self.fp32_residual_connection:
726
+ embeddings = embeddings.float()
727
+ return embeddings
728
+
729
+
730
+ class ChatGLMModel(ChatGLMPreTrainedModel):
731
+ def __init__(self, config: ChatGLMConfig, device=None, empty_init=True):
732
+ super().__init__(config)
733
+ if empty_init:
734
+ init_method = skip_init
735
+ else:
736
+ init_method = default_init
737
+ init_kwargs = {}
738
+ if device is not None:
739
+ init_kwargs["device"] = device
740
+ self.embedding = init_method(Embedding, config, **init_kwargs)
741
+ self.num_layers = config.num_layers
742
+ self.multi_query_group_num = config.multi_query_group_num
743
+ self.kv_channels = config.kv_channels
744
+
745
+ # Rotary positional embeddings
746
+ self.seq_length = config.seq_length
747
+ rotary_dim = (
748
+ config.hidden_size // config.num_attention_heads if config.kv_channels is None else config.kv_channels
749
+ )
750
+
751
+ self.rotary_pos_emb = RotaryEmbedding(rotary_dim // 2, original_impl=config.original_rope, device=device,
752
+ dtype=config.torch_dtype)
753
+ self.encoder = init_method(GLMTransformer, config, **init_kwargs)
754
+ self.output_layer = init_method(nn.Linear, config.hidden_size, config.padded_vocab_size, bias=False,
755
+ dtype=config.torch_dtype, **init_kwargs)
756
+ self.pre_seq_len = config.pre_seq_len
757
+ self.prefix_projection = config.prefix_projection
758
+ if self.pre_seq_len is not None:
759
+ for param in self.parameters():
760
+ param.requires_grad = False
761
+ self.prefix_tokens = torch.arange(self.pre_seq_len).long()
762
+ self.prefix_encoder = PrefixEncoder(config)
763
+ self.dropout = torch.nn.Dropout(0.1)
764
+
765
+ def get_input_embeddings(self):
766
+ return self.embedding.word_embeddings
767
+
768
+ def get_prompt(self, batch_size, device, dtype=torch.half):
769
+ prefix_tokens = self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(device)
770
+ past_key_values = self.prefix_encoder(prefix_tokens).type(dtype)
771
+ past_key_values = past_key_values.view(
772
+ batch_size,
773
+ self.pre_seq_len,
774
+ self.num_layers * 2,
775
+ self.multi_query_group_num,
776
+ self.kv_channels
777
+ )
778
+ # seq_len, b, nh, hidden_size
779
+ past_key_values = self.dropout(past_key_values)
780
+ past_key_values = past_key_values.permute([2, 1, 0, 3, 4]).split(2)
781
+ return past_key_values
782
+
783
+ def forward(
784
+ self,
785
+ input_ids,
786
+ position_ids: Optional[torch.Tensor] = None,
787
+ attention_mask: Optional[torch.BoolTensor] = None,
788
+ full_attention_mask: Optional[torch.BoolTensor] = None,
789
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
790
+ inputs_embeds: Optional[torch.Tensor] = None,
791
+ use_cache: Optional[bool] = None,
792
+ output_hidden_states: Optional[bool] = None,
793
+ return_dict: Optional[bool] = None,
794
+ ):
795
+ output_hidden_states = (
796
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
797
+ )
798
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
799
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
800
+
801
+ batch_size, seq_length = input_ids.shape
802
+
803
+ if inputs_embeds is None:
804
+ inputs_embeds = self.embedding(input_ids)
805
+
806
+ if self.pre_seq_len is not None:
807
+ if past_key_values is None:
808
+ past_key_values = self.get_prompt(batch_size=batch_size, device=input_ids.device,
809
+ dtype=inputs_embeds.dtype)
810
+ if attention_mask is not None:
811
+ attention_mask = torch.cat([attention_mask.new_ones((batch_size, self.pre_seq_len)),
812
+ attention_mask], dim=-1)
813
+
814
+ if full_attention_mask is None:
815
+ if (attention_mask is not None and not attention_mask.all()) or (past_key_values and seq_length != 1):
816
+ full_attention_mask = self.get_masks(input_ids, past_key_values, padding_mask=attention_mask)
817
+
818
+ # Rotary positional embeddings
819
+ rotary_pos_emb = self.rotary_pos_emb(self.seq_length)
820
+ if position_ids is not None:
821
+ rotary_pos_emb = rotary_pos_emb[position_ids]
822
+ else:
823
+ rotary_pos_emb = rotary_pos_emb[None, :seq_length]
824
+ rotary_pos_emb = rotary_pos_emb.transpose(0, 1).contiguous()
825
+
826
+ # Run encoder.
827
+ hidden_states, presents, all_hidden_states, all_self_attentions = self.encoder(
828
+ inputs_embeds, full_attention_mask, rotary_pos_emb=rotary_pos_emb,
829
+ kv_caches=past_key_values, use_cache=use_cache, output_hidden_states=output_hidden_states
830
+ )
831
+
832
+ if not return_dict:
833
+ return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
834
+
835
+ return BaseModelOutputWithPast(
836
+ last_hidden_state=hidden_states,
837
+ past_key_values=presents,
838
+ hidden_states=all_hidden_states,
839
+ attentions=all_self_attentions,
840
+ )
841
+
842
+ def quantize(self, weight_bit_width: int):
843
+ from .quantization import quantize
844
+ quantize(self.encoder, weight_bit_width)
845
+ return self
846
+
847
+
848
+ class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):
849
+ def __init__(self, config: ChatGLMConfig, empty_init=True, device=None):
850
+ super().__init__(config)
851
+
852
+ self.max_sequence_length = config.max_length
853
+ self.transformer = ChatGLMModel(config, empty_init=empty_init, device=device)
854
+ self.config = config
855
+ self.quantized = False
856
+
857
+ if self.config.quantization_bit:
858
+ self.quantize(self.config.quantization_bit, empty_init=True)
859
+
860
+ def _update_model_kwargs_for_generation(
861
+ self,
862
+ outputs: ModelOutput,
863
+ model_kwargs: Dict[str, Any],
864
+ is_encoder_decoder: bool = False,
865
+ standardize_cache_format: bool = False,
866
+ ) -> Dict[str, Any]:
867
+ # update past_key_values
868
+ model_kwargs["past_key_values"] = self._extract_past_from_model_output(
869
+ outputs, standardize_cache_format=standardize_cache_format
870
+ )
871
+
872
+ # update attention mask
873
+ if "attention_mask" in model_kwargs:
874
+ attention_mask = model_kwargs["attention_mask"]
875
+ model_kwargs["attention_mask"] = torch.cat(
876
+ [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
877
+ )
878
+
879
+ # update position ids
880
+ if "position_ids" in model_kwargs:
881
+ position_ids = model_kwargs["position_ids"]
882
+ new_position_id = position_ids[..., -1:].clone()
883
+ new_position_id += 1
884
+ model_kwargs["position_ids"] = torch.cat(
885
+ [position_ids, new_position_id], dim=-1
886
+ )
887
+
888
+ model_kwargs["is_first_forward"] = False
889
+ return model_kwargs
890
+
891
+ def prepare_inputs_for_generation(
892
+ self,
893
+ input_ids: torch.LongTensor,
894
+ past_key_values: Optional[torch.Tensor] = None,
895
+ attention_mask: Optional[torch.Tensor] = None,
896
+ position_ids: Optional[torch.Tensor] = None,
897
+ is_first_forward: bool = True,
898
+ **kwargs
899
+ ) -> dict:
900
+ # only last token for input_ids if past is not None
901
+ if position_ids is None:
902
+ position_ids = self.get_position_ids(input_ids, device=input_ids.device)
903
+ if not is_first_forward:
904
+ position_ids = position_ids[..., -1:]
905
+ input_ids = input_ids[:, -1:]
906
+ return {
907
+ "input_ids": input_ids,
908
+ "past_key_values": past_key_values,
909
+ "position_ids": position_ids,
910
+ "attention_mask": attention_mask,
911
+ "return_last_logit": True
912
+ }
913
+
914
+ def forward(
915
+ self,
916
+ input_ids: Optional[torch.Tensor] = None,
917
+ position_ids: Optional[torch.Tensor] = None,
918
+ attention_mask: Optional[torch.Tensor] = None,
919
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
920
+ inputs_embeds: Optional[torch.Tensor] = None,
921
+ labels: Optional[torch.Tensor] = None,
922
+ use_cache: Optional[bool] = None,
923
+ output_attentions: Optional[bool] = None,
924
+ output_hidden_states: Optional[bool] = None,
925
+ return_dict: Optional[bool] = None,
926
+ return_last_logit: Optional[bool] = False,
927
+ ):
928
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
929
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
930
+
931
+ transformer_outputs = self.transformer(
932
+ input_ids=input_ids,
933
+ position_ids=position_ids,
934
+ attention_mask=attention_mask,
935
+ past_key_values=past_key_values,
936
+ inputs_embeds=inputs_embeds,
937
+ use_cache=use_cache,
938
+ output_hidden_states=output_hidden_states,
939
+ return_dict=return_dict,
940
+ )
941
+
942
+ hidden_states = transformer_outputs[0]
943
+ if return_last_logit:
944
+ hidden_states = hidden_states[-1:]
945
+ lm_logits = self.transformer.output_layer(hidden_states)
946
+ lm_logits = lm_logits.transpose(0, 1).contiguous()
947
+
948
+ loss = None
949
+ if labels is not None:
950
+ lm_logits = lm_logits.to(torch.float32)
951
+
952
+ # Shift so that tokens < n predict n
953
+ shift_logits = lm_logits[..., :-1, :].contiguous()
954
+ shift_labels = labels[..., 1:].contiguous()
955
+ # Flatten the tokens
956
+ loss_fct = CrossEntropyLoss(ignore_index=-100)
957
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
958
+
959
+ lm_logits = lm_logits.to(hidden_states.dtype)
960
+ loss = loss.to(hidden_states.dtype)
961
+
962
+ if not return_dict:
963
+ output = (lm_logits,) + transformer_outputs[1:]
964
+ return ((loss,) + output) if loss is not None else output
965
+
966
+ return CausalLMOutputWithPast(
967
+ loss=loss,
968
+ logits=lm_logits,
969
+ past_key_values=transformer_outputs.past_key_values,
970
+ hidden_states=transformer_outputs.hidden_states,
971
+ attentions=transformer_outputs.attentions,
972
+ )
973
+
974
+ @staticmethod
975
+ def _reorder_cache(
976
+ past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor
977
+ ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]:
978
+ """
979
+ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
980
+ [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
981
+ beam_idx at every generation step.
982
+
983
+ Output shares the same memory storage as `past`.
984
+ """
985
+ return tuple(
986
+ (
987
+ layer_past[0].index_select(1, beam_idx.to(layer_past[0].device)),
988
+ layer_past[1].index_select(1, beam_idx.to(layer_past[1].device)),
989
+ )
990
+ for layer_past in past
991
+ )
992
+
993
+ def process_response(self, response):
994
+ response = response.strip()
995
+ response = response.replace("[[训练时间]]", "2023年")
996
+ return response
997
+
998
+ def build_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] = None):
999
+ prompt = tokenizer.build_prompt(query, history=history)
1000
+ inputs = tokenizer([prompt], return_tensors="pt")
1001
+ inputs = inputs.to(self.device)
1002
+ return inputs
1003
+
1004
+ def build_stream_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] = None):
1005
+ if history:
1006
+ prompt = "\n\n[Round {}]\n\n问:{}\n\n答:".format(len(history) + 1, query)
1007
+ input_ids = tokenizer.encode(prompt, add_special_tokens=False)
1008
+ input_ids = input_ids[1:]
1009
+ inputs = tokenizer.batch_encode_plus([(input_ids, None)], return_tensors="pt", add_special_tokens=False)
1010
+ else:
1011
+ prompt = "[Round {}]\n\n问:{}\n\n答:".format(len(history) + 1, query)
1012
+ inputs = tokenizer([prompt], return_tensors="pt")
1013
+ inputs = inputs.to(self.device)
1014
+ return inputs
1015
+
1016
+ @torch.no_grad()
1017
+ def chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, max_length: int = 8192, num_beams=1,
1018
+ do_sample=True, top_p=0.8, temperature=0.8, logits_processor=None, **kwargs):
1019
+ if history is None:
1020
+ history = []
1021
+ if logits_processor is None:
1022
+ logits_processor = LogitsProcessorList()
1023
+ logits_processor.append(InvalidScoreLogitsProcessor())
1024
+ gen_kwargs = {"max_length": max_length, "num_beams": num_beams, "do_sample": do_sample, "top_p": top_p,
1025
+ "temperature": temperature, "logits_processor": logits_processor, **kwargs}
1026
+ inputs = self.build_inputs(tokenizer, query, history=history)
1027
+ outputs = self.generate(**inputs, **gen_kwargs)
1028
+ outputs = outputs.tolist()[0][len(inputs["input_ids"][0]):]
1029
+ response = tokenizer.decode(outputs)
1030
+ response = self.process_response(response)
1031
+ history = history + [(query, response)]
1032
+ return response, history
1033
+
1034
+ @torch.no_grad()
1035
+ def stream_chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, past_key_values=None,
1036
+ max_length: int = 8192, do_sample=True, top_p=0.8, temperature=0.8, logits_processor=None,
1037
+ return_past_key_values=False, **kwargs):
1038
+ if history is None:
1039
+ history = []
1040
+ if logits_processor is None:
1041
+ logits_processor = LogitsProcessorList()
1042
+ logits_processor.append(InvalidScoreLogitsProcessor())
1043
+ gen_kwargs = {"max_length": max_length, "do_sample": do_sample, "top_p": top_p,
1044
+ "temperature": temperature, "logits_processor": logits_processor, **kwargs}
1045
+ if past_key_values is None and not return_past_key_values:
1046
+ inputs = self.build_inputs(tokenizer, query, history=history)
1047
+ else:
1048
+ inputs = self.build_stream_inputs(tokenizer, query, history=history)
1049
+ if past_key_values is not None:
1050
+ past_length = past_key_values[0][0].shape[0]
1051
+ if self.transformer.pre_seq_len is not None:
1052
+ past_length -= self.transformer.pre_seq_len
1053
+ inputs.position_ids += past_length
1054
+ attention_mask = inputs.attention_mask
1055
+ attention_mask = torch.cat((attention_mask.new_ones(1, past_length), attention_mask), dim=1)
1056
+ inputs['attention_mask'] = attention_mask
1057
+ for outputs in self.stream_generate(**inputs, past_key_values=past_key_values,
1058
+ return_past_key_values=return_past_key_values, **gen_kwargs):
1059
+ if return_past_key_values:
1060
+ outputs, past_key_values = outputs
1061
+ outputs = outputs.tolist()[0][len(inputs["input_ids"][0]):]
1062
+ response = tokenizer.decode(outputs)
1063
+ if response and response[-1] != "�":
1064
+ response = self.process_response(response)
1065
+ new_history = history + [(query, response)]
1066
+ if return_past_key_values:
1067
+ yield response, new_history, past_key_values
1068
+ else:
1069
+ yield response, new_history
1070
+
1071
+ @torch.no_grad()
1072
+ def stream_generate(
1073
+ self,
1074
+ input_ids,
1075
+ generation_config: Optional[GenerationConfig] = None,
1076
+ logits_processor: Optional[LogitsProcessorList] = None,
1077
+ stopping_criteria: Optional[StoppingCriteriaList] = None,
1078
+ prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
1079
+ return_past_key_values=False,
1080
+ **kwargs,
1081
+ ):
1082
+ batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1]
1083
+
1084
+ if generation_config is None:
1085
+ generation_config = self.generation_config
1086
+ generation_config = copy.deepcopy(generation_config)
1087
+ model_kwargs = generation_config.update(**kwargs)
1088
+ bos_token_id, eos_token_id = generation_config.bos_token_id, generation_config.eos_token_id
1089
+
1090
+ if isinstance(eos_token_id, int):
1091
+ eos_token_id = [eos_token_id]
1092
+
1093
+ has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
1094
+ if has_default_max_length and generation_config.max_new_tokens is None:
1095
+ warnings.warn(
1096
+ f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. "
1097
+ "This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we"
1098
+ " recommend using `max_new_tokens` to control the maximum length of the generation.",
1099
+ UserWarning,
1100
+ )
1101
+ elif generation_config.max_new_tokens is not None:
1102
+ generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length
1103
+ if not has_default_max_length:
1104
+ logger.warn(
1105
+ f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(="
1106
+ f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
1107
+ "Please refer to the documentation for more information. "
1108
+ "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)",
1109
+ UserWarning,
1110
+ )
1111
+
1112
+ if input_ids_seq_length >= generation_config.max_length:
1113
+ input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
1114
+ logger.warning(
1115
+ f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to"
1116
+ f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
1117
+ " increasing `max_new_tokens`."
1118
+ )
1119
+
1120
+ # 2. Set generation parameters if not already defined
1121
+ logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
1122
+ stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
1123
+
1124
+ logits_processor = self._get_logits_processor(
1125
+ generation_config=generation_config,
1126
+ input_ids_seq_length=input_ids_seq_length,
1127
+ encoder_input_ids=input_ids,
1128
+ prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
1129
+ logits_processor=logits_processor,
1130
+ )
1131
+
1132
+ stopping_criteria = self._get_stopping_criteria(
1133
+ generation_config=generation_config, stopping_criteria=stopping_criteria
1134
+ )
1135
+ logits_warper = self._get_logits_warper(generation_config)
1136
+
1137
+ unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)
1138
+ scores = None
1139
+ while True:
1140
+ model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
1141
+ # forward pass to get next token
1142
+ outputs = self(
1143
+ **model_inputs,
1144
+ return_dict=True,
1145
+ output_attentions=False,
1146
+ output_hidden_states=False,
1147
+ )
1148
+
1149
+ next_token_logits = outputs.logits[:, -1, :]
1150
+
1151
+ # pre-process distribution
1152
+ next_token_scores = logits_processor(input_ids, next_token_logits)
1153
+ next_token_scores = logits_warper(input_ids, next_token_scores)
1154
+
1155
+ # sample
1156
+ probs = nn.functional.softmax(next_token_scores, dim=-1)
1157
+ if generation_config.do_sample:
1158
+ next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
1159
+ else:
1160
+ next_tokens = torch.argmax(probs, dim=-1)
1161
+
1162
+ # update generated ids, model inputs, and length for next step
1163
+ input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
1164
+ model_kwargs = self._update_model_kwargs_for_generation(
1165
+ outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
1166
+ )
1167
+ unfinished_sequences = unfinished_sequences.mul((sum(next_tokens != i for i in eos_token_id)).long())
1168
+ if return_past_key_values:
1169
+ yield input_ids, outputs.past_key_values
1170
+ else:
1171
+ yield input_ids
1172
+ # stop when each sentence is finished, or if we exceed the maximum length
1173
+ if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):
1174
+ break
1175
+
1176
+ def quantize(self, bits: int, empty_init=False, device=None, **kwargs):
1177
+ if bits == 0:
1178
+ return
1179
+
1180
+ from .quantization import quantize
1181
+
1182
+ if self.quantized:
1183
+ logger.info("Already quantized.")
1184
+ return self
1185
+
1186
+ self.quantized = True
1187
+
1188
+ self.config.quantization_bit = bits
1189
+
1190
+ self.transformer.encoder = quantize(self.transformer.encoder, bits, empty_init=empty_init, device=device,
1191
+ **kwargs)
1192
+ return self
pytorch_model-00001-of-00015.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0bfe54a4578f905b5bbc98034c73f6e98f09630912e45b386e0bfb98134d5686
+ size 978368999
pytorch_model-00002-of-00015.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:71a2030f949888daa453461187fc793fbc4bde478e1338ce8578ed60e98ef8e6
+ size 849411887
pytorch_model-00003-of-00015.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f70865859eb37fbe9b8adacee5bcdf7cf7b52d13fd50b7dd776d71f83676f25f
+ size 815848587
pytorch_model-00004-of-00015.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:108c40a95ee6fd4999fef034d42cedad2ffc81547dc2dea1f0d5ac4792956db6
+ size 815848587
pytorch_model-00005-of-00015.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c69f192b68f4d0e4763827f6f4c9d7d1ff71e4fae56e03a9d77c8a8702426ae4
+ size 815848587
pytorch_model-00006-of-00015.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:220699bb4ea5865edc93daa2100a4308c4d50b079b165146708f797d91e55884
+ size 815848587
pytorch_model-00007-of-00015.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:932c10031db8fce43f874c146fac5ccb786efd77c593468cce57b9beac4e5b8f
+ size 815848587
pytorch_model-00008-of-00015.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b4bd9c99175318ce96408c33862456f0453f0c953563009740eca77359c388b
+ size 815848587
pytorch_model-00009-of-00015.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d0c035a766accb320dc579bf39d3521075ba322b12fc32baa87d8928784dca78
+ size 815848587
pytorch_model-00010-of-00015.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:815f0b470afd6043d3ba045782cf58cfa8a9f81037f5ad05de2884afac951dd7
+ size 815848587
pytorch_model-00011-of-00015.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:56ae1ef80cff25f80634ad8430db606d741538840b64adb99045d04b16a99342
+ size 815848587
pytorch_model-00012-of-00015.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:12dbc253ad466dc7ee610a07db9dc458d9dcc4488161b9804b3009c3a562c37a
+ size 815848587
pytorch_model-00013-of-00015.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b07d57f86ff455b9ffc3d1c3aa1c3794a6ecde827a7e275e4b41768a8e06fd8c
+ size 815848587
pytorch_model-00014-of-00015.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9bfaa35c5acc9f6e270861f2c0dc3ff5822b7b8009b107f3bb1bf93440b8c11c
+ size 815848587
pytorch_model-00015-of-00015.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1267f80e607aa5c8f363756221c8d8c851b856f6d4dcb1a14b825002527e8f77
3
+ size 869279645
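The fifteen .bin entries above are Git LFS pointers rather than the tensors themselves: each records the sha256 and byte size of the shard that LFS stores. A small integrity check against those recorded values (a sketch; it assumes the shards have already been downloaded next to this repo's files):

# Hedged sketch: verify a downloaded shard against the size and sha256 recorded
# in its LFS pointer, reading in 1 MiB chunks to keep memory usage flat.
import hashlib

def verify_shard(path: str, expected_sha256: str, expected_size: int) -> bool:
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return size == expected_size and digest.hexdigest() == expected_sha256

# Values copied from the first pointer above.
print(verify_shard(
    "pytorch_model-00001-of-00015.bin",
    "0bfe54a4578f905b5bbc98034c73f6e98f09630912e45b386e0bfb98134d5686",
    978368999,
))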
pytorch_model.bin.index.json ADDED
@@ -0,0 +1,208 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 12487168064
4
+ },
5
+ "weight_map": {
6
+ "lm_head.weight": "pytorch_model-00015-of-00015.bin",
7
+ "transformer.embedding.word_embeddings.weight": "pytorch_model-00001-of-00015.bin",
8
+ "transformer.encoder.final_layernorm.weight": "pytorch_model-00015-of-00015.bin",
9
+ "transformer.encoder.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00015.bin",
10
+ "transformer.encoder.layers.0.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00015.bin",
11
+ "transformer.encoder.layers.0.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00015.bin",
12
+ "transformer.encoder.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00015.bin",
13
+ "transformer.encoder.layers.0.self_attention.dense.weight": "pytorch_model-00001-of-00015.bin",
14
+ "transformer.encoder.layers.0.self_attention.query_key_value.bias": "pytorch_model-00001-of-00015.bin",
15
+ "transformer.encoder.layers.0.self_attention.query_key_value.weight": "pytorch_model-00001-of-00015.bin",
16
+ "transformer.encoder.layers.1.input_layernorm.weight": "pytorch_model-00001-of-00015.bin",
17
+ "transformer.encoder.layers.1.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00015.bin",
18
+ "transformer.encoder.layers.1.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00015.bin",
19
+ "transformer.encoder.layers.1.post_attention_layernorm.weight": "pytorch_model-00002-of-00015.bin",
20
+ "transformer.encoder.layers.1.self_attention.dense.weight": "pytorch_model-00002-of-00015.bin",
21
+ "transformer.encoder.layers.1.self_attention.query_key_value.bias": "pytorch_model-00001-of-00015.bin",
22
+ "transformer.encoder.layers.1.self_attention.query_key_value.weight": "pytorch_model-00001-of-00015.bin",
23
+ "transformer.encoder.layers.10.input_layernorm.weight": "pytorch_model-00006-of-00015.bin",
24
+ "transformer.encoder.layers.10.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00015.bin",
25
+ "transformer.encoder.layers.10.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00015.bin",
26
+ "transformer.encoder.layers.10.post_attention_layernorm.weight": "pytorch_model-00006-of-00015.bin",
27
+ "transformer.encoder.layers.10.self_attention.dense.weight": "pytorch_model-00006-of-00015.bin",
28
+ "transformer.encoder.layers.10.self_attention.query_key_value.bias": "pytorch_model-00006-of-00015.bin",
29
+ "transformer.encoder.layers.10.self_attention.query_key_value.weight": "pytorch_model-00006-of-00015.bin",
30
+ "transformer.encoder.layers.11.input_layernorm.weight": "pytorch_model-00006-of-00015.bin",
31
+ "transformer.encoder.layers.11.mlp.dense_4h_to_h.weight": "pytorch_model-00007-of-00015.bin",
32
+ "transformer.encoder.layers.11.mlp.dense_h_to_4h.weight": "pytorch_model-00007-of-00015.bin",
33
+ "transformer.encoder.layers.11.post_attention_layernorm.weight": "pytorch_model-00006-of-00015.bin",
34
+ "transformer.encoder.layers.11.self_attention.dense.weight": "pytorch_model-00006-of-00015.bin",
35
+ "transformer.encoder.layers.11.self_attention.query_key_value.bias": "pytorch_model-00006-of-00015.bin",
36
+ "transformer.encoder.layers.11.self_attention.query_key_value.weight": "pytorch_model-00006-of-00015.bin",
37
+ "transformer.encoder.layers.12.input_layernorm.weight": "pytorch_model-00007-of-00015.bin",
38
+ "transformer.encoder.layers.12.mlp.dense_4h_to_h.weight": "pytorch_model-00007-of-00015.bin",
39
+ "transformer.encoder.layers.12.mlp.dense_h_to_4h.weight": "pytorch_model-00007-of-00015.bin",
40
+ "transformer.encoder.layers.12.post_attention_layernorm.weight": "pytorch_model-00007-of-00015.bin",
41
+ "transformer.encoder.layers.12.self_attention.dense.weight": "pytorch_model-00007-of-00015.bin",
42
+ "transformer.encoder.layers.12.self_attention.query_key_value.bias": "pytorch_model-00007-of-00015.bin",
43
+ "transformer.encoder.layers.12.self_attention.query_key_value.weight": "pytorch_model-00007-of-00015.bin",
44
+ "transformer.encoder.layers.13.input_layernorm.weight": "pytorch_model-00007-of-00015.bin",
45
+ "transformer.encoder.layers.13.mlp.dense_4h_to_h.weight": "pytorch_model-00008-of-00015.bin",
46
+ "transformer.encoder.layers.13.mlp.dense_h_to_4h.weight": "pytorch_model-00008-of-00015.bin",
47
+ "transformer.encoder.layers.13.post_attention_layernorm.weight": "pytorch_model-00007-of-00015.bin",
48
+ "transformer.encoder.layers.13.self_attention.dense.weight": "pytorch_model-00007-of-00015.bin",
49
+ "transformer.encoder.layers.13.self_attention.query_key_value.bias": "pytorch_model-00007-of-00015.bin",
50
+ "transformer.encoder.layers.13.self_attention.query_key_value.weight": "pytorch_model-00007-of-00015.bin",
51
+ "transformer.encoder.layers.14.input_layernorm.weight": "pytorch_model-00008-of-00015.bin",
52
+ "transformer.encoder.layers.14.mlp.dense_4h_to_h.weight": "pytorch_model-00008-of-00015.bin",
53
+ "transformer.encoder.layers.14.mlp.dense_h_to_4h.weight": "pytorch_model-00008-of-00015.bin",
54
+ "transformer.encoder.layers.14.post_attention_layernorm.weight": "pytorch_model-00008-of-00015.bin",
55
+ "transformer.encoder.layers.14.self_attention.dense.weight": "pytorch_model-00008-of-00015.bin",
56
+ "transformer.encoder.layers.14.self_attention.query_key_value.bias": "pytorch_model-00008-of-00015.bin",
57
+ "transformer.encoder.layers.14.self_attention.query_key_value.weight": "pytorch_model-00008-of-00015.bin",
58
+ "transformer.encoder.layers.15.input_layernorm.weight": "pytorch_model-00008-of-00015.bin",
59
+ "transformer.encoder.layers.15.mlp.dense_4h_to_h.weight": "pytorch_model-00009-of-00015.bin",
60
+ "transformer.encoder.layers.15.mlp.dense_h_to_4h.weight": "pytorch_model-00009-of-00015.bin",
61
+ "transformer.encoder.layers.15.post_attention_layernorm.weight": "pytorch_model-00008-of-00015.bin",
62
+ "transformer.encoder.layers.15.self_attention.dense.weight": "pytorch_model-00008-of-00015.bin",
63
+ "transformer.encoder.layers.15.self_attention.query_key_value.bias": "pytorch_model-00008-of-00015.bin",
64
+ "transformer.encoder.layers.15.self_attention.query_key_value.weight": "pytorch_model-00008-of-00015.bin",
65
+ "transformer.encoder.layers.16.input_layernorm.weight": "pytorch_model-00009-of-00015.bin",
66
+ "transformer.encoder.layers.16.mlp.dense_4h_to_h.weight": "pytorch_model-00009-of-00015.bin",
67
+ "transformer.encoder.layers.16.mlp.dense_h_to_4h.weight": "pytorch_model-00009-of-00015.bin",
68
+ "transformer.encoder.layers.16.post_attention_layernorm.weight": "pytorch_model-00009-of-00015.bin",
69
+ "transformer.encoder.layers.16.self_attention.dense.weight": "pytorch_model-00009-of-00015.bin",
70
+ "transformer.encoder.layers.16.self_attention.query_key_value.bias": "pytorch_model-00009-of-00015.bin",
71
+ "transformer.encoder.layers.16.self_attention.query_key_value.weight": "pytorch_model-00009-of-00015.bin",
72
+ "transformer.encoder.layers.17.input_layernorm.weight": "pytorch_model-00009-of-00015.bin",
73
+ "transformer.encoder.layers.17.mlp.dense_4h_to_h.weight": "pytorch_model-00010-of-00015.bin",
74
+ "transformer.encoder.layers.17.mlp.dense_h_to_4h.weight": "pytorch_model-00010-of-00015.bin",
75
+ "transformer.encoder.layers.17.post_attention_layernorm.weight": "pytorch_model-00009-of-00015.bin",
76
+ "transformer.encoder.layers.17.self_attention.dense.weight": "pytorch_model-00009-of-00015.bin",
77
+ "transformer.encoder.layers.17.self_attention.query_key_value.bias": "pytorch_model-00009-of-00015.bin",
78
+ "transformer.encoder.layers.17.self_attention.query_key_value.weight": "pytorch_model-00009-of-00015.bin",
79
+ "transformer.encoder.layers.18.input_layernorm.weight": "pytorch_model-00010-of-00015.bin",
80
+ "transformer.encoder.layers.18.mlp.dense_4h_to_h.weight": "pytorch_model-00010-of-00015.bin",
81
+ "transformer.encoder.layers.18.mlp.dense_h_to_4h.weight": "pytorch_model-00010-of-00015.bin",
82
+ "transformer.encoder.layers.18.post_attention_layernorm.weight": "pytorch_model-00010-of-00015.bin",
83
+ "transformer.encoder.layers.18.self_attention.dense.weight": "pytorch_model-00010-of-00015.bin",
84
+ "transformer.encoder.layers.18.self_attention.query_key_value.bias": "pytorch_model-00010-of-00015.bin",
85
+ "transformer.encoder.layers.18.self_attention.query_key_value.weight": "pytorch_model-00010-of-00015.bin",
86
+ "transformer.encoder.layers.19.input_layernorm.weight": "pytorch_model-00010-of-00015.bin",
87
+ "transformer.encoder.layers.19.mlp.dense_4h_to_h.weight": "pytorch_model-00011-of-00015.bin",
88
+ "transformer.encoder.layers.19.mlp.dense_h_to_4h.weight": "pytorch_model-00011-of-00015.bin",
89
+ "transformer.encoder.layers.19.post_attention_layernorm.weight": "pytorch_model-00010-of-00015.bin",
90
+ "transformer.encoder.layers.19.self_attention.dense.weight": "pytorch_model-00010-of-00015.bin",
91
+ "transformer.encoder.layers.19.self_attention.query_key_value.bias": "pytorch_model-00010-of-00015.bin",
92
+ "transformer.encoder.layers.19.self_attention.query_key_value.weight": "pytorch_model-00010-of-00015.bin",
93
+ "transformer.encoder.layers.2.input_layernorm.weight": "pytorch_model-00002-of-00015.bin",
94
+ "transformer.encoder.layers.2.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00015.bin",
95
+ "transformer.encoder.layers.2.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00015.bin",
96
+ "transformer.encoder.layers.2.post_attention_layernorm.weight": "pytorch_model-00002-of-00015.bin",
97
+ "transformer.encoder.layers.2.self_attention.dense.weight": "pytorch_model-00002-of-00015.bin",
98
+ "transformer.encoder.layers.2.self_attention.query_key_value.bias": "pytorch_model-00002-of-00015.bin",
99
+ "transformer.encoder.layers.2.self_attention.query_key_value.weight": "pytorch_model-00002-of-00015.bin",
100
+ "transformer.encoder.layers.20.input_layernorm.weight": "pytorch_model-00011-of-00015.bin",
101
+ "transformer.encoder.layers.20.mlp.dense_4h_to_h.weight": "pytorch_model-00011-of-00015.bin",
102
+ "transformer.encoder.layers.20.mlp.dense_h_to_4h.weight": "pytorch_model-00011-of-00015.bin",
103
+ "transformer.encoder.layers.20.post_attention_layernorm.weight": "pytorch_model-00011-of-00015.bin",
104
+ "transformer.encoder.layers.20.self_attention.dense.weight": "pytorch_model-00011-of-00015.bin",
105
+ "transformer.encoder.layers.20.self_attention.query_key_value.bias": "pytorch_model-00011-of-00015.bin",
106
+ "transformer.encoder.layers.20.self_attention.query_key_value.weight": "pytorch_model-00011-of-00015.bin",
107
+ "transformer.encoder.layers.21.input_layernorm.weight": "pytorch_model-00011-of-00015.bin",
108
+ "transformer.encoder.layers.21.mlp.dense_4h_to_h.weight": "pytorch_model-00012-of-00015.bin",
109
+ "transformer.encoder.layers.21.mlp.dense_h_to_4h.weight": "pytorch_model-00012-of-00015.bin",
110
+ "transformer.encoder.layers.21.post_attention_layernorm.weight": "pytorch_model-00011-of-00015.bin",
111
+ "transformer.encoder.layers.21.self_attention.dense.weight": "pytorch_model-00011-of-00015.bin",
112
+ "transformer.encoder.layers.21.self_attention.query_key_value.bias": "pytorch_model-00011-of-00015.bin",
113
+ "transformer.encoder.layers.21.self_attention.query_key_value.weight": "pytorch_model-00011-of-00015.bin",
114
+ "transformer.encoder.layers.22.input_layernorm.weight": "pytorch_model-00012-of-00015.bin",
115
+ "transformer.encoder.layers.22.mlp.dense_4h_to_h.weight": "pytorch_model-00012-of-00015.bin",
116
+ "transformer.encoder.layers.22.mlp.dense_h_to_4h.weight": "pytorch_model-00012-of-00015.bin",
117
+ "transformer.encoder.layers.22.post_attention_layernorm.weight": "pytorch_model-00012-of-00015.bin",
118
+ "transformer.encoder.layers.22.self_attention.dense.weight": "pytorch_model-00012-of-00015.bin",
119
+ "transformer.encoder.layers.22.self_attention.query_key_value.bias": "pytorch_model-00012-of-00015.bin",
120
+ "transformer.encoder.layers.22.self_attention.query_key_value.weight": "pytorch_model-00012-of-00015.bin",
121
+ "transformer.encoder.layers.23.input_layernorm.weight": "pytorch_model-00012-of-00015.bin",
122
+ "transformer.encoder.layers.23.mlp.dense_4h_to_h.weight": "pytorch_model-00013-of-00015.bin",
123
+ "transformer.encoder.layers.23.mlp.dense_h_to_4h.weight": "pytorch_model-00013-of-00015.bin",
124
+ "transformer.encoder.layers.23.post_attention_layernorm.weight": "pytorch_model-00012-of-00015.bin",
125
+ "transformer.encoder.layers.23.self_attention.dense.weight": "pytorch_model-00012-of-00015.bin",
126
+ "transformer.encoder.layers.23.self_attention.query_key_value.bias": "pytorch_model-00012-of-00015.bin",
127
+ "transformer.encoder.layers.23.self_attention.query_key_value.weight": "pytorch_model-00012-of-00015.bin",
128
+ "transformer.encoder.layers.24.input_layernorm.weight": "pytorch_model-00013-of-00015.bin",
129
+ "transformer.encoder.layers.24.mlp.dense_4h_to_h.weight": "pytorch_model-00013-of-00015.bin",
130
+ "transformer.encoder.layers.24.mlp.dense_h_to_4h.weight": "pytorch_model-00013-of-00015.bin",
131
+ "transformer.encoder.layers.24.post_attention_layernorm.weight": "pytorch_model-00013-of-00015.bin",
132
+ "transformer.encoder.layers.24.self_attention.dense.weight": "pytorch_model-00013-of-00015.bin",
133
+ "transformer.encoder.layers.24.self_attention.query_key_value.bias": "pytorch_model-00013-of-00015.bin",
134
+ "transformer.encoder.layers.24.self_attention.query_key_value.weight": "pytorch_model-00013-of-00015.bin",
135
+ "transformer.encoder.layers.25.input_layernorm.weight": "pytorch_model-00013-of-00015.bin",
136
+ "transformer.encoder.layers.25.mlp.dense_4h_to_h.weight": "pytorch_model-00014-of-00015.bin",
137
+ "transformer.encoder.layers.25.mlp.dense_h_to_4h.weight": "pytorch_model-00014-of-00015.bin",
138
+ "transformer.encoder.layers.25.post_attention_layernorm.weight": "pytorch_model-00013-of-00015.bin",
139
+ "transformer.encoder.layers.25.self_attention.dense.weight": "pytorch_model-00013-of-00015.bin",
140
+ "transformer.encoder.layers.25.self_attention.query_key_value.bias": "pytorch_model-00013-of-00015.bin",
141
+ "transformer.encoder.layers.25.self_attention.query_key_value.weight": "pytorch_model-00013-of-00015.bin",
142
+ "transformer.encoder.layers.26.input_layernorm.weight": "pytorch_model-00014-of-00015.bin",
143
+ "transformer.encoder.layers.26.mlp.dense_4h_to_h.weight": "pytorch_model-00014-of-00015.bin",
144
+ "transformer.encoder.layers.26.mlp.dense_h_to_4h.weight": "pytorch_model-00014-of-00015.bin",
145
+ "transformer.encoder.layers.26.post_attention_layernorm.weight": "pytorch_model-00014-of-00015.bin",
146
+ "transformer.encoder.layers.26.self_attention.dense.weight": "pytorch_model-00014-of-00015.bin",
147
+ "transformer.encoder.layers.26.self_attention.query_key_value.bias": "pytorch_model-00014-of-00015.bin",
148
+ "transformer.encoder.layers.26.self_attention.query_key_value.weight": "pytorch_model-00014-of-00015.bin",
149
+ "transformer.encoder.layers.27.input_layernorm.weight": "pytorch_model-00014-of-00015.bin",
150
+ "transformer.encoder.layers.27.mlp.dense_4h_to_h.weight": "pytorch_model-00015-of-00015.bin",
151
+ "transformer.encoder.layers.27.mlp.dense_h_to_4h.weight": "pytorch_model-00015-of-00015.bin",
152
+ "transformer.encoder.layers.27.post_attention_layernorm.weight": "pytorch_model-00014-of-00015.bin",
153
+ "transformer.encoder.layers.27.self_attention.dense.weight": "pytorch_model-00014-of-00015.bin",
154
+ "transformer.encoder.layers.27.self_attention.query_key_value.bias": "pytorch_model-00014-of-00015.bin",
155
+ "transformer.encoder.layers.27.self_attention.query_key_value.weight": "pytorch_model-00014-of-00015.bin",
156
+ "transformer.encoder.layers.3.input_layernorm.weight": "pytorch_model-00002-of-00015.bin",
157
+ "transformer.encoder.layers.3.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00015.bin",
158
+ "transformer.encoder.layers.3.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00015.bin",
159
+ "transformer.encoder.layers.3.post_attention_layernorm.weight": "pytorch_model-00002-of-00015.bin",
160
+ "transformer.encoder.layers.3.self_attention.dense.weight": "pytorch_model-00002-of-00015.bin",
161
+ "transformer.encoder.layers.3.self_attention.query_key_value.bias": "pytorch_model-00002-of-00015.bin",
162
+ "transformer.encoder.layers.3.self_attention.query_key_value.weight": "pytorch_model-00002-of-00015.bin",
163
+ "transformer.encoder.layers.4.input_layernorm.weight": "pytorch_model-00003-of-00015.bin",
164
+ "transformer.encoder.layers.4.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00015.bin",
165
+ "transformer.encoder.layers.4.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00015.bin",
166
+ "transformer.encoder.layers.4.post_attention_layernorm.weight": "pytorch_model-00003-of-00015.bin",
167
+ "transformer.encoder.layers.4.self_attention.dense.weight": "pytorch_model-00003-of-00015.bin",
168
+ "transformer.encoder.layers.4.self_attention.query_key_value.bias": "pytorch_model-00003-of-00015.bin",
169
+ "transformer.encoder.layers.4.self_attention.query_key_value.weight": "pytorch_model-00003-of-00015.bin",
170
+ "transformer.encoder.layers.5.input_layernorm.weight": "pytorch_model-00003-of-00015.bin",
171
+ "transformer.encoder.layers.5.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00015.bin",
172
+ "transformer.encoder.layers.5.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00015.bin",
173
+ "transformer.encoder.layers.5.post_attention_layernorm.weight": "pytorch_model-00003-of-00015.bin",
174
+ "transformer.encoder.layers.5.self_attention.dense.weight": "pytorch_model-00003-of-00015.bin",
175
+ "transformer.encoder.layers.5.self_attention.query_key_value.bias": "pytorch_model-00003-of-00015.bin",
176
+ "transformer.encoder.layers.5.self_attention.query_key_value.weight": "pytorch_model-00003-of-00015.bin",
177
+ "transformer.encoder.layers.6.input_layernorm.weight": "pytorch_model-00004-of-00015.bin",
178
+ "transformer.encoder.layers.6.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00015.bin",
179
+ "transformer.encoder.layers.6.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00015.bin",
180
+ "transformer.encoder.layers.6.post_attention_layernorm.weight": "pytorch_model-00004-of-00015.bin",
181
+ "transformer.encoder.layers.6.self_attention.dense.weight": "pytorch_model-00004-of-00015.bin",
182
+ "transformer.encoder.layers.6.self_attention.query_key_value.bias": "pytorch_model-00004-of-00015.bin",
183
+ "transformer.encoder.layers.6.self_attention.query_key_value.weight": "pytorch_model-00004-of-00015.bin",
184
+ "transformer.encoder.layers.7.input_layernorm.weight": "pytorch_model-00004-of-00015.bin",
185
+ "transformer.encoder.layers.7.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00015.bin",
186
+ "transformer.encoder.layers.7.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00015.bin",
187
+ "transformer.encoder.layers.7.post_attention_layernorm.weight": "pytorch_model-00004-of-00015.bin",
188
+ "transformer.encoder.layers.7.self_attention.dense.weight": "pytorch_model-00004-of-00015.bin",
189
+ "transformer.encoder.layers.7.self_attention.query_key_value.bias": "pytorch_model-00004-of-00015.bin",
190
+ "transformer.encoder.layers.7.self_attention.query_key_value.weight": "pytorch_model-00004-of-00015.bin",
191
+ "transformer.encoder.layers.8.input_layernorm.weight": "pytorch_model-00005-of-00015.bin",
192
+ "transformer.encoder.layers.8.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00015.bin",
193
+ "transformer.encoder.layers.8.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00015.bin",
194
+ "transformer.encoder.layers.8.post_attention_layernorm.weight": "pytorch_model-00005-of-00015.bin",
195
+ "transformer.encoder.layers.8.self_attention.dense.weight": "pytorch_model-00005-of-00015.bin",
196
+ "transformer.encoder.layers.8.self_attention.query_key_value.bias": "pytorch_model-00005-of-00015.bin",
197
+ "transformer.encoder.layers.8.self_attention.query_key_value.weight": "pytorch_model-00005-of-00015.bin",
198
+ "transformer.encoder.layers.9.input_layernorm.weight": "pytorch_model-00005-of-00015.bin",
199
+ "transformer.encoder.layers.9.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00015.bin",
200
+ "transformer.encoder.layers.9.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00015.bin",
201
+ "transformer.encoder.layers.9.post_attention_layernorm.weight": "pytorch_model-00005-of-00015.bin",
202
+ "transformer.encoder.layers.9.self_attention.dense.weight": "pytorch_model-00005-of-00015.bin",
203
+ "transformer.encoder.layers.9.self_attention.query_key_value.bias": "pytorch_model-00005-of-00015.bin",
204
+ "transformer.encoder.layers.9.self_attention.query_key_value.weight": "pytorch_model-00005-of-00015.bin",
205
+ "transformer.output_layer.weight": "pytorch_model-00015-of-00015.bin",
206
+ "transformer.rotary_pos_emb.inv_freq": "pytorch_model-00001-of-00015.bin"
207
+ }
208
+ }
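pytorch_model.bin.index.json maps every parameter name to the shard that stores it; transformers consults it automatically when loading the sharded checkpoint, but it is also handy for inspecting the layout directly. A short sketch, assuming it is run from the checkpoint directory:

# Hedged sketch: summarize the weight_map by shard.
import json
from collections import defaultdict

with open("pytorch_model.bin.index.json") as f:
    index = json.load(f)

print("total size (bytes):", index["metadata"]["total_size"])  # 12487168064

per_shard = defaultdict(list)
for name, shard in index["weight_map"].items():
    per_shard[shard].append(name)

for shard in sorted(per_shard):
    print(shard, "->", len(per_shard[shard]), "tensors")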
quantization.py ADDED
@@ -0,0 +1,188 @@
1
+ from torch.nn import Linear
2
+ from torch.nn.parameter import Parameter
3
+
4
+ import bz2
5
+ import torch
6
+ import base64
7
+ import ctypes
8
+ from transformers.utils import logging
9
+
10
+ from typing import List
11
+ from functools import partial
12
+
13
+ logger = logging.get_logger(__name__)
14
+
15
+ try:
16
+ from cpm_kernels.kernels.base import LazyKernelCModule, KernelFunction, round_up
17
+
18
+ class Kernel:
19
+ def __init__(self, code: bytes, function_names: List[str]):
20
+ self.code = code
21
+ self._function_names = function_names
22
+ self._cmodule = LazyKernelCModule(self.code)
23
+
24
+ for name in self._function_names:
25
+ setattr(self, name, KernelFunction(self._cmodule, name))
26
+
27
+ quantization_code = "$QlpoOTFBWSZTWU9yuJUAQHN//////////f/n/8/n///n//bt4dTidcVx8X3V9FV/92/v4B7/AD5FBQFAAAChSgKpFCFAFVSigUAAAEKhSgUUqgFBKigqVREQAABQBQIANDTTIGI00BkZBkNGE0A0BkBkGQGRkaNAaAGQNBoGgDIAAYIGTI0DQAQAaGmmQMRpoDIyDIaMJoBoDIDIMgMjI0aA0AMgaDQNAGQAAwQMmRoGgAgA0NNMgYjTQGRkGQ0YTQDQGQGQZAZGRo0BoAZA0GgaAMgABggZMjQNABABoaaZAxGmgMjIMhowmgGgMgMgyAyMjRoDQAyBoNA0AZAADBAyZGgaAAmqU1NEgJqnptU/Sn4jRR6J6epk2pqb1Q/SgAPUGgyNNGjQ2SBpoAZAAGg0NB6mgDIAAAAA2oaApSREBNAARhGiYEaEwU8pvImlP0k2aam1GaGqbFNM1MHpTwmkepmyU9R6nqPKekHqNNPUxNGhp6n6p6QaZ6o9TG1GMqcoV9ly6nRanHlq6zPNbnGZNi6HSug+2nPiZ13XcnFYZW+45W11CumhzYhchOJ2GLLV1OBjBjGf4TptOddTSOcVxhqYZMYwZXZZY00zI1paX5X9J+b+f4e+x43RXSxXPOdquiGpduatGyXneN696M9t4HU2eR5XX/kPhP261NTx3JO1Ow7LyuDmeo9a7d351T1ZxnvnrvYnrXv/hXxPCeuYx2XsNmO003eg9J3Z6U7b23meJ4ri01OdzTk9BNO96brz+qT5nuvvH3ds/G+m/JcG/F2XYuhXlvO+jP7U3XgrzPN/lr8Sf1n6j4j7jZs+s/T0tNaNNYzTs12rxjwztHlnire3Nzc3N1wuBwOBwXBvZfoHpD7rFmR99V5vj3aXza3xdBbXMalubTg/jIv5dfAi54Pdc75j4z412n3Npj3Ld/ENm7a3b/Cod6h/ret1/5vn/C+l+gdslMvgPSLJ8d8q+U66fevYn/tW1chleEtNTGlcHCbLRlq0tHzF5tsbbZZfHjjLgZu42XCuC3NrdjTasZGNzgxPIrGqp7r3p7L2p5XjnpPSmTd5XtzqnB6U87zzg1Ol0zd0zsLszxR6lkxp35u6/teL0L0W922cR7Lu1lpL9CsHirzuM2T+BgsyViT6LHcm0/Vr6U/7LGGyJeqTEjt0PHWhF5mCT7R9mtlDwriYv0Tyr/OxYt6qp5r0mPVT0608TqnqMZaarU2nFwrTzzlrs1ed7z1ux60wyr4ydCaTi3enW8x68x0zU7tXSlcmPSW1mGpWJMg4zmPC2lK96tp0OE80y4MfEvnZj8zGluR6b22ki1Ou9V2nCd9xovcPvcYMZYy0lvN60ScZ45vN6yeCeeXFb1lVjnnCar5fwXwE2bzJ4HI1XVPXfXZMm44GUsMpYsmLB65TuVdm0cl0b+i/wGNN66XjeV7zuPpHcnK/juhhjdfId5jMdE5nN0dGmmm2zZs2cexD5n9p/dY352XsvXHaZNWWsmmS1atjR452nYudzvqv2HMRyvNNnlMcDl3R2+yx2uVrBubTW9icHDVtbNXlZm7jma1rM4VurZZd2y6nUau7ZXZ7bVU+mnoOVxZGMrVmvX60605JwmzGZhhhjTWtaaaMaaGTGmNMZasY0iX8VMUl8eepaIrzGSpemWOQyZORk2bNpjUybMmxqYmknCGCFynutfksaZpjTNMaaatM0xsxcGR0sociNqxNSmhhR1ZJPbsn8qyF0t2qH6iYBclclalbtTTcHTDsPaX6rlnElph2Jyumumtynv2Kk8GI7rsvXbIcJgHJOSaSXnnGaI3m87RtVXJOZ/YtgdTE6Wpha6ZlE8ayXkef1fh602r2WwvfMXtMdLlkfnLFdYYwYso+bWqm7yJqHXZGw2nrS5ZanSYnWlxBxMF1V940K2wdrI7R6OYf7DGGamMmTSbRhlS45xmVOumF1EyPCmHrrN8wwZOOrdNtLeMtzFzDlWnfTBxMk2NaXIZHBYxYLD4w8yju0ao65Vz1OIXoS9dLanwCe1PWrYuWMqf1if1z2k2yYfKJ741PDgno1ZQ8DRqvUny3mNoWTzGO6m1DkrJI8JiR5cSd+vZdGOO8nrMoc5+NDUFsMSXaZJeNlMmGLtJsovOsUp7I9S5VojKxF6bTVEelXqlfJobQr3LozSh2Jk7VcrVMfhXqszGWMzNqGhqZY0OadxkyyMssKugZR0KNFXBHlqwmJgTE/BNVMk6ItJXZMR0H47GpXv/DMOvNkmVuaV1PRfEdxuqc7Hcd+ZV/zTLaRxWk0nl9CdCeM6mn5rstHIBcpiuwmUZXeq81DacHI2rmrZ5SuE5mOZd6LQrZg9mx32TprA8BMo5jKN6yLTCi3WzQaZSuhzTtM1fUTGVpG8Tw+KXI0tjEpiWxtLYynOlktSbVlaI5kxP8TDH8kx50xoxi5KcA4pcja8KWLRlO/Ks6q06ergnvm1ca3Tq8Uw7LTUsmWyctXPWmpitl/uvGcWTGXGuAXDfhqazGmjkxcJW5hMMMMpYsXl2TZYtVOddG3XCarUt6Ptq9CZXSNzyuRzqRZOjsxdBbFVz6OA5HI43r1jityVlVpVkxmOsyaYWE1NTGq1sOVh36mHMcxtSvcy70edG0ZGR3I1Go1GRlV7mWWo1G0ZGRqlvH40l7o4m5xMWLLLYyNjnqc8556mdPqLJ31n/1nWOncxzG1tizrHs/Z+d2vP/B/l8wdJ6rHUn2nbbDq4p6htFtYzMMMTaZis1K5GKzGNmxhmUx2DDlZ/qNnIx41xnaMfCZWYaZWtNLTNW8ND4Fw1MyZOCdM428suKG1ehW8TesOydg7J+YYcD4cYR+8dFK6M4E3HM9ZfRNNL+Sn6rsl4DsrDl2HpPCnfxjGXtbZtYys1ttlyJ4T+BvexjGWRjMszK4Jpc77D3GyuVD7q0+G8m9G+2+rGm7cOR2y7FdtY2XUYx/oNlfRYxhMYyYZkyyg55enna9Kt/FFi6GMMwYwdwxWgxGMLKYmUyGExTKMZkMFhkymKuh0NOBNnBu+23LdwDoZYYzGGMxtORaTU1pjTGWTTGGtMrNWUsyyTTLLG1qy2ZjbK2DBllWqxMtBMaYZQmcE7zvvRcTkclUwdkxTaSdyySt/7fpL+T1v516Ji97fwr5JbLu305zMn5+GMTTZ9F+y7ExwmGVfG44yxn3dLv6l5i+Wth1jCrDq21nW9LqvvDzz3Vf3LLH/O/32TJ/erx3bXftO4eF+G956D952K/An4NfvOpjFjExjevP/UmE0fIoZXx6/w6lX/no3D0bLt+ixjieBM6ksRd0yB4Lt2SwYNE+gd1detlZWUnpiZfGfFaK+4PyCa/v18V8X75pe9fLXzp7l3VjF76vWZmHwGz1IZNWT7b8yddJ4q5kyrVdfru6atWc7bVYztL9Jf4GXvT+Y8m9/YsXP6H018a8D4XVOqvfzqeR+6yZOD8dPv0+U7/q5Pl+2dNb0MjzGVH5p6MNQ7cOWvw62U9aHE8DprDek+McLyvDz+t
e+9Zhq5+YTruufMcWMabqysTmZVWjKPfnK0wyVcrsuhjZRdLkHNvD72b9abriOSGIxiLixMOoalNPXzy+wT/tf+U6HHONfsz+xe8ufHBdQWWGWLA9if0rsnmrxK5LvRZQeWsTCsrmOYy8VteVfuRfcVTtDLItLIsMYxZLdU/DbtSemxF6Z6Zo5WBXE4tFdCyVMMXMTEMZXVlS6Xec2T4e0tHsRcEuWshcJ2YsNF5rUx1E8ifCq6Z+ZP7qdCeu/aTwFd53l16/o0NOw6O3dLavP4Hbi4RdmuDk6DoYaninC0+o4uZjbJ7Rxeu0/FbuFg+q7DVS6fQe0rZ6NDGUNNU6DEqOaLTicKnYZMnBWruljQxoaS3dZhocDge0bSTyOvdAbG5hxe2xji7E/L55xX13wWNDi6HCekcFxfCPGxY0MXC+s7afWaMdDyjyr+o8Rudm/NabOZvdl274zH4f5XK9z6On1Pe/K5TdPAslg77BjuO6Y3eO7GqvOPG/stknp1leyvLL0Z7bl9I4noMvLkzytLhWYzrOZzLXCORe028rORzOg4N/L0HlMOQ3Pgmnbb6KczlabORpu980q37TBqRu0/p3PO6234Bl03Ynuz+9W7gnsEcmvYaYY3aMYY0wx3pYd+ujsXauWdaY5Xkbtl23fPzFHiDB/QMo0yFjBllYxTQYYyxkrwn7JufwJ/PfgJ+C83X69ni6zvXcnyXabv0ncbLwsceS+RNlyN2mnneJtX0ngYO0+e+0+UnA+Wch3ji8hj5an4h+i6XBySU4n+R0roVcbw5yvHrmr4Yw8Y7x6c+9POPYHI5HI5HI5HI5HGXGww4nE4nrVyOR8XeqPEO7PLOiukYa3Novk5hV4cdtYZLI93e+uxff2jRo0aNGjRo0aNG1bVtW1dy3m83m8+tQ5ZzHw3nObwOu8La9Rc1dtkdS8A3eTk823tnktXWlxN6Oixe06zrN70Isd9jiOgZFq9yfkPqP/SLhN2Myl8jDM43bl1nbcb4cO57jlh8Jow6pzXZdL4dyODTuuhu77FyO27DdwdRxmvO+O+3N2+BdqyTwLHVczDVY4UPE4O66/ZO2cx1LFzVdSXtF7G4HMbrauOHRw6c8FdZ5m9fHZHYZXfTlZquyynSyTTKke6vcffSD9pzPA/G7n7jxPmuhc1DHMynPMrGL6AdewYmwu5ko+UUyTwrMv27rPH1v1nGqd87+p6N6LU8k3NEng53xXyHS97+44OSg/sy/hn+Se6yfYNjW0/uTgP+PvWYzLMmjhcLB/gGpri6H83/84eUXWT6T9Hsv7785z/7z4icpW+zfXypuR7rx/gMdZb1/wC678pcs8/2a3mDitGHxl9mfPlll5MafWWqxk/eYuTDgcNMzDGWLWvsuglNxs53GtN6uWpktlW1tZZYcuinMMWmnNnJydze3b2Y1McBxrBkXw799izLMZZYyy0TkbsGM4p03S2uVu5s/XXUdSdec6smVxZYYGpVmT8A+8ajuEyV5FatkvVru2x6uxGXXbH4A+jvgP4GMYy3iPLXzq/6z65+E005ey+cwMZD3fZcqc6xpjTFjQ0P3U+e++cPYmTIwj0nrK5NPTfl3WvpfLtXDcb2HQMudYOxFXQBor4L4T6vrOauFctYXJQ++NUWmJe5bmx1jDiZS1dTqWxo4GR8jm3fttpmPHppk9PEyv4/y8/sO07XacOmcqc0x2Vi9BvNJvN5oW8x4mOsydpidRxMYJPx06m1bqPzq9KtK8sxXNXFodD/+MYYaJTLwOhc9brCsV18oOR1i4tXChyTkq4lf4y1Ke+9axjDHqs1mfBbMXuP4Hzi+X7t8vzv7bHerrUPgPCxhjre4fXdfLNtNM+Jd+Zdh8xd8wP87uNPoPgv4W7/5P2BuxfsMabNnMnza+54Pdi5U671GPZY8CehX8Voeoo7FHpkeEc6715FwHZrIrUrHaviPUbPZHND+IhczrP6FcYvhOZ0Di/ETt0OI+YwNWR9r7tpf6WDeZKZDB1+z2IthOl1mPyb5FluvEx9h9d0NnM0Y1XPFkWIsk1WotJ0PBMmkvjvQTd0e71tfeV+8r8lQ/tpzpsmxJ+InrI/dj2UajUajVTUajatRqNRtGo1Go1Go4wjeMpZFMVV9CHbofPraLsJ3JpWV2XOoanCuFky4y3PPNxucK2uKC1Lbdb1eo+m5XomN6HfeZsabHLHRX/K+offtNGGmHWctcVcG44MdSqsOLY9VzX+Zxfxn2HPdWTpzWvkrtJ8M5zorrKcquRytJ5N5DZmcaW02l76nWO+BqPXm1A2Ry/0q71dH/mqrqeFjkYxjEXtsX8qubTk67rGycyqsdm4tZx5D6D5hhi0waaWmiaMP81Yjii5qxPlPuU/GfTL1Y5E6Jyfiq63qTa39A4J0sOGDgO9WF9bOXl0XfPRbsY2bPNKPy1YrFYrFYmRhhlTIyMjJWJYZHXuCXI8OoXsvfljGLFicNifpp2XunoPiG1wtx3p1Tah+/DD66OnVtVXP9rKbVxOnL0tR/rHtqB5UDErUVcl11D4qqvjpOcxX7armUNJB3LpW6bxVvD08e8h3odKKvyCFZBdSh2FVcST9xV3n3T8t1j7Kr9qgrqXg+13Pt5U7JCvFXVIV1YG5lRhkVYZJYYDDD4KOIMoHCp26WS8GB7uBh2zIdgq/PKyInjV2STShuoapUdCpX1yTwqq/z1VvET7Kh5nVPkO8YyxjLt2MaaMmWTLQvx3qnzltnXW0p2jxgbEtSny/Osv8Y9pLMXYoHVPAhkVdWVeODhR6q9/Sxe2liwwZWMVvFXfRkeIDxAePUPIrdJ4ey6yquzH+PD/bUOWAu05qVHtFd8rrKHSoeNIOUqrYr3FXyToqfYJgwmJdKpXXOwYYegNNGMzfZPp/t3t/DVs4zjNTN61rRqaWaa4NYbRjTa0tWwy2Y2tGN8ZO8ofNKq4j9SL7I+cSm4/6ovLV5HNXLI0jJidwrtk6ynCaP6Z++GjRlWS3tLeW129Mi9evxU9mtz6s5J3Z7M2ngTgnKvmpomxpaLCzPfmx0JWE+m3NLDDGOX47RctdYYNK5jakdqLkRlI39n590T5zctGSwwZZDJj6kW8XSi6ot2MmWWJ0DUT3nuvebBudScjZ79g8cWJ8av0k+/bE5WKd5MdbFpbDVMxu1DVMmtNZGJvq1mtRbn6M+g/kP0FwDwr7quZs7xosNGpbscyxhhd9TyJyFwbLcxlTasg75vW7TsV5K7ji44XPMMrdoj+Y3rT0Hie62nlYV/pwczzOmdLqLhYkzGMzCZWGMQzGMSsZYY6Di1t4nlJ+Em63mJxrVLxPbYxNEdgc1dU2iOKyoYYWjNrEeHTYybVk0atSa7ehuwsWMWTqn1TrnS6hYsi71d1+s+k+ic70e20fzE/VaTdxT9ZtU4GIXdeNx3X77guYYfpHeTQjaMX6brOu4OY4K7Y2d9mbHarI5ox3p4GpJ2Vd/Tst60f7j999pppjR+Q/Qf8J/VaORs3cji7FfFuN61+ui9s8hix1OCh5KGVV23BPXvZfz3CLyH
pix+exi8z/KnCnosY2eunor+cxyPO/xJ0vKey9OvE9VjqaYu0x3Z3jd6o2b1T12D+F8l232lwaaacD5LE8LBxu7WTlbWraWpew8Xexjel3E+wWD4APITdNqR8F3R3T0lunCQ4GaE9R37DxeCYfcHi4xci5ovKfxVs55y2hf+65E/Xdp6jR5nrebTmi5incpkyOjs50JvrZwstbbW6kfuuQw+2mykf/EXNFzxfKTrxew929TR6bWnGL//F3JFOFCQT3K4lQ"
28
+
29
+ kernels = Kernel(
30
+ bz2.decompress(base64.b64decode(quantization_code)),
31
+ [
32
+ "int4WeightCompression",
33
+ "int4WeightExtractionFloat",
34
+ "int4WeightExtractionHalf",
35
+ "int8WeightExtractionFloat",
36
+ "int8WeightExtractionHalf",
37
+ ],
38
+ )
39
+ except Exception as exception:
40
+ kernels = None
41
+ logger.warning("Failed to load cpm_kernels:" + str(exception))
42
+
43
+
44
+ class W8A16Linear(torch.autograd.Function):
45
+ @staticmethod
46
+ def forward(ctx, inp: torch.Tensor, quant_w: torch.Tensor, scale_w: torch.Tensor, weight_bit_width):
47
+ ctx.inp_shape = inp.size()
48
+ ctx.weight_bit_width = weight_bit_width
49
+ out_features = quant_w.size(0)
50
+ inp = inp.contiguous().view(-1, inp.size(-1))
51
+ weight = extract_weight_to_half(quant_w, scale_w, weight_bit_width)
52
+ ctx.weight_shape = weight.size()
53
+ output = inp.mm(weight.t())
54
+ ctx.save_for_backward(inp, quant_w, scale_w)
55
+ return output.view(*(ctx.inp_shape[:-1] + (out_features,)))
56
+
57
+ @staticmethod
58
+ def backward(ctx, grad_output: torch.Tensor):
59
+ inp, quant_w, scale_w = ctx.saved_tensors
60
+ weight = extract_weight_to_half(quant_w, scale_w, ctx.weight_bit_width)
61
+ grad_output = grad_output.contiguous().view(-1, weight.size(0))
62
+ grad_input = grad_output.mm(weight)
63
+ grad_weight = grad_output.t().mm(inp)
64
+ return grad_input.view(ctx.inp_shape), grad_weight.view(ctx.weight_shape), None, None
65
+
66
+
67
+ def compress_int4_weight(weight: torch.Tensor): # (n, m)
68
+ with torch.cuda.device(weight.device):
69
+ n, m = weight.size(0), weight.size(1)
70
+ assert m % 2 == 0
71
+ m = m // 2
72
+ out = torch.empty(n, m, dtype=torch.int8, device="cuda")
73
+ stream = torch.cuda.current_stream()
74
+
75
+ gridDim = (n, 1, 1)
76
+ blockDim = (min(round_up(m, 32), 1024), 1, 1)
77
+
78
+ kernels.int4WeightCompression(
79
+ gridDim,
80
+ blockDim,
81
+ 0,
82
+ stream,
83
+ [ctypes.c_void_p(weight.data_ptr()), ctypes.c_void_p(out.data_ptr()), ctypes.c_int32(n), ctypes.c_int32(m)],
84
+ )
85
+ return out
86
+
87
+
88
+ def extract_weight_to_half(weight: torch.Tensor, scale_list: torch.Tensor, source_bit_width: int):
89
+ assert scale_list.dtype in [torch.half, torch.bfloat16]
90
+ assert weight.dtype in [torch.int8]
91
+ if source_bit_width == 8:
92
+ return weight.to(scale_list.dtype) * scale_list[:, None]
93
+ elif source_bit_width == 4:
94
+ func = (
95
+ kernels.int4WeightExtractionHalf if scale_list.dtype == torch.half else kernels.int4WeightExtractionBFloat16
96
+ )
97
+ else:
98
+ assert False, "Unsupported bit-width"
99
+
100
+ with torch.cuda.device(weight.device):
101
+ n, m = weight.size(0), weight.size(1)
102
+ out = torch.empty(n, m * (8 // source_bit_width), dtype=scale_list.dtype, device="cuda")
103
+ stream = torch.cuda.current_stream()
104
+
105
+ gridDim = (n, 1, 1)
106
+ blockDim = (min(round_up(m, 32), 1024), 1, 1)
107
+
108
+ func(
109
+ gridDim,
110
+ blockDim,
111
+ 0,
112
+ stream,
113
+ [
114
+ ctypes.c_void_p(weight.data_ptr()),
115
+ ctypes.c_void_p(scale_list.data_ptr()),
116
+ ctypes.c_void_p(out.data_ptr()),
117
+ ctypes.c_int32(n),
118
+ ctypes.c_int32(m),
119
+ ],
120
+ )
121
+ return out
122
+
123
+
124
+ class QuantizedLinear(torch.nn.Module):
125
+ def __init__(self, weight_bit_width: int, weight, bias=None, device="cpu", dtype=None, empty_init=False, *args,
126
+ **kwargs):
127
+ super().__init__()
128
+ self.weight_bit_width = weight_bit_width
129
+
130
+ shape = weight.shape
131
+
132
+ if weight is None or empty_init:
133
+ self.weight = torch.empty(shape[0], shape[1] * weight_bit_width // 8, dtype=torch.int8, device=device)
134
+ self.weight_scale = torch.empty(shape[0], dtype=dtype, device=device)
135
+ else:
136
+ self.weight_scale = weight.abs().max(dim=-1).values / ((2 ** (weight_bit_width - 1)) - 1)
137
+ self.weight = torch.round(weight / self.weight_scale[:, None]).to(torch.int8)
138
+ if weight_bit_width == 4:
139
+ self.weight = compress_int4_weight(self.weight)
140
+
141
+ self.weight = Parameter(self.weight.to(device), requires_grad=False)
142
+ self.weight_scale = Parameter(self.weight_scale.to(device), requires_grad=False)
143
+ self.bias = Parameter(bias.to(device), requires_grad=False) if bias is not None else None
144
+
145
+ def forward(self, input):
146
+ output = W8A16Linear.apply(input, self.weight, self.weight_scale, self.weight_bit_width)
147
+ if self.bias is not None:
148
+ output = output + self.bias
149
+ return output
150
+
151
+
152
+ def quantize(model, weight_bit_width, empty_init=False, device=None):
153
+ """Replace fp16 linear with quantized linear"""
154
+ for layer in model.layers:
155
+ layer.self_attention.query_key_value = QuantizedLinear(
156
+ weight_bit_width=weight_bit_width,
157
+ weight=layer.self_attention.query_key_value.weight.to(torch.cuda.current_device()),
158
+ bias=layer.self_attention.query_key_value.bias,
159
+ dtype=layer.self_attention.query_key_value.weight.dtype,
160
+ device=layer.self_attention.query_key_value.weight.device if device is None else device,
161
+ empty_init=empty_init
162
+ )
163
+ layer.self_attention.dense = QuantizedLinear(
164
+ weight_bit_width=weight_bit_width,
165
+ weight=layer.self_attention.dense.weight.to(torch.cuda.current_device()),
166
+ bias=layer.self_attention.dense.bias,
167
+ dtype=layer.self_attention.dense.weight.dtype,
168
+ device=layer.self_attention.dense.weight.device if device is None else device,
169
+ empty_init=empty_init
170
+ )
171
+ layer.mlp.dense_h_to_4h = QuantizedLinear(
172
+ weight_bit_width=weight_bit_width,
173
+ weight=layer.mlp.dense_h_to_4h.weight.to(torch.cuda.current_device()),
174
+ bias=layer.mlp.dense_h_to_4h.bias,
175
+ dtype=layer.mlp.dense_h_to_4h.weight.dtype,
176
+ device=layer.mlp.dense_h_to_4h.weight.device if device is None else device,
177
+ empty_init=empty_init
178
+ )
179
+ layer.mlp.dense_4h_to_h = QuantizedLinear(
180
+ weight_bit_width=weight_bit_width,
181
+ weight=layer.mlp.dense_4h_to_h.weight.to(torch.cuda.current_device()),
182
+ bias=layer.mlp.dense_4h_to_h.bias,
183
+ dtype=layer.mlp.dense_4h_to_h.weight.dtype,
184
+ device=layer.mlp.dense_4h_to_h.weight.device if device is None else device,
185
+ empty_init=empty_init
186
+ )
187
+
188
+ return model
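quantization.py implements symmetric per-row quantization: each output row of a weight matrix is scaled by max(|w|) / (2^(bits-1) - 1), rounded to int8, and dequantized back inside W8A16Linear at matmul time; the 4-bit path additionally packs two values per byte with the bundled CUDA kernels. The arithmetic of the 8-bit case can be reproduced without cpm_kernels or a GPU; a device-independent sketch (float32 stand-in for the model's fp16 weights):

# Hedged sketch: the int8 quantize/dequantize round trip used by QuantizedLinear
# and the 8-bit branch of extract_weight_to_half, on a random stand-in weight.
import torch

bits = 8
weight = torch.randn(8, 16)  # stand-in; the real layers are fp16 and much larger

scale = weight.abs().max(dim=-1).values / ((2 ** (bits - 1)) - 1)
quantized = torch.round(weight / scale[:, None]).to(torch.int8)
dequantized = quantized.to(weight.dtype) * scale[:, None]

print("max abs reconstruction error:", (weight - dequantized).abs().max().item())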
special_tokens_map.json ADDED
@@ -0,0 +1 @@
1
+ {}
tokenization_chatglm.py ADDED
@@ -0,0 +1,253 @@
1
+ import os
2
+ import torch
3
+ from typing import List, Optional, Union, Dict
4
+ from sentencepiece import SentencePieceProcessor
5
+ from transformers import PreTrainedTokenizer
6
+ from transformers.utils import logging, PaddingStrategy
7
+ from transformers.tokenization_utils_base import EncodedInput, BatchEncoding
8
+
9
+
10
+ class SPTokenizer:
11
+ def __init__(self, model_path: str):
12
+ # reload tokenizer
13
+ assert os.path.isfile(model_path), model_path
14
+ self.sp_model = SentencePieceProcessor(model_file=model_path)
15
+
16
+ # BOS / EOS token IDs
17
+ self.n_words: int = self.sp_model.vocab_size()
18
+ self.bos_id: int = self.sp_model.bos_id()
19
+ self.eos_id: int = self.sp_model.eos_id()
20
+ self.pad_id: int = self.sp_model.unk_id()
21
+ assert self.sp_model.vocab_size() == self.sp_model.get_piece_size()
22
+
23
+ special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"]
24
+ self.special_tokens = {}
25
+ self.index_special_tokens = {}
26
+ for token in special_tokens:
27
+ self.special_tokens[token] = self.n_words
28
+ self.index_special_tokens[self.n_words] = token
29
+ self.n_words += 1
30
+
31
+ def tokenize(self, s: str):
32
+ return self.sp_model.EncodeAsPieces(s)
33
+
34
+ def encode(self, s: str, bos: bool = False, eos: bool = False) -> List[int]:
35
+ assert type(s) is str
36
+ t = self.sp_model.encode(s)
37
+ if bos:
38
+ t = [self.bos_id] + t
39
+ if eos:
40
+ t = t + [self.eos_id]
41
+ return t
42
+
43
+ def decode(self, t: List[int]) -> str:
44
+ return self.sp_model.decode(t)
45
+
46
+ def decode_tokens(self, tokens: List[str]) -> str:
47
+ text = self.sp_model.DecodePieces(tokens)
48
+ return text
49
+
50
+ def convert_token_to_id(self, token):
51
+ """ Converts a token (str) in an id using the vocab. """
52
+ if token in self.special_tokens:
53
+ return self.special_tokens[token]
54
+ return self.sp_model.PieceToId(token)
55
+
56
+ def convert_id_to_token(self, index):
57
+ """Converts an index (integer) in a token (str) using the vocab."""
58
+ if index in self.index_special_tokens or index in [self.eos_id, self.bos_id, self.pad_id] or index < 0:
59
+ return ""
60
+ return self.sp_model.IdToPiece(index)
61
+
62
+
63
+ class ChatGLMTokenizer(PreTrainedTokenizer):
64
+ vocab_files_names = {"vocab_file": "tokenizer.model"}
65
+
66
+ model_input_names = ["input_ids", "attention_mask", "position_ids"]
67
+
68
+ def __init__(self, vocab_file, padding_side="left", **kwargs):
69
+ super().__init__(padding_side=padding_side, **kwargs)
70
+ self.name = "GLMTokenizer"
71
+
72
+ self.vocab_file = vocab_file
73
+ self.tokenizer = SPTokenizer(vocab_file)
74
+ self.special_tokens = {
75
+ "<bos>": self.tokenizer.bos_id,
76
+ "<eos>": self.tokenizer.eos_id,
77
+ "<pad>": self.tokenizer.pad_id
78
+ }
79
+
80
+ def get_command(self, token):
81
+ if token in self.special_tokens:
82
+ return self.special_tokens[token]
83
+ assert token in self.tokenizer.special_tokens, f"{token} is not a special token for {self.name}"
84
+ return self.tokenizer.special_tokens[token]
85
+
86
+ @property
87
+ def pad_token(self) -> str:
88
+ return "<unk>"
89
+
90
+ @property
91
+ def pad_token_id(self):
92
+ return self.get_command("<pad>")
93
+
94
+ @property
95
+ def eos_token(self) -> str:
96
+ return "</s>"
97
+
98
+ @property
99
+ def eos_token_id(self):
100
+ return self.get_command("<eos>")
101
+
102
+ @property
103
+ def vocab_size(self):
104
+ return self.tokenizer.n_words
105
+
106
+ def get_vocab(self):
107
+ """ Returns vocab as a dict """
108
+ vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)}
109
+ vocab.update(self.added_tokens_encoder)
110
+ return vocab
111
+
112
+ def _tokenize(self, text, **kwargs):
113
+ return self.tokenizer.tokenize(text)
114
+
115
+ def _convert_token_to_id(self, token):
116
+ """ Converts a token (str) in an id using the vocab. """
117
+ return self.tokenizer.convert_token_to_id(token)
118
+
119
+ def _convert_id_to_token(self, index):
120
+ """Converts an index (integer) in a token (str) using the vocab."""
121
+ return self.tokenizer.convert_id_to_token(index)
122
+
123
+ def convert_tokens_to_string(self, tokens: List[str]) -> str:
124
+ return self.tokenizer.decode_tokens(tokens)
125
+
126
+ def save_vocabulary(self, save_directory, filename_prefix=None):
127
+ """
128
+ Save the vocabulary and special tokens file to a directory.
129
+
130
+ Args:
131
+ save_directory (`str`):
132
+ The directory in which to save the vocabulary.
133
+ filename_prefix (`str`, *optional*):
134
+ An optional prefix to add to the names of the saved files.
135
+
136
+ Returns:
137
+ `Tuple(str)`: Paths to the files saved.
138
+ """
139
+ if os.path.isdir(save_directory):
140
+ vocab_file = os.path.join(
141
+ save_directory, self.vocab_files_names["vocab_file"]
142
+ )
143
+ else:
144
+ vocab_file = save_directory
145
+
146
+ with open(self.vocab_file, 'rb') as fin:
147
+ proto_str = fin.read()
148
+
149
+ with open(vocab_file, "wb") as writer:
150
+ writer.write(proto_str)
151
+
152
+ return (vocab_file,)
153
+
154
+ def get_prefix_tokens(self):
155
+ prefix_tokens = [self.get_command("[gMASK]"), self.get_command("sop")]
156
+ return prefix_tokens
157
+
158
+ def build_prompt(self, query, history=None):
159
+ if history is None:
160
+ history = []
161
+ prompt = ""
162
+ for i, (old_query, response) in enumerate(history):
163
+ prompt += "[Round {}]\n\n问:{}\n\n答:{}\n\n".format(i + 1, old_query, response)
164
+ prompt += "[Round {}]\n\n问:{}\n\n答:".format(len(history) + 1, query)
165
+ return prompt
166
+
167
+ def build_inputs_with_special_tokens(
168
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
169
+ ) -> List[int]:
170
+ """
171
+ Build model inputs from a sequence or a pair of sequences by concatenating and
172
+ adding special tokens. A ChatGLM sequence has the following format:
173
+
174
+ - single sequence: `[gMASK] sop X`
175
+ - pair of sequences: `[gMASK] sop A B </s>`
176
+
177
+ Args:
178
+ token_ids_0 (`List[int]`):
179
+ List of IDs to which the special tokens will be added.
180
+ token_ids_1 (`List[int]`, *optional*):
181
+ Optional second list of IDs for sequence pairs.
182
+
183
+ Returns:
184
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
185
+ """
186
+ prefix_tokens = self.get_prefix_tokens()
187
+ token_ids_0 = prefix_tokens + token_ids_0
188
+ if token_ids_1 is not None:
189
+ token_ids_0 = token_ids_0 + token_ids_1 + [self.get_command("<eos>")]
190
+ return token_ids_0
191
+
192
+ def _pad(
193
+ self,
194
+ encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
195
+ max_length: Optional[int] = None,
196
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
197
+ pad_to_multiple_of: Optional[int] = None,
198
+ return_attention_mask: Optional[bool] = None,
199
+ ) -> dict:
200
+ """
201
+ Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
202
+
203
+ Args:
204
+ encoded_inputs:
205
+ Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
206
+ max_length: maximum length of the returned list and optionally padding length (see below).
207
+ Will truncate by taking into account the special tokens.
208
+ padding_strategy: PaddingStrategy to use for padding.
209
+
210
+ - PaddingStrategy.LONGEST Pad to the longest sequence in the batch
211
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
212
+ - PaddingStrategy.DO_NOT_PAD: Do not pad
213
+ The tokenizer padding sides are defined in self.padding_side:
214
+
215
+ - 'left': pads on the left of the sequences
216
+ - 'right': pads on the right of the sequences
217
+ pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
218
+ This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
219
+ `>= 7.5` (Volta).
220
+ return_attention_mask:
221
+ (optional) Set to False to avoid returning attention mask (default: set to model specifics)
222
+ """
223
+ # Load from model defaults
224
+ assert self.padding_side == "left"
225
+
226
+ required_input = encoded_inputs[self.model_input_names[0]]
227
+ seq_length = len(required_input)
228
+
229
+ if padding_strategy == PaddingStrategy.LONGEST:
230
+ max_length = len(required_input)
231
+
232
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
233
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
234
+
235
+ needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
236
+
237
+ # Initialize attention mask if not present.
238
+ if "attention_mask" not in encoded_inputs:
239
+ encoded_inputs["attention_mask"] = [1] * seq_length
240
+
241
+ if "position_ids" not in encoded_inputs:
242
+ encoded_inputs["position_ids"] = list(range(seq_length))
243
+
244
+ if needs_to_be_padded:
245
+ difference = max_length - len(required_input)
246
+
247
+ if "attention_mask" in encoded_inputs:
248
+ encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
249
+ if "position_ids" in encoded_inputs:
250
+ encoded_inputs["position_ids"] = [0] * difference + encoded_inputs["position_ids"]
251
+ encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
252
+
253
+ return encoded_inputs
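ChatGLMTokenizer wraps the SentencePiece model shipped below, prepends the [gMASK] and sop command tokens to every encoded sequence, and formats multi-turn conversations with build_prompt. A usage sketch (placeholder path; the example strings are illustrative):

# Hedged sketch: build a two-round chat prompt and encode it.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this/checkpoint", trust_remote_code=True)

history = [("Hello", "Hello! How can I help you?")]
prompt = tokenizer.build_prompt("Introduce yourself", history=history)
# prompt == "[Round 1]\n\n问:Hello\n\n答:Hello! How can I help you?\n\n"
#           "[Round 2]\n\n问:Introduce yourself\n\n答:"

inputs = tokenizer([prompt], return_tensors="pt")
# input_ids begin with the [gMASK] and sop prefix ids added by
# build_inputs_with_special_tokens; any padding is applied on the left.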
tokenizer.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e7dc4c393423b76e4373e5157ddc34803a0189ba96b21ddbb40269d31468a6f2
3
+ size 1018370
tokenizer_config.json ADDED
@@ -0,0 +1,14 @@
1
+ {
2
+ "auto_map": {
3
+ "AutoTokenizer": [
4
+ "tokenization_chatglm.ChatGLMTokenizer",
5
+ null
6
+ ]
7
+ },
8
+ "clean_up_tokenization_spaces": true,
9
+ "do_lower_case": false,
10
+ "model_max_length": 1000000000000000019884624838656,
11
+ "padding_side": "left",
12
+ "remove_space": false,
13
+ "tokenizer_class": "ChatGLMTokenizer"
14
+ }
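tokenizer_config.json wires ChatGLMTokenizer into AutoTokenizer via auto_map (so trust_remote_code=True is required) and fixes padding_side to "left", matching the assertion in _pad above. Batched encoding therefore pads shorter sequences on the left, as in this sketch (placeholder path and example strings):

# Hedged sketch: left-padded batch encoding with the custom tokenizer.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this/checkpoint", trust_remote_code=True)
batch = tok(["short prompt", "a somewhat longer prompt"], padding=True, return_tensors="pt")

print(batch["input_ids"].shape)      # (2, length of the longer sequence)
print(batch["attention_mask"][0])    # leading zeros mark the left padding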