ljsabc committed on
Commit 86d3e03
1 Parent(s): e07d539

Refactor to use upstream AutoModel.

Files changed (3):
  1. app.py +4 -4
  2. configuration_chatglm.py +0 -92
  3. modeling_chatglm.py +0 -1264
app.py CHANGED
@@ -15,16 +15,16 @@ A T4-class GPU is already quite capable of this task.
 ### Install dependencies
 """
 
-from modeling_chatglm import ChatGLMForConditionalGeneration
+#from modeling_chatglm import ChatGLMForConditionalGeneration
 import torch
 import sys
 
-from transformers import AutoTokenizer, GenerationConfig
+from transformers import AutoTokenizer, GenerationConfig, AutoModel
 
-model = ChatGLMForConditionalGeneration.from_pretrained("THUDM/chatglm-6b").float()
+model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()
 tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
 
-from peft import get_peft_model, LoraConfig, TaskType, PeftModel
+from peft import PeftModel
 peft_path = 'ljsabc/Fujisaki_GLM' # change it to your own
 model = PeftModel.from_pretrained(
     model,
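For reference, a minimal standalone sketch of the loading path this change moves to (assuming the `transformers` and `peft` packages are installed; identifiers match the diff above):

```python
import torch
from transformers import AutoModel, AutoTokenizer
from peft import PeftModel

# AutoModel with trust_remote_code=True fetches ChatGLM's own modeling and
# configuration code from the THUDM/chatglm-6b repo, which is what makes the
# local modeling_chatglm.py / configuration_chatglm.py copies removable.
model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)

# Only PeftModel is needed to attach the trained LoRA adapter, so the other
# peft imports (get_peft_model, LoraConfig, TaskType) can be dropped.
peft_path = 'ljsabc/Fujisaki_GLM'  # change it to your own
model = PeftModel.from_pretrained(model, peft_path)
```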
configuration_chatglm.py DELETED
@@ -1,92 +0,0 @@
-""" ChatGLM model configuration """
-
-from transformers.configuration_utils import PretrainedConfig
-from transformers.utils import logging
-
-logger = logging.get_logger(__name__)
-
-
-class ChatGLMConfig(PretrainedConfig):
-    r"""
-    This is the configuration class to store the configuration of a [`~ChatGLMModel`].
-    It is used to instantiate a ChatGLM model according to the specified arguments, defining the model
-    architecture. Instantiating a configuration with the defaults will yield a configuration similar to that of
-    the ChatGLM-6B [THUDM/ChatGLM-6B](https://huggingface.co/THUDM/chatglm-6b) architecture.
-
-    Configuration objects inherit from [`PretrainedConfig`] and can be used
-    to control the model outputs. Read the documentation from [`PretrainedConfig`]
-    for more information.
-
-    Args:
-        vocab_size (`int`, *optional*, defaults to 150528):
-            Vocabulary size of the ChatGLM-6B model. Defines the number of different tokens that can be
-            represented by the `inputs_ids` passed when calling [`~ChatGLMModel`] or [`~TFChatGLMModel`].
-        hidden_size (`int`, *optional*, defaults to 4096):
-            Dimension of the encoder layers and the pooler layer.
-        num_hidden_layers (`int`, *optional*, defaults to 28):
-            Number of hidden layers in the Transformer encoder.
-        num_attention_heads (`int`, *optional*, defaults to 32):
-            Number of attention heads for each attention layer in the Transformer encoder.
-        inner_hidden_size (`int`, *optional*, defaults to 16384):
-            Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
-        max_sequence_length (`int`, *optional*, defaults to 512):
-            The maximum sequence length that this model might ever be used with.
-            Typically set this to something large just in case (e.g., 512 or 1024 or 2048).
-        layernorm_epsilon (`float`, *optional*, defaults to 1e-5):
-            The epsilon used by the layer normalization layers.
-        use_cache (`bool`, *optional*, defaults to `True`):
-            Whether the model should return the last key/values attentions (not used by all models).
-    Example:
-
-    ```python
-    >>> from configuration_chatglm import ChatGLMConfig
-    >>> from modeling_chatglm import ChatGLMModel
-
-    >>> # Initializing a ChatGLM-6B THUDM/ChatGLM-6B style configuration
-    >>> configuration = ChatGLMConfig()
-
-    >>> # Initializing a model from the THUDM/ChatGLM-6B style configuration
-    >>> model = ChatGLMModel(configuration)
-
-    >>> # Accessing the model configuration
-    >>> configuration = model.config
-    ```
-    """
-    model_type = "chatglm"
-
-    def __init__(
-        self,
-        vocab_size=150528,
-        hidden_size=4096,
-        num_layers=28,
-        num_attention_heads=32,
-        layernorm_epsilon=1e-5,
-        use_cache=False,
-        bos_token_id=150004,
-        eos_token_id=150005,
-        pad_token_id=0,
-        max_sequence_length=2048,
-        inner_hidden_size=16384,
-        position_encoding_2d=True,
-        **kwargs
-    ):
-        self.num_layers = num_layers
-        self.vocab_size = vocab_size
-        self.hidden_size = hidden_size
-        self.num_attention_heads = num_attention_heads
-        self.max_sequence_length = max_sequence_length
-        self.layernorm_epsilon = layernorm_epsilon
-        self.inner_hidden_size = inner_hidden_size
-        self.use_cache = use_cache
-        self.bos_token_id = bos_token_id
-        self.eos_token_id = eos_token_id
-        self.pad_token_id = pad_token_id
-        self.position_encoding_2d = position_encoding_2d
-        super().__init__(
-            pad_token_id=pad_token_id,
-            bos_token_id=bos_token_id,
-            eos_token_id=eos_token_id,
-            **kwargs
-        )
modeling_chatglm.py DELETED
@@ -1,1264 +0,0 @@
-""" PyTorch ChatGLM model. """
-
-import math
-import copy
-import os
-import warnings
-import re
-import sys
-
-import torch
-import torch.utils.checkpoint
-import torch.nn.functional as F
-from torch import nn
-from torch.nn import CrossEntropyLoss, LayerNorm
-from torch.nn.utils import skip_init
-from typing import Optional, Tuple, Union, List, Callable
-
-from transformers.utils import (
-    add_code_sample_docstrings,
-    add_start_docstrings,
-    add_start_docstrings_to_model_forward,
-)
-from transformers.modeling_outputs import (
-    BaseModelOutputWithPast,
-    CausalLMOutputWithPast,
-    BaseModelOutputWithPastAndCrossAttentions,
-)
-from transformers.modeling_utils import PreTrainedModel
-from transformers.utils import logging
-from transformers.generation.logits_process import LogitsProcessor
-from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationConfig
-
-from configuration_chatglm import ChatGLMConfig
-
-# flags required to enable jit fusion kernels
-
-if sys.platform != 'darwin':
-    torch._C._jit_set_profiling_mode(False)
-    torch._C._jit_set_profiling_executor(False)
-    torch._C._jit_override_can_fuse_on_cpu(True)
-    torch._C._jit_override_can_fuse_on_gpu(True)
-
-logger = logging.get_logger(__name__)
-
-_CHECKPOINT_FOR_DOC = "THUDM/ChatGLM-6B"
-_CONFIG_FOR_DOC = "ChatGLM6BConfig"
-
-CHATGLM_6B_PRETRAINED_MODEL_ARCHIVE_LIST = [
-    "THUDM/chatglm-6b",
-    # See all ChatGLM-6B models at https://huggingface.co/models?filter=chatglm
-]
-
-
-class InvalidScoreLogitsProcessor(LogitsProcessor):
-    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
-        if torch.isnan(scores).any() or torch.isinf(scores).any():
-            scores.zero_()
-            scores[..., 20005] = 5e4
-        return scores
-
-
-def load_tf_weights_in_chatglm_6b(model, config, tf_checkpoint_path):
-    """Load tf checkpoints in a pytorch model."""
-    try:
-        import re
-
-        import numpy as np
-        import tensorflow as tf
-    except ImportError:
-        logger.error(
-            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
-            "https://www.tensorflow.org/install/ for installation instructions."
-        )
-        raise
-    tf_path = os.path.abspath(tf_checkpoint_path)
-    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
-    # Load weights from TF model
-    init_vars = tf.train.list_variables(tf_path)
-    names = []
-    arrays = []
-    for name, shape in init_vars:
-        logger.info(f"Loading TF weight {name} with shape {shape}")
-        array = tf.train.load_variable(tf_path, name)
-        names.append(name)
-        arrays.append(array)
-
-    for name, array in zip(names, arrays):
-        name = name.split("/")
-        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
-        # which are not required for using the pretrained model
-        if any(
-            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
-            for n in name
-        ):
-            logger.info(f"Skipping {'/'.join(name)}")
-            continue
-        pointer = model
-        for m_name in name:
-            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
-                scope_names = re.split(r"_(\d+)", m_name)
-            else:
-                scope_names = [m_name]
-            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
-                pointer = getattr(pointer, "weight")
-            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
-                pointer = getattr(pointer, "bias")
-            elif scope_names[0] == "output_weights":
-                pointer = getattr(pointer, "weight")
-            elif scope_names[0] == "squad":
-                pointer = getattr(pointer, "classifier")
-            else:
-                try:
-                    pointer = getattr(pointer, scope_names[0])
-                except AttributeError:
-                    logger.info(f"Skipping {'/'.join(name)}")
-                    continue
-            if len(scope_names) >= 2:
-                num = int(scope_names[1])
-                pointer = pointer[num]
-        if m_name[-11:] == "_embeddings":
-            pointer = getattr(pointer, "weight")
-        elif m_name == "kernel":
-            array = np.transpose(array)
-        try:
-            assert (
-                pointer.shape == array.shape
-            ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
-        except AssertionError as e:
-            e.args += (pointer.shape, array.shape)
-            raise
-        logger.info(f"Initialize PyTorch weight {name}")
-        pointer.data = torch.from_numpy(array)
-    return model
-
-
-@torch.jit.script
-def gelu_impl(x):
-    """OpenAI's gelu implementation."""
-    return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x *
-                                       (1.0 + 0.044715 * x * x)))
-
-
-def gelu(x):
-    return gelu_impl(x)
-
-
-class RotaryEmbedding(torch.nn.Module):
-    def __init__(self, dim, base=10000, precision=torch.half, learnable=False):
-        super().__init__()
-        inv_freq = 1. / (base ** (torch.arange(0, dim, 2).float() / dim))
-        inv_freq = inv_freq.half()
-        self.learnable = learnable
-        if learnable:
-            self.inv_freq = torch.nn.Parameter(inv_freq)
-            self.max_seq_len_cached = None
-        else:
-            self.register_buffer('inv_freq', inv_freq)
-            self.max_seq_len_cached = None
-            self.cos_cached = None
-            self.sin_cached = None
-        self.precision = precision
-
-    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys,
-                              error_msgs):
-        pass
-
-    def forward(self, x, seq_dim=1, seq_len=None):
-        if seq_len is None:
-            seq_len = x.shape[seq_dim]
-        if self.max_seq_len_cached is None or (seq_len > self.max_seq_len_cached):
-            self.max_seq_len_cached = None if self.learnable else seq_len
-            t = torch.arange(seq_len, device=x.device, dtype=self.inv_freq.dtype)
-            freqs = torch.einsum('i,j->ij', t, self.inv_freq)
-            # Different from paper, but it uses a different permutation in order to obtain the same calculation
-            emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
-            if self.precision == torch.bfloat16:
-                emb = emb.float()
-
-            # [sx, 1 (b * np), hn]
-            cos_cached = emb.cos()[:, None, :]
-            sin_cached = emb.sin()[:, None, :]
-            if self.precision == torch.bfloat16:
-                cos_cached = cos_cached.bfloat16()
-                sin_cached = sin_cached.bfloat16()
-            if self.learnable:
-                return cos_cached, sin_cached
-            self.cos_cached, self.sin_cached = cos_cached, sin_cached
-        return self.cos_cached[:seq_len, ...], self.sin_cached[:seq_len, ...]
-
-
-def rotate_half(x):
-    x1, x2 = x[..., :x.shape[-1] // 2], x[..., x.shape[-1] // 2:]
-    return torch.cat((-x2, x1), dim=x1.ndim - 1)  # dim=-1 triggers a bug in earlier torch versions
-
-
-@torch.jit.script
-def apply_rotary_pos_emb_index(q, k, cos, sin, position_id):
-    # position_id: [sq, b], q, k: [sq, b, np, hn], cos: [sq, 1, hn] -> [sq, b, 1, hn]
-    cos, sin = F.embedding(position_id, cos.squeeze(1)).unsqueeze(2), \
-        F.embedding(position_id, sin.squeeze(1)).unsqueeze(2)
-    q, k = (q * cos) + (rotate_half(q) * sin), (k * cos) + (rotate_half(k) * sin)
-    return q, k
-
-
-def attention_fn(
-        self,
-        query_layer,
-        key_layer,
-        value_layer,
-        attention_mask,
-        hidden_size_per_partition,
-        layer_id,
-        layer_past=None,
-        scaling_attention_score=True,
-        use_cache=False,
-):
-    if layer_past is not None:
-        past_key, past_value = layer_past
-        key_layer = torch.cat((past_key, key_layer), dim=0)
-        value_layer = torch.cat((past_value, value_layer), dim=0)
-
-    # seqlen, batch, num_attention_heads, hidden_size_per_attention_head
-    seq_len, b, nh, hidden_size = key_layer.shape
-
-    if use_cache:
-        present = (key_layer, value_layer)
-    else:
-        present = None
-
-    query_key_layer_scaling_coeff = float(layer_id + 1)
-    if scaling_attention_score:
-        query_layer = query_layer / (math.sqrt(hidden_size) * query_key_layer_scaling_coeff)
-
-    # ===================================
-    # Raw attention scores. [b, np, s, s]
-    # ===================================
-
-    # [b, np, sq, sk]
-    output_size = (query_layer.size(1), query_layer.size(2), query_layer.size(0), key_layer.size(0))
-
-    # [sq, b, np, hn] -> [sq, b * np, hn]
-    query_layer = query_layer.view(output_size[2], output_size[0] * output_size[1], -1)
-    # [sk, b, np, hn] -> [sk, b * np, hn]
-    key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1)
-
-    matmul_result = torch.empty(
-        output_size[0] * output_size[1],
-        output_size[2],
-        output_size[3],
-        dtype=query_layer.dtype,
-        device=query_layer.device,
-    )
-
-    matmul_result = torch.baddbmm(
-        matmul_result,
-        query_layer.transpose(0, 1),  # [b * np, sq, hn]
-        key_layer.transpose(0, 1).transpose(1, 2),  # [b * np, hn, sk]
-        beta=0.0,
-        alpha=1.0,
-    )
-
-    # change view to [b, np, sq, sk]
-    attention_scores = matmul_result.view(*output_size)
-
-    if self.scale_mask_softmax:
-        self.scale_mask_softmax.scale = query_key_layer_scaling_coeff
-        attention_probs = self.scale_mask_softmax(attention_scores, attention_mask.contiguous())
-    else:
-        if not (attention_mask == 0).all():
-            # if auto-regressive, skip
-            attention_scores.masked_fill_(attention_mask, -10000.0)
-        dtype = attention_scores.dtype
-        attention_scores = attention_scores.float()
-        attention_scores = attention_scores * query_key_layer_scaling_coeff
-
-        attention_probs = F.softmax(attention_scores, dim=-1)
-
-        attention_probs = attention_probs.type(dtype)
-
-    # =========================
-    # Context layer. [sq, b, hp]
-    # =========================
-
-    # value_layer -> context layer.
-    # [sk, b, np, hn] --> [b, np, sq, hn]
-
-    # context layer shape: [b, np, sq, hn]
-    output_size = (value_layer.size(1), value_layer.size(2), query_layer.size(0), value_layer.size(3))
-
-    # change view [sk, b * np, hn]
-    value_layer = value_layer.view(value_layer.size(0), output_size[0] * output_size[1], -1)
-
-    # change view [b * np, sq, sk]
-    attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1)
-
-    # matmul: [b * np, sq, hn]
-    context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1))
-
-    # change view [b, np, sq, hn]
-    context_layer = context_layer.view(*output_size)
-
-    # [b, np, sq, hn] --> [sq, b, np, hn]
-    context_layer = context_layer.permute(2, 0, 1, 3).contiguous()
-
-    # [sq, b, np, hn] --> [sq, b, hp]
-    new_context_layer_shape = context_layer.size()[:-2] + (hidden_size_per_partition,)
-    context_layer = context_layer.view(*new_context_layer_shape)
-
-    outputs = (context_layer, present, attention_probs)
-
-    return outputs
-
-
-class SelfAttention(torch.nn.Module):
-    def __init__(self, hidden_size, num_attention_heads,
-                 layer_id, hidden_size_per_attention_head=None, bias=True,
-                 params_dtype=torch.float, position_encoding_2d=True):
-        super(SelfAttention, self).__init__()
-
-        self.layer_id = layer_id
-        self.hidden_size = hidden_size
-        self.hidden_size_per_partition = hidden_size
-        self.num_attention_heads = num_attention_heads
-        self.num_attention_heads_per_partition = num_attention_heads
-        self.position_encoding_2d = position_encoding_2d
-        self.rotary_emb = RotaryEmbedding(
-            self.hidden_size // (self.num_attention_heads * 2)
-            if position_encoding_2d
-            else self.hidden_size // self.num_attention_heads,
-            base=10000,
-            precision=torch.half,
-            learnable=False,
-        )
-
-        self.scale_mask_softmax = None
-
-        if hidden_size_per_attention_head is None:
-            self.hidden_size_per_attention_head = hidden_size // num_attention_heads
-        else:
-            self.hidden_size_per_attention_head = hidden_size_per_attention_head
-
-        self.inner_hidden_size = num_attention_heads * self.hidden_size_per_attention_head
-
-        # Strided linear layer.
-        self.query_key_value = skip_init(
-            torch.nn.Linear,
-            hidden_size,
-            3 * self.inner_hidden_size,
-            bias=bias,
-            dtype=params_dtype,
-        )
-
-        self.dense = skip_init(
-            torch.nn.Linear,
-            self.inner_hidden_size,
-            hidden_size,
-            bias=bias,
-            dtype=params_dtype,
-        )
-
-    @staticmethod
-    def attention_mask_func(attention_scores, attention_mask):
-        attention_scores.masked_fill_(attention_mask, -10000.0)
-        return attention_scores
-
-    def split_tensor_along_last_dim(self, tensor, num_partitions,
-                                    contiguous_split_chunks=False):
-        """Split a tensor along its last dimension.
-        Arguments:
-            tensor: input tensor.
-            num_partitions: number of partitions to split the tensor into.
-            contiguous_split_chunks: If True, make each chunk contiguous
-                in memory.
-        """
-        # Get the size and dimension.
-        last_dim = tensor.dim() - 1
-        last_dim_size = tensor.size()[last_dim] // num_partitions
-        # Split.
-        tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
-        # Note: torch.split does not create contiguous tensors by default.
-        if contiguous_split_chunks:
-            return tuple(chunk.contiguous() for chunk in tensor_list)
-
-        return tensor_list
-
-    def forward(
-            self,
-            hidden_states: torch.Tensor,
-            position_ids,
-            attention_mask: torch.Tensor,
-            layer_id,
-            layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
-            use_cache: bool = False,
-            output_attentions: bool = False,
-    ):
-        """
-        hidden_states: [seq_len, batch, hidden_size]
-        attention_mask: [(1, 1), seq_len, seq_len]
-        """
-
-        # [seq_len, batch, 3 * hidden_size]
-        mixed_raw_layer = self.query_key_value(hidden_states)
-
-        # [seq_len, batch, 3 * hidden_size] --> [seq_len, batch, num_attention_heads, 3 * hidden_size_per_attention_head]
-        new_tensor_shape = mixed_raw_layer.size()[:-1] + (
-            self.num_attention_heads_per_partition,
-            3 * self.hidden_size_per_attention_head,
-        )
-        mixed_raw_layer = mixed_raw_layer.view(*new_tensor_shape)
-
-        # [seq_len, batch, num_attention_heads, hidden_size_per_attention_head]
-        (query_layer, key_layer, value_layer) = self.split_tensor_along_last_dim(mixed_raw_layer, 3)
-
-        if self.position_encoding_2d:
-            q1, q2 = query_layer.chunk(2, dim=(query_layer.ndim - 1))
-            k1, k2 = key_layer.chunk(2, dim=(key_layer.ndim - 1))
-            cos, sin = self.rotary_emb(q1, seq_len=position_ids.max() + 1)
-            position_ids, block_position_ids = position_ids[:, 0, :].transpose(0, 1).contiguous(), \
-                position_ids[:, 1, :].transpose(0, 1).contiguous()
-            q1, k1 = apply_rotary_pos_emb_index(q1, k1, cos, sin, position_ids)
-            q2, k2 = apply_rotary_pos_emb_index(q2, k2, cos, sin, block_position_ids)
-            query_layer = torch.concat([q1, q2], dim=(q1.ndim - 1))
-            key_layer = torch.concat([k1, k2], dim=(k1.ndim - 1))
-        else:
-            position_ids = position_ids.transpose(0, 1)
-            cos, sin = self.rotary_emb(value_layer, seq_len=position_ids.max() + 1)
-            # [seq_len, batch, num_attention_heads, hidden_size_per_attention_head]
-            query_layer, key_layer = apply_rotary_pos_emb_index(query_layer, key_layer, cos, sin, position_ids)
-
-        # [seq_len, batch, hidden_size]
-        context_layer, present, attention_probs = attention_fn(
-            self=self,
-            query_layer=query_layer,
-            key_layer=key_layer,
-            value_layer=value_layer,
-            attention_mask=attention_mask,
-            hidden_size_per_partition=self.hidden_size_per_partition,
-            layer_id=layer_id,
-            layer_past=layer_past,
-            use_cache=use_cache
-        )
-
-        output = self.dense(context_layer)
-
-        outputs = (output, present)
-
-        if output_attentions:
-            outputs += (attention_probs,)
-
-        return outputs  # output, present, attention_probs
-
-
-class GEGLU(torch.nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.activation_fn = F.gelu
-
-    def forward(self, x):
-        # dim=-1 breaks in jit for pt<1.10
-        x1, x2 = x.chunk(2, dim=(x.ndim - 1))
-        return x1 * self.activation_fn(x2)
-
-
-class GLU(torch.nn.Module):
-    def __init__(self, hidden_size, inner_hidden_size=None,
-                 layer_id=None, bias=True, activation_func=gelu, params_dtype=torch.float):
-        super(GLU, self).__init__()
-        self.layer_id = layer_id
-        self.activation_func = activation_func
-
-        # Project to 4h.
-        self.hidden_size = hidden_size
-        if inner_hidden_size is None:
-            inner_hidden_size = 4 * hidden_size
-        self.inner_hidden_size = inner_hidden_size
-        self.dense_h_to_4h = skip_init(
-            torch.nn.Linear,
-            self.hidden_size,
-            self.inner_hidden_size,
-            bias=bias,
-            dtype=params_dtype,
-        )
-        # Project back to h.
-        self.dense_4h_to_h = skip_init(
-            torch.nn.Linear,
-            self.inner_hidden_size,
-            self.hidden_size,
-            bias=bias,
-            dtype=params_dtype,
-        )
-
-    def forward(self, hidden_states):
-        """
-        hidden_states: [seq_len, batch, hidden_size]
-        """
-
-        # [seq_len, batch, inner_hidden_size]
-        intermediate_parallel = self.dense_h_to_4h(hidden_states)
-
-        intermediate_parallel = self.activation_func(intermediate_parallel)
-
-        output = self.dense_4h_to_h(intermediate_parallel)
-
-        return output
-
-
-class GLMBlock(torch.nn.Module):
-    def __init__(
-            self,
-            hidden_size,
-            num_attention_heads,
-            layernorm_epsilon,
-            layer_id,
-            inner_hidden_size=None,
-            hidden_size_per_attention_head=None,
-            layernorm=LayerNorm,
-            use_bias=True,
-            params_dtype=torch.float,
-            num_layers=28,
-            position_encoding_2d=True
-    ):
-        super(GLMBlock, self).__init__()
-        # Set output layer initialization if not provided.
-
-        self.layer_id = layer_id
-
-        # Layernorm on the input data.
-        self.input_layernorm = layernorm(hidden_size, eps=layernorm_epsilon)
-
-        self.position_encoding_2d = position_encoding_2d
-
-        # Self attention.
-        self.attention = SelfAttention(
-            hidden_size,
-            num_attention_heads,
-            layer_id,
-            hidden_size_per_attention_head=hidden_size_per_attention_head,
-            bias=use_bias,
-            params_dtype=params_dtype,
-            position_encoding_2d=self.position_encoding_2d
-        )
-
-        # Layernorm after the self attention.
-        self.post_attention_layernorm = layernorm(hidden_size, eps=layernorm_epsilon)
-
-        self.num_layers = num_layers
-
-        # GLU
-        self.mlp = GLU(
-            hidden_size,
-            inner_hidden_size=inner_hidden_size,
-            bias=use_bias,
-            layer_id=layer_id,
-            params_dtype=params_dtype,
-        )
-
-    def forward(
-            self,
-            hidden_states: torch.Tensor,
-            position_ids,
-            attention_mask: torch.Tensor,
-            layer_id,
-            layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
-            use_cache: bool = False,
-            output_attentions: bool = False,
-    ):
-        """
-        hidden_states: [seq_len, batch, hidden_size]
-        attention_mask: [(1, 1), seq_len, seq_len]
-        """
-
-        # Layer norm at the beginning of the transformer layer.
-        # [seq_len, batch, hidden_size]
-        attention_input = self.input_layernorm(hidden_states)
-
-        # Self attention.
-        attention_outputs = self.attention(
-            attention_input,
-            position_ids,
-            attention_mask=attention_mask,
-            layer_id=layer_id,
-            layer_past=layer_past,
-            use_cache=use_cache,
-            output_attentions=output_attentions
-        )
-
-        attention_output = attention_outputs[0]
-
-        outputs = attention_outputs[1:]
-
-        # Residual connection.
-        alpha = (2 * self.num_layers) ** 0.5
-        hidden_states = attention_input * alpha + attention_output
-
-        mlp_input = self.post_attention_layernorm(hidden_states)
-
-        # MLP.
-        mlp_output = self.mlp(mlp_input)
-
-        # Second residual connection.
-        output = mlp_input * alpha + mlp_output
-
-        if use_cache:
-            outputs = (output,) + outputs
-        else:
-            outputs = (output,) + outputs[1:]
-
-        return outputs  # hidden_states, present, attentions
-
-
-class ChatGLMPreTrainedModel(PreTrainedModel):
-    """
-    An abstract class to handle weights initialization and
-    a simple interface for downloading and loading pretrained models.
-    """
-
-    is_parallelizable = False
-    supports_gradient_checkpointing = False
-    config_class = ChatGLMConfig
-    base_model_prefix = "transformer"
-    _no_split_modules = ["GLM6BBlock"]
-
-    def __init__(self, *inputs, **kwargs):
-        super().__init__(*inputs, **kwargs)
-
-    def _init_weights(self, module: nn.Module):
-        """Initialize the weights."""
-        return
-
-
-CHATGLM_6B_START_DOCSTRING = r"""
-    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class.
-    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general
-    usage and behavior.
-
-    Parameters:
-        config ([`~ChatGLM6BConfig`]): Model configuration class with all the parameters of the model.
-            Initializing with a config file does not load the weights associated with the model, only the configuration.
-            Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
-"""
-
-CHATGLM_6B_INPUTS_DOCSTRING = r"""
-    Args:
-        input_ids (`torch.LongTensor` of shape `({0})`):
-            Indices of input sequence tokens in the vocabulary.
-
-            Indices can be obtained using [`ChatGLM6BTokenizer`]. See [`PreTrainedTokenizer.encode`] and
-            [`PreTrainedTokenizer.__call__`] for details.
-
-            [What are input IDs?](../glossary#input-ids)
-        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
-            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
-
-            - 1 for tokens that are **not masked**,
-            - 0 for tokens that are **masked**.
-
-            [What are attention masks?](../glossary#attention-mask)
-        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
-            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`:
-
-            - 0 corresponds to a *sentence A* token,
-            - 1 corresponds to a *sentence B* token.
-
-            [What are token type IDs?](../glossary#token-type-ids)
-        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
-            Indices of positions of each input sequence token in the position embeddings.
-            Selected in the range `[0, config.max_position_embeddings - 1]`.
-
-            [What are position IDs?](../glossary#position-ids)
-        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
-            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
-
-            - 1 indicates the head is **not masked**,
-            - 0 indicates the head is **masked**.
-
-        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
-            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
-            This is useful if you want more control over how to convert *input_ids* indices into associated vectors
-            than the model's internal embedding lookup matrix.
-        output_attentions (`bool`, *optional*):
-            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
-            tensors for more detail.
-        output_hidden_states (`bool`, *optional*):
-            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
-            more detail.
-        return_dict (`bool`, *optional*):
-            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
-"""
-
-
-@add_start_docstrings(
-    "The bare ChatGLM-6B Model transformer outputting raw hidden-states without any specific head on top.",
-    CHATGLM_6B_START_DOCSTRING,
-)
-class ChatGLMModel(ChatGLMPreTrainedModel):
-    """
-
-    The model can behave as an encoder (with only self-attention) as well
-    as a decoder, in which case a layer of cross-attention is added between
-    the self-attention layers, following the architecture described in [Attention is
-    all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani,
-    Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
-
-    To behave as a decoder the model needs to be initialized with the
-    `is_decoder` argument of the configuration set to `True`.
-    To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder`
-    argument and `add_cross_attention` set to `True`; an
-    `encoder_hidden_states` is then expected as an input to the forward pass.
-    """
-
-    def __init__(self, config: ChatGLMConfig):
-        super().__init__(config)
-
-        # recording parameters
-        self.max_sequence_length = config.max_sequence_length
-        self.hidden_size = config.hidden_size
-        self.params_dtype = torch.half
-        self.num_attention_heads = config.num_attention_heads
-        self.vocab_size = config.vocab_size
-        self.num_layers = config.num_layers
-        self.layernorm_epsilon = config.layernorm_epsilon
-        self.inner_hidden_size = config.inner_hidden_size
-        self.hidden_size_per_attention_head = self.hidden_size // self.num_attention_heads
-        self.position_encoding_2d = config.position_encoding_2d
-
-        self.word_embeddings = skip_init(
-            torch.nn.Embedding,
-            num_embeddings=self.vocab_size, embedding_dim=self.hidden_size,
-            dtype=self.params_dtype
-        )
-
-        def get_layer(layer_id):
-            return GLMBlock(
-                self.hidden_size,
-                self.num_attention_heads,
-                self.layernorm_epsilon,
-                layer_id,
-                inner_hidden_size=self.inner_hidden_size,
-                hidden_size_per_attention_head=self.hidden_size_per_attention_head,
-                layernorm=LayerNorm,
-                use_bias=True,
-                params_dtype=self.params_dtype,
-                position_encoding_2d=self.position_encoding_2d,
-            )
-
-        self.layers = torch.nn.ModuleList(
-            [get_layer(layer_id) for layer_id in range(self.num_layers)]
-        )
-
-        # Final layer norm before output.
-        self.final_layernorm = LayerNorm(self.hidden_size, eps=self.layernorm_epsilon)
-
-    def get_input_embeddings(self):
-        return self.word_embeddings
-
-    def set_input_embeddings(self, new_embeddings: torch.Tensor):
-        self.word_embeddings = new_embeddings
-
-    def get_masks(self, seq, device):
-        context_length = seq.index(self.config.bos_token_id) + 1
-
-        attention_mask = torch.ones((1, len(seq), len(seq)), device=device)
-        attention_mask.tril_()
-        attention_mask[..., :context_length - 1] = 1
-        attention_mask.unsqueeze_(1)
-        attention_mask = (attention_mask < 0.5).bool()
-
-        return attention_mask
-
-    def get_position_ids(self, seq, mask_position, device, gmask=False):
-        context_length = seq.index(self.config.bos_token_id) + 1
-        if self.position_encoding_2d:
-            seq_length = seq.index(self.config.bos_token_id)
-            position_ids = torch.arange(context_length, dtype=torch.long, device=device)
-            if not gmask:
-                position_ids[seq_length:] = mask_position
-            block_position_ids = torch.cat((
-                torch.zeros(seq_length, dtype=torch.long, device=device),
-                torch.arange(context_length - seq_length, dtype=torch.long, device=device) + 1
-            ))
-            position_ids = torch.stack((position_ids, block_position_ids), dim=0)
-        else:
-            position_ids = torch.arange(context_length, dtype=torch.long, device=device)
-            if not gmask:
-                position_ids[context_length - 1:] = mask_position
-
-        position_ids = position_ids.unsqueeze(0)
-
-        return position_ids
-
-    @add_start_docstrings_to_model_forward(CHATGLM_6B_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
-    @add_code_sample_docstrings(
-        checkpoint=_CHECKPOINT_FOR_DOC,
-        output_type=BaseModelOutputWithPastAndCrossAttentions,
-        config_class=_CONFIG_FOR_DOC,
-    )
-    def forward(
-            self,
-            input_ids: Optional[torch.LongTensor] = None,
-            position_ids: Optional[torch.LongTensor] = None,
-            attention_mask: Optional[torch.Tensor] = None,
-            past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
-            inputs_embeds: Optional[torch.LongTensor] = None,
-            use_cache: Optional[bool] = None,
-            output_attentions: Optional[bool] = None,
-            output_hidden_states: Optional[bool] = None,
-            return_dict: Optional[bool] = None,
-    ) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPast]:
-
-        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
-        output_hidden_states = (
-            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
-        )
-        use_cache = use_cache if use_cache is not None else self.config.use_cache
-        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
-        if input_ids is not None and inputs_embeds is not None:
-            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
-        elif input_ids is not None:
-            batch_size, seq_length = input_ids.shape[:2]
-        elif inputs_embeds is not None:
-            batch_size, seq_length, _ = inputs_embeds.shape[:2]
-        else:
-            raise ValueError("You have to specify either input_ids or inputs_embeds")
-
-        if past_key_values is None:
-            past_key_values = tuple([None] * len(self.layers))
-            seq = input_ids[0].tolist()
-
-            if attention_mask is None:
-                attention_mask = self.get_masks(
-                    seq=seq,
-                    device=input_ids.device
-                )
-
-            if position_ids is None:
-                MASK, gMASK = 150000, 150001
-                mask_token = MASK if MASK in input_ids else gMASK
-                use_gmask = False if MASK in input_ids else gMASK
-
-                mask_position = seq.index(mask_token)
-                position_ids = self.get_position_ids(
-                    seq=seq,
-                    mask_position=mask_position,
-                    device=input_ids.device,
-                    gmask=use_gmask
-                )
-
-        if inputs_embeds is None:
-            inputs_embeds = self.word_embeddings(input_ids)
-
-        # [seq_len, batch, hidden_size]
-        hidden_states = inputs_embeds.transpose(0, 1)
-
-        presents = () if use_cache else None
-        all_self_attentions = () if output_attentions else None
-        all_hidden_states = () if output_hidden_states else None
-
-        seq_length_with_past = seq_length
-        past_key_values_length = 0
-        if past_key_values[0] is not None:
-            past_key_values_length = past_key_values[0][0].shape[0]
-            seq_length_with_past = seq_length_with_past + past_key_values_length
-        if attention_mask is None:
-            attention_mask = torch.zeros(1, 1, device=input_ids.device).bool()
-        else:
-            attention_mask = attention_mask.to(input_ids.device)
-
-        for i, layer in enumerate(self.layers):
-
-            if output_hidden_states:
-                all_hidden_states = all_hidden_states + (hidden_states,)
-
-            layer_ret = layer(
-                hidden_states,
-                position_ids=position_ids,
-                attention_mask=attention_mask,
-                layer_id=torch.tensor(i),
-                layer_past=past_key_values[i],
-                use_cache=use_cache,
-                output_attentions=output_attentions
-            )
-
-            hidden_states = layer_ret[0]
-
-            if use_cache:
-                presents = presents + (layer_ret[1],)
-
-            if output_attentions:
-                all_self_attentions = all_self_attentions + (layer_ret[2 if use_cache else 1],)
-
-        # Final layer norm.
-        hidden_states = self.final_layernorm(hidden_states)
-
-        if output_hidden_states:
-            all_hidden_states = all_hidden_states + (hidden_states,)
-
-        if not return_dict:
-            return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
-
-        return BaseModelOutputWithPast(
-            last_hidden_state=hidden_states,
-            past_key_values=presents,
-            hidden_states=all_hidden_states,
-            attentions=all_self_attentions,
-        )
-
-
-class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):
-    def __init__(self, config):
-        super().__init__(config)
-
-        # self.hidden_size = config.hidden_size
-        # self.params_dtype = torch.half
-        # self.vocab_size = config.vocab_size
-        self.max_sequence_length = config.max_sequence_length
-
-        self.position_encoding_2d = config.position_encoding_2d
-
-        self.transformer = ChatGLMModel(config)
-
-        self.lm_head = skip_init(
-            nn.Linear,
-            config.hidden_size,
-            config.vocab_size,
-            bias=False,
-            dtype=torch.half
-        )
-
-    def get_output_embeddings(self):
-        return self.lm_head
-
-    def set_output_embeddings(self, new_embeddings):
-        self.lm_head = new_embeddings
-
-    def get_masks_and_position_ids(self, seq, mask_position, context_length, device, gmask=False):
-        attention_mask = torch.ones((1, context_length, context_length), device=device)
-        attention_mask.tril_()
-        attention_mask[..., :context_length - 1] = 1
-        attention_mask.unsqueeze_(1)
-        attention_mask = (attention_mask < 0.5).bool()
-
-        if self.position_encoding_2d:
-            seq_length = seq.index(self.config.bos_token_id)
-            position_ids = torch.arange(context_length, dtype=torch.long, device=device)
-            if not gmask:
-                position_ids[seq_length:] = mask_position
-            block_position_ids = torch.cat((
-                torch.zeros(seq_length, dtype=torch.long, device=device),
-                torch.arange(context_length - seq_length, dtype=torch.long, device=device) + 1
-            ))
-            position_ids = torch.stack((position_ids, block_position_ids), dim=0)
-        else:
-            position_ids = torch.arange(context_length, dtype=torch.long, device=device)
-            if not gmask:
-                position_ids[context_length - 1:] = mask_position
-
-        position_ids = position_ids.unsqueeze(0)
-
-        return attention_mask, position_ids
-
-    def prepare_inputs_for_generation(
-            self,
-            input_ids: torch.LongTensor,
-            past: Optional[torch.Tensor] = None,
-            past_key_values: Optional[torch.Tensor] = None,
-            attention_mask: Optional[torch.Tensor] = None,
-            **kwargs
-    ) -> dict:
-
-        MASK, gMASK = 150000, 150001
-        mask_token = MASK if MASK in input_ids else gMASK
-        use_gmask = False if MASK in input_ids else gMASK
-        seq = input_ids[0].tolist()
-        mask_position = seq.index(mask_token)
-
-        if mask_token not in seq:
-            raise ValueError("You have to add either [MASK] or [gMASK] in your input")
-
-        # only last token for input_ids if past is not None
-        if past is not None or past_key_values is not None:
-            context_length = seq.index(self.config.bos_token_id)
-            last_token = input_ids[:, -1].unsqueeze(-1)
-            if self.position_encoding_2d:
-                position_ids = torch.tensor([[[mask_position], [len(seq) - context_length]]], dtype=torch.long,
-                                            device=input_ids.device)
-            else:
-                position_ids = torch.tensor([[mask_position]], dtype=torch.long, device=input_ids.device)
-
-            if past is None:
-                past = past_key_values
-            return {
-                "input_ids": last_token,
-                "past_key_values": past,
-                "position_ids": position_ids,
-            }
-        else:
-            attention_mask, position_ids = self.get_masks_and_position_ids(
-                seq=seq,
-                mask_position=mask_position,
-                context_length=len(seq),
-                device=input_ids.device,
-                gmask=use_gmask
-            )
-
-            return {
-                "input_ids": input_ids,
-                "past_key_values": past,
-                "position_ids": position_ids,
-                "attention_mask": attention_mask
-            }
-
-    def forward(
-            self,
-            input_ids: Optional[torch.Tensor] = None,
-            position_ids: Optional[torch.Tensor] = None,
-            attention_mask: Optional[torch.Tensor] = None,
-            past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
-            inputs_embeds: Optional[torch.Tensor] = None,
-            labels: Optional[torch.Tensor] = None,
-            use_cache: Optional[bool] = None,
-            output_attentions: Optional[bool] = None,
-            output_hidden_states: Optional[bool] = None,
-            return_dict: Optional[bool] = None,
-    ):
-        use_cache = use_cache if use_cache is not None else self.config.use_cache
-        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
-        transformer_outputs = self.transformer(
-            input_ids=input_ids,
-            position_ids=position_ids,
-            attention_mask=attention_mask,
-            past_key_values=past_key_values,
-            inputs_embeds=inputs_embeds,
-            use_cache=use_cache,
-            output_attentions=output_attentions,
-            output_hidden_states=output_hidden_states,
-            return_dict=return_dict,
-        )
-
-        hidden_states = transformer_outputs[0]
-
-        lm_logits = self.lm_head(hidden_states).permute(1, 0, 2).contiguous()
-
-        loss = None
-        if labels is not None:
-            lm_logits = lm_logits.to(torch.float32)
-
-            # Shift so that tokens < n predict n
-            shift_logits = lm_logits[..., :-1, :].contiguous()
-            shift_labels = labels[..., 1:].contiguous()
-            # Flatten the tokens
-            loss_fct = CrossEntropyLoss()
-            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
-
-            lm_logits = lm_logits.to(hidden_states.dtype)
-            loss = loss.to(hidden_states.dtype)
-
-        if not return_dict:
-            output = (lm_logits,) + transformer_outputs[1:]
-            return ((loss,) + output) if loss is not None else output
-
-        return CausalLMOutputWithPast(
-            loss=loss,
-            logits=lm_logits,
-            past_key_values=transformer_outputs.past_key_values,
-            hidden_states=transformer_outputs.hidden_states,
-            attentions=transformer_outputs.attentions,
-        )
-
-    @staticmethod
-    def _reorder_cache(
-            past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor
-    ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]:
-        """
-        This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
-        [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
-        beam_idx at every generation step.
-
-        Output shares the same memory storage as `past`.
-        """
-        return tuple(
-            (
-                layer_past[0].index_select(1, beam_idx.to(layer_past[0].device)),
-                layer_past[1].index_select(1, beam_idx.to(layer_past[1].device)),
-            )
-            for layer_past in past
-        )
-
-    def process_response(self, response):
-        response = response.strip()
-        response = response.replace("[[训练时间]]", "2023年")
-        punkts = [
-            [",", ","],
-            ["!", "!"],
-            [":", ":"],
-            [";", ";"],
-            ["\?", "?"],
-        ]
-        for item in punkts:
-            response = re.sub(r"([\u4e00-\u9fff])%s" % item[0], r"\1%s" % item[1], response)
-            response = re.sub(r"%s([\u4e00-\u9fff])" % item[0], r"%s\1" % item[1], response)
-        return response
-
-    @torch.no_grad()
-    def chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, max_length: int = 2048, num_beams=1,
-             do_sample=True, top_p=0.7, temperature=0.95, logits_processor=None, **kwargs):
-        if history is None:
-            history = []
-        if logits_processor is None:
-            logits_processor = LogitsProcessorList()
-        logits_processor.append(InvalidScoreLogitsProcessor())
-        gen_kwargs = {"max_length": max_length, "num_beams": num_beams, "do_sample": do_sample, "top_p": top_p,
-                      "temperature": temperature, "logits_processor": logits_processor, **kwargs}
-        if not history:
-            prompt = query
-        else:
-            prompt = ""
-            for i, (old_query, response) in enumerate(history):
-                prompt += "[Round {}]\n问:{}\n答:{}\n".format(i, old_query, response)
-            prompt += "[Round {}]\n问:{}\n答:".format(len(history), query)
-        input_ids = tokenizer([prompt], return_tensors="pt", padding=True)
-        input_ids = input_ids.to(self.device)
-        outputs = self.generate(**input_ids, **gen_kwargs)
-        outputs = outputs.tolist()[0][len(input_ids["input_ids"][0]):]
-        response = tokenizer.decode(outputs)
-        response = self.process_response(response)
-        history = history + [(query, response)]
-        return response, history
-
-    @torch.no_grad()
-    def stream_chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, max_length: int = 2048,
-                    do_sample=True, top_p=0.7, temperature=0.95, logits_processor=None, **kwargs):
-        if history is None:
-            history = []
-        if logits_processor is None:
-            logits_processor = LogitsProcessorList()
-        logits_processor.append(InvalidScoreLogitsProcessor())
-        gen_kwargs = {"max_length": max_length, "do_sample": do_sample, "top_p": top_p,
-                      "temperature": temperature, "logits_processor": logits_processor, **kwargs}
-        if not history:
-            prompt = query
-        else:
-            prompt = ""
-            for i, (old_query, response) in enumerate(history):
-                prompt += "[Round {}]\n问:{}\n答:{}\n".format(i, old_query, response)
-            prompt += "[Round {}]\n问:{}\n答:".format(len(history), query)
-        input_ids = tokenizer([prompt], return_tensors="pt", padding=True)
-        input_ids = input_ids.to(self.device)
-        for outputs in self.stream_generate(**input_ids, **gen_kwargs):
-            outputs = outputs.tolist()[0][len(input_ids["input_ids"][0]):]
-            response = tokenizer.decode(outputs)
-            response = self.process_response(response)
-            new_history = history + [(query, response)]
-            yield response, new_history
-
-    @torch.no_grad()
-    def stream_generate(
-            self,
-            input_ids,
-            generation_config: Optional[GenerationConfig] = None,
-            logits_processor: Optional[LogitsProcessorList] = None,
-            stopping_criteria: Optional[StoppingCriteriaList] = None,
-            prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
-            **kwargs,
-    ):
-        batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1]
-
-        if generation_config is None:
-            generation_config = self.generation_config
-        generation_config = copy.deepcopy(generation_config)
-        model_kwargs = generation_config.update(**kwargs)
-        bos_token_id, eos_token_id = generation_config.bos_token_id, generation_config.eos_token_id
-
-        if isinstance(eos_token_id, int):
-            eos_token_id = [eos_token_id]
-
-        has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
-        if has_default_max_length and generation_config.max_new_tokens is None:
-            warnings.warn(
-                f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. "
-                "This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we"
-                " recommend using `max_new_tokens` to control the maximum length of the generation.",
-                UserWarning,
-            )
-        elif generation_config.max_new_tokens is not None:
-            generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length
-            if not has_default_max_length:
-                logger.warn(
-                    f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(="
-                    f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
-                    "Please refer to the documentation for more information. "
-                    "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)",
-                    UserWarning,
-                )
-
-        if input_ids_seq_length >= generation_config.max_length:
-            input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
-            logger.warning(
-                f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to"
-                f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
-                " increasing `max_new_tokens`."
-            )
-
-        # 2. Set generation parameters if not already defined
-        logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
-        stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
-
-        logits_processor = self._get_logits_processor(
-            generation_config=generation_config,
-            input_ids_seq_length=input_ids_seq_length,
-            encoder_input_ids=input_ids,
-            prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
-            logits_processor=logits_processor,
-        )
-
-        stopping_criteria = self._get_stopping_criteria(
-            generation_config=generation_config, stopping_criteria=stopping_criteria
-        )
-        logits_warper = self._get_logits_warper(generation_config)
-
-        unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)
-        scores = None
-        while True:
-            model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
-            # forward pass to get next token
-            outputs = self(
-                **model_inputs,
-                return_dict=True,
-                output_attentions=False,
-                output_hidden_states=False,
-            )
-
-            next_token_logits = outputs.logits[:, -1, :]
-
-            # pre-process distribution
-            next_token_scores = logits_processor(input_ids, next_token_logits)
-            next_token_scores = logits_warper(input_ids, next_token_scores)
-
-            # sample
-            probs = nn.functional.softmax(next_token_scores, dim=-1)
-            if generation_config.do_sample:
-                next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
-            else:
-                next_tokens = torch.argmax(probs, dim=-1)
-
-            # update generated ids, model inputs, and length for next step
-            input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
-            model_kwargs = self._update_model_kwargs_for_generation(
-                outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
-            )
-            unfinished_sequences = unfinished_sequences.mul((sum(next_tokens != i for i in eos_token_id)).long())
-
-            # stop when each sentence is finished, or if we exceed the maximum length
-            if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):
-                break
-            yield input_ids
-
-    def quantize(self, bits: int):
-        from .quantization import quantize
-        self.transformer = quantize(self.transformer, bits)
-        return self
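Nothing from the deleted `chat`/`stream_chat` interface is lost by this refactor: with `trust_remote_code=True`, equivalent methods are served from the upstream THUDM/chatglm-6b repository, and `PeftModel` forwards attribute access to the wrapped base model. A hedged usage sketch, assuming the `model` and `tokenizer` built in app.py above:

```python
# Single-turn query; history is a list of (query, response) tuples that the
# model formats into the "[Round i]\n问:...\n答:..." prompt shown above.
response, history = model.chat(tokenizer, "你好", history=[])
print(response)

# Streaming variant: yields the partial response after each sampled token.
for response, history in model.stream_chat(tokenizer, "继续", history=history):
    pass  # e.g. update a UI with the growing `response`
print(response)
```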