openChatformer committed on
Commit 77a4a43 (1 parent: 46bec01)

yingbao ChatGLM model

config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_name_or_path": "openChatformer/yingbao-v1",
+   "architectures": [
+     "ChatGLMForConditionalGeneration"
+   ],
+   "auto_map": {
+     "AutoConfig": "configuration_chatglm.ChatGLMConfig",
+     "AutoModel": "modeling_chatglm.ChatGLMForConditionalGeneration",
+     "AutoModelForSeq2SeqLM": "modeling_chatglm.ChatGLMForConditionalGeneration"
+   },
+   "bos_token_id": 130004,
+   "eos_token_id": 130005,
+   "gmask_token_id": 130001,
+   "hidden_size": 4096,
+   "inner_hidden_size": 16384,
+   "layernorm_epsilon": 1e-05,
+   "mask_token_id": 130000,
+   "max_sequence_length": 2048,
+   "model_type": "chatglm",
+   "num_attention_heads": 32,
+   "num_layers": 28,
+   "pad_token_id": 3,
+   "position_encoding_2d": true,
+   "pre_seq_len": 8,
+   "prefix_projection": false,
+   "quantization_bit": 0,
+   "torch_dtype": "float16",
+   "transformers_version": "4.27.1",
+   "use_cache": true,
+   "vocab_size": 130528
+ }
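Because config.json registers the custom classes through `auto_map`, the checkpoint is meant to be loaded with remote code enabled. A minimal loading sketch, assuming the files in this commit are published under the `_name_or_path` repo id above and that the matching ChatGLM tokenizer files (ice_text.model together with its tokenization code) are also available in the repo:

```python
# Sketch only; repo id taken from "_name_or_path" above, adjust if the model lives elsewhere.
from transformers import AutoModel, AutoTokenizer

repo_id = "openChatformer/yingbao-v1"
tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModel.from_pretrained(repo_id, trust_remote_code=True).half().cuda().eval()

# chat() is defined on ChatGLMForConditionalGeneration in modeling_chatglm.py below
# and returns the reply plus the updated conversation history.
response, history = model.chat(tokenizer, "Hello, who are you?", history=[])
print(response)
```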
configuration_chatglm.py ADDED
@@ -0,0 +1,103 @@
+ """ ChatGLM model configuration """
+ 
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+ 
+ logger = logging.get_logger(__name__)
+ 
+ 
+ class ChatGLMConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`~ChatGLMModel`].
+     It is used to instantiate a ChatGLM model according to the specified arguments, defining the model
+     architecture. Instantiating a configuration with the defaults will yield a configuration similar to that of
+     the ChatGLM-6B [THUDM/ChatGLM-6B](https://huggingface.co/THUDM/chatglm-6b) architecture.
+ 
+     Configuration objects inherit from [`PretrainedConfig`] and can be used
+     to control the model outputs. Read the documentation from [`PretrainedConfig`]
+     for more information.
+ 
+     Args:
+         vocab_size (`int`, *optional*, defaults to 150528):
+             Vocabulary size of the ChatGLM-6B model. Defines the number of different tokens that can be
+             represented by the `inputs_ids` passed when calling [`~ChatGLMModel`] or [`~TFChatGLMModel`].
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimension of the encoder layers and the pooler layer.
+         num_layers (`int`, *optional*, defaults to 28):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         inner_hidden_size (`int`, *optional*, defaults to 16384):
+             Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+         max_sequence_length (`int`, *optional*, defaults to 2048):
+             The maximum sequence length that this model might ever be used with.
+             Typically set this to something large just in case (e.g., 512, 1024, or 2048).
+         layernorm_epsilon (`float`, *optional*, defaults to 1e-5):
+             The epsilon used by the layer normalization layers.
+         use_cache (`bool`, *optional*, defaults to `False`):
+             Whether the model should return the last key/value attentions (not used by all models).
+     Example:
+ 
+     ```python
+     >>> from configuration_chatglm import ChatGLMConfig
+     >>> from modeling_chatglm import ChatGLMModel
+ 
+     >>> # Initializing a ChatGLM-6B THUDM/ChatGLM-6B style configuration
+     >>> configuration = ChatGLMConfig()
+ 
+     >>> # Initializing a model from the THUDM/ChatGLM-6B style configuration
+     >>> model = ChatGLMModel(configuration)
+ 
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```
+     """
+     model_type = "chatglm"
+ 
+     def __init__(
+         self,
+         vocab_size=150528,
+         hidden_size=4096,
+         num_layers=28,
+         num_attention_heads=32,
+         layernorm_epsilon=1e-5,
+         use_cache=False,
+         bos_token_id=150004,
+         eos_token_id=150005,
+         mask_token_id=150000,
+         gmask_token_id=150001,
+         pad_token_id=0,
+         max_sequence_length=2048,
+         inner_hidden_size=16384,
+         position_encoding_2d=True,
+         quantization_bit=0,
+         pre_seq_len=None,
+         prefix_projection=False,
+         **kwargs
+     ):
+         self.num_layers = num_layers
+         self.vocab_size = vocab_size
+         self.hidden_size = hidden_size
+         self.num_attention_heads = num_attention_heads
+         self.max_sequence_length = max_sequence_length
+         self.layernorm_epsilon = layernorm_epsilon
+         self.inner_hidden_size = inner_hidden_size
+         self.use_cache = use_cache
+         self.bos_token_id = bos_token_id
+         self.eos_token_id = eos_token_id
+         self.pad_token_id = pad_token_id
+         self.mask_token_id = mask_token_id
+         self.gmask_token_id = gmask_token_id
+         self.position_encoding_2d = position_encoding_2d
+         self.quantization_bit = quantization_bit
+         self.pre_seq_len = pre_seq_len
+         self.prefix_projection = prefix_projection
+ 
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             **kwargs
+         )
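Note that config.json above sets `pre_seq_len` to 8 with `prefix_projection` disabled, i.e. this checkpoint carries a P-Tuning v2 prefix. With the `PrefixEncoder` defined in modeling_chatglm.py below, the no-projection prefix is a single embedding of shape `(pre_seq_len, num_layers * hidden_size * 2)`, so it contributes roughly 1.8M parameters:

```python
# Back-of-the-envelope count for the no-projection PrefixEncoder,
# using the values from config.json above (illustrative sketch only).
pre_seq_len, num_layers, hidden_size = 8, 28, 4096
prefix_params = pre_seq_len * (num_layers * hidden_size * 2)
print(prefix_params)  # 1835008, i.e. about 1.8M trainable prefix parameters
```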
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 130004,
+   "eos_token_id": 130005,
+   "pad_token_id": 3,
+   "transformers_version": "4.27.1"
+ }
ice_text.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5e974d9a69c242ce014c88c2b26089270f6198f3c0b700a887666cd3e816f17e
+ size 2706249
modeling_chatglm.py ADDED
@@ -0,0 +1,1436 @@
1
+ """ PyTorch ChatGLM model. """
2
+
3
+ import math
4
+ import copy
5
+ import os
6
+ import warnings
7
+ import re
8
+ import sys
9
+
10
+ import torch
11
+ import torch.utils.checkpoint
12
+ import torch.nn.functional as F
13
+ from torch import nn
14
+ from torch.nn import CrossEntropyLoss, LayerNorm
15
+ from torch.nn.utils import skip_init
16
+ from typing import Optional, Tuple, Union, List, Callable, Dict, Any
17
+
18
+ from transformers.utils import (
19
+ add_code_sample_docstrings,
20
+ add_start_docstrings,
21
+ add_start_docstrings_to_model_forward,
22
+ )
23
+ from transformers.modeling_outputs import (
24
+ BaseModelOutputWithPast,
25
+ CausalLMOutputWithPast,
26
+ BaseModelOutputWithPastAndCrossAttentions,
27
+ )
28
+ from transformers.modeling_utils import PreTrainedModel
29
+ from transformers.utils import logging
30
+ from transformers.generation.logits_process import LogitsProcessor
31
+ from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationConfig, ModelOutput
32
+
33
+ from .configuration_chatglm import ChatGLMConfig
34
+
35
+ # flags required to enable jit fusion kernels
36
+
37
+ if sys.platform != 'darwin':
38
+ torch._C._jit_set_profiling_mode(False)
39
+ torch._C._jit_set_profiling_executor(False)
40
+ torch._C._jit_override_can_fuse_on_cpu(True)
41
+ torch._C._jit_override_can_fuse_on_gpu(True)
42
+
43
+ logger = logging.get_logger(__name__)
44
+
45
+ _CHECKPOINT_FOR_DOC = "THUDM/ChatGLM-6B"
46
+ _CONFIG_FOR_DOC = "ChatGLM6BConfig"
47
+
48
+ CHATGLM_6B_PRETRAINED_MODEL_ARCHIVE_LIST = [
49
+ "THUDM/chatglm-6b",
50
+ # See all ChatGLM-6B models at https://huggingface.co/models?filter=chatglm
51
+ ]
52
+
53
+
54
+ class InvalidScoreLogitsProcessor(LogitsProcessor):
55
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
56
+ if torch.isnan(scores).any() or torch.isinf(scores).any():
57
+ scores.zero_()
58
+ scores[..., 5] = 5e4
59
+ return scores
60
+
61
+
62
+ def load_tf_weights_in_chatglm_6b(model, config, tf_checkpoint_path):
63
+ """Load tf checkpoints in a pytorch model."""
64
+ try:
65
+ import re
66
+
67
+ import numpy as np
68
+ import tensorflow as tf
69
+ except ImportError:
70
+ logger.error(
71
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
72
+ "https://www.tensorflow.org/install/ for installation instructions."
73
+ )
74
+ raise
75
+ tf_path = os.path.abspath(tf_checkpoint_path)
76
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
77
+ # Load weights from TF model
78
+ init_vars = tf.train.list_variables(tf_path)
79
+ names = []
80
+ arrays = []
81
+ for name, shape in init_vars:
82
+ logger.info(f"Loading TF weight {name} with shape {shape}")
83
+ array = tf.train.load_variable(tf_path, name)
84
+ names.append(name)
85
+ arrays.append(array)
86
+
87
+ for name, array in zip(names, arrays):
88
+ name = name.split("/")
89
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
90
+ # which are not required for using pretrained model
91
+ if any(
92
+ n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
93
+ for n in name
94
+ ):
95
+ logger.info(f"Skipping {'/'.join(name)}")
96
+ continue
97
+ pointer = model
98
+ for m_name in name:
99
+ if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
100
+ scope_names = re.split(r"_(\d+)", m_name)
101
+ else:
102
+ scope_names = [m_name]
103
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
104
+ pointer = getattr(pointer, "weight")
105
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
106
+ pointer = getattr(pointer, "bias")
107
+ elif scope_names[0] == "output_weights":
108
+ pointer = getattr(pointer, "weight")
109
+ elif scope_names[0] == "squad":
110
+ pointer = getattr(pointer, "classifier")
111
+ else:
112
+ try:
113
+ pointer = getattr(pointer, scope_names[0])
114
+ except AttributeError:
115
+ logger.info(f"Skipping {'/'.join(name)}")
116
+ continue
117
+ if len(scope_names) >= 2:
118
+ num = int(scope_names[1])
119
+ pointer = pointer[num]
120
+ if m_name[-11:] == "_embeddings":
121
+ pointer = getattr(pointer, "weight")
122
+ elif m_name == "kernel":
123
+ array = np.transpose(array)
124
+ try:
125
+ assert (
126
+ pointer.shape == array.shape
127
+ ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
128
+ except AssertionError as e:
129
+ e.args += (pointer.shape, array.shape)
130
+ raise
131
+ logger.info(f"Initialize PyTorch weight {name}")
132
+ pointer.data = torch.from_numpy(array)
133
+ return model
134
+
135
+
136
+ class PrefixEncoder(torch.nn.Module):
137
+ """
138
+ The torch.nn model to encode the prefix
139
+ Input shape: (batch-size, prefix-length)
140
+ Output shape: (batch-size, prefix-length, 2*layers*hidden)
141
+ """
142
+
143
+ def __init__(self, config):
144
+ super().__init__()
145
+ self.prefix_projection = config.prefix_projection
146
+ if self.prefix_projection:
147
+ # Use a two-layer MLP to encode the prefix
148
+ self.embedding = torch.nn.Embedding(config.pre_seq_len, config.hidden_size)
149
+ self.trans = torch.nn.Sequential(
150
+ torch.nn.Linear(config.hidden_size, config.hidden_size),
151
+ torch.nn.Tanh(),
152
+ torch.nn.Linear(config.hidden_size, config.num_layers * config.hidden_size * 2)
153
+ )
154
+ else:
155
+ self.embedding = torch.nn.Embedding(config.pre_seq_len, config.num_layers * config.hidden_size * 2)
156
+
157
+ def forward(self, prefix: torch.Tensor):
158
+ if self.prefix_projection:
159
+ prefix_tokens = self.embedding(prefix)
160
+ past_key_values = self.trans(prefix_tokens)
161
+ else:
162
+ past_key_values = self.embedding(prefix)
163
+ return past_key_values
164
+
165
+
166
+ @torch.jit.script
167
+ def gelu_impl(x):
168
+ """OpenAI's gelu implementation."""
169
+ return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x *
170
+ (1.0 + 0.044715 * x * x)))
171
+
172
+
173
+ def gelu(x):
174
+ return gelu_impl(x)
175
+
176
+
177
+ class RotaryEmbedding(torch.nn.Module):
178
+ def __init__(self, dim, base=10000, precision=torch.half, learnable=False):
179
+ super().__init__()
180
+ inv_freq = 1. / (base ** (torch.arange(0, dim, 2).float() / dim))
181
+ inv_freq = inv_freq.half()
182
+ self.learnable = learnable
183
+ if learnable:
184
+ self.inv_freq = torch.nn.Parameter(inv_freq)
185
+ self.max_seq_len_cached = None
186
+ else:
187
+ self.register_buffer('inv_freq', inv_freq)
188
+ self.max_seq_len_cached = None
189
+ self.cos_cached = None
190
+ self.sin_cached = None
191
+ self.precision = precision
192
+
193
+ def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys,
194
+ error_msgs):
195
+ pass
196
+
197
+ def forward(self, x, seq_dim=1, seq_len=None):
198
+ if seq_len is None:
199
+ seq_len = x.shape[seq_dim]
200
+ if self.max_seq_len_cached is None or (seq_len > self.max_seq_len_cached):
201
+ self.max_seq_len_cached = None if self.learnable else seq_len
202
+ t = torch.arange(seq_len, device=x.device, dtype=self.inv_freq.dtype)
203
+ freqs = torch.einsum('i,j->ij', t, self.inv_freq)
204
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
205
+ emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
206
+ if self.precision == torch.bfloat16:
207
+ emb = emb.float()
208
+
209
+ # [sx, 1 (b * np), hn]
210
+ cos_cached = emb.cos()[:, None, :]
211
+ sin_cached = emb.sin()[:, None, :]
212
+ if self.precision == torch.bfloat16:
213
+ cos_cached = cos_cached.bfloat16()
214
+ sin_cached = sin_cached.bfloat16()
215
+ if self.learnable:
216
+ return cos_cached, sin_cached
217
+ self.cos_cached, self.sin_cached = cos_cached, sin_cached
218
+ return self.cos_cached[:seq_len, ...], self.sin_cached[:seq_len, ...]
219
+
220
+ def _apply(self, fn):
221
+ if self.cos_cached is not None:
222
+ self.cos_cached = fn(self.cos_cached)
223
+ if self.sin_cached is not None:
224
+ self.sin_cached = fn(self.sin_cached)
225
+ return super()._apply(fn)
226
+
227
+
228
+ def rotate_half(x):
229
+ x1, x2 = x[..., :x.shape[-1] // 2], x[..., x.shape[-1] // 2:]
230
+ return torch.cat((-x2, x1), dim=x1.ndim - 1) # dim=-1 triggers a bug in earlier torch versions
231
+
232
+
233
+ @torch.jit.script
234
+ def apply_rotary_pos_emb_index(q, k, cos, sin, position_id):
235
+ # position_id: [sq, b], q, k: [sq, b, np, hn], cos: [sq, 1, hn] -> [sq, b, 1, hn]
236
+ cos, sin = F.embedding(position_id, cos.squeeze(1)).unsqueeze(2), \
237
+ F.embedding(position_id, sin.squeeze(1)).unsqueeze(2)
238
+ q, k = (q * cos) + (rotate_half(q) * sin), (k * cos) + (rotate_half(k) * sin)
239
+ return q, k
240
+
241
+
242
+ def attention_fn(
243
+ self,
244
+ query_layer,
245
+ key_layer,
246
+ value_layer,
247
+ attention_mask,
248
+ hidden_size_per_partition,
249
+ layer_id,
250
+ layer_past=None,
251
+ scaling_attention_score=True,
252
+ use_cache=False,
253
+ ):
254
+ if layer_past is not None:
255
+ past_key, past_value = layer_past[0], layer_past[1]
256
+ key_layer = torch.cat((past_key, key_layer), dim=0)
257
+ value_layer = torch.cat((past_value, value_layer), dim=0)
258
+
259
+ # seqlen, batch, num_attention_heads, hidden_size_per_attention_head
260
+ seq_len, b, nh, hidden_size = key_layer.shape
261
+
262
+ if use_cache:
263
+ present = (key_layer, value_layer)
264
+ else:
265
+ present = None
266
+
267
+ query_key_layer_scaling_coeff = float(layer_id + 1)
268
+ if scaling_attention_score:
269
+ query_layer = query_layer / (math.sqrt(hidden_size) * query_key_layer_scaling_coeff)
270
+
271
+ # ===================================
272
+ # Raw attention scores. [b, np, s, s]
273
+ # ===================================
274
+
275
+ # [b, np, sq, sk]
276
+ output_size = (query_layer.size(1), query_layer.size(2), query_layer.size(0), key_layer.size(0))
277
+
278
+ # [sq, b, np, hn] -> [sq, b * np, hn]
279
+ query_layer = query_layer.view(output_size[2], output_size[0] * output_size[1], -1)
280
+ # [sk, b, np, hn] -> [sk, b * np, hn]
281
+ key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1)
282
+
283
+ matmul_result = torch.zeros(
284
+ 1, 1, 1,
285
+ dtype=query_layer.dtype,
286
+ device=query_layer.device,
287
+ )
288
+
289
+ matmul_result = torch.baddbmm(
290
+ matmul_result,
291
+ query_layer.transpose(0, 1), # [b * np, sq, hn]
292
+ key_layer.transpose(0, 1).transpose(1, 2), # [b * np, hn, sk]
293
+ beta=0.0,
294
+ alpha=1.0,
295
+ )
296
+
297
+ # change view to [b, np, sq, sk]
298
+ attention_scores = matmul_result.view(*output_size)
299
+
300
+ if self.scale_mask_softmax:
301
+ self.scale_mask_softmax.scale = query_key_layer_scaling_coeff
302
+ attention_probs = self.scale_mask_softmax(attention_scores, attention_mask.contiguous())
303
+ else:
304
+ if not (attention_mask == 0).all():
305
+ # if auto-regressive, skip
306
+ attention_scores.masked_fill_(attention_mask, -10000.0)
307
+ dtype = attention_scores.dtype
308
+ attention_scores = attention_scores.float()
309
+ attention_scores = attention_scores * query_key_layer_scaling_coeff
310
+
311
+ attention_probs = F.softmax(attention_scores, dim=-1)
312
+
313
+ attention_probs = attention_probs.type(dtype)
314
+
315
+ # =========================
316
+ # Context layer. [sq, b, hp]
317
+ # =========================
318
+
319
+ # value_layer -> context layer.
320
+ # [sk, b, np, hn] --> [b, np, sq, hn]
321
+
322
+ # context layer shape: [b, np, sq, hn]
323
+ output_size = (value_layer.size(1), value_layer.size(2), query_layer.size(0), value_layer.size(3))
324
+
325
+ # change view [sk, b * np, hn]
326
+ value_layer = value_layer.view(value_layer.size(0), output_size[0] * output_size[1], -1)
327
+
328
+ # change view [b * np, sq, sk]
329
+ attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1)
330
+
331
+ # matmul: [b * np, sq, hn]
332
+ context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1))
333
+
334
+ # change view [b, np, sq, hn]
335
+ context_layer = context_layer.view(*output_size)
336
+
337
+ # [b, np, sq, hn] --> [sq, b, np, hn]
338
+ context_layer = context_layer.permute(2, 0, 1, 3).contiguous()
339
+
340
+ # [sq, b, np, hn] --> [sq, b, hp]
341
+ new_context_layer_shape = context_layer.size()[:-2] + (hidden_size_per_partition,)
342
+ context_layer = context_layer.view(*new_context_layer_shape)
343
+
344
+ outputs = (context_layer, present, attention_probs)
345
+
346
+ return outputs
347
+
348
+
349
+ def default_init(cls, *args, **kwargs):
350
+ return cls(*args, **kwargs)
351
+
352
+
353
+ class SelfAttention(torch.nn.Module):
354
+ def __init__(self, hidden_size, num_attention_heads,
355
+ layer_id, hidden_size_per_attention_head=None, bias=True,
356
+ params_dtype=torch.float, position_encoding_2d=True, empty_init=True):
357
+ if empty_init:
358
+ init_method = skip_init
359
+ else:
360
+ init_method = default_init
361
+ super(SelfAttention, self).__init__()
362
+
363
+ self.layer_id = layer_id
364
+ self.hidden_size = hidden_size
365
+ self.hidden_size_per_partition = hidden_size
366
+ self.num_attention_heads = num_attention_heads
367
+ self.num_attention_heads_per_partition = num_attention_heads
368
+ self.position_encoding_2d = position_encoding_2d
369
+ self.rotary_emb = RotaryEmbedding(
370
+ self.hidden_size // (self.num_attention_heads * 2)
371
+ if position_encoding_2d
372
+ else self.hidden_size // self.num_attention_heads,
373
+ base=10000,
374
+ precision=torch.half,
375
+ learnable=False,
376
+ )
377
+
378
+ self.scale_mask_softmax = None
379
+
380
+ if hidden_size_per_attention_head is None:
381
+ self.hidden_size_per_attention_head = hidden_size // num_attention_heads
382
+ else:
383
+ self.hidden_size_per_attention_head = hidden_size_per_attention_head
384
+
385
+ self.inner_hidden_size = num_attention_heads * self.hidden_size_per_attention_head
386
+
387
+ # Strided linear layer.
388
+ self.query_key_value = init_method(
389
+ torch.nn.Linear,
390
+ hidden_size,
391
+ 3 * self.inner_hidden_size,
392
+ bias=bias,
393
+ dtype=params_dtype,
394
+ )
395
+
396
+ self.dense = init_method(
397
+ torch.nn.Linear,
398
+ self.inner_hidden_size,
399
+ hidden_size,
400
+ bias=bias,
401
+ dtype=params_dtype,
402
+ )
403
+
404
+ @staticmethod
405
+ def attention_mask_func(attention_scores, attention_mask):
406
+ attention_scores.masked_fill_(attention_mask, -10000.0)
407
+ return attention_scores
408
+
409
+ def split_tensor_along_last_dim(self, tensor, num_partitions,
410
+ contiguous_split_chunks=False):
411
+ """Split a tensor along its last dimension.
412
+ Arguments:
413
+ tensor: input tensor.
414
+ num_partitions: number of partitions to split the tensor
415
+ contiguous_split_chunks: If True, make each chunk contiguous
416
+ in memory.
417
+ """
418
+ # Get the size and dimension.
419
+ last_dim = tensor.dim() - 1
420
+ last_dim_size = tensor.size()[last_dim] // num_partitions
421
+ # Split.
422
+ tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
423
+ # Note: torch.split does not create contiguous tensors by default.
424
+ if contiguous_split_chunks:
425
+ return tuple(chunk.contiguous() for chunk in tensor_list)
426
+
427
+ return tensor_list
428
+
429
+ def forward(
430
+ self,
431
+ hidden_states: torch.Tensor,
432
+ position_ids,
433
+ attention_mask: torch.Tensor,
434
+ layer_id,
435
+ layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
436
+ use_cache: bool = False,
437
+ output_attentions: bool = False,
438
+ ):
439
+ """
440
+ hidden_states: [seq_len, batch, hidden_size]
441
+ attention_mask: [(1, 1), seq_len, seq_len]
442
+ """
443
+
444
+ # [seq_len, batch, 3 * hidden_size]
445
+ mixed_raw_layer = self.query_key_value(hidden_states)
446
+
447
+ # [seq_len, batch, 3 * hidden_size] --> [seq_len, batch, num_attention_heads, 3 * hidden_size_per_attention_head]
448
+ new_tensor_shape = mixed_raw_layer.size()[:-1] + (
449
+ self.num_attention_heads_per_partition,
450
+ 3 * self.hidden_size_per_attention_head,
451
+ )
452
+ mixed_raw_layer = mixed_raw_layer.view(*new_tensor_shape)
453
+
454
+ # [seq_len, batch, num_attention_heads, hidden_size_per_attention_head]
455
+ (query_layer, key_layer, value_layer) = self.split_tensor_along_last_dim(mixed_raw_layer, 3)
456
+
457
+ if self.position_encoding_2d:
458
+ q1, q2 = query_layer.chunk(2, dim=(query_layer.ndim - 1))
459
+ k1, k2 = key_layer.chunk(2, dim=(key_layer.ndim - 1))
460
+ cos, sin = self.rotary_emb(q1, seq_len=position_ids.max() + 1)
461
+ position_ids, block_position_ids = position_ids[:, 0, :].transpose(0, 1).contiguous(), \
462
+ position_ids[:, 1, :].transpose(0, 1).contiguous()
463
+ q1, k1 = apply_rotary_pos_emb_index(q1, k1, cos, sin, position_ids)
464
+ q2, k2 = apply_rotary_pos_emb_index(q2, k2, cos, sin, block_position_ids)
465
+ query_layer = torch.concat([q1, q2], dim=(q1.ndim - 1))
466
+ key_layer = torch.concat([k1, k2], dim=(k1.ndim - 1))
467
+ else:
468
+ position_ids = position_ids.transpose(0, 1)
469
+ cos, sin = self.rotary_emb(value_layer, seq_len=position_ids.max() + 1)
470
+ # [seq_len, batch, num_attention_heads, hidden_size_per_attention_head]
471
+ query_layer, key_layer = apply_rotary_pos_emb_index(query_layer, key_layer, cos, sin, position_ids)
472
+
473
+ # [seq_len, batch, hidden_size]
474
+ context_layer, present, attention_probs = attention_fn(
475
+ self=self,
476
+ query_layer=query_layer,
477
+ key_layer=key_layer,
478
+ value_layer=value_layer,
479
+ attention_mask=attention_mask,
480
+ hidden_size_per_partition=self.hidden_size_per_partition,
481
+ layer_id=layer_id,
482
+ layer_past=layer_past,
483
+ use_cache=use_cache
484
+ )
485
+
486
+ output = self.dense(context_layer)
487
+
488
+ outputs = (output, present)
489
+
490
+ if output_attentions:
491
+ outputs += (attention_probs,)
492
+
493
+ return outputs # output, present, attention_probs
494
+
495
+
496
+ class GEGLU(torch.nn.Module):
497
+ def __init__(self):
498
+ super().__init__()
499
+ self.activation_fn = F.gelu
500
+
501
+ def forward(self, x):
502
+ # dim=-1 breaks in jit for pt<1.10
503
+ x1, x2 = x.chunk(2, dim=(x.ndim - 1))
504
+ return x1 * self.activation_fn(x2)
505
+
506
+
507
+ class GLU(torch.nn.Module):
508
+ def __init__(self, hidden_size, inner_hidden_size=None,
509
+ layer_id=None, bias=True, activation_func=gelu, params_dtype=torch.float, empty_init=True):
510
+ super(GLU, self).__init__()
511
+ if empty_init:
512
+ init_method = skip_init
513
+ else:
514
+ init_method = default_init
515
+ self.layer_id = layer_id
516
+ self.activation_func = activation_func
517
+
518
+ # Project to 4h.
519
+ self.hidden_size = hidden_size
520
+ if inner_hidden_size is None:
521
+ inner_hidden_size = 4 * hidden_size
522
+ self.inner_hidden_size = inner_hidden_size
523
+ self.dense_h_to_4h = init_method(
524
+ torch.nn.Linear,
525
+ self.hidden_size,
526
+ self.inner_hidden_size,
527
+ bias=bias,
528
+ dtype=params_dtype,
529
+ )
530
+ # Project back to h.
531
+ self.dense_4h_to_h = init_method(
532
+ torch.nn.Linear,
533
+ self.inner_hidden_size,
534
+ self.hidden_size,
535
+ bias=bias,
536
+ dtype=params_dtype,
537
+ )
538
+
539
+ def forward(self, hidden_states):
540
+ """
541
+ hidden_states: [seq_len, batch, hidden_size]
542
+ """
543
+
544
+ # [seq_len, batch, inner_hidden_size]
545
+ intermediate_parallel = self.dense_h_to_4h(hidden_states)
546
+
547
+ intermediate_parallel = self.activation_func(intermediate_parallel)
548
+
549
+ output = self.dense_4h_to_h(intermediate_parallel)
550
+
551
+ return output
552
+
553
+
554
+ class GLMBlock(torch.nn.Module):
555
+ def __init__(
556
+ self,
557
+ hidden_size,
558
+ num_attention_heads,
559
+ layernorm_epsilon,
560
+ layer_id,
561
+ inner_hidden_size=None,
562
+ hidden_size_per_attention_head=None,
563
+ layernorm=LayerNorm,
564
+ use_bias=True,
565
+ params_dtype=torch.float,
566
+ num_layers=28,
567
+ position_encoding_2d=True,
568
+ empty_init=True
569
+ ):
570
+ super(GLMBlock, self).__init__()
571
+ # Set output layer initialization if not provided.
572
+
573
+ self.layer_id = layer_id
574
+
575
+ # Layernorm on the input data.
576
+ self.input_layernorm = layernorm(hidden_size, eps=layernorm_epsilon)
577
+
578
+ self.position_encoding_2d = position_encoding_2d
579
+
580
+ # Self attention.
581
+ self.attention = SelfAttention(
582
+ hidden_size,
583
+ num_attention_heads,
584
+ layer_id,
585
+ hidden_size_per_attention_head=hidden_size_per_attention_head,
586
+ bias=use_bias,
587
+ params_dtype=params_dtype,
588
+ position_encoding_2d=self.position_encoding_2d,
589
+ empty_init=empty_init
590
+ )
591
+
592
+ # Layernorm on the attention output.
593
+ self.post_attention_layernorm = layernorm(hidden_size, eps=layernorm_epsilon)
594
+
595
+ self.num_layers = num_layers
596
+
597
+ # GLU
598
+ self.mlp = GLU(
599
+ hidden_size,
600
+ inner_hidden_size=inner_hidden_size,
601
+ bias=use_bias,
602
+ layer_id=layer_id,
603
+ params_dtype=params_dtype,
604
+ empty_init=empty_init
605
+ )
606
+
607
+ def forward(
608
+ self,
609
+ hidden_states: torch.Tensor,
610
+ position_ids,
611
+ attention_mask: torch.Tensor,
612
+ layer_id,
613
+ layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
614
+ use_cache: bool = False,
615
+ output_attentions: bool = False,
616
+ ):
617
+ """
618
+ hidden_states: [seq_len, batch, hidden_size]
619
+ attention_mask: [(1, 1), seq_len, seq_len]
620
+ """
621
+
622
+ # Layer norm at the beginning of the transformer layer.
623
+ # [seq_len, batch, hidden_size]
624
+ attention_input = self.input_layernorm(hidden_states)
625
+
626
+ # Self attention.
627
+ attention_outputs = self.attention(
628
+ attention_input,
629
+ position_ids,
630
+ attention_mask=attention_mask,
631
+ layer_id=layer_id,
632
+ layer_past=layer_past,
633
+ use_cache=use_cache,
634
+ output_attentions=output_attentions
635
+ )
636
+
637
+ attention_output = attention_outputs[0]
638
+
639
+ outputs = attention_outputs[1:]
640
+
641
+ # Residual connection.
642
+ alpha = (2 * self.num_layers) ** 0.5
643
+ hidden_states = attention_input * alpha + attention_output
644
+
645
+ mlp_input = self.post_attention_layernorm(hidden_states)
646
+
647
+ # MLP.
648
+ mlp_output = self.mlp(mlp_input)
649
+
650
+ # Second residual connection.
651
+ output = mlp_input * alpha + mlp_output
652
+
653
+ if use_cache:
654
+ outputs = (output,) + outputs
655
+ else:
656
+ outputs = (output,) + outputs[1:]
657
+
658
+ return outputs # hidden_states, present, attentions
659
+
660
+
661
+ class ChatGLMPreTrainedModel(PreTrainedModel):
662
+ """
663
+ An abstract class to handle weights initialization and
664
+ a simple interface for downloading and loading pretrained models.
665
+ """
666
+
667
+ is_parallelizable = False
668
+ supports_gradient_checkpointing = True
669
+ config_class = ChatGLMConfig
670
+ base_model_prefix = "transformer"
671
+ _no_split_modules = ["GLMBlock"]
672
+
673
+ def __init__(self, *inputs, **kwargs):
674
+ super().__init__(*inputs, **kwargs)
675
+
676
+ def _init_weights(self, module: nn.Module):
677
+ """Initialize the weights."""
678
+ return
679
+
680
+ def get_masks(self, input_ids, device):
681
+ batch_size, seq_length = input_ids.shape
682
+ context_lengths = [seq.tolist().index(self.config.bos_token_id) for seq in input_ids]
683
+ attention_mask = torch.ones((batch_size, seq_length, seq_length), device=device)
684
+ attention_mask.tril_()
685
+ for i, context_length in enumerate(context_lengths):
686
+ attention_mask[i, :, :context_length] = 1
687
+ attention_mask.unsqueeze_(1)
688
+ attention_mask = (attention_mask < 0.5).bool()
689
+
690
+ return attention_mask
691
+
692
+ def get_position_ids(self, input_ids, mask_positions, device, use_gmasks=None):
693
+ batch_size, seq_length = input_ids.shape
694
+ if use_gmasks is None:
695
+ use_gmasks = [False] * batch_size
696
+ context_lengths = [seq.tolist().index(self.config.bos_token_id) for seq in input_ids]
697
+ if self.position_encoding_2d:
698
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1)
699
+ for i, context_length in enumerate(context_lengths):
700
+ position_ids[i, context_length:] = mask_positions[i]
701
+ block_position_ids = [torch.cat((
702
+ torch.zeros(context_length, dtype=torch.long, device=device),
703
+ torch.arange(seq_length - context_length, dtype=torch.long, device=device) + 1
704
+ )) for context_length in context_lengths]
705
+ block_position_ids = torch.stack(block_position_ids, dim=0)
706
+ position_ids = torch.stack((position_ids, block_position_ids), dim=1)
707
+ else:
708
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1)
709
+ for i, context_length in enumerate(context_lengths):
710
+ if not use_gmasks[i]:
711
+ position_ids[context_length:] = mask_positions[i]
712
+
713
+ return position_ids
714
+
715
+ def _set_gradient_checkpointing(self, module, value=False):
716
+ if isinstance(module, ChatGLMModel):
717
+ module.gradient_checkpointing = value
718
+
719
+
720
+ CHATGLM_6B_START_DOCSTRING = r"""
721
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class.
722
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
723
+ usage and behavior.
724
+
725
+ Parameters:
726
+ config ([`~ChatGLM6BConfig`]): Model configuration class with all the parameters of the model.
727
+ Initializing with a config file does not load the weights associated with the model, only the configuration.
728
+ Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
729
+ """
730
+
731
+ CHATGLM_6B_INPUTS_DOCSTRING = r"""
732
+ Args:
733
+ input_ids (`torch.LongTensor` of shape `({0})`):
734
+ Indices of input sequence tokens in the vocabulary.
735
+
736
+ Indices can be obtained using [`ChatGLM6BTokenizer`].
737
+ See [`PreTrainedTokenizer.encode`] and
738
+ [`PreTrainedTokenizer.__call__`] for details.
739
+
740
+ [What are input IDs?](../glossary#input-ids)
741
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
742
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
743
+
744
+ - 1 for tokens that are **not masked**,
745
+ - 0 for tokens that are **masked**.
746
+
747
+ [What are attention masks?](../glossary#attention-mask)
748
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
749
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`:
750
+
751
+ - 0 corresponds to a *sentence A* token,
752
+ - 1 corresponds to a *sentence B* token.
753
+
754
+ [What are token type IDs?](../glossary#token-type-ids)
755
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
756
+ Indices of positions of each input sequence tokens in the position embeddings.
757
+ Selected in the range `[0, config.max_position_embeddings - 1]`.
758
+
759
+ [What are position IDs?](../glossary#position-ids)
760
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
761
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
762
+
763
+ - 1 indicates the head is **not masked**,
764
+ - 0 indicates the head is **masked**.
765
+
766
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
767
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
768
+ This is useful if you want more control over how to convert *input_ids* indices into associated vectors
769
+ than the model's internal embedding lookup matrix.
770
+ output_attentions (`bool`, *optional*):
771
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
772
+ tensors for more detail.
773
+ output_hidden_states (`bool`, *optional*):
774
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
775
+ more detail.
776
+ return_dict (`bool`, *optional*):
777
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
778
+ """
779
+
780
+
781
+ @add_start_docstrings(
782
+ "The bare ChatGLM-6B Model transformer outputting raw hidden-states without any specific head on top.",
783
+ CHATGLM_6B_START_DOCSTRING,
784
+ )
785
+ class ChatGLMModel(ChatGLMPreTrainedModel):
786
+ """
787
+
788
+ The model can behave as an encoder (with only self-attention) as well
789
+ as a decoder, in which case a layer of cross-attention is added between
790
+ the self-attention layers, following the architecture described in [Attention is
791
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani,
792
+ Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
793
+
794
+ To behave as a decoder the model needs to be initialized with the
795
+ `is_decoder` argument of the configuration set to `True`.
796
+ To be used in a Seq2Seq model, the model needs to be initialized with both `is_decoder`
797
+ argument and `add_cross_attention` set to `True`; an
798
+ `encoder_hidden_states` is then expected as an input to the forward pass.
799
+ """
800
+
801
+ def __init__(self, config: ChatGLMConfig, empty_init=True):
802
+ super().__init__(config)
803
+ if empty_init:
804
+ init_method = skip_init
805
+ else:
806
+ init_method = default_init
807
+ # recording parameters
808
+ self.max_sequence_length = config.max_sequence_length
809
+ self.hidden_size = config.hidden_size
810
+ self.params_dtype = torch.half
811
+ self.num_attention_heads = config.num_attention_heads
812
+ self.vocab_size = config.vocab_size
813
+ self.num_layers = config.num_layers
814
+ self.layernorm_epsilon = config.layernorm_epsilon
815
+ self.inner_hidden_size = config.inner_hidden_size
816
+ self.hidden_size_per_attention_head = self.hidden_size // self.num_attention_heads
817
+ self.position_encoding_2d = config.position_encoding_2d
818
+ self.pre_seq_len = config.pre_seq_len
819
+ self.prefix_projection = config.prefix_projection
820
+
821
+ self.word_embeddings = init_method(
822
+ torch.nn.Embedding,
823
+ num_embeddings=self.vocab_size, embedding_dim=self.hidden_size,
824
+ dtype=self.params_dtype
825
+ )
826
+ self.gradient_checkpointing = False
827
+
828
+ def get_layer(layer_id):
829
+ return GLMBlock(
830
+ self.hidden_size,
831
+ self.num_attention_heads,
832
+ self.layernorm_epsilon,
833
+ layer_id,
834
+ inner_hidden_size=self.inner_hidden_size,
835
+ hidden_size_per_attention_head=self.hidden_size_per_attention_head,
836
+ layernorm=LayerNorm,
837
+ use_bias=True,
838
+ params_dtype=self.params_dtype,
839
+ position_encoding_2d=self.position_encoding_2d,
840
+ empty_init=empty_init
841
+ )
842
+
843
+ self.layers = torch.nn.ModuleList(
844
+ [get_layer(layer_id) for layer_id in range(self.num_layers)]
845
+ )
846
+
847
+ # Final layer norm before output.
848
+ self.final_layernorm = LayerNorm(self.hidden_size, eps=self.layernorm_epsilon)
849
+
850
+ if self.pre_seq_len is not None:
851
+ for param in self.parameters():
852
+ param.requires_grad = False
853
+ self.prefix_tokens = torch.arange(self.pre_seq_len).long()
854
+ self.prefix_encoder = PrefixEncoder(config)
855
+ self.dropout = torch.nn.Dropout(0.1)
856
+
857
+ # total_params = sum(p.numel() for p in self.parameters())
858
+ # trainable_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
859
+ # print("Using p-tuning v2: # trainable_params = {} / {}".format(trainable_params, total_params))
860
+
861
+ def get_input_embeddings(self):
862
+ return self.word_embeddings
863
+
864
+ def set_input_embeddings(self, new_embeddings: torch.Tensor):
865
+ self.word_embeddings = new_embeddings
866
+
867
+ def get_prompt(self, batch_size, device, dtype=torch.half):
868
+ prefix_tokens = self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(device)
869
+ past_key_values = self.prefix_encoder(prefix_tokens).type(dtype)
870
+ past_key_values = past_key_values.view(
871
+ batch_size,
872
+ self.pre_seq_len,
873
+ self.num_layers * 2,
874
+ self.num_attention_heads,
875
+ self.hidden_size // self.num_attention_heads
876
+ )
877
+ # seq_len, b, nh, hidden_size
878
+ past_key_values = self.dropout(past_key_values)
879
+ past_key_values = past_key_values.permute([2, 1, 0, 3, 4]).split(2)
880
+ # past_key_values = [(v[0], v[1]) for v in past_key_values]
881
+ return past_key_values
882
+
883
+ @add_start_docstrings_to_model_forward(CHATGLM_6B_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
884
+ @add_code_sample_docstrings(
885
+ checkpoint=_CHECKPOINT_FOR_DOC,
886
+ output_type=BaseModelOutputWithPastAndCrossAttentions,
887
+ config_class=_CONFIG_FOR_DOC,
888
+ )
889
+ def forward(
890
+ self,
891
+ input_ids: Optional[torch.LongTensor] = None,
892
+ position_ids: Optional[torch.LongTensor] = None,
893
+ attention_mask: Optional[torch.Tensor] = None,
894
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
895
+ inputs_embeds: Optional[torch.LongTensor] = None,
896
+ use_cache: Optional[bool] = None,
897
+ output_attentions: Optional[bool] = None,
898
+ output_hidden_states: Optional[bool] = None,
899
+ return_dict: Optional[bool] = None,
900
+ ) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPast]:
901
+
902
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
903
+ output_hidden_states = (
904
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
905
+ )
906
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
907
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
908
+
909
+ if self.gradient_checkpointing and self.training:
910
+ if use_cache:
911
+ logger.warning_once(
912
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
913
+ )
914
+ use_cache = False
915
+
916
+ if input_ids is not None and inputs_embeds is not None:
917
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
918
+ elif input_ids is not None:
919
+ batch_size, seq_length = input_ids.shape[:2]
920
+ elif inputs_embeds is not None:
921
+ batch_size, seq_length, _ = inputs_embeds.shape[:2]
922
+ else:
923
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
924
+
925
+ if inputs_embeds is None:
926
+ inputs_embeds = self.word_embeddings(input_ids)
927
+
928
+ if past_key_values is None:
929
+ if self.pre_seq_len is not None:
930
+ past_key_values = self.get_prompt(batch_size=input_ids.shape[0], device=input_ids.device,
931
+ dtype=inputs_embeds.dtype)
932
+ else:
933
+ past_key_values = tuple([None] * len(self.layers))
934
+
935
+ if attention_mask is None:
936
+ attention_mask = self.get_masks(
937
+ input_ids,
938
+ device=input_ids.device
939
+ )
940
+
941
+
942
+ if position_ids is None:
943
+ MASK, gMASK = self.config.mask_token_id, self.config.gmask_token_id
944
+ seqs = input_ids.tolist()
945
+
946
+ mask_positions, use_gmasks = [], []
947
+ for seq in seqs:
948
+ mask_token = gMASK if gMASK in seq else MASK
949
+ use_gmask = mask_token == gMASK
950
+ mask_positions.append(seq.index(mask_token))
951
+ use_gmasks.append(use_gmask)
952
+
953
+ position_ids = self.get_position_ids(
954
+ input_ids,
955
+ mask_positions=mask_positions,
956
+ device=input_ids.device,
957
+ use_gmasks=use_gmasks
958
+ )
959
+
960
+ if self.pre_seq_len is not None and attention_mask is not None:
961
+ prefix_attention_mask = torch.ones(batch_size, 1, input_ids.size(-1), self.pre_seq_len).to(
962
+ attention_mask.device)
963
+ prefix_attention_mask = (prefix_attention_mask < 0.5).bool()
964
+ attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=3)
965
+
966
+ # [seq_len, batch, hidden_size]
967
+ hidden_states = inputs_embeds.transpose(0, 1)
968
+
969
+ presents = () if use_cache else None
970
+ all_self_attentions = () if output_attentions else None
971
+ all_hidden_states = () if output_hidden_states else None
972
+
973
+ if attention_mask is None:
974
+ attention_mask = torch.zeros(1, 1, device=input_ids.device).bool()
975
+
976
+ else:
977
+ attention_mask = attention_mask.to(input_ids.device)
978
+
979
+ for i, layer in enumerate(self.layers):
980
+
981
+ if output_hidden_states:
982
+ all_hidden_states = all_hidden_states + (hidden_states,)
983
+ layer_past = past_key_values[i]
984
+
985
+ if self.gradient_checkpointing and self.training:
986
+ layer_ret = torch.utils.checkpoint.checkpoint(
987
+ layer,
988
+ hidden_states,
989
+ position_ids,
990
+ attention_mask,
991
+ torch.tensor(i),
992
+ layer_past,
993
+ use_cache,
994
+ output_attentions
995
+ )
996
+ else:
997
+ layer_ret = layer(
998
+ hidden_states,
999
+ position_ids=position_ids,
1000
+ attention_mask=attention_mask,
1001
+ layer_id=torch.tensor(i),
1002
+ layer_past=layer_past,
1003
+ use_cache=use_cache,
1004
+ output_attentions=output_attentions
1005
+ )
1006
+
1007
+ hidden_states = layer_ret[0]
1008
+
1009
+ if use_cache:
1010
+ presents = presents + (layer_ret[1],)
1011
+
1012
+ if output_attentions:
1013
+ all_self_attentions = all_self_attentions + (layer_ret[2 if use_cache else 1],)
1014
+
1015
+ # Final layer norm.
1016
+ hidden_states = self.final_layernorm(hidden_states)
1017
+
1018
+ if output_hidden_states:
1019
+ all_hidden_states = all_hidden_states + (hidden_states,)
1020
+
1021
+ if not return_dict:
1022
+ return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
1023
+
1024
+ return BaseModelOutputWithPast(
1025
+ last_hidden_state=hidden_states,
1026
+ past_key_values=presents,
1027
+ hidden_states=all_hidden_states,
1028
+ attentions=all_self_attentions,
1029
+ )
1030
+
1031
+
1032
+ class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):
1033
+ def __init__(self, config: ChatGLMConfig, empty_init=True):
1034
+ super().__init__(config)
1035
+ if empty_init:
1036
+ init_method = skip_init
1037
+ else:
1038
+ init_method = default_init
1039
+
1040
+ # self.hidden_size = config.hidden_size
1041
+ # self.params_dtype = torch.half
1042
+ # self.vocab_size = config.vocab_size
1043
+ self.max_sequence_length = config.max_sequence_length
1044
+
1045
+ self.position_encoding_2d = config.position_encoding_2d
1046
+
1047
+ self.transformer = ChatGLMModel(config, empty_init=empty_init)
1048
+
1049
+ self.lm_head = init_method(
1050
+ nn.Linear,
1051
+ config.hidden_size,
1052
+ config.vocab_size,
1053
+ bias=False,
1054
+ dtype=torch.half
1055
+ )
1056
+
1057
+ self.config = config
1058
+
1059
+ self.quantized = False
1060
+
1061
+ if self.config.quantization_bit:
1062
+ self.quantize(self.config.quantization_bit, empty_init=True)
1063
+
1064
+ def get_output_embeddings(self):
1065
+ return self.lm_head
1066
+
1067
+ def set_output_embeddings(self, new_embeddings):
1068
+ self.lm_head = new_embeddings
1069
+
1070
+ def _update_model_kwargs_for_generation(
1071
+ self,
1072
+ outputs: ModelOutput,
1073
+ model_kwargs: Dict[str, Any],
1074
+ is_encoder_decoder: bool = False,
1075
+ standardize_cache_format: bool = False,
1076
+ ) -> Dict[str, Any]:
1077
+ # update past_key_values
1078
+ model_kwargs["past_key_values"] = self._extract_past_from_model_output(
1079
+ outputs, standardize_cache_format=standardize_cache_format
1080
+ )
1081
+
1082
+ # update attention mask
1083
+ if "attention_mask" in model_kwargs:
1084
+ attention_mask = model_kwargs["attention_mask"]
1085
+ if attention_mask is not None and attention_mask.dtype == torch.bool:
1086
+ attention_mask = torch.cat(
1087
+ [attention_mask, attention_mask.new_ones((*attention_mask.shape[:3], 1))], dim=3)
1088
+ new_attention_mask = attention_mask[:, :, -1:].clone()
1089
+ new_attention_mask[..., -1] = False
1090
+ model_kwargs["attention_mask"] = torch.cat(
1091
+ [attention_mask, new_attention_mask], dim=2
1092
+ )
1093
+
1094
+ # update position ids
1095
+ if "position_ids" in model_kwargs:
1096
+ position_ids = model_kwargs["position_ids"]
1097
+ new_position_id = position_ids[..., -1:].clone()
1098
+ new_position_id[:, 1, :] += 1
1099
+ model_kwargs["position_ids"] = torch.cat(
1100
+ [position_ids, new_position_id], dim=-1
1101
+ )
1102
+
1103
+ return model_kwargs
1104
+
1105
+ def prepare_inputs_for_generation(
1106
+ self,
1107
+ input_ids: torch.LongTensor,
1108
+ past: Optional[torch.Tensor] = None,
1109
+ past_key_values: Optional[torch.Tensor] = None,
1110
+ attention_mask: Optional[torch.Tensor] = None,
1111
+ position_ids: Optional[torch.Tensor] = None,
1112
+ **kwargs
1113
+ ) -> dict:
1114
+ batch_size, seq_length = input_ids.shape
1115
+ MASK, gMASK = self.config.mask_token_id, self.config.gmask_token_id
1116
+ seqs = input_ids.tolist()
1117
+ mask_positions, use_gmasks = [], []
1118
+ for seq in seqs:
1119
+ mask_token = gMASK if gMASK in seq else MASK
1120
+ use_gmask = mask_token == gMASK
1121
+ mask_positions.append(seq.index(mask_token))
1122
+ use_gmasks.append(use_gmask)
1123
+
1124
+ # only last token for input_ids if past is not None
1125
+ if past is not None or past_key_values is not None:
1126
+ last_token = input_ids[:, -1].unsqueeze(-1)
1127
+ if attention_mask is not None and attention_mask.dtype == torch.bool:
1128
+ attention_mask = attention_mask[:, :, -1:]
1129
+ else:
1130
+ attention_mask = None
1131
+ if position_ids is not None:
1132
+ position_ids = position_ids[..., -1:]
1133
+ else:
1134
+ context_lengths = [seq.index(self.config.bos_token_id) for seq in seqs]
1135
+ if self.position_encoding_2d:
1136
+ position_ids = torch.tensor(
1137
+ [[mask_position, seq_length - context_length] for mask_position, context_length in
1138
+ zip(mask_positions, context_lengths)], dtype=torch.long, device=input_ids.device).unsqueeze(-1)
1139
+ else:
1140
+ position_ids = torch.tensor([mask_position for mask_position in mask_positions], dtype=torch.long,
1141
+ device=input_ids.device).unsqueeze(-1)
1142
+
1143
+ if past is None:
1144
+ past = past_key_values
1145
+ return {
1146
+ "input_ids": last_token,
1147
+ "past_key_values": past,
1148
+ "position_ids": position_ids,
1149
+ "attention_mask": attention_mask
1150
+ }
1151
+ else:
1152
+ if attention_mask is not None and attention_mask.dtype != torch.bool:
1153
+ logger.warning_once(f"The dtype of attention mask ({attention_mask.dtype}) is not bool")
1154
+ attention_mask = None
1155
+ if attention_mask is None:
1156
+ attention_mask = self.get_masks(
1157
+ input_ids,
1158
+ device=input_ids.device
1159
+ )
1160
+ if position_ids is None:
1161
+ position_ids = self.get_position_ids(
1162
+ input_ids,
1163
+ device=input_ids.device,
1164
+ mask_positions=mask_positions,
1165
+ use_gmasks=use_gmasks
1166
+ )
1167
+
1168
+ return {
1169
+ "input_ids": input_ids,
1170
+ "past_key_values": past,
1171
+ "position_ids": position_ids,
1172
+ "attention_mask": attention_mask
1173
+ }
1174
+
1175
+ def forward(
1176
+ self,
1177
+ input_ids: Optional[torch.Tensor] = None,
1178
+ position_ids: Optional[torch.Tensor] = None,
1179
+ attention_mask: Optional[torch.Tensor] = None,
1180
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
1181
+ inputs_embeds: Optional[torch.Tensor] = None,
1182
+ labels: Optional[torch.Tensor] = None,
1183
+ use_cache: Optional[bool] = None,
1184
+ output_attentions: Optional[bool] = None,
1185
+ output_hidden_states: Optional[bool] = None,
1186
+ return_dict: Optional[bool] = None,
1187
+ ):
1188
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1189
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1190
+
1191
+ transformer_outputs = self.transformer(
1192
+ input_ids=input_ids,
1193
+ position_ids=position_ids,
1194
+ attention_mask=attention_mask,
1195
+ past_key_values=past_key_values,
1196
+ inputs_embeds=inputs_embeds,
1197
+ use_cache=use_cache,
1198
+ output_attentions=output_attentions,
1199
+ output_hidden_states=output_hidden_states,
1200
+ return_dict=return_dict,
1201
+ )
1202
+
1203
+ hidden_states = transformer_outputs[0]
1204
+
1205
+ lm_logits = self.lm_head(hidden_states).permute(1, 0, 2).contiguous()
1206
+
1207
+ loss = None
1208
+ if labels is not None:
1209
+ lm_logits = lm_logits.to(torch.float32)
1210
+
1211
+ # Shift so that tokens < n predict n
1212
+ shift_logits = lm_logits[..., :-1, :].contiguous()
1213
+ shift_labels = labels[..., 1:].contiguous()
1214
+ # Flatten the tokens
1215
+ loss_fct = CrossEntropyLoss(ignore_index=-100)
1216
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
1217
+
1218
+ lm_logits = lm_logits.to(hidden_states.dtype)
1219
+ loss = loss.to(hidden_states.dtype)
1220
+
1221
+ if not return_dict:
1222
+ output = (lm_logits,) + transformer_outputs[1:]
1223
+ return ((loss,) + output) if loss is not None else output
1224
+
1225
+ return CausalLMOutputWithPast(
1226
+ loss=loss,
1227
+ logits=lm_logits,
1228
+ past_key_values=transformer_outputs.past_key_values,
1229
+ hidden_states=transformer_outputs.hidden_states,
1230
+ attentions=transformer_outputs.attentions,
1231
+ )
1232
+
1233
+ @staticmethod
1234
+ def _reorder_cache(
1235
+ past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor
1236
+ ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]:
1237
+ """
1238
+ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
1239
+ [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
1240
+ beam_idx at every generation step.
1241
+
1242
+ Output shares the same memory storage as `past`.
1243
+ """
1244
+ return tuple(
1245
+ (
1246
+ layer_past[0].index_select(1, beam_idx.to(layer_past[0].device)),
1247
+ layer_past[1].index_select(1, beam_idx.to(layer_past[1].device)),
1248
+ )
1249
+ for layer_past in past
1250
+ )
1251
+
1252
+ def process_response(self, response):
1253
+ response = response.strip()
1254
+ response = response.replace("[[训练时间]]", "2023年")
1255
+ punkts = [
1256
+ [",", ","],
1257
+ ["!", "!"],
1258
+ [":", ":"],
1259
+ [";", ";"],
1260
+ ["\?", "?"],
1261
+ ]
1262
+ for item in punkts:
1263
+ response = re.sub(r"([\u4e00-\u9fff])%s" % item[0], r"\1%s" % item[1], response)
1264
+ response = re.sub(r"%s([\u4e00-\u9fff])" % item[0], r"%s\1" % item[1], response)
1265
+ return response
1266
+
1267
+ @torch.no_grad()
1268
+ def chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, max_length: int = 2048, num_beams=1,
1269
+ do_sample=True, top_p=0.7, temperature=0.95, logits_processor=None, **kwargs):
1270
+ if history is None:
1271
+ history = []
1272
+ if logits_processor is None:
1273
+ logits_processor = LogitsProcessorList()
1274
+ logits_processor.append(InvalidScoreLogitsProcessor())
1275
+ gen_kwargs = {"max_length": max_length, "num_beams": num_beams, "do_sample": do_sample, "top_p": top_p,
1276
+ "temperature": temperature, "logits_processor": logits_processor, **kwargs}
1277
+ if not history:
1278
+ prompt = query
1279
+ else:
1280
+ prompt = ""
1281
+ for i, (old_query, response) in enumerate(history):
1282
+ prompt += "[Round {}]\n问:{}\n答:{}\n".format(i, old_query, response)
1283
+ prompt += "[Round {}]\n问:{}\n答:".format(len(history), query)
1284
+ inputs = tokenizer([prompt], return_tensors="pt")
1285
+ inputs = inputs.to(self.device)
1286
+ outputs = self.generate(**inputs, **gen_kwargs)
1287
+ outputs = outputs.tolist()[0][len(inputs["input_ids"][0]):]
1288
+ response = tokenizer.decode(outputs)
1289
+ response = self.process_response(response)
1290
+ history = history + [(query, response)]
1291
+ return response, history
1292
+
1293
+ @torch.no_grad()
1294
+ def stream_chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, max_length: int = 2048,
1295
+ do_sample=True, top_p=0.7, temperature=0.95, logits_processor=None, **kwargs):
1296
+ if history is None:
1297
+ history = []
1298
+ if logits_processor is None:
1299
+ logits_processor = LogitsProcessorList()
1300
+ logits_processor.append(InvalidScoreLogitsProcessor())
1301
+ gen_kwargs = {"max_length": max_length, "do_sample": do_sample, "top_p": top_p,
1302
+ "temperature": temperature, "logits_processor": logits_processor, **kwargs}
1303
+ if not history:
1304
+ prompt = query
1305
+ else:
1306
+ prompt = ""
1307
+ for i, (old_query, response) in enumerate(history):
1308
+ prompt += "[Round {}]\n问:{}\n答:{}\n".format(i, old_query, response)
1309
+ prompt += "[Round {}]\n问:{}\n答:".format(len(history), query)
1310
+ inputs = tokenizer([prompt], return_tensors="pt")
1311
+ inputs = inputs.to(self.device)
1312
+ for outputs in self.stream_generate(**inputs, **gen_kwargs):
1313
+ outputs = outputs.tolist()[0][len(inputs["input_ids"][0]):]
1314
+ response = tokenizer.decode(outputs)
1315
+ response = self.process_response(response)
1316
+ new_history = history + [(query, response)]
1317
+ yield response, new_history
1318
+
1319
+ @torch.no_grad()
1320
+ def stream_generate(
1321
+ self,
1322
+ input_ids,
1323
+ generation_config: Optional[GenerationConfig] = None,
1324
+ logits_processor: Optional[LogitsProcessorList] = None,
1325
+ stopping_criteria: Optional[StoppingCriteriaList] = None,
1326
+ prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
1327
+ **kwargs,
1328
+ ):
1329
+ batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1]
1330
+
1331
+ if generation_config is None:
1332
+ generation_config = self.generation_config
1333
+ generation_config = copy.deepcopy(generation_config)
1334
+ model_kwargs = generation_config.update(**kwargs)
1335
+ bos_token_id, eos_token_id = generation_config.bos_token_id, generation_config.eos_token_id
1336
+
1337
+ if isinstance(eos_token_id, int):
1338
+ eos_token_id = [eos_token_id]
1339
+
1340
+ has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
1341
+ if has_default_max_length and generation_config.max_new_tokens is None:
1342
+ warnings.warn(
1343
+ f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. "
1344
+ "This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we"
1345
+ " recommend using `max_new_tokens` to control the maximum length of the generation.",
1346
+ UserWarning,
1347
+ )
1348
+ elif generation_config.max_new_tokens is not None:
1349
+ generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length
1350
+ if not has_default_max_length:
1351
+ logger.warning(
1352
+ f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(="
1353
+ f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
1354
+ "Please refer to the documentation for more information. "
1355
+ "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)",
1356
+ UserWarning,
1357
+ )
1358
+
1359
+ if input_ids_seq_length >= generation_config.max_length:
1360
+ input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
1361
+ logger.warning(
1362
+ f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to"
1363
+ f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
1364
+ " increasing `max_new_tokens`."
1365
+ )
1366
+
1367
+ # 2. Set generation parameters if not already defined
1368
+ logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
1369
+ stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
1370
+
1371
+ logits_processor = self._get_logits_processor(
1372
+ generation_config=generation_config,
1373
+ input_ids_seq_length=input_ids_seq_length,
1374
+ encoder_input_ids=input_ids,
1375
+ prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
1376
+ logits_processor=logits_processor,
1377
+ )
1378
+
1379
+ stopping_criteria = self._get_stopping_criteria(
1380
+ generation_config=generation_config, stopping_criteria=stopping_criteria
1381
+ )
1382
+ logits_warper = self._get_logits_warper(generation_config)
1383
+
1384
+ unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)
1385
+ scores = None
1386
+ while True:
1387
+ model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
1388
+ # forward pass to get next token
1389
+ outputs = self(
1390
+ **model_inputs,
1391
+ return_dict=True,
1392
+ output_attentions=False,
1393
+ output_hidden_states=False,
1394
+ )
1395
+
1396
+ next_token_logits = outputs.logits[:, -1, :]
1397
+
1398
+ # pre-process distribution
1399
+ next_token_scores = logits_processor(input_ids, next_token_logits)
1400
+ next_token_scores = logits_warper(input_ids, next_token_scores)
1401
+
1402
+ # sample
1403
+ probs = nn.functional.softmax(next_token_scores, dim=-1)
1404
+ if generation_config.do_sample:
1405
+ next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
1406
+ else:
1407
+ next_tokens = torch.argmax(probs, dim=-1)
1408
+
1409
+ # update generated ids, model inputs, and length for next step
1410
+ input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
1411
+ model_kwargs = self._update_model_kwargs_for_generation(
1412
+ outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
1413
+ )
1414
+ unfinished_sequences = unfinished_sequences.mul((sum(next_tokens != i for i in eos_token_id)).long())
1415
+
1416
+ # stop when each sentence is finished, or if we exceed the maximum length
1417
+ if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):
1418
+ break
1419
+ yield input_ids
1420
+
1421
+ def quantize(self, bits: int, empty_init=False, **kwargs):
1422
+ if bits == 0:
1423
+ return
1424
+
1425
+ from .quantization import quantize
1426
+
1427
+ if self.quantized:
1428
+ logger.info("Already quantized.")
1429
+ return self
1430
+
1431
+ self.quantized = True
1432
+
1433
+ self.config.quantization_bit = bits
1434
+
1435
+ self.transformer = quantize(self.transformer, bits, empty_init=empty_init, **kwargs)
1436
+ return self
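The chat, stream_chat and quantize methods defined above are the user-facing entry points of this modeling file. A minimal usage sketch follows (assuming the checkpoint is published under the repo id openChatformer/yingbao-v1 and is loaded with trust_remote_code=True so that this custom modeling code is picked up; adjust the id and device placement as needed):

from transformers import AutoModel, AutoTokenizer

MODEL_ID = "openChatformer/yingbao-v1"  # assumed repo id; replace with the actual path

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
model = AutoModel.from_pretrained(MODEL_ID, trust_remote_code=True).half().cuda()
# Optional weight quantization via the quantize() method defined above
# (requires the quantization module shipped with the repo):
# model = model.quantize(4)
model = model.eval()

# Single query; history is a list of (query, response) tuples.
response, history = model.chat(tokenizer, "你好", history=[])
print(response)

# Streaming variant: yields the partial response after each generated token.
for partial_response, _ in model.stream_chat(tokenizer, "What can you do?", history=history):
    print(partial_response)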
pytorch_model.bin.index.json ADDED
@@ -0,0 +1,376 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 13419529984
4
+ },
5
+ "weight_map": {
6
+ "lm_head.weight": "pytorch_model-00007-of-00007.bin",
7
+ "transformer.final_layernorm.bias": "pytorch_model-00007-of-00007.bin",
8
+ "transformer.final_layernorm.weight": "pytorch_model-00007-of-00007.bin",
9
+ "transformer.layers.0.attention.dense.bias": "pytorch_model-00001-of-00007.bin",
10
+ "transformer.layers.0.attention.dense.weight": "pytorch_model-00001-of-00007.bin",
11
+ "transformer.layers.0.attention.query_key_value.bias": "pytorch_model-00001-of-00007.bin",
12
+ "transformer.layers.0.attention.query_key_value.weight": "pytorch_model-00001-of-00007.bin",
13
+ "transformer.layers.0.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00007.bin",
14
+ "transformer.layers.0.input_layernorm.bias": "pytorch_model-00001-of-00007.bin",
15
+ "transformer.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00007.bin",
16
+ "transformer.layers.0.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00007.bin",
17
+ "transformer.layers.0.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00007.bin",
18
+ "transformer.layers.0.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00007.bin",
19
+ "transformer.layers.0.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00007.bin",
20
+ "transformer.layers.0.post_attention_layernorm.bias": "pytorch_model-00001-of-00007.bin",
21
+ "transformer.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00007.bin",
22
+ "transformer.layers.1.attention.dense.bias": "pytorch_model-00001-of-00007.bin",
23
+ "transformer.layers.1.attention.dense.weight": "pytorch_model-00001-of-00007.bin",
24
+ "transformer.layers.1.attention.query_key_value.bias": "pytorch_model-00001-of-00007.bin",
25
+ "transformer.layers.1.attention.query_key_value.weight": "pytorch_model-00001-of-00007.bin",
26
+ "transformer.layers.1.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00007.bin",
27
+ "transformer.layers.1.input_layernorm.bias": "pytorch_model-00001-of-00007.bin",
28
+ "transformer.layers.1.input_layernorm.weight": "pytorch_model-00001-of-00007.bin",
29
+ "transformer.layers.1.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00007.bin",
30
+ "transformer.layers.1.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00007.bin",
31
+ "transformer.layers.1.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00007.bin",
32
+ "transformer.layers.1.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00007.bin",
33
+ "transformer.layers.1.post_attention_layernorm.bias": "pytorch_model-00001-of-00007.bin",
34
+ "transformer.layers.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00007.bin",
35
+ "transformer.layers.10.attention.dense.bias": "pytorch_model-00003-of-00007.bin",
36
+ "transformer.layers.10.attention.dense.weight": "pytorch_model-00003-of-00007.bin",
37
+ "transformer.layers.10.attention.query_key_value.bias": "pytorch_model-00003-of-00007.bin",
38
+ "transformer.layers.10.attention.query_key_value.weight": "pytorch_model-00003-of-00007.bin",
39
+ "transformer.layers.10.attention.rotary_emb.inv_freq": "pytorch_model-00003-of-00007.bin",
40
+ "transformer.layers.10.input_layernorm.bias": "pytorch_model-00003-of-00007.bin",
41
+ "transformer.layers.10.input_layernorm.weight": "pytorch_model-00003-of-00007.bin",
42
+ "transformer.layers.10.mlp.dense_4h_to_h.bias": "pytorch_model-00003-of-00007.bin",
43
+ "transformer.layers.10.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00007.bin",
44
+ "transformer.layers.10.mlp.dense_h_to_4h.bias": "pytorch_model-00003-of-00007.bin",
45
+ "transformer.layers.10.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00007.bin",
46
+ "transformer.layers.10.post_attention_layernorm.bias": "pytorch_model-00003-of-00007.bin",
47
+ "transformer.layers.10.post_attention_layernorm.weight": "pytorch_model-00003-of-00007.bin",
48
+ "transformer.layers.11.attention.dense.bias": "pytorch_model-00003-of-00007.bin",
49
+ "transformer.layers.11.attention.dense.weight": "pytorch_model-00003-of-00007.bin",
50
+ "transformer.layers.11.attention.query_key_value.bias": "pytorch_model-00003-of-00007.bin",
51
+ "transformer.layers.11.attention.query_key_value.weight": "pytorch_model-00003-of-00007.bin",
52
+ "transformer.layers.11.attention.rotary_emb.inv_freq": "pytorch_model-00003-of-00007.bin",
53
+ "transformer.layers.11.input_layernorm.bias": "pytorch_model-00003-of-00007.bin",
54
+ "transformer.layers.11.input_layernorm.weight": "pytorch_model-00003-of-00007.bin",
55
+ "transformer.layers.11.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00007.bin",
56
+ "transformer.layers.11.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00007.bin",
57
+ "transformer.layers.11.mlp.dense_h_to_4h.bias": "pytorch_model-00003-of-00007.bin",
58
+ "transformer.layers.11.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00007.bin",
59
+ "transformer.layers.11.post_attention_layernorm.bias": "pytorch_model-00003-of-00007.bin",
60
+ "transformer.layers.11.post_attention_layernorm.weight": "pytorch_model-00003-of-00007.bin",
61
+ "transformer.layers.12.attention.dense.bias": "pytorch_model-00004-of-00007.bin",
62
+ "transformer.layers.12.attention.dense.weight": "pytorch_model-00004-of-00007.bin",
63
+ "transformer.layers.12.attention.query_key_value.bias": "pytorch_model-00004-of-00007.bin",
64
+ "transformer.layers.12.attention.query_key_value.weight": "pytorch_model-00004-of-00007.bin",
65
+ "transformer.layers.12.attention.rotary_emb.inv_freq": "pytorch_model-00004-of-00007.bin",
66
+ "transformer.layers.12.input_layernorm.bias": "pytorch_model-00004-of-00007.bin",
67
+ "transformer.layers.12.input_layernorm.weight": "pytorch_model-00004-of-00007.bin",
68
+ "transformer.layers.12.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00007.bin",
69
+ "transformer.layers.12.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00007.bin",
70
+ "transformer.layers.12.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00007.bin",
71
+ "transformer.layers.12.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00007.bin",
72
+ "transformer.layers.12.post_attention_layernorm.bias": "pytorch_model-00004-of-00007.bin",
73
+ "transformer.layers.12.post_attention_layernorm.weight": "pytorch_model-00004-of-00007.bin",
74
+ "transformer.layers.13.attention.dense.bias": "pytorch_model-00004-of-00007.bin",
75
+ "transformer.layers.13.attention.dense.weight": "pytorch_model-00004-of-00007.bin",
76
+ "transformer.layers.13.attention.query_key_value.bias": "pytorch_model-00004-of-00007.bin",
77
+ "transformer.layers.13.attention.query_key_value.weight": "pytorch_model-00004-of-00007.bin",
78
+ "transformer.layers.13.attention.rotary_emb.inv_freq": "pytorch_model-00004-of-00007.bin",
79
+ "transformer.layers.13.input_layernorm.bias": "pytorch_model-00004-of-00007.bin",
80
+ "transformer.layers.13.input_layernorm.weight": "pytorch_model-00004-of-00007.bin",
81
+ "transformer.layers.13.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00007.bin",
82
+ "transformer.layers.13.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00007.bin",
83
+ "transformer.layers.13.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00007.bin",
84
+ "transformer.layers.13.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00007.bin",
85
+ "transformer.layers.13.post_attention_layernorm.bias": "pytorch_model-00004-of-00007.bin",
86
+ "transformer.layers.13.post_attention_layernorm.weight": "pytorch_model-00004-of-00007.bin",
87
+ "transformer.layers.14.attention.dense.bias": "pytorch_model-00004-of-00007.bin",
88
+ "transformer.layers.14.attention.dense.weight": "pytorch_model-00004-of-00007.bin",
89
+ "transformer.layers.14.attention.query_key_value.bias": "pytorch_model-00004-of-00007.bin",
90
+ "transformer.layers.14.attention.query_key_value.weight": "pytorch_model-00004-of-00007.bin",
91
+ "transformer.layers.14.attention.rotary_emb.inv_freq": "pytorch_model-00004-of-00007.bin",
92
+ "transformer.layers.14.input_layernorm.bias": "pytorch_model-00004-of-00007.bin",
93
+ "transformer.layers.14.input_layernorm.weight": "pytorch_model-00004-of-00007.bin",
94
+ "transformer.layers.14.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00007.bin",
95
+ "transformer.layers.14.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00007.bin",
96
+ "transformer.layers.14.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00007.bin",
97
+ "transformer.layers.14.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00007.bin",
98
+ "transformer.layers.14.post_attention_layernorm.bias": "pytorch_model-00004-of-00007.bin",
99
+ "transformer.layers.14.post_attention_layernorm.weight": "pytorch_model-00004-of-00007.bin",
100
+ "transformer.layers.15.attention.dense.bias": "pytorch_model-00004-of-00007.bin",
101
+ "transformer.layers.15.attention.dense.weight": "pytorch_model-00004-of-00007.bin",
102
+ "transformer.layers.15.attention.query_key_value.bias": "pytorch_model-00004-of-00007.bin",
103
+ "transformer.layers.15.attention.query_key_value.weight": "pytorch_model-00004-of-00007.bin",
104
+ "transformer.layers.15.attention.rotary_emb.inv_freq": "pytorch_model-00004-of-00007.bin",
105
+ "transformer.layers.15.input_layernorm.bias": "pytorch_model-00004-of-00007.bin",
106
+ "transformer.layers.15.input_layernorm.weight": "pytorch_model-00004-of-00007.bin",
107
+ "transformer.layers.15.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00007.bin",
108
+ "transformer.layers.15.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00007.bin",
109
+ "transformer.layers.15.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00007.bin",
110
+ "transformer.layers.15.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00007.bin",
111
+ "transformer.layers.15.post_attention_layernorm.bias": "pytorch_model-00004-of-00007.bin",
112
+ "transformer.layers.15.post_attention_layernorm.weight": "pytorch_model-00004-of-00007.bin",
113
+ "transformer.layers.16.attention.dense.bias": "pytorch_model-00004-of-00007.bin",
114
+ "transformer.layers.16.attention.dense.weight": "pytorch_model-00004-of-00007.bin",
115
+ "transformer.layers.16.attention.query_key_value.bias": "pytorch_model-00004-of-00007.bin",
116
+ "transformer.layers.16.attention.query_key_value.weight": "pytorch_model-00004-of-00007.bin",
117
+ "transformer.layers.16.attention.rotary_emb.inv_freq": "pytorch_model-00004-of-00007.bin",
118
+ "transformer.layers.16.input_layernorm.bias": "pytorch_model-00004-of-00007.bin",
119
+ "transformer.layers.16.input_layernorm.weight": "pytorch_model-00004-of-00007.bin",
120
+ "transformer.layers.16.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00007.bin",
121
+ "transformer.layers.16.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00007.bin",
122
+ "transformer.layers.16.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00007.bin",
123
+ "transformer.layers.16.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00007.bin",
124
+ "transformer.layers.16.post_attention_layernorm.bias": "pytorch_model-00004-of-00007.bin",
125
+ "transformer.layers.16.post_attention_layernorm.weight": "pytorch_model-00004-of-00007.bin",
126
+ "transformer.layers.17.attention.dense.bias": "pytorch_model-00005-of-00007.bin",
127
+ "transformer.layers.17.attention.dense.weight": "pytorch_model-00005-of-00007.bin",
128
+ "transformer.layers.17.attention.query_key_value.bias": "pytorch_model-00005-of-00007.bin",
129
+ "transformer.layers.17.attention.query_key_value.weight": "pytorch_model-00005-of-00007.bin",
130
+ "transformer.layers.17.attention.rotary_emb.inv_freq": "pytorch_model-00005-of-00007.bin",
131
+ "transformer.layers.17.input_layernorm.bias": "pytorch_model-00005-of-00007.bin",
132
+ "transformer.layers.17.input_layernorm.weight": "pytorch_model-00005-of-00007.bin",
133
+ "transformer.layers.17.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00007.bin",
134
+ "transformer.layers.17.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00007.bin",
135
+ "transformer.layers.17.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00007.bin",
136
+ "transformer.layers.17.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00007.bin",
137
+ "transformer.layers.17.post_attention_layernorm.bias": "pytorch_model-00005-of-00007.bin",
138
+ "transformer.layers.17.post_attention_layernorm.weight": "pytorch_model-00005-of-00007.bin",
139
+ "transformer.layers.18.attention.dense.bias": "pytorch_model-00005-of-00007.bin",
140
+ "transformer.layers.18.attention.dense.weight": "pytorch_model-00005-of-00007.bin",
141
+ "transformer.layers.18.attention.query_key_value.bias": "pytorch_model-00005-of-00007.bin",
142
+ "transformer.layers.18.attention.query_key_value.weight": "pytorch_model-00005-of-00007.bin",
143
+ "transformer.layers.18.attention.rotary_emb.inv_freq": "pytorch_model-00005-of-00007.bin",
144
+ "transformer.layers.18.input_layernorm.bias": "pytorch_model-00005-of-00007.bin",
145
+ "transformer.layers.18.input_layernorm.weight": "pytorch_model-00005-of-00007.bin",
146
+ "transformer.layers.18.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00007.bin",
147
+ "transformer.layers.18.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00007.bin",
148
+ "transformer.layers.18.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00007.bin",
149
+ "transformer.layers.18.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00007.bin",
150
+ "transformer.layers.18.post_attention_layernorm.bias": "pytorch_model-00005-of-00007.bin",
151
+ "transformer.layers.18.post_attention_layernorm.weight": "pytorch_model-00005-of-00007.bin",
152
+ "transformer.layers.19.attention.dense.bias": "pytorch_model-00005-of-00007.bin",
153
+ "transformer.layers.19.attention.dense.weight": "pytorch_model-00005-of-00007.bin",
154
+ "transformer.layers.19.attention.query_key_value.bias": "pytorch_model-00005-of-00007.bin",
155
+ "transformer.layers.19.attention.query_key_value.weight": "pytorch_model-00005-of-00007.bin",
156
+ "transformer.layers.19.attention.rotary_emb.inv_freq": "pytorch_model-00005-of-00007.bin",
157
+ "transformer.layers.19.input_layernorm.bias": "pytorch_model-00005-of-00007.bin",
158
+ "transformer.layers.19.input_layernorm.weight": "pytorch_model-00005-of-00007.bin",
159
+ "transformer.layers.19.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00007.bin",
160
+ "transformer.layers.19.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00007.bin",
161
+ "transformer.layers.19.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00007.bin",
162
+ "transformer.layers.19.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00007.bin",
163
+ "transformer.layers.19.post_attention_layernorm.bias": "pytorch_model-00005-of-00007.bin",
164
+ "transformer.layers.19.post_attention_layernorm.weight": "pytorch_model-00005-of-00007.bin",
165
+ "transformer.layers.2.attention.dense.bias": "pytorch_model-00002-of-00007.bin",
166
+ "transformer.layers.2.attention.dense.weight": "pytorch_model-00002-of-00007.bin",
167
+ "transformer.layers.2.attention.query_key_value.bias": "pytorch_model-00001-of-00007.bin",
168
+ "transformer.layers.2.attention.query_key_value.weight": "pytorch_model-00001-of-00007.bin",
169
+ "transformer.layers.2.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00007.bin",
170
+ "transformer.layers.2.input_layernorm.bias": "pytorch_model-00001-of-00007.bin",
171
+ "transformer.layers.2.input_layernorm.weight": "pytorch_model-00001-of-00007.bin",
172
+ "transformer.layers.2.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00007.bin",
173
+ "transformer.layers.2.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00007.bin",
174
+ "transformer.layers.2.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00007.bin",
175
+ "transformer.layers.2.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00007.bin",
176
+ "transformer.layers.2.post_attention_layernorm.bias": "pytorch_model-00002-of-00007.bin",
177
+ "transformer.layers.2.post_attention_layernorm.weight": "pytorch_model-00002-of-00007.bin",
178
+ "transformer.layers.20.attention.dense.bias": "pytorch_model-00005-of-00007.bin",
179
+ "transformer.layers.20.attention.dense.weight": "pytorch_model-00005-of-00007.bin",
180
+ "transformer.layers.20.attention.query_key_value.bias": "pytorch_model-00005-of-00007.bin",
181
+ "transformer.layers.20.attention.query_key_value.weight": "pytorch_model-00005-of-00007.bin",
182
+ "transformer.layers.20.attention.rotary_emb.inv_freq": "pytorch_model-00005-of-00007.bin",
183
+ "transformer.layers.20.input_layernorm.bias": "pytorch_model-00005-of-00007.bin",
184
+ "transformer.layers.20.input_layernorm.weight": "pytorch_model-00005-of-00007.bin",
185
+ "transformer.layers.20.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00007.bin",
186
+ "transformer.layers.20.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00007.bin",
187
+ "transformer.layers.20.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00007.bin",
188
+ "transformer.layers.20.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00007.bin",
189
+ "transformer.layers.20.post_attention_layernorm.bias": "pytorch_model-00005-of-00007.bin",
190
+ "transformer.layers.20.post_attention_layernorm.weight": "pytorch_model-00005-of-00007.bin",
191
+ "transformer.layers.21.attention.dense.bias": "pytorch_model-00006-of-00007.bin",
192
+ "transformer.layers.21.attention.dense.weight": "pytorch_model-00006-of-00007.bin",
193
+ "transformer.layers.21.attention.query_key_value.bias": "pytorch_model-00005-of-00007.bin",
194
+ "transformer.layers.21.attention.query_key_value.weight": "pytorch_model-00005-of-00007.bin",
195
+ "transformer.layers.21.attention.rotary_emb.inv_freq": "pytorch_model-00005-of-00007.bin",
196
+ "transformer.layers.21.input_layernorm.bias": "pytorch_model-00005-of-00007.bin",
197
+ "transformer.layers.21.input_layernorm.weight": "pytorch_model-00005-of-00007.bin",
198
+ "transformer.layers.21.mlp.dense_4h_to_h.bias": "pytorch_model-00006-of-00007.bin",
199
+ "transformer.layers.21.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00007.bin",
200
+ "transformer.layers.21.mlp.dense_h_to_4h.bias": "pytorch_model-00006-of-00007.bin",
201
+ "transformer.layers.21.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00007.bin",
202
+ "transformer.layers.21.post_attention_layernorm.bias": "pytorch_model-00006-of-00007.bin",
203
+ "transformer.layers.21.post_attention_layernorm.weight": "pytorch_model-00006-of-00007.bin",
204
+ "transformer.layers.22.attention.dense.bias": "pytorch_model-00006-of-00007.bin",
205
+ "transformer.layers.22.attention.dense.weight": "pytorch_model-00006-of-00007.bin",
206
+ "transformer.layers.22.attention.query_key_value.bias": "pytorch_model-00006-of-00007.bin",
207
+ "transformer.layers.22.attention.query_key_value.weight": "pytorch_model-00006-of-00007.bin",
208
+ "transformer.layers.22.attention.rotary_emb.inv_freq": "pytorch_model-00006-of-00007.bin",
209
+ "transformer.layers.22.input_layernorm.bias": "pytorch_model-00006-of-00007.bin",
210
+ "transformer.layers.22.input_layernorm.weight": "pytorch_model-00006-of-00007.bin",
211
+ "transformer.layers.22.mlp.dense_4h_to_h.bias": "pytorch_model-00006-of-00007.bin",
212
+ "transformer.layers.22.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00007.bin",
213
+ "transformer.layers.22.mlp.dense_h_to_4h.bias": "pytorch_model-00006-of-00007.bin",
214
+ "transformer.layers.22.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00007.bin",
215
+ "transformer.layers.22.post_attention_layernorm.bias": "pytorch_model-00006-of-00007.bin",
216
+ "transformer.layers.22.post_attention_layernorm.weight": "pytorch_model-00006-of-00007.bin",
217
+ "transformer.layers.23.attention.dense.bias": "pytorch_model-00006-of-00007.bin",
218
+ "transformer.layers.23.attention.dense.weight": "pytorch_model-00006-of-00007.bin",
219
+ "transformer.layers.23.attention.query_key_value.bias": "pytorch_model-00006-of-00007.bin",
220
+ "transformer.layers.23.attention.query_key_value.weight": "pytorch_model-00006-of-00007.bin",
221
+ "transformer.layers.23.attention.rotary_emb.inv_freq": "pytorch_model-00006-of-00007.bin",
222
+ "transformer.layers.23.input_layernorm.bias": "pytorch_model-00006-of-00007.bin",
223
+ "transformer.layers.23.input_layernorm.weight": "pytorch_model-00006-of-00007.bin",
224
+ "transformer.layers.23.mlp.dense_4h_to_h.bias": "pytorch_model-00006-of-00007.bin",
225
+ "transformer.layers.23.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00007.bin",
226
+ "transformer.layers.23.mlp.dense_h_to_4h.bias": "pytorch_model-00006-of-00007.bin",
227
+ "transformer.layers.23.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00007.bin",
228
+ "transformer.layers.23.post_attention_layernorm.bias": "pytorch_model-00006-of-00007.bin",
229
+ "transformer.layers.23.post_attention_layernorm.weight": "pytorch_model-00006-of-00007.bin",
230
+ "transformer.layers.24.attention.dense.bias": "pytorch_model-00006-of-00007.bin",
231
+ "transformer.layers.24.attention.dense.weight": "pytorch_model-00006-of-00007.bin",
232
+ "transformer.layers.24.attention.query_key_value.bias": "pytorch_model-00006-of-00007.bin",
233
+ "transformer.layers.24.attention.query_key_value.weight": "pytorch_model-00006-of-00007.bin",
234
+ "transformer.layers.24.attention.rotary_emb.inv_freq": "pytorch_model-00006-of-00007.bin",
235
+ "transformer.layers.24.input_layernorm.bias": "pytorch_model-00006-of-00007.bin",
236
+ "transformer.layers.24.input_layernorm.weight": "pytorch_model-00006-of-00007.bin",
237
+ "transformer.layers.24.mlp.dense_4h_to_h.bias": "pytorch_model-00006-of-00007.bin",
238
+ "transformer.layers.24.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00007.bin",
239
+ "transformer.layers.24.mlp.dense_h_to_4h.bias": "pytorch_model-00006-of-00007.bin",
240
+ "transformer.layers.24.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00007.bin",
241
+ "transformer.layers.24.post_attention_layernorm.bias": "pytorch_model-00006-of-00007.bin",
242
+ "transformer.layers.24.post_attention_layernorm.weight": "pytorch_model-00006-of-00007.bin",
243
+ "transformer.layers.25.attention.dense.bias": "pytorch_model-00006-of-00007.bin",
244
+ "transformer.layers.25.attention.dense.weight": "pytorch_model-00006-of-00007.bin",
245
+ "transformer.layers.25.attention.query_key_value.bias": "pytorch_model-00006-of-00007.bin",
246
+ "transformer.layers.25.attention.query_key_value.weight": "pytorch_model-00006-of-00007.bin",
247
+ "transformer.layers.25.attention.rotary_emb.inv_freq": "pytorch_model-00006-of-00007.bin",
248
+ "transformer.layers.25.input_layernorm.bias": "pytorch_model-00006-of-00007.bin",
249
+ "transformer.layers.25.input_layernorm.weight": "pytorch_model-00006-of-00007.bin",
250
+ "transformer.layers.25.mlp.dense_4h_to_h.bias": "pytorch_model-00006-of-00007.bin",
251
+ "transformer.layers.25.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00007.bin",
252
+ "transformer.layers.25.mlp.dense_h_to_4h.bias": "pytorch_model-00006-of-00007.bin",
253
+ "transformer.layers.25.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00007.bin",
254
+ "transformer.layers.25.post_attention_layernorm.bias": "pytorch_model-00006-of-00007.bin",
255
+ "transformer.layers.25.post_attention_layernorm.weight": "pytorch_model-00006-of-00007.bin",
256
+ "transformer.layers.26.attention.dense.bias": "pytorch_model-00007-of-00007.bin",
257
+ "transformer.layers.26.attention.dense.weight": "pytorch_model-00007-of-00007.bin",
258
+ "transformer.layers.26.attention.query_key_value.bias": "pytorch_model-00007-of-00007.bin",
259
+ "transformer.layers.26.attention.query_key_value.weight": "pytorch_model-00007-of-00007.bin",
260
+ "transformer.layers.26.attention.rotary_emb.inv_freq": "pytorch_model-00006-of-00007.bin",
261
+ "transformer.layers.26.input_layernorm.bias": "pytorch_model-00006-of-00007.bin",
262
+ "transformer.layers.26.input_layernorm.weight": "pytorch_model-00006-of-00007.bin",
263
+ "transformer.layers.26.mlp.dense_4h_to_h.bias": "pytorch_model-00007-of-00007.bin",
264
+ "transformer.layers.26.mlp.dense_4h_to_h.weight": "pytorch_model-00007-of-00007.bin",
265
+ "transformer.layers.26.mlp.dense_h_to_4h.bias": "pytorch_model-00007-of-00007.bin",
266
+ "transformer.layers.26.mlp.dense_h_to_4h.weight": "pytorch_model-00007-of-00007.bin",
267
+ "transformer.layers.26.post_attention_layernorm.bias": "pytorch_model-00007-of-00007.bin",
268
+ "transformer.layers.26.post_attention_layernorm.weight": "pytorch_model-00007-of-00007.bin",
269
+ "transformer.layers.27.attention.dense.bias": "pytorch_model-00007-of-00007.bin",
270
+ "transformer.layers.27.attention.dense.weight": "pytorch_model-00007-of-00007.bin",
271
+ "transformer.layers.27.attention.query_key_value.bias": "pytorch_model-00007-of-00007.bin",
272
+ "transformer.layers.27.attention.query_key_value.weight": "pytorch_model-00007-of-00007.bin",
273
+ "transformer.layers.27.attention.rotary_emb.inv_freq": "pytorch_model-00007-of-00007.bin",
274
+ "transformer.layers.27.input_layernorm.bias": "pytorch_model-00007-of-00007.bin",
275
+ "transformer.layers.27.input_layernorm.weight": "pytorch_model-00007-of-00007.bin",
276
+ "transformer.layers.27.mlp.dense_4h_to_h.bias": "pytorch_model-00007-of-00007.bin",
277
+ "transformer.layers.27.mlp.dense_4h_to_h.weight": "pytorch_model-00007-of-00007.bin",
278
+ "transformer.layers.27.mlp.dense_h_to_4h.bias": "pytorch_model-00007-of-00007.bin",
279
+ "transformer.layers.27.mlp.dense_h_to_4h.weight": "pytorch_model-00007-of-00007.bin",
280
+ "transformer.layers.27.post_attention_layernorm.bias": "pytorch_model-00007-of-00007.bin",
281
+ "transformer.layers.27.post_attention_layernorm.weight": "pytorch_model-00007-of-00007.bin",
282
+ "transformer.layers.3.attention.dense.bias": "pytorch_model-00002-of-00007.bin",
283
+ "transformer.layers.3.attention.dense.weight": "pytorch_model-00002-of-00007.bin",
284
+ "transformer.layers.3.attention.query_key_value.bias": "pytorch_model-00002-of-00007.bin",
285
+ "transformer.layers.3.attention.query_key_value.weight": "pytorch_model-00002-of-00007.bin",
286
+ "transformer.layers.3.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00007.bin",
287
+ "transformer.layers.3.input_layernorm.bias": "pytorch_model-00002-of-00007.bin",
288
+ "transformer.layers.3.input_layernorm.weight": "pytorch_model-00002-of-00007.bin",
289
+ "transformer.layers.3.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00007.bin",
290
+ "transformer.layers.3.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00007.bin",
291
+ "transformer.layers.3.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00007.bin",
292
+ "transformer.layers.3.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00007.bin",
293
+ "transformer.layers.3.post_attention_layernorm.bias": "pytorch_model-00002-of-00007.bin",
294
+ "transformer.layers.3.post_attention_layernorm.weight": "pytorch_model-00002-of-00007.bin",
295
+ "transformer.layers.4.attention.dense.bias": "pytorch_model-00002-of-00007.bin",
296
+ "transformer.layers.4.attention.dense.weight": "pytorch_model-00002-of-00007.bin",
297
+ "transformer.layers.4.attention.query_key_value.bias": "pytorch_model-00002-of-00007.bin",
298
+ "transformer.layers.4.attention.query_key_value.weight": "pytorch_model-00002-of-00007.bin",
299
+ "transformer.layers.4.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00007.bin",
300
+ "transformer.layers.4.input_layernorm.bias": "pytorch_model-00002-of-00007.bin",
301
+ "transformer.layers.4.input_layernorm.weight": "pytorch_model-00002-of-00007.bin",
302
+ "transformer.layers.4.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00007.bin",
303
+ "transformer.layers.4.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00007.bin",
304
+ "transformer.layers.4.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00007.bin",
305
+ "transformer.layers.4.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00007.bin",
306
+ "transformer.layers.4.post_attention_layernorm.bias": "pytorch_model-00002-of-00007.bin",
307
+ "transformer.layers.4.post_attention_layernorm.weight": "pytorch_model-00002-of-00007.bin",
308
+ "transformer.layers.5.attention.dense.bias": "pytorch_model-00002-of-00007.bin",
309
+ "transformer.layers.5.attention.dense.weight": "pytorch_model-00002-of-00007.bin",
310
+ "transformer.layers.5.attention.query_key_value.bias": "pytorch_model-00002-of-00007.bin",
311
+ "transformer.layers.5.attention.query_key_value.weight": "pytorch_model-00002-of-00007.bin",
312
+ "transformer.layers.5.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00007.bin",
313
+ "transformer.layers.5.input_layernorm.bias": "pytorch_model-00002-of-00007.bin",
314
+ "transformer.layers.5.input_layernorm.weight": "pytorch_model-00002-of-00007.bin",
315
+ "transformer.layers.5.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00007.bin",
316
+ "transformer.layers.5.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00007.bin",
317
+ "transformer.layers.5.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00007.bin",
318
+ "transformer.layers.5.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00007.bin",
319
+ "transformer.layers.5.post_attention_layernorm.bias": "pytorch_model-00002-of-00007.bin",
320
+ "transformer.layers.5.post_attention_layernorm.weight": "pytorch_model-00002-of-00007.bin",
321
+ "transformer.layers.6.attention.dense.bias": "pytorch_model-00002-of-00007.bin",
322
+ "transformer.layers.6.attention.dense.weight": "pytorch_model-00002-of-00007.bin",
323
+ "transformer.layers.6.attention.query_key_value.bias": "pytorch_model-00002-of-00007.bin",
324
+ "transformer.layers.6.attention.query_key_value.weight": "pytorch_model-00002-of-00007.bin",
325
+ "transformer.layers.6.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00007.bin",
326
+ "transformer.layers.6.input_layernorm.bias": "pytorch_model-00002-of-00007.bin",
327
+ "transformer.layers.6.input_layernorm.weight": "pytorch_model-00002-of-00007.bin",
328
+ "transformer.layers.6.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00007.bin",
329
+ "transformer.layers.6.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00007.bin",
330
+ "transformer.layers.6.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00007.bin",
331
+ "transformer.layers.6.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00007.bin",
332
+ "transformer.layers.6.post_attention_layernorm.bias": "pytorch_model-00002-of-00007.bin",
333
+ "transformer.layers.6.post_attention_layernorm.weight": "pytorch_model-00002-of-00007.bin",
334
+ "transformer.layers.7.attention.dense.bias": "pytorch_model-00003-of-00007.bin",
335
+ "transformer.layers.7.attention.dense.weight": "pytorch_model-00003-of-00007.bin",
336
+ "transformer.layers.7.attention.query_key_value.bias": "pytorch_model-00003-of-00007.bin",
337
+ "transformer.layers.7.attention.query_key_value.weight": "pytorch_model-00003-of-00007.bin",
338
+ "transformer.layers.7.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00007.bin",
339
+ "transformer.layers.7.input_layernorm.bias": "pytorch_model-00002-of-00007.bin",
340
+ "transformer.layers.7.input_layernorm.weight": "pytorch_model-00002-of-00007.bin",
341
+ "transformer.layers.7.mlp.dense_4h_to_h.bias": "pytorch_model-00003-of-00007.bin",
342
+ "transformer.layers.7.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00007.bin",
343
+ "transformer.layers.7.mlp.dense_h_to_4h.bias": "pytorch_model-00003-of-00007.bin",
344
+ "transformer.layers.7.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00007.bin",
345
+ "transformer.layers.7.post_attention_layernorm.bias": "pytorch_model-00003-of-00007.bin",
346
+ "transformer.layers.7.post_attention_layernorm.weight": "pytorch_model-00003-of-00007.bin",
347
+ "transformer.layers.8.attention.dense.bias": "pytorch_model-00003-of-00007.bin",
348
+ "transformer.layers.8.attention.dense.weight": "pytorch_model-00003-of-00007.bin",
349
+ "transformer.layers.8.attention.query_key_value.bias": "pytorch_model-00003-of-00007.bin",
350
+ "transformer.layers.8.attention.query_key_value.weight": "pytorch_model-00003-of-00007.bin",
351
+ "transformer.layers.8.attention.rotary_emb.inv_freq": "pytorch_model-00003-of-00007.bin",
352
+ "transformer.layers.8.input_layernorm.bias": "pytorch_model-00003-of-00007.bin",
353
+ "transformer.layers.8.input_layernorm.weight": "pytorch_model-00003-of-00007.bin",
354
+ "transformer.layers.8.mlp.dense_4h_to_h.bias": "pytorch_model-00003-of-00007.bin",
355
+ "transformer.layers.8.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00007.bin",
356
+ "transformer.layers.8.mlp.dense_h_to_4h.bias": "pytorch_model-00003-of-00007.bin",
357
+ "transformer.layers.8.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00007.bin",
358
+ "transformer.layers.8.post_attention_layernorm.bias": "pytorch_model-00003-of-00007.bin",
359
+ "transformer.layers.8.post_attention_layernorm.weight": "pytorch_model-00003-of-00007.bin",
360
+ "transformer.layers.9.attention.dense.bias": "pytorch_model-00003-of-00007.bin",
361
+ "transformer.layers.9.attention.dense.weight": "pytorch_model-00003-of-00007.bin",
362
+ "transformer.layers.9.attention.query_key_value.bias": "pytorch_model-00003-of-00007.bin",
363
+ "transformer.layers.9.attention.query_key_value.weight": "pytorch_model-00003-of-00007.bin",
364
+ "transformer.layers.9.attention.rotary_emb.inv_freq": "pytorch_model-00003-of-00007.bin",
365
+ "transformer.layers.9.input_layernorm.bias": "pytorch_model-00003-of-00007.bin",
366
+ "transformer.layers.9.input_layernorm.weight": "pytorch_model-00003-of-00007.bin",
367
+ "transformer.layers.9.mlp.dense_4h_to_h.bias": "pytorch_model-00003-of-00007.bin",
368
+ "transformer.layers.9.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00007.bin",
369
+ "transformer.layers.9.mlp.dense_h_to_4h.bias": "pytorch_model-00003-of-00007.bin",
370
+ "transformer.layers.9.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00007.bin",
371
+ "transformer.layers.9.post_attention_layernorm.bias": "pytorch_model-00003-of-00007.bin",
372
+ "transformer.layers.9.post_attention_layernorm.weight": "pytorch_model-00003-of-00007.bin",
373
+ "transformer.prefix_encoder.embedding.weight": "pytorch_model-00007-of-00007.bin",
374
+ "transformer.word_embeddings.weight": "pytorch_model-00001-of-00007.bin"
375
+ }
376
+ }
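For reference, the index above is what from_pretrained uses to resolve which of the seven shard files holds each parameter. A minimal sketch of consuming it manually (assuming the shard files listed in weight_map sit next to the index file):

import json
import torch

# Read the shard index and group parameter names by the shard that stores them.
with open("pytorch_model.bin.index.json") as f:
    index = json.load(f)

params_by_shard = {}
for name, shard_file in index["weight_map"].items():
    params_by_shard.setdefault(shard_file, []).append(name)

# Load shards one at a time and assemble the full state dict on CPU.
state_dict = {}
for shard_file, names in params_by_shard.items():
    shard = torch.load(shard_file, map_location="cpu")
    state_dict.update({name: shard[name] for name in names})

print(f"{len(state_dict)} tensors across {len(params_by_shard)} shards; "
      f"declared total_size = {index['metadata']['total_size']} bytes")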