shwu committed on
Commit
46280bb
1 Parent(s): 8dc243b
README.md ADDED
File without changes
config.json ADDED
@@ -0,0 +1,262 @@
+ {
+   "_commit_hash": null,
+   "architectures": [
+     "Blip2ForChatGLM"
+   ],
+   "initializer_factor": 1.0,
+   "initializer_range": 0.02,
+   "is_encoder_decoder": true,
+   "num_query_tokens": 32,
+   "qformer_config": {
+     "_name_or_path": "",
+     "add_cross_attention": false,
+     "architectures": null,
+     "attention_probs_dropout_prob": 0.1,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "classifier_dropout": null,
+     "cross_attention_frequency": 2,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_hidden_size": 1408,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": null,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "gelu",
+     "hidden_dropout_prob": 0.1,
+     "hidden_size": 768,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "initializer_range": 0.02,
+     "intermediate_size": 3072,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-12,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "max_position_embeddings": 512,
+     "min_length": 0,
+     "model_type": "blip_2_qformer",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 12,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_hidden_layers": 12,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": 0,
+     "position_embedding_type": "absolute",
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "transformers_version": "4.27.3",
+     "typical_p": 1.0,
+     "use_bfloat16": false,
+     "vocab_size": 30522
+   },
+   "text_config": {
+     "_name_or_path": "THUDM/chatglm-6b",
+     "add_cross_attention": false,
+     "architectures": [
+       "ChatGLMModel"
+     ],
+     "auto_map": {
+       "AutoConfig": "configuration_chatglm.ChatGLMConfig",
+       "AutoModel": "modeling_chatglm.ChatGLMForConditionalGeneration",
+       "AutoModelForSeq2SeqLM": "modeling_chatglm.ChatGLMForConditionalGeneration"
+     },
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": 130004,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": 130005,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "gmask_token_id": 130001,
+     "hidden_size": 4096,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "inner_hidden_size": 16384,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layernorm_epsilon": 1e-05,
+     "length_penalty": 1.0,
+     "mask_token_id": 130000,
+     "max_length": 20,
+     "max_sequence_length": 2048,
+     "min_length": 0,
+     "model_type": "chatglm",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 32,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_layers": 28,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": 3,
+     "position_encoding_2d": true,
+     "pre_seq_len": null,
+     "prefix": null,
+     "prefix_projection": false,
+     "problem_type": null,
+     "pruned_heads": {},
+     "quantization_bit": 0,
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": "float16",
+     "torchscript": false,
+     "transformers_version": "4.27.3",
+     "typical_p": 1.0,
+     "use_bfloat16": false,
+     "use_cache": true,
+     "vocab_size": 130528
+   },
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": null,
+   "use_decoder_only_language_model": false,
+   "vision_config": {
+     "_name_or_path": "",
+     "add_cross_attention": false,
+     "architectures": null,
+     "attention_dropout": 0.0,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "dropout": 0.0,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": null,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "gelu",
+     "hidden_size": 1408,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "image_size": 224,
+     "initializer_factor": 1.0,
+     "initializer_range": 1e-10,
+     "intermediate_size": 6144,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-05,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "min_length": 0,
+     "model_type": "blip_2_vision_model",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 16,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_channels": 3,
+     "num_hidden_layers": 39,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": null,
+     "patch_size": 14,
+     "prefix": null,
+     "problem_type": null,
+     "projection_dim": 512,
+     "pruned_heads": {},
+     "qkv_bias": true,
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "transformers_version": "4.27.3",
+     "typical_p": 1.0,
+     "use_bfloat16": false
+   },
+   "auto_map": {
+     "AutoConfig": "configuration_blip2chatglm.Blip2ChatGLMConfig",
+     "AutoModel": "modeling_blip2chatglm.Blip2ForChatGLM",
+     "AutoModelForCausalLM": "modeling_blip2chatglm.Blip2ChatGLM"
+   }
+ }
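
Because `config.json` declares an `auto_map`, the custom configuration class can be resolved through `AutoConfig` even though it is not part of `transformers`. A minimal loading sketch, assuming a hypothetical repo id (substitute the actual repository):

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained(
    "user/blip2chatglm",      # placeholder, not a real repo id
    trust_remote_code=True,   # required so auto_map can resolve Blip2ChatGLMConfig
)
print(config.num_query_tokens)           # 32
print(config.vision_config.hidden_size)  # 1408, mirrored into qformer_config.encoder_hidden_size
```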
configuration_blip2chatglm.py ADDED
@@ -0,0 +1,91 @@
+ import copy
+
+ from transformers import Blip2QFormerConfig, Blip2VisionConfig
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+ from .configuration_chatglm import ChatGLMConfig
+
+ logger = logging.get_logger(__name__)
+
+
+ class Blip2ChatGLMConfig(PretrainedConfig):
+     """Configuration for the composite BLIP-2 + ChatGLM model; mainly based on `Blip2Config`."""
+
+     is_composition = True
+
+     def __init__(
+         self,
+         vision_config=None,
+         qformer_config=None,
+         text_config=None,
+         num_query_tokens=32,
+         **kwargs,
+     ):
+         super().__init__(**kwargs)
+
+         if vision_config is None:
+             vision_config = {}
+             logger.info("vision_config is None. Initializing the Blip2VisionConfig with default values.")
+
+         if qformer_config is None:
+             qformer_config = {}
+             logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")
+
+         if text_config is None:
+             text_config = {}
+             logger.info("text_config is None. Initializing the ChatGLMConfig with default values.")
+
+         self.vision_config = Blip2VisionConfig(**vision_config)
+         self.qformer_config = Blip2QFormerConfig(**qformer_config)
+         # Unlike Blip2Config, the language model is always ChatGLM here.
+         self.text_config = ChatGLMConfig(**text_config)
+
+         # ChatGLM manages its own embedding weights, so do not tie them at this level.
+         self.tie_word_embeddings = False
+         # Treat ChatGLM as an encoder-decoder-style model for the generation utilities.
+         self.is_encoder_decoder = True
+
+         self.num_query_tokens = num_query_tokens
+         self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
+         # ChatGLM is not handled through Blip2's decoder-only code path.
+         self.use_decoder_only_language_model = False
+         self.initializer_factor = 1.0
+         self.initializer_range = 0.02
+
+     @classmethod
+     def from_vision_qformer_text_configs(
+         cls,
+         vision_config: Blip2VisionConfig,
+         qformer_config: Blip2QFormerConfig,
+         text_config: PretrainedConfig,
+         **kwargs,
+     ):
+         r"""
+         Instantiate a [`Blip2ChatGLMConfig`] (or a derived class) from a BLIP-2 vision model, Q-Former and
+         language model configurations.
+
+         Returns:
+             [`Blip2ChatGLMConfig`]: An instance of a configuration object
+         """
+
+         return cls(
+             vision_config=vision_config.to_dict(),
+             qformer_config=qformer_config.to_dict(),
+             text_config=text_config.to_dict(),
+             **kwargs,
+         )
+
+     def to_dict(self):
+         """
+         Serializes this instance to a Python dictionary. Overrides the default [`~PretrainedConfig.to_dict`].
+
+         Returns:
+             `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
+         """
+         output = copy.deepcopy(self.__dict__)
+         output["vision_config"] = self.vision_config.to_dict()
+         output["qformer_config"] = self.qformer_config.to_dict()
+         output["text_config"] = self.text_config.to_dict()
+         output["model_type"] = self.__class__.model_type
+         return output
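
For reference, a short sketch of how `from_vision_qformer_text_configs` assembles the composite config. It assumes the two configuration modules are importable (when the repo is loaded via `trust_remote_code` this is handled automatically), and the sub-config values shown simply mirror this commit's `config.json`:

```python
from transformers import Blip2QFormerConfig, Blip2VisionConfig

from configuration_chatglm import ChatGLMConfig
from configuration_blip2chatglm import Blip2ChatGLMConfig

vision_config = Blip2VisionConfig(hidden_size=1408, num_hidden_layers=39)
qformer_config = Blip2QFormerConfig(hidden_size=768)
text_config = ChatGLMConfig()  # ChatGLM-6B defaults

config = Blip2ChatGLMConfig.from_vision_qformer_text_configs(
    vision_config, qformer_config, text_config, num_query_tokens=32
)
# __init__ wires the vision hidden size into the Q-Former's cross-attention input.
assert config.qformer_config.encoder_hidden_size == 1408
```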
configuration_chatglm.py ADDED
@@ -0,0 +1,103 @@
+ """ ChatGLM model configuration """
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+ logger = logging.get_logger(__name__)
+
+
+ class ChatGLMConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`~ChatGLMModel`].
+     It is used to instantiate a ChatGLM model according to the specified arguments, defining the model
+     architecture. Instantiating a configuration with the defaults will yield a configuration similar to that of
+     the ChatGLM-6B [THUDM/ChatGLM-6B](https://huggingface.co/THUDM/chatglm-6b) architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used
+     to control the model outputs. Read the documentation from [`PretrainedConfig`]
+     for more information.
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 150528):
+             Vocabulary size of the ChatGLM-6B model. Defines the number of different tokens that can be
+             represented by the `inputs_ids` passed when calling [`~ChatGLMModel`] or [`~TFChatGLMModel`].
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimension of the encoder layers and the pooler layer.
+         num_layers (`int`, *optional*, defaults to 28):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         inner_hidden_size (`int`, *optional*, defaults to 16384):
+             Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+         max_sequence_length (`int`, *optional*, defaults to 2048):
+             The maximum sequence length that this model might ever be used with.
+             Typically set this to something large just in case (e.g., 512 or 1024 or 2048).
+         layernorm_epsilon (`float`, *optional*, defaults to 1e-5):
+             The epsilon used by the layer normalization layers.
+         use_cache (`bool`, *optional*, defaults to `False`):
+             Whether the model should return the last key/values attentions (not used by all models).
+     Example:
+
+     ```python
+     >>> from configuration_chatglm import ChatGLMConfig
+     >>> from modeling_chatglm import ChatGLMModel
+
+     >>> # Initializing a ChatGLM-6B THUDM/ChatGLM-6B style configuration
+     >>> configuration = ChatGLMConfig()
+
+     >>> # Initializing a model from the THUDM/ChatGLM-6B style configuration
+     >>> model = ChatGLMModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```
+     """
+
+     model_type = "chatglm"
+
+     def __init__(
+         self,
+         vocab_size=150528,
+         hidden_size=4096,
+         num_layers=28,
+         num_attention_heads=32,
+         layernorm_epsilon=1e-5,
+         use_cache=False,
+         bos_token_id=150004,
+         eos_token_id=150005,
+         mask_token_id=150000,
+         gmask_token_id=150001,
+         pad_token_id=0,
+         max_sequence_length=2048,
+         inner_hidden_size=16384,
+         position_encoding_2d=True,
+         quantization_bit=0,
+         pre_seq_len=None,
+         prefix_projection=False,
+         **kwargs
+     ):
+         self.num_layers = num_layers
+         self.vocab_size = vocab_size
+         self.hidden_size = hidden_size
+         self.num_attention_heads = num_attention_heads
+         self.max_sequence_length = max_sequence_length
+         self.layernorm_epsilon = layernorm_epsilon
+         self.inner_hidden_size = inner_hidden_size
+         self.use_cache = use_cache
+         self.bos_token_id = bos_token_id
+         self.eos_token_id = eos_token_id
+         self.pad_token_id = pad_token_id
+         self.mask_token_id = mask_token_id
+         self.gmask_token_id = gmask_token_id
+         self.position_encoding_2d = position_encoding_2d
+         self.quantization_bit = quantization_bit
+         self.pre_seq_len = pre_seq_len
+         self.prefix_projection = prefix_projection
+
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             **kwargs
+         )
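
Note that these class defaults target the original 150k-token vocabulary, while the `config.json` in this commit overrides them for the 130k tokenizer; any field can be overridden at construction time. A small sketch:

```python
from configuration_chatglm import ChatGLMConfig

# Values below match the text_config stored in this commit's config.json.
config = ChatGLMConfig(
    vocab_size=130528,
    bos_token_id=130004,
    eos_token_id=130005,
    mask_token_id=130000,
    gmask_token_id=130001,
    pad_token_id=3,
    use_cache=True,
)
print(config.bos_token_id)  # 130004, overriding the 150004 default
```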
modeling_blip2chatglm.py ADDED
@@ -0,0 +1,362 @@
+ import copy
+ import warnings
+ from typing import Callable, List, Optional, Tuple, Union
+
+ import torch
+ from torch import nn
+
+ from transformers import (
+     PreTrainedModel,
+     Blip2VisionModel,
+     Blip2QFormerModel,
+     GenerationConfig,
+ )
+ from transformers.utils import logging
+ from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList
+
+ from .modeling_chatglm import (
+     ChatGLMForConditionalGeneration,
+     InvalidScoreLogitsProcessor,
+ )
+ from .configuration_blip2chatglm import Blip2ChatGLMConfig
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class Blip2ForChatGLM(PreTrainedModel):
+     def __init__(self, config: Blip2ChatGLMConfig):
+         super().__init__(config)
+
+         self.vision_model = Blip2VisionModel(config.vision_config)
+
+         self.query_tokens = nn.Parameter(
+             torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size)
+         )
+         self.qformer = Blip2QFormerModel(config.qformer_config)
+
+         self.language_projection = nn.Linear(
+             config.qformer_config.hidden_size, config.text_config.hidden_size
+         )
+
+     def forward(
+         self,
+         pixel_values: torch.FloatTensor,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ):
+         return_dict = (
+             return_dict if return_dict is not None else self.config.use_return_dict
+         )
+
+         # step 1: forward the images through the vision encoder,
+         # to get image embeddings of shape (batch_size, seq_len, hidden_size)
+         vision_outputs = self.vision_model(
+             pixel_values=pixel_values,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+         image_embeds = vision_outputs[0]
+
+         # step 2: forward the query tokens through the QFormer, using the image embeddings for cross-attention
+         image_attention_mask = torch.ones(
+             image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device
+         )
+
+         query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
+         query_outputs = self.qformer(
+             query_embeds=query_tokens,
+             encoder_hidden_states=image_embeds,
+             encoder_attention_mask=image_attention_mask,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+         query_output = query_outputs[0]
+
+         # step 3: project the query outputs into the language model's embedding space
+         language_model_inputs = self.language_projection(query_output)
+
+         return vision_outputs, query_outputs, language_model_inputs
+
+
+ class Blip2ChatGLM(PreTrainedModel):
+     config_class = Blip2ChatGLMConfig
+
+     def __init__(
+         self,
+         config: Blip2ChatGLMConfig,
+         blip2: Blip2ForChatGLM,
+         lm: ChatGLMForConditionalGeneration,
+     ) -> None:
+         super().__init__(config)
+         self.blip2 = blip2
+         self.language = lm
+
+     @torch.no_grad()
+     def stream_chat(
+         self,
+         tokenizer,
+         query: Union[str, Tuple[str, torch.Tensor]],
+         history: List[Tuple[Union[str, Tuple[str, torch.Tensor]], str]] = [],
+         num_beams=5,
+         max_length=128,
+         top_p=0.9,
+         do_sample=True,
+         temperature=1,
+     ):
+         device = self.blip2.device
+         # 1. Prepare token ids
+         images = []
+         image_slots = []
+
+         nvtokens = self.blip2.query_tokens.size(1)
+         if history:
+             input_ids = tokenizer(
+                 f"[Round {len(history)}]\n问:", add_special_tokens=False
+             ).input_ids
+             slot_offset = len(input_ids)
+             if isinstance(query, tuple):
+                 qtext, qimg = query
+                 # image slot, embedding will be replaced by image embeddings
+                 input_ids.extend([tokenizer.unk_token_id] * nvtokens)
+             else:
+                 qtext = query
+                 qimg = None
+             input_ids += tokenizer(qtext + "\n答:").input_ids
+             if qimg is not None:
+                 images.append(qimg)
+                 image_slots.append(len(input_ids) - slot_offset)  # count from backward
+
+             for ri, (q, r) in enumerate(reversed(history)):
+                 if len(input_ids) >= max_length:
+                     break
+                 i = len(history) - ri - 1
+                 cur_input_ids: List[int] = tokenizer(
+                     f"[Round {i}]\n问:", add_special_tokens=False
+                 ).input_ids
+                 slot_offset = len(cur_input_ids)
+                 if isinstance(q, tuple):
+                     qtext, qimg = q
+                     # image slot, embedding will be replaced by image embeddings
+                     cur_input_ids.extend([tokenizer.unk_token_id] * nvtokens)
+                 else:
+                     qtext = q
+                     qimg = None
+                 cur_input_ids += tokenizer(
+                     qtext + f"\n答:{r}\n", add_special_tokens=False
+                 ).input_ids
+                 input_ids = cur_input_ids + input_ids
+                 if qimg is not None:
+                     images.append(qimg)
+                     image_slots.append(len(input_ids) - slot_offset)  # count from backward
+         else:
+             input_ids = []
+             if isinstance(query, tuple):
+                 qtext, qimg = query
+                 # image slot, embedding will be replaced by image embeddings
+                 input_ids.extend([tokenizer.unk_token_id] * nvtokens)
+             else:
+                 qtext = query
+                 qimg = None
+             input_ids += tokenizer(qtext).input_ids
+             if qimg is not None:
+                 images.append(qimg)
+                 image_slots.append(len(input_ids))  # count from backward
+
+         if len(input_ids) >= max_length:
+             # truncate, but never leave a partially truncated image slot
+             if image_slots and image_slots[-1] > max_length and image_slots[-1] - nvtokens < max_length:
+                 input_ids = input_ids[-(image_slots[-1] - nvtokens):]
+             else:
+                 input_ids = input_ids[-max_length:]
+                 if image_slots and image_slots[-1] > max_length:
+                     image_slots.pop()
+                     images.pop()
+
+         # 2. Prepare image embeddings
+         if len(images) != 0:
+             image = torch.cat(list(images), dim=0)
+             vision_outputs = self.blip2.vision_model(image)
+             image_embeds = vision_outputs[0]
+             image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(device)
+
+             query_tokens = self.blip2.query_tokens.expand(image_embeds.shape[0], -1, -1)
+             query_outputs = self.blip2.qformer(
+                 query_embeds=query_tokens,
+                 encoder_hidden_states=image_embeds,
+                 encoder_attention_mask=image_atts,
+             )
+             query_output = query_outputs[0]
+
+             vtokens = self.blip2.language_projection(query_output)
+         else:
+             vtokens = []
+
+         # 3. Place image embeddings into slots (slots are counted from the end of the sequence)
+         input_ids = torch.as_tensor(input_ids, dtype=torch.long).to(device).unsqueeze(0)
+         inputs_embeds = self.language.transformer.word_embeddings(input_ids)
+         for slot, vimg in zip(image_slots, vtokens):
+             inputs_embeds[0][-slot : -slot + nvtokens, :] = vimg
+
+         logits_processor = LogitsProcessorList()
+         logits_processor.append(InvalidScoreLogitsProcessor())
+         gen_kwargs = {
+             "max_length": max_length,
+             "num_beams": num_beams,
+             "do_sample": do_sample,
+             "top_p": top_p,
+             "temperature": temperature,
+             "logits_processor": logits_processor,
+         }
+
+         for outputs in self.mm_stream_generate(
+             input_ids=input_ids, inputs_embeds=inputs_embeds, **gen_kwargs
+         ):
+             outputs = outputs.tolist()[0][len(input_ids[0]):]
+             response = tokenizer.decode(outputs)
+             response = self.language.process_response(response)
+             new_history = history + [(query, response)]
+             yield response, new_history
+
+     @torch.no_grad()
+     def mm_stream_generate(
+         self,
+         input_ids,
+         inputs_embeds,
+         generation_config: Optional[GenerationConfig] = None,
+         logits_processor: Optional[LogitsProcessorList] = None,
+         stopping_criteria: Optional[StoppingCriteriaList] = None,
+         prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
+         **kwargs,
+     ):
+         batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1]
+
+         if generation_config is None:
+             generation_config = self.language.generation_config
+         generation_config = copy.deepcopy(generation_config)
+         model_kwargs = generation_config.update(**kwargs)
+         bos_token_id, eos_token_id = (
+             generation_config.bos_token_id,
+             generation_config.eos_token_id,
+         )
+
+         if isinstance(eos_token_id, int):
+             eos_token_id = [eos_token_id]
+
+         has_default_max_length = (
+             kwargs.get("max_length") is None
+             and generation_config.max_length is not None
+         )
+         if has_default_max_length and generation_config.max_new_tokens is None:
+             warnings.warn(
+                 f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. "
+                 "This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we"
+                 " recommend using `max_new_tokens` to control the maximum length of the generation.",
+                 UserWarning,
+             )
+         elif generation_config.max_new_tokens is not None:
+             generation_config.max_length = (
+                 generation_config.max_new_tokens + input_ids_seq_length
+             )
+             if not has_default_max_length:
+                 logger.warning(
+                     f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(="
+                     f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
+                     "Please refer to the documentation for more information. "
+                     "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)"
+                 )
+
+         if input_ids_seq_length >= generation_config.max_length:
+             input_ids_string = (
+                 "decoder_input_ids"
+                 if self.language.config.is_encoder_decoder
+                 else "input_ids"
+             )
+             logger.warning(
+                 f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to"
+                 f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
+                 " increasing `max_new_tokens`."
+             )
+
+         # 2. Set generation parameters if not already defined
+         logits_processor = (
+             logits_processor if logits_processor is not None else LogitsProcessorList()
+         )
+         stopping_criteria = (
+             stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
+         )
+
+         logits_processor = self.language._get_logits_processor(
+             generation_config=generation_config,
+             input_ids_seq_length=input_ids_seq_length,
+             encoder_input_ids=input_ids,
+             prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
+             logits_processor=logits_processor,
+         )
+
+         stopping_criteria = self.language._get_stopping_criteria(
+             generation_config=generation_config, stopping_criteria=stopping_criteria
+         )
+         logits_warper = self.language._get_logits_warper(generation_config)
+
+         unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)
+         scores = None
+         while True:
+             model_inputs = self.language.prepare_inputs_for_generation(
+                 input_ids, inputs_embeds=inputs_embeds, **model_kwargs
+             )
+             # forward pass to get next token
+             outputs = self.language(
+                 **model_inputs,
+                 return_dict=True,
+                 output_attentions=False,
+                 output_hidden_states=False,
+             )
+
+             next_token_logits = outputs.logits[:, -1, :]
+
+             # pre-process distribution
+             next_token_scores = logits_processor(input_ids, next_token_logits)
+             next_token_scores = logits_warper(input_ids, next_token_scores)
+
+             # sample
+             probs = nn.functional.softmax(next_token_scores, dim=-1)
+             if generation_config.do_sample:
+                 next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
+             else:
+                 next_tokens = torch.argmax(probs, dim=-1)
+
+             # update generated ids, model inputs, and length for next step
+             input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
+             inputs_embeds = torch.cat(
+                 [
+                     inputs_embeds,
+                     self.language.get_input_embeddings()(next_tokens)[:, None, :],
+                 ],
+                 dim=1,
+             )
+             model_kwargs = self.language._update_model_kwargs_for_generation(
+                 outputs,
+                 model_kwargs,
+                 is_encoder_decoder=self.language.config.is_encoder_decoder,
+             )
+             unfinished_sequences = unfinished_sequences.mul(
+                 (sum(next_tokens != i for i in eos_token_id)).long()
+             )
+
+             # stop when each sentence is finished, or if we exceed the maximum length
+             if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):
+                 break
+             yield input_ids
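
A minimal usage sketch for `stream_chat`, assuming `model` (a loaded `Blip2ChatGLM`), `tokenizer` (the ChatGLM tokenizer), and a preprocessed image tensor from a BLIP-style image processor are already available; the random tensor below is only a stand-in:

```python
import torch

# Stand-in for a real preprocessed image of shape (1, 3, 224, 224).
pixel_values = torch.randn(1, 3, 224, 224, dtype=torch.half, device=model.device)

history = []
# Text-only turn: the query is a plain string.
for response, history in model.stream_chat(tokenizer, "Hello!", history):
    pass  # each iteration yields a progressively longer partial response
print(response)

# Multimodal turn: the query is a (text, image) tuple. stream_chat reserves
# num_query_tokens unk-token slots whose embeddings are replaced by the
# projected Q-Former outputs.
for response, history in model.stream_chat(
    tokenizer, ("Describe the image.", pixel_values), history
):
    pass
print(response)
```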
modeling_chatglm.py ADDED
@@ -0,0 +1,1417 @@
1
+ """ PyTorch ChatGLM model. """
2
+
3
+ import math
4
+ import copy
5
+ import os
6
+ import warnings
7
+ import re
8
+ import sys
9
+
10
+ import torch
11
+ import torch.utils.checkpoint
12
+ import torch.nn.functional as F
13
+ from torch import nn
14
+ from torch.nn import CrossEntropyLoss, LayerNorm
15
+ from torch.nn.utils import skip_init
16
+ from typing import Optional, Tuple, Union, List, Callable, Dict, Any
17
+
18
+ from transformers.utils import (
19
+ add_code_sample_docstrings,
20
+ add_start_docstrings,
21
+ add_start_docstrings_to_model_forward,
22
+ )
23
+ from transformers.modeling_outputs import (
24
+ BaseModelOutputWithPast,
25
+ CausalLMOutputWithPast,
26
+ BaseModelOutputWithPastAndCrossAttentions,
27
+ )
28
+ from transformers.modeling_utils import PreTrainedModel
29
+ from transformers.utils import logging
30
+ from transformers.generation.logits_process import LogitsProcessor
31
+ from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationConfig, ModelOutput
32
+
33
+ from .configuration_chatglm import ChatGLMConfig
34
+
35
+ # flags required to enable jit fusion kernels
36
+
37
+ if sys.platform != 'darwin':
38
+ torch._C._jit_set_profiling_mode(False)
39
+ torch._C._jit_set_profiling_executor(False)
40
+ torch._C._jit_override_can_fuse_on_cpu(True)
41
+ torch._C._jit_override_can_fuse_on_gpu(True)
42
+
43
+ logger = logging.get_logger(__name__)
44
+
45
+ _CHECKPOINT_FOR_DOC = "THUDM/ChatGLM-6B"
46
+ _CONFIG_FOR_DOC = "ChatGLM6BConfig"
47
+
48
+ CHATGLM_6B_PRETRAINED_MODEL_ARCHIVE_LIST = [
49
+ "THUDM/chatglm-6b",
50
+ # See all ChatGLM-6B models at https://huggingface.co/models?filter=chatglm
51
+ ]
52
+
53
+
54
+ class InvalidScoreLogitsProcessor(LogitsProcessor):
55
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
56
+ if torch.isnan(scores).any() or torch.isinf(scores).any():
57
+ scores.zero_()
58
+ scores[..., 20005] = 5e4
59
+ return scores
60
+
61
+
62
+ def load_tf_weights_in_chatglm_6b(model, config, tf_checkpoint_path):
63
+ """Load tf checkpoints in a pytorch model."""
64
+ try:
65
+ import re
66
+
67
+ import numpy as np
68
+ import tensorflow as tf
69
+ except ImportError:
70
+ logger.error(
71
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
72
+ "https://www.tensorflow.org/install/ for installation instructions."
73
+ )
74
+ raise
75
+ tf_path = os.path.abspath(tf_checkpoint_path)
76
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
77
+ # Load weights from TF model
78
+ init_vars = tf.train.list_variables(tf_path)
79
+ names = []
80
+ arrays = []
81
+ for name, shape in init_vars:
82
+ logger.info(f"Loading TF weight {name} with shape {shape}")
83
+ array = tf.train.load_variable(tf_path, name)
84
+ names.append(name)
85
+ arrays.append(array)
86
+
87
+ for name, array in zip(names, arrays):
88
+ name = name.split("/")
89
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
90
+ # which are not required for using pretrained model
91
+ if any(
92
+ n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
93
+ for n in name
94
+ ):
95
+ logger.info(f"Skipping {'/'.join(name)}")
96
+ continue
97
+ pointer = model
98
+ for m_name in name:
99
+ if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
100
+ scope_names = re.split(r"_(\d+)", m_name)
101
+ else:
102
+ scope_names = [m_name]
103
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
104
+ pointer = getattr(pointer, "weight")
105
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
106
+ pointer = getattr(pointer, "bias")
107
+ elif scope_names[0] == "output_weights":
108
+ pointer = getattr(pointer, "weight")
109
+ elif scope_names[0] == "squad":
110
+ pointer = getattr(pointer, "classifier")
111
+ else:
112
+ try:
113
+ pointer = getattr(pointer, scope_names[0])
114
+ except AttributeError:
115
+ logger.info(f"Skipping {'/'.join(name)}")
116
+ continue
117
+ if len(scope_names) >= 2:
118
+ num = int(scope_names[1])
119
+ pointer = pointer[num]
120
+ if m_name[-11:] == "_embeddings":
121
+ pointer = getattr(pointer, "weight")
122
+ elif m_name == "kernel":
123
+ array = np.transpose(array)
124
+ try:
125
+ assert (
126
+ pointer.shape == array.shape
127
+ ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
128
+ except AssertionError as e:
129
+ e.args += (pointer.shape, array.shape)
130
+ raise
131
+ logger.info(f"Initialize PyTorch weight {name}")
132
+ pointer.data = torch.from_numpy(array)
133
+ return model
134
+
135
+
136
+ class PrefixEncoder(torch.nn.Module):
137
+ """
138
+ The torch.nn model to encode the prefix
139
+ Input shape: (batch-size, prefix-length)
140
+ Output shape: (batch-size, prefix-length, 2*layers*hidden)
141
+ """
142
+
143
+ def __init__(self, config):
144
+ super().__init__()
145
+ self.prefix_projection = config.prefix_projection
146
+ if self.prefix_projection:
147
+ # Use a two-layer MLP to encode the prefix
148
+ self.embedding = torch.nn.Embedding(config.pre_seq_len, config.hidden_size)
149
+ self.trans = torch.nn.Sequential(
150
+ torch.nn.Linear(config.hidden_size, config.hidden_size),
151
+ torch.nn.Tanh(),
152
+ torch.nn.Linear(config.hidden_size, config.num_layers * config.hidden_size * 2)
153
+ )
154
+ else:
155
+ self.embedding = torch.nn.Embedding(config.pre_seq_len, config.num_layers * config.hidden_size * 2)
156
+
157
+ def forward(self, prefix: torch.Tensor):
158
+ if self.prefix_projection:
159
+ prefix_tokens = self.embedding(prefix)
160
+ past_key_values = self.trans(prefix_tokens)
161
+ else:
162
+ past_key_values = self.embedding(prefix)
163
+ return past_key_values
164
+
165
+
166
+ @torch.jit.script
167
+ def gelu_impl(x):
168
+ """OpenAI's gelu implementation."""
169
+ return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x *
170
+ (1.0 + 0.044715 * x * x)))
171
+
172
+
173
+ def gelu(x):
174
+ return gelu_impl(x)
175
+
176
+
177
+ class RotaryEmbedding(torch.nn.Module):
178
+ def __init__(self, dim, base=10000, precision=torch.half, learnable=False):
179
+ super().__init__()
180
+ inv_freq = 1. / (base ** (torch.arange(0, dim, 2).float() / dim))
181
+ inv_freq = inv_freq.half()
182
+ self.learnable = learnable
183
+ if learnable:
184
+ self.inv_freq = torch.nn.Parameter(inv_freq)
185
+ self.max_seq_len_cached = None
186
+ else:
187
+ self.register_buffer('inv_freq', inv_freq)
188
+ self.max_seq_len_cached = None
189
+ self.cos_cached = None
190
+ self.sin_cached = None
191
+ self.precision = precision
192
+
193
+ def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys,
194
+ error_msgs):
195
+ pass
196
+
197
+ def forward(self, x, seq_dim=1, seq_len=None):
198
+ if seq_len is None:
199
+ seq_len = x.shape[seq_dim]
200
+ if self.max_seq_len_cached is None or (seq_len > self.max_seq_len_cached):
201
+ self.max_seq_len_cached = None if self.learnable else seq_len
202
+ t = torch.arange(seq_len, device=x.device, dtype=self.inv_freq.dtype)
203
+ freqs = torch.einsum('i,j->ij', t, self.inv_freq)
204
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
205
+ emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
206
+ if self.precision == torch.bfloat16:
207
+ emb = emb.float()
208
+
209
+ # [sx, 1 (b * np), hn]
210
+ cos_cached = emb.cos()[:, None, :]
211
+ sin_cached = emb.sin()[:, None, :]
212
+ if self.precision == torch.bfloat16:
213
+ cos_cached = cos_cached.bfloat16()
214
+ sin_cached = sin_cached.bfloat16()
215
+ if self.learnable:
216
+ return cos_cached, sin_cached
217
+ self.cos_cached, self.sin_cached = cos_cached, sin_cached
218
+ return self.cos_cached[:seq_len, ...], self.sin_cached[:seq_len, ...]
219
+
220
+ def _apply(self, fn):
221
+ if self.cos_cached is not None:
222
+ self.cos_cached = fn(self.cos_cached)
223
+ if self.sin_cached is not None:
224
+ self.sin_cached = fn(self.sin_cached)
225
+ return super()._apply(fn)
226
+
227
+
228
+ def rotate_half(x):
229
+ x1, x2 = x[..., :x.shape[-1] // 2], x[..., x.shape[-1] // 2:]
230
+ return torch.cat((-x2, x1), dim=x1.ndim - 1) # dim=-1 triggers a bug in earlier torch versions
231
+
232
+
233
+ @torch.jit.script
234
+ def apply_rotary_pos_emb_index(q, k, cos, sin, position_id):
235
+ # position_id: [sq, b], q, k: [sq, b, np, hn], cos: [sq, 1, hn] -> [sq, b, 1, hn]
236
+ cos, sin = F.embedding(position_id, cos.squeeze(1)).unsqueeze(2), \
237
+ F.embedding(position_id, sin.squeeze(1)).unsqueeze(2)
238
+ q, k = (q * cos) + (rotate_half(q) * sin), (k * cos) + (rotate_half(k) * sin)
239
+ return q, k
240
+
241
+
242
+ def attention_fn(
243
+ self,
244
+ query_layer,
245
+ key_layer,
246
+ value_layer,
247
+ attention_mask,
248
+ hidden_size_per_partition,
249
+ layer_id,
250
+ layer_past=None,
251
+ scaling_attention_score=True,
252
+ use_cache=False,
253
+ ):
254
+ if layer_past is not None:
255
+ past_key, past_value = layer_past[0], layer_past[1]
256
+ key_layer = torch.cat((past_key, key_layer), dim=0)
257
+ value_layer = torch.cat((past_value, value_layer), dim=0)
258
+
259
+ # seqlen, batch, num_attention_heads, hidden_size_per_attention_head
260
+ seq_len, b, nh, hidden_size = key_layer.shape
261
+
262
+ if use_cache:
263
+ present = (key_layer, value_layer)
264
+ else:
265
+ present = None
266
+
267
+ query_key_layer_scaling_coeff = float(layer_id + 1)
268
+ if scaling_attention_score:
269
+ query_layer = query_layer / (math.sqrt(hidden_size) * query_key_layer_scaling_coeff)
270
+
271
+ # ===================================
272
+ # Raw attention scores. [b, np, s, s]
273
+ # ===================================
274
+
275
+ # [b, np, sq, sk]
276
+ output_size = (query_layer.size(1), query_layer.size(2), query_layer.size(0), key_layer.size(0))
277
+
278
+ # [sq, b, np, hn] -> [sq, b * np, hn]
279
+ query_layer = query_layer.view(output_size[2], output_size[0] * output_size[1], -1)
280
+ # [sk, b, np, hn] -> [sk, b * np, hn]
281
+ key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1)
282
+
283
+ matmul_result = torch.empty(
284
+ output_size[0] * output_size[1],
285
+ output_size[2],
286
+ output_size[3],
287
+ dtype=query_layer.dtype,
288
+ device=query_layer.device,
289
+ )
290
+
291
+ matmul_result = torch.baddbmm(
292
+ matmul_result,
293
+ query_layer.transpose(0, 1), # [b * np, sq, hn]
294
+ key_layer.transpose(0, 1).transpose(1, 2), # [b * np, hn, sk]
295
+ beta=0.0,
296
+ alpha=1.0,
297
+ )
298
+
299
+ # change view to [b, np, sq, sk]
300
+ attention_scores = matmul_result.view(*output_size)
301
+
302
+ if self.scale_mask_softmax:
303
+ self.scale_mask_softmax.scale = query_key_layer_scaling_coeff
304
+ attention_probs = self.scale_mask_softmax(attention_scores, attention_mask.contiguous())
305
+ else:
306
+ if not (attention_mask == 0).all():
307
+ # if auto-regressive, skip
308
+ attention_scores.masked_fill_(attention_mask, -10000.0)
309
+ dtype = attention_scores.dtype
310
+ attention_scores = attention_scores.float()
311
+ attention_scores = attention_scores * query_key_layer_scaling_coeff
312
+
313
+ attention_probs = F.softmax(attention_scores, dim=-1)
314
+
315
+ attention_probs = attention_probs.type(dtype)
316
+
317
+ # =========================
318
+ # Context layer. [sq, b, hp]
319
+ # =========================
320
+
321
+ # value_layer -> context layer.
322
+ # [sk, b, np, hn] --> [b, np, sq, hn]
323
+
324
+ # context layer shape: [b, np, sq, hn]
325
+ output_size = (value_layer.size(1), value_layer.size(2), query_layer.size(0), value_layer.size(3))
326
+
327
+ # change view [sk, b * np, hn]
328
+ value_layer = value_layer.view(value_layer.size(0), output_size[0] * output_size[1], -1)
329
+
330
+ # change view [b * np, sq, sk]
331
+ attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1)
332
+
333
+ # matmul: [b * np, sq, hn]
334
+ context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1))
335
+
336
+ # change view [b, np, sq, hn]
337
+ context_layer = context_layer.view(*output_size)
338
+
339
+ # [b, np, sq, hn] --> [sq, b, np, hn]
340
+ context_layer = context_layer.permute(2, 0, 1, 3).contiguous()
341
+
342
+ # [sq, b, np, hn] --> [sq, b, hp]
343
+ new_context_layer_shape = context_layer.size()[:-2] + (hidden_size_per_partition,)
344
+ context_layer = context_layer.view(*new_context_layer_shape)
345
+
346
+ outputs = (context_layer, present, attention_probs)
347
+
348
+ return outputs
349
+
350
+
351
+ class SelfAttention(torch.nn.Module):
352
+ def __init__(self, hidden_size, num_attention_heads,
353
+ layer_id, hidden_size_per_attention_head=None, bias=True,
354
+ params_dtype=torch.float, position_encoding_2d=True):
355
+ super(SelfAttention, self).__init__()
356
+
357
+ self.layer_id = layer_id
358
+ self.hidden_size = hidden_size
359
+ self.hidden_size_per_partition = hidden_size
360
+ self.num_attention_heads = num_attention_heads
361
+ self.num_attention_heads_per_partition = num_attention_heads
362
+ self.position_encoding_2d = position_encoding_2d
363
+ self.rotary_emb = RotaryEmbedding(
364
+ self.hidden_size // (self.num_attention_heads * 2)
365
+ if position_encoding_2d
366
+ else self.hidden_size // self.num_attention_heads,
367
+ base=10000,
368
+ precision=torch.half,
369
+ learnable=False,
370
+ )
371
+
372
+ self.scale_mask_softmax = None
373
+
374
+ if hidden_size_per_attention_head is None:
375
+ self.hidden_size_per_attention_head = hidden_size // num_attention_heads
376
+ else:
377
+ self.hidden_size_per_attention_head = hidden_size_per_attention_head
378
+
379
+ self.inner_hidden_size = num_attention_heads * self.hidden_size_per_attention_head
380
+
381
+ # Strided linear layer.
382
+ self.query_key_value = skip_init(
383
+ torch.nn.Linear,
384
+ hidden_size,
385
+ 3 * self.inner_hidden_size,
386
+ bias=bias,
387
+ dtype=params_dtype,
388
+ )
389
+
390
+ self.dense = skip_init(
391
+ torch.nn.Linear,
392
+ self.inner_hidden_size,
393
+ hidden_size,
394
+ bias=bias,
395
+ dtype=params_dtype,
396
+ )
397
+
398
+ @staticmethod
399
+ def attention_mask_func(attention_scores, attention_mask):
400
+ attention_scores.masked_fill_(attention_mask, -10000.0)
401
+ return attention_scores
402
+
403
+ def split_tensor_along_last_dim(self, tensor, num_partitions,
404
+ contiguous_split_chunks=False):
405
+ """Split a tensor along its last dimension.
406
+ Arguments:
407
+ tensor: input tensor.
408
+ num_partitions: number of partitions to split the tensor
409
+ contiguous_split_chunks: If True, make each chunk contiguous
410
+ in memory.
411
+ """
412
+ # Get the size and dimension.
413
+ last_dim = tensor.dim() - 1
414
+ last_dim_size = tensor.size()[last_dim] // num_partitions
415
+ # Split.
416
+ tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
417
+ # Note: torch.split does not create contiguous tensors by default.
418
+ if contiguous_split_chunks:
419
+ return tuple(chunk.contiguous() for chunk in tensor_list)
420
+
421
+ return tensor_list
422
+
423
+ def forward(
424
+ self,
425
+ hidden_states: torch.Tensor,
426
+ position_ids,
427
+ attention_mask: torch.Tensor,
428
+ layer_id,
429
+ layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
430
+ use_cache: bool = False,
431
+ output_attentions: bool = False,
432
+ ):
433
+ """
434
+ hidden_states: [seq_len, batch, hidden_size]
435
+ attention_mask: [(1, 1), seq_len, seq_len]
436
+ """
437
+
438
+ # [seq_len, batch, 3 * hidden_size]
439
+ mixed_raw_layer = self.query_key_value(hidden_states)
440
+
441
+ # [seq_len, batch, 3 * hidden_size] --> [seq_len, batch, num_attention_heads, 3 * hidden_size_per_attention_head]
442
+ new_tensor_shape = mixed_raw_layer.size()[:-1] + (
443
+ self.num_attention_heads_per_partition,
444
+ 3 * self.hidden_size_per_attention_head,
445
+ )
446
+ mixed_raw_layer = mixed_raw_layer.view(*new_tensor_shape)
447
+
448
+ # [seq_len, batch, num_attention_heads, hidden_size_per_attention_head]
449
+ (query_layer, key_layer, value_layer) = self.split_tensor_along_last_dim(mixed_raw_layer, 3)
450
+
451
+ if self.position_encoding_2d:
452
+ q1, q2 = query_layer.chunk(2, dim=(query_layer.ndim - 1))
453
+ k1, k2 = key_layer.chunk(2, dim=(key_layer.ndim - 1))
454
+ cos, sin = self.rotary_emb(q1, seq_len=position_ids.max() + 1)
455
+ position_ids, block_position_ids = position_ids[:, 0, :].transpose(0, 1).contiguous(), \
456
+ position_ids[:, 1, :].transpose(0, 1).contiguous()
457
+ q1, k1 = apply_rotary_pos_emb_index(q1, k1, cos, sin, position_ids)
458
+ q2, k2 = apply_rotary_pos_emb_index(q2, k2, cos, sin, block_position_ids)
459
+ query_layer = torch.concat([q1, q2], dim=(q1.ndim - 1))
460
+ key_layer = torch.concat([k1, k2], dim=(k1.ndim - 1))
461
+ else:
462
+ position_ids = position_ids.transpose(0, 1)
463
+ cos, sin = self.rotary_emb(value_layer, seq_len=position_ids.max() + 1)
464
+ # [seq_len, batch, num_attention_heads, hidden_size_per_attention_head]
465
+ query_layer, key_layer = apply_rotary_pos_emb_index(query_layer, key_layer, cos, sin, position_ids)
466
+
467
+ # [seq_len, batch, hidden_size]
468
+ context_layer, present, attention_probs = attention_fn(
469
+ self=self,
470
+ query_layer=query_layer,
471
+ key_layer=key_layer,
472
+ value_layer=value_layer,
473
+ attention_mask=attention_mask,
474
+ hidden_size_per_partition=self.hidden_size_per_partition,
475
+ layer_id=layer_id,
476
+ layer_past=layer_past,
477
+ use_cache=use_cache
478
+ )
479
+
480
+ output = self.dense(context_layer)
481
+
482
+ outputs = (output, present)
483
+
484
+ if output_attentions:
485
+ outputs += (attention_probs,)
486
+
487
+ return outputs # output, present, attention_probs
488
+
489
+
490
+ class GEGLU(torch.nn.Module):
491
+ def __init__(self):
492
+ super().__init__()
493
+ self.activation_fn = F.gelu
494
+
495
+ def forward(self, x):
496
+ # dim=-1 breaks in jit for pt<1.10
497
+ x1, x2 = x.chunk(2, dim=(x.ndim - 1))
498
+ return x1 * self.activation_fn(x2)
499
+
500
+
501
+ class GLU(torch.nn.Module):
502
+ def __init__(self, hidden_size, inner_hidden_size=None,
503
+ layer_id=None, bias=True, activation_func=gelu, params_dtype=torch.float):
504
+ super(GLU, self).__init__()
505
+ self.layer_id = layer_id
506
+ self.activation_func = activation_func
507
+
508
+ # Project to 4h.
509
+ self.hidden_size = hidden_size
510
+ if inner_hidden_size is None:
511
+ inner_hidden_size = 4 * hidden_size
512
+ self.inner_hidden_size = inner_hidden_size
513
+ self.dense_h_to_4h = skip_init(
514
+ torch.nn.Linear,
515
+ self.hidden_size,
516
+ self.inner_hidden_size,
517
+ bias=bias,
518
+ dtype=params_dtype,
519
+ )
520
+ # Project back to h.
521
+ self.dense_4h_to_h = skip_init(
522
+ torch.nn.Linear,
523
+ self.inner_hidden_size,
524
+ self.hidden_size,
525
+ bias=bias,
526
+ dtype=params_dtype,
527
+ )
528
+
529
+ def forward(self, hidden_states):
530
+ """
531
+ hidden_states: [seq_len, batch, hidden_size]
532
+ """
533
+
534
+ # [seq_len, batch, inner_hidden_size]
535
+ intermediate_parallel = self.dense_h_to_4h(hidden_states)
536
+
537
+ intermediate_parallel = self.activation_func(intermediate_parallel)
538
+
539
+ output = self.dense_4h_to_h(intermediate_parallel)
540
+
541
+ return output
542
+
543
+
544
+ class GLMBlock(torch.nn.Module):
545
+ def __init__(
546
+ self,
547
+ hidden_size,
548
+ num_attention_heads,
549
+ layernorm_epsilon,
550
+ layer_id,
551
+ inner_hidden_size=None,
552
+ hidden_size_per_attention_head=None,
553
+ layernorm=LayerNorm,
554
+ use_bias=True,
555
+ params_dtype=torch.float,
556
+ num_layers=28,
557
+ position_encoding_2d=True
558
+ ):
559
+ super(GLMBlock, self).__init__()
560
+ # Set output layer initialization if not provided.
561
+
562
+ self.layer_id = layer_id
563
+
564
+ # Layernorm on the input data.
565
+ self.input_layernorm = layernorm(hidden_size, eps=layernorm_epsilon)
566
+
567
+ self.position_encoding_2d = position_encoding_2d
568
+
569
+ # Self attention.
570
+ self.attention = SelfAttention(
571
+ hidden_size,
572
+ num_attention_heads,
573
+ layer_id,
574
+ hidden_size_per_attention_head=hidden_size_per_attention_head,
575
+ bias=use_bias,
576
+ params_dtype=params_dtype,
577
+ position_encoding_2d=self.position_encoding_2d
578
+ )
579
+
580
+ # Layernorm on the input data.
581
+ self.post_attention_layernorm = layernorm(hidden_size, eps=layernorm_epsilon)
582
+
583
+ self.num_layers = num_layers
584
+
585
+ # GLU
586
+ self.mlp = GLU(
587
+ hidden_size,
588
+ inner_hidden_size=inner_hidden_size,
589
+ bias=use_bias,
590
+ layer_id=layer_id,
591
+ params_dtype=params_dtype,
592
+ )
593
+
594
+ def forward(
595
+ self,
596
+ hidden_states: torch.Tensor,
597
+ position_ids,
598
+ attention_mask: torch.Tensor,
599
+ layer_id,
600
+ layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
601
+ use_cache: bool = False,
602
+ output_attentions: bool = False,
603
+ ):
604
+ """
605
+ hidden_states: [seq_len, batch, hidden_size]
606
+ attention_mask: [(1, 1), seq_len, seq_len]
607
+ """
608
+
609
+ # Layer norm at the begining of the transformer layer.
610
+ # [seq_len, batch, hidden_size]
611
+ attention_input = self.input_layernorm(hidden_states)
612
+
613
+ # Self attention.
614
+ attention_outputs = self.attention(
615
+ attention_input,
616
+ position_ids,
617
+ attention_mask=attention_mask,
618
+ layer_id=layer_id,
619
+ layer_past=layer_past,
620
+ use_cache=use_cache,
621
+ output_attentions=output_attentions
622
+ )
623
+
624
+ attention_output = attention_outputs[0]
625
+
626
+ outputs = attention_outputs[1:]
627
+
628
+ # Residual connection.
629
+ alpha = (2 * self.num_layers) ** 0.5
630
+ hidden_states = attention_input * alpha + attention_output
631
+
632
+ mlp_input = self.post_attention_layernorm(hidden_states)
633
+
634
+ # MLP.
635
+ mlp_output = self.mlp(mlp_input)
636
+
637
+ # Second residual connection.
638
+ output = mlp_input * alpha + mlp_output
639
+
640
+ if use_cache:
641
+ outputs = (output,) + outputs
642
+ else:
643
+ outputs = (output,) + outputs[1:]
644
+
645
+ return outputs # hidden_states, present, attentions
646
+
647
+
648
+ class ChatGLMPreTrainedModel(PreTrainedModel):
649
+ """
650
+ An abstract class to handle weights initialization and
651
+ a simple interface for downloading and loading pretrained models.
652
+ """
653
+
654
+ is_parallelizable = False
655
+ supports_gradient_checkpointing = True
656
+ config_class = ChatGLMConfig
657
+ base_model_prefix = "transformer"
658
+ _no_split_modules = ["GLMBlock"]
659
+
660
+ def __init__(self, *inputs, **kwargs):
661
+ super().__init__(*inputs, **kwargs)
662
+
663
+ def _init_weights(self, module: nn.Module):
664
+ """Initialize the weights."""
665
+ return
666
+
667
+ def get_masks(self, input_ids, device):
668
+ batch_size, seq_length = input_ids.shape
669
+ context_lengths = [seq.tolist().index(self.config.bos_token_id) for seq in input_ids]
670
+ attention_mask = torch.ones((batch_size, seq_length, seq_length), device=device)
671
+ attention_mask.tril_()
672
+ for i, context_length in enumerate(context_lengths):
673
+ attention_mask[i, :, :context_length] = 1
674
+ attention_mask.unsqueeze_(1)
675
+ attention_mask = (attention_mask < 0.5).bool()
676
+
677
+ return attention_mask
678
+
679
+ def get_position_ids(self, input_ids, mask_positions, device, gmask=False):
680
+ batch_size, seq_length = input_ids.shape
681
+ context_lengths = [seq.tolist().index(self.config.bos_token_id) for seq in input_ids]
682
+ if self.position_encoding_2d:
683
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1)
684
+ for i, context_length in enumerate(context_lengths):
685
+ position_ids[i, context_length:] = mask_positions[i]
686
+ block_position_ids = [torch.cat((
687
+ torch.zeros(context_length, dtype=torch.long, device=device),
688
+ torch.arange(seq_length - context_length, dtype=torch.long, device=device) + 1
689
+ )) for context_length in context_lengths]
690
+ block_position_ids = torch.stack(block_position_ids, dim=0)
691
+ position_ids = torch.stack((position_ids, block_position_ids), dim=1)
692
+ else:
693
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1)
694
+ if not gmask:
695
+ for i, context_length in enumerate(context_lengths):
696
+ position_ids[i, context_length:] = mask_positions[i]
697
+
698
+ return position_ids
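With position_encoding_2d each token carries a pair of ids: a global id that stops advancing at the mask position once the prompt context ends, and a block id that counts up from 1 inside the generated span. Illustrative values for one sequence of length 7 with context_length = 4 and the mask token at index 3:

    # position_ids[i, 0] (global): [0, 1, 2, 3, 3, 3, 3]
    # position_ids[i, 1] (block) : [0, 0, 0, 0, 1, 2, 3]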
699
+
700
+ def _set_gradient_checkpointing(self, module, value=False):
701
+ if isinstance(module, ChatGLMModel):
702
+ module.gradient_checkpointing = value
703
+
704
+
705
+ CHATGLM_6B_START_DOCSTRING = r"""
706
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class.
707
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
708
+ usage and behavior.
709
+
710
+ Parameters:
711
+ config ([`~ChatGLM6BConfig`]): Model configuration class with all the parameters of the model.
712
+ Initializing with a config file does not load the weights associated with the model, only the configuration.
713
+ Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
714
+ """
715
+
716
+ CHATGLM_6B_INPUTS_DOCSTRING = r"""
717
+ Args:
718
+ input_ids (`torch.LongTensor` of shape `({0})`):
719
+ Indices of input sequence tokens in the vocabulary.
720
+
721
+ Indices can be obtained using [`ChatGLM6BTokenizer`].
722
+ See [`PreTrainedTokenizer.encode`] and
723
+ [`PreTrainedTokenizer.__call__`] for details.
724
+
725
+ [What are input IDs?](../glossary#input-ids)
726
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
727
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
728
+
729
+ - 1 for tokens that are **not masked**,
730
+ - 0 for tokens that are **masked**.
731
+
732
+ [What are attention masks?](../glossary#attention-mask)
733
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
734
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`:
735
+
736
+ - 0 corresponds to a *sentence A* token,
737
+ - 1 corresponds to a *sentence B* token.
738
+
739
+ [What are token type IDs?](../glossary#token-type-ids)
740
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
741
+ Indices of positions of each input sequence tokens in the position embeddings.
742
+ Selected in the range `[0, config.max_position_embeddings - 1]`.
743
+
744
+ [What are position IDs?](../glossary#position-ids)
745
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
746
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
747
+
748
+ - 1 indicates the head is **not masked**,
749
+ - 0 indicates the head is **masked**.
750
+
751
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
752
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
753
+ This is useful if you want more control over how to convert *input_ids* indices into associated vectors
754
+ than the model's internal embedding lookup matrix.
755
+ output_attentions (`bool`, *optional*):
756
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
757
+ tensors for more detail.
758
+ output_hidden_states (`bool`, *optional*):
759
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
760
+ more detail.
761
+ return_dict (`bool`, *optional*):
762
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
763
+ """
764
+
765
+
766
+ @add_start_docstrings(
767
+ "The bare ChatGLM-6B Model transformer outputting raw hidden-states without any specific head on top.",
768
+ CHATGLM_6B_START_DOCSTRING,
769
+ )
770
+ class ChatGLMModel(ChatGLMPreTrainedModel):
771
+ """
772
+
773
+ The model can behave as an encoder (with only self-attention) as well
774
+ as a decoder, in which case a layer of cross-attention is added between
775
+ the self-attention layers, following the architecture described in [Attention is
776
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani,
777
+ Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
778
+
779
+ To behave as a decoder the model needs to be initialized with the
780
+ `is_decoder` argument of the configuration set to `True`.
781
+ To be used in a Seq2Seq model, the model needs to be initialized with both `is_decoder`
782
+ argument and `add_cross_attention` set to `True`; an
783
+ `encoder_hidden_states` is then expected as an input to the forward pass.
784
+ """
785
+
786
+ def __init__(self, config: ChatGLMConfig):
787
+ super().__init__(config)
788
+
789
+ # recording parameters
790
+ self.max_sequence_length = config.max_sequence_length
791
+ self.hidden_size = config.hidden_size
792
+ self.params_dtype = torch.half
793
+ self.num_attention_heads = config.num_attention_heads
794
+ self.vocab_size = config.vocab_size
795
+ self.num_layers = config.num_layers
796
+ self.layernorm_epsilon = config.layernorm_epsilon
797
+ self.inner_hidden_size = config.inner_hidden_size
798
+ self.hidden_size_per_attention_head = self.hidden_size // self.num_attention_heads
799
+ self.position_encoding_2d = config.position_encoding_2d
800
+ self.pre_seq_len = config.pre_seq_len
801
+ self.prefix_projection = config.prefix_projection
802
+
803
+ self.word_embeddings = skip_init(
804
+ torch.nn.Embedding,
805
+ num_embeddings=self.vocab_size, embedding_dim=self.hidden_size,
806
+ dtype=self.params_dtype
807
+ )
808
+ self.gradient_checkpointing = False
809
+
810
+ def get_layer(layer_id):
811
+ return GLMBlock(
812
+ self.hidden_size,
813
+ self.num_attention_heads,
814
+ self.layernorm_epsilon,
815
+ layer_id,
816
+ inner_hidden_size=self.inner_hidden_size,
817
+ hidden_size_per_attention_head=self.hidden_size_per_attention_head,
818
+ layernorm=LayerNorm,
819
+ use_bias=True,
820
+ params_dtype=self.params_dtype,
821
+ position_encoding_2d=self.position_encoding_2d,
822
+ )
823
+
824
+ self.layers = torch.nn.ModuleList(
825
+ [get_layer(layer_id) for layer_id in range(self.num_layers)]
826
+ )
827
+
828
+ # Final layer norm before output.
829
+ self.final_layernorm = LayerNorm(self.hidden_size, eps=self.layernorm_epsilon)
830
+
831
+ if self.pre_seq_len is not None:
832
+ for param in self.parameters():
833
+ param.requires_grad = False
834
+ self.prefix_tokens = torch.arange(self.pre_seq_len).long()
835
+ self.prefix_encoder = PrefixEncoder(config)
836
+ self.dropout = torch.nn.Dropout(0.1)
837
+
838
+ # total_params = sum(p.numel() for p in self.parameters())
839
+ # trainable_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
840
+ # print("Using p-tuning v2: # trainable_params = {} / {}".format(trainable_params, total_params))
841
+
842
+ def get_input_embeddings(self):
843
+ return self.word_embeddings
844
+
845
+ def set_input_embeddings(self, new_embeddings: torch.Tensor):
846
+ self.word_embeddings = new_embeddings
847
+
848
+ def get_prompt(self, batch_size, device, dtype=torch.half):
849
+ prefix_tokens = self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(device)
850
+ past_key_values = self.prefix_encoder(prefix_tokens).type(dtype)
851
+ past_key_values = past_key_values.view(
852
+ batch_size,
853
+ self.pre_seq_len,
854
+ self.num_layers * 2,
855
+ self.num_attention_heads,
856
+ self.hidden_size // self.num_attention_heads
857
+ )
858
+ # seq_len, b, nh, hidden_size
859
+ past_key_values = self.dropout(past_key_values)
860
+ past_key_values = past_key_values.permute([2, 1, 0, 3, 4]).split(2)
861
+ # past_key_values = [(v[0], v[1]) for v in past_key_values]
862
+ return past_key_values
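A shape walkthrough of the P-tuning v2 prefix cache built above, under assumed illustrative sizes (batch_size = 2, pre_seq_len = 8, num_layers = 28, num_attention_heads = 32, head dim 128):

    # prefix_encoder(...).view(...)  -> (2, 8, 56, 32, 128)   # (batch, pre_seq_len, 2 * layers, heads, head_dim)
    # .permute([2, 1, 0, 3, 4])      -> (56, 8, 2, 32, 128)
    # .split(2)                      -> 28 chunks of shape (2, 8, 2, 32, 128),
    #                                   i.e. one (key, value) pair per layer in
    #                                   [seq_len, batch, heads, head_dim] layout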
863
+
864
+ @add_start_docstrings_to_model_forward(CHATGLM_6B_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
865
+ @add_code_sample_docstrings(
866
+ checkpoint=_CHECKPOINT_FOR_DOC,
867
+ output_type=BaseModelOutputWithPastAndCrossAttentions,
868
+ config_class=_CONFIG_FOR_DOC,
869
+ )
870
+ def forward(
871
+ self,
872
+ input_ids: Optional[torch.LongTensor] = None,
873
+ position_ids: Optional[torch.LongTensor] = None,
874
+ attention_mask: Optional[torch.Tensor] = None,
875
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
876
+ inputs_embeds: Optional[torch.LongTensor] = None,
877
+ use_cache: Optional[bool] = None,
878
+ output_attentions: Optional[bool] = None,
879
+ output_hidden_states: Optional[bool] = None,
880
+ return_dict: Optional[bool] = None,
881
+ ) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPast]:
882
+
883
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
884
+ output_hidden_states = (
885
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
886
+ )
887
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
888
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
889
+
890
+ if self.gradient_checkpointing and self.training:
891
+ if use_cache:
892
+ logger.warning_once(
893
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
894
+ )
895
+ use_cache = False
896
+
897
+ if input_ids is not None and inputs_embeds is not None:
898
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
899
+ elif input_ids is not None:
900
+ batch_size, seq_length = input_ids.shape[:2]
901
+ elif inputs_embeds is not None:
902
+ # NOTE: fix: infer batch_size and seq_length from inputs_embeds when only embeddings are provided
903
+ batch_size, seq_length = inputs_embeds.shape[:2]
904
+ else:
905
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
906
+
907
+ if inputs_embeds is None:
908
+ inputs_embeds = self.word_embeddings(input_ids)
909
+
910
+ if past_key_values is None:
911
+ if self.pre_seq_len is not None:
912
+ past_key_values = self.get_prompt(batch_size=input_ids.shape[0], device=input_ids.device,
913
+ dtype=inputs_embeds.dtype)
914
+ else:
915
+ past_key_values = tuple([None] * len(self.layers))
916
+
917
+ if attention_mask is None:
918
+ attention_mask = self.get_masks(
919
+ input_ids,
920
+ device=input_ids.device
921
+ )
922
+
923
+
924
+ if position_ids is None:
925
+ MASK, gMASK = self.config.mask_token_id, self.config.gmask_token_id
926
+ mask_token = gMASK if gMASK in input_ids else MASK
927
+ use_gmask = True if gMASK in input_ids else False
928
+
929
+ mask_positions = [seq.tolist().index(mask_token) for seq in input_ids]
930
+ position_ids = self.get_position_ids(
931
+ input_ids,
932
+ mask_positions=mask_positions,
933
+ device=input_ids.device,
934
+ gmask=use_gmask
935
+ )
936
+
937
+ if self.pre_seq_len is not None and attention_mask is not None:
938
+ prefix_attention_mask = torch.ones(batch_size, 1, input_ids.size(-1), self.pre_seq_len).to(
939
+ attention_mask.device)
940
+ prefix_attention_mask = (prefix_attention_mask < 0.5).bool()
941
+ attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=3)
942
+
943
+ # [seq_len, batch, hidden_size]
944
+ hidden_states = inputs_embeds.transpose(0, 1)
945
+
946
+ presents = () if use_cache else None
947
+ all_self_attentions = () if output_attentions else None
948
+ all_hidden_states = () if output_hidden_states else None
949
+
950
+ if attention_mask is None:
951
+ attention_mask = torch.zeros(1, 1, device=input_ids.device).bool()
952
+
953
+ else:
954
+ pass
955
+ # NOTE: this is a hack to make the code work with the LAVIS training
956
+ # attention_mask = attention_mask.to(input_ids.device)
957
+
958
+ for i, layer in enumerate(self.layers):
959
+
960
+ if output_hidden_states:
961
+ all_hidden_states = all_hidden_states + (hidden_states,)
962
+ layer_past = past_key_values[i]
963
+
964
+ if self.gradient_checkpointing and self.training:
965
+ layer_ret = torch.utils.checkpoint.checkpoint(
966
+ layer,
967
+ hidden_states,
968
+ position_ids,
969
+ attention_mask,
970
+ torch.tensor(i),
971
+ layer_past,
972
+ use_cache,
973
+ output_attentions
974
+ )
975
+ else:
976
+ layer_ret = layer(
977
+ hidden_states,
978
+ position_ids=position_ids,
979
+ attention_mask=attention_mask,
980
+ layer_id=torch.tensor(i),
981
+ layer_past=layer_past,
982
+ use_cache=use_cache,
983
+ output_attentions=output_attentions
984
+ )
985
+
986
+ hidden_states = layer_ret[0]
987
+
988
+ if use_cache:
989
+ presents = presents + (layer_ret[1],)
990
+
991
+ if output_attentions:
992
+ all_self_attentions = all_self_attentions + (layer_ret[2 if use_cache else 1],)
993
+
994
+ # Final layer norm.
995
+ hidden_states = self.final_layernorm(hidden_states)
996
+
997
+ if output_hidden_states:
998
+ all_hidden_states = all_hidden_states + (hidden_states,)
999
+
1000
+ if not return_dict:
1001
+ return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
1002
+
1003
+ return BaseModelOutputWithPast(
1004
+ last_hidden_state=hidden_states,
1005
+ past_key_values=presents,
1006
+ hidden_states=all_hidden_states,
1007
+ attentions=all_self_attentions,
1008
+ )
1009
+
1010
+
1011
+ class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):
1012
+ def __init__(self, config: ChatGLMConfig):
1013
+ super().__init__(config)
1014
+
1015
+ # self.hidden_size = config.hidden_size
1016
+ # self.params_dtype = torch.half
1017
+ # self.vocab_size = config.vocab_size
1018
+ self.max_sequence_length = config.max_sequence_length
1019
+
1020
+ self.position_encoding_2d = config.position_encoding_2d
1021
+
1022
+ self.transformer = ChatGLMModel(config)
1023
+
1024
+ self.lm_head = skip_init(
1025
+ nn.Linear,
1026
+ config.hidden_size,
1027
+ config.vocab_size,
1028
+ bias=False,
1029
+ dtype=torch.half
1030
+ )
1031
+
1032
+ self.config = config
1033
+
1034
+ self.quantized = False
1035
+
1036
+ if self.config.quantization_bit:
1037
+ self.quantize(self.config.quantization_bit, empty_init=True)
1038
+
1039
+ def get_output_embeddings(self):
1040
+ return self.lm_head
1041
+
1042
+ def set_output_embeddings(self, new_embeddings):
1043
+ self.lm_head = new_embeddings
1044
+
1045
+ def _update_model_kwargs_for_generation(
1046
+ self,
1047
+ outputs: ModelOutput,
1048
+ model_kwargs: Dict[str, Any],
1049
+ is_encoder_decoder: bool = False,
1050
+ standardize_cache_format: bool = False,
1051
+ ) -> Dict[str, Any]:
1052
+ # update past_key_values
1053
+ model_kwargs["past_key_values"] = self._extract_past_from_model_output(
1054
+ outputs, standardize_cache_format=standardize_cache_format
1055
+ )
1056
+
1057
+ # update attention mask
1058
+ if "attention_mask" in model_kwargs:
1059
+ attention_mask = model_kwargs["attention_mask"]
1060
+ if attention_mask is not None and attention_mask.dtype == torch.bool:
1061
+ attention_mask = torch.cat(
1062
+ [attention_mask, attention_mask.new_ones((*attention_mask.shape[:3], 1))], dim=3)
1063
+ new_attention_mask = attention_mask[:, :, -1:].clone()
1064
+ new_attention_mask[..., -1] = False
1065
+ model_kwargs["attention_mask"] = torch.cat(
1066
+ [attention_mask, new_attention_mask], dim=2
1067
+ )
1068
+
1069
+ # update position ids
1070
+ if "position_ids" in model_kwargs:
1071
+ position_ids = model_kwargs["position_ids"]
1072
+ new_position_id = position_ids[..., -1:].clone()
1073
+ new_position_id[:, 1, :] += 1
1074
+ model_kwargs["position_ids"] = torch.cat(
1075
+ [position_ids, new_position_id], dim=-1
1076
+ )
1077
+
1078
+ return model_kwargs
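During incremental decoding the update above grows the cached inputs by one step: the boolean mask gains a column of True (earlier positions never attend to the new token) plus a row copied from the previous last row with its final entry set to False (the new token attends to itself), and only the second, block channel of the 2D position ids advances. Illustrative, for one sequence:

    # position_ids before: [[0, 1, 2, 3, 3], [0, 0, 0, 1, 2]]
    # position_ids after : [[0, 1, 2, 3, 3, 3], [0, 0, 0, 1, 2, 3]]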
1079
+
1080
+ def prepare_inputs_for_generation(
1081
+ self,
1082
+ input_ids: torch.LongTensor,
1083
+ inputs_embeds: Optional[torch.Tensor] = None,
1084
+ past: Optional[torch.Tensor] = None,
1085
+ past_key_values: Optional[torch.Tensor] = None,
1086
+ attention_mask: Optional[torch.Tensor] = None,
1087
+ position_ids: Optional[torch.Tensor] = None,
1088
+ **kwargs
1089
+ ) -> dict:
1090
+ batch_size, seq_length = input_ids.shape
1091
+ MASK, gMASK = self.config.mask_token_id, self.config.gmask_token_id
1092
+ mask_token = gMASK if gMASK in input_ids else MASK
1093
+ use_gmask = True if gMASK in input_ids else False
1094
+ seqs = input_ids.tolist()
1095
+ mask_positions = [seq.index(mask_token) for seq in seqs]
1096
+
1097
+ # only last token for input_ids if past is not None
1098
+ if past is not None or past_key_values is not None:
1099
+ last_token = input_ids[:, -1].unsqueeze(-1)
1100
+ if attention_mask is not None and attention_mask.dtype == torch.bool:
1101
+ attention_mask = attention_mask[:, :, -1:]
1102
+ else:
1103
+ attention_mask = None
1104
+ if position_ids is not None:
1105
+ position_ids = position_ids[..., -1:]
1106
+ else:
1107
+ context_lengths = [seq.index(self.config.bos_token_id) for seq in seqs]
1108
+ if self.position_encoding_2d:
1109
+ position_ids = torch.tensor(
1110
+ [[mask_position, seq_length - context_length] for mask_position, context_length in
1111
+ zip(mask_positions, context_lengths)], dtype=torch.long, device=input_ids.device).unsqueeze(-1)
1112
+ else:
1113
+ position_ids = torch.tensor([mask_position for mask_position in mask_positions], dtype=torch.long,
1114
+ device=input_ids.device).unsqueeze(-1)
1115
+
1116
+ if past is None:
1117
+ past = past_key_values
1118
+ return {
1119
+ "input_ids": last_token,
1120
+ "past_key_values": past,
1121
+ "position_ids": position_ids,
1122
+ "attention_mask": attention_mask
1123
+ }
1124
+ else:
1125
+ if attention_mask is not None and attention_mask.dtype != torch.bool:
1126
+ logger.warning_once(f"The dtype of attention mask ({attention_mask.dtype}) is not bool")
1127
+ attention_mask = None
1128
+ if attention_mask is None:
1129
+ attention_mask = self.get_masks(
1130
+ input_ids,
1131
+ device=input_ids.device
1132
+ )
1133
+ if position_ids is None:
1134
+ position_ids = self.get_position_ids(
1135
+ input_ids,
1136
+ device=input_ids.device,
1137
+ mask_positions=mask_positions,
1138
+ gmask=use_gmask
1139
+ )
1140
+ if inputs_embeds is not None:
1141
+ assert input_ids.size(1) == inputs_embeds.size(1), f"Make sure that both input_ids ({input_ids.size(1)}) and inputs_embeds ({inputs_embeds.size(1)}) have the same length."
1142
+ return {
1143
+ "inputs_embeds": inputs_embeds,
1144
+ "past_key_values": past,
1145
+ "position_ids": position_ids,
1146
+ "attention_mask": attention_mask
1147
+ }
1148
+ else:
1149
+ return {
1150
+ "input_ids": input_ids,
1151
+ "past_key_values": past,
1152
+ "position_ids": position_ids,
1153
+ "attention_mask": attention_mask
1154
+ }
1155
+
1156
+ def forward(
1157
+ self,
1158
+ input_ids: Optional[torch.Tensor] = None,
1159
+ position_ids: Optional[torch.Tensor] = None,
1160
+ attention_mask: Optional[torch.Tensor] = None,
1161
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
1162
+ inputs_embeds: Optional[torch.Tensor] = None,
1163
+ labels: Optional[torch.Tensor] = None,
1164
+ use_cache: Optional[bool] = None,
1165
+ output_attentions: Optional[bool] = None,
1166
+ output_hidden_states: Optional[bool] = None,
1167
+ return_dict: Optional[bool] = None,
1168
+ ):
1169
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1170
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1171
+
1172
+ transformer_outputs = self.transformer(
1173
+ input_ids=input_ids,
1174
+ position_ids=position_ids,
1175
+ attention_mask=attention_mask,
1176
+ past_key_values=past_key_values,
1177
+ inputs_embeds=inputs_embeds,
1178
+ use_cache=use_cache,
1179
+ output_attentions=output_attentions,
1180
+ output_hidden_states=output_hidden_states,
1181
+ return_dict=return_dict,
1182
+ )
1183
+
1184
+ hidden_states = transformer_outputs[0]
1185
+
1186
+ lm_logits = self.lm_head(hidden_states).permute(1, 0, 2).contiguous()
1187
+
1188
+ loss = None
1189
+ if labels is not None:
1190
+ lm_logits = lm_logits.to(torch.float32)
1191
+
1192
+ # Shift so that tokens < n predict n
1193
+ shift_logits = lm_logits[..., :-1, :].contiguous()
1194
+ shift_labels = labels[..., 1:].contiguous()
1195
+ # Flatten the tokens
1196
+ loss_fct = CrossEntropyLoss(ignore_index=-100)
1197
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
1198
+
1199
+ lm_logits = lm_logits.to(hidden_states.dtype)
1200
+ loss = loss.to(hidden_states.dtype)
1201
+
1202
+ if not return_dict:
1203
+ output = (lm_logits,) + transformer_outputs[1:]
1204
+ return ((loss,) + output) if loss is not None else output
1205
+
1206
+ return CausalLMOutputWithPast(
1207
+ loss=loss,
1208
+ logits=lm_logits,
1209
+ past_key_values=transformer_outputs.past_key_values,
1210
+ hidden_states=transformer_outputs.hidden_states,
1211
+ attentions=transformer_outputs.attentions,
1212
+ )
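The label shift above is the standard causal-LM alignment: logits at position k are scored against the label at position k + 1, and -100 entries (typically the prompt) are ignored. A worked example with placeholder token names:

    # labels       : [-100, -100,  t3,  t4, eos]
    # shift_labels : [-100,  t3,  t4, eos]        # paired with logits at positions 0..3
    # -> position 1 predicts t3, position 2 predicts t4, position 3 predicts eos;
    #    CrossEntropyLoss(ignore_index=-100) skips the remaining -100 entry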
1213
+
1214
+ @staticmethod
1215
+ def _reorder_cache(
1216
+ past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor
1217
+ ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]:
1218
+ """
1219
+ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
1220
+ [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
1221
+ beam_idx at every generation step.
1222
+
1223
+ Output shares the same memory storage as `past`.
1224
+ """
1225
+ return tuple(
1226
+ (
1227
+ layer_past[0].index_select(1, beam_idx.to(layer_past[0].device)),
1228
+ layer_past[1].index_select(1, beam_idx.to(layer_past[1].device)),
1229
+ )
1230
+ for layer_past in past
1231
+ )
1232
+
1233
+ def process_response(self, response):
1234
+ response = response.strip()
1235
+ response = response.replace("[[训练时间]]", "2023年")
1236
+ punkts = [
1237
+ [",", ","],
1238
+ ["!", "!"],
1239
+ [":", ":"],
1240
+ [";", ";"],
1241
+ ["\?", "?"],
1242
+ ]
1243
+ for item in punkts:
1244
+ response = re.sub(r"([\u4e00-\u9fff])%s" % item[0], r"\1%s" % item[1], response)
1245
+ response = re.sub(r"%s([\u4e00-\u9fff])" % item[0], r"%s\1" % item[1], response)
1246
+ return response
1247
+
1248
+ @torch.no_grad()
1249
+ def chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, max_length: int = 2048, num_beams=1,
1250
+ do_sample=True, top_p=0.7, temperature=0.95, logits_processor=None, **kwargs):
1251
+ if history is None:
1252
+ history = []
1253
+ if logits_processor is None:
1254
+ logits_processor = LogitsProcessorList()
1255
+ logits_processor.append(InvalidScoreLogitsProcessor())
1256
+ gen_kwargs = {"max_length": max_length, "num_beams": num_beams, "do_sample": do_sample, "top_p": top_p,
1257
+ "temperature": temperature, "logits_processor": logits_processor, **kwargs}
1258
+ if not history:
1259
+ prompt = query
1260
+ else:
1261
+ prompt = ""
1262
+ for i, (old_query, response) in enumerate(history):
1263
+ prompt += "[Round {}]\n问:{}\n答:{}\n".format(i, old_query, response)
1264
+ prompt += "[Round {}]\n问��{}\n答:".format(len(history), query)
1265
+ inputs = tokenizer([prompt], return_tensors="pt")
1266
+ inputs = inputs.to(self.device)
1267
+ outputs = self.generate(**inputs, **gen_kwargs)
1268
+ outputs = outputs.tolist()[0][len(inputs["input_ids"][0]):]
1269
+ response = tokenizer.decode(outputs)
1270
+ response = self.process_response(response)
1271
+ history = history + [(query, response)]
1272
+ return response, history
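A minimal usage sketch for the chat API defined above. The checkpoint id, dtype and device are assumptions (any repo whose remote code exposes ChatGLMForConditionalGeneration behaves the same way):

    from transformers import AutoModel, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
    model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
    model = model.eval()

    response, history = model.chat(tokenizer, "你好")
    response, history = model.chat(tokenizer, "用一句话介绍你自己", history=history)
    print(response)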
1273
+
1274
+ @torch.no_grad()
1275
+ def stream_chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, max_length: int = 2048,
1276
+ do_sample=True, top_p=0.7, temperature=0.95, logits_processor=None, **kwargs):
1277
+ if history is None:
1278
+ history = []
1279
+ if logits_processor is None:
1280
+ logits_processor = LogitsProcessorList()
1281
+ logits_processor.append(InvalidScoreLogitsProcessor())
1282
+ gen_kwargs = {"max_length": max_length, "do_sample": do_sample, "top_p": top_p,
1283
+ "temperature": temperature, "logits_processor": logits_processor, **kwargs}
1284
+ if not history:
1285
+ prompt = query
1286
+ else:
1287
+ prompt = ""
1288
+ for i, (old_query, response) in enumerate(history):
1289
+ prompt += "[Round {}]\n问:{}\n答:{}\n".format(i, old_query, response)
1290
+ prompt += "[Round {}]\n问:{}\n答:".format(len(history), query)
1291
+ inputs = tokenizer([prompt], return_tensors="pt")
1292
+ inputs = inputs.to(self.device)
1293
+ for outputs in self.stream_generate(**inputs, **gen_kwargs):
1294
+ outputs = outputs.tolist()[0][len(inputs["input_ids"][0]):]
1295
+ response = tokenizer.decode(outputs)
1296
+ response = self.process_response(response)
1297
+ new_history = history + [(query, response)]
1298
+ yield response, new_history
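stream_chat yields progressively longer decoded responses; a usage sketch under the same assumptions as the chat example above:

    for response, history in model.stream_chat(tokenizer, "给我讲一个故事", history=[]):
        print(response)  # each yield is the full response decoded so far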
1299
+
1300
+ @torch.no_grad()
1301
+ def stream_generate(
1302
+ self,
1303
+ input_ids,
1304
+ generation_config: Optional[GenerationConfig] = None,
1305
+ logits_processor: Optional[LogitsProcessorList] = None,
1306
+ stopping_criteria: Optional[StoppingCriteriaList] = None,
1307
+ prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
1308
+ **kwargs,
1309
+ ):
1310
+ batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1]
1311
+
1312
+ if generation_config is None:
1313
+ generation_config = self.generation_config
1314
+ generation_config = copy.deepcopy(generation_config)
1315
+ model_kwargs = generation_config.update(**kwargs)
1316
+ bos_token_id, eos_token_id = generation_config.bos_token_id, generation_config.eos_token_id
1317
+
1318
+ if isinstance(eos_token_id, int):
1319
+ eos_token_id = [eos_token_id]
1320
+
1321
+ has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
1322
+ if has_default_max_length and generation_config.max_new_tokens is None:
1323
+ warnings.warn(
1324
+ f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. "
1325
+ "This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we"
1326
+ " recommend using `max_new_tokens` to control the maximum length of the generation.",
1327
+ UserWarning,
1328
+ )
1329
+ elif generation_config.max_new_tokens is not None:
1330
+ generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length
1331
+ if not has_default_max_length:
1332
+ warnings.warn(
1333
+ f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(="
1334
+ f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
1335
+ "Please refer to the documentation for more information. "
1336
+ "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)",
1337
+ UserWarning,
1338
+ )
1339
+
1340
+ if input_ids_seq_length >= generation_config.max_length:
1341
+ input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
1342
+ logger.warning(
1343
+ f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to"
1344
+ f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
1345
+ " increasing `max_new_tokens`."
1346
+ )
1347
+
1348
+ # 2. Set generation parameters if not already defined
1349
+ logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
1350
+ stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
1351
+
1352
+ logits_processor = self._get_logits_processor(
1353
+ generation_config=generation_config,
1354
+ input_ids_seq_length=input_ids_seq_length,
1355
+ encoder_input_ids=input_ids,
1356
+ prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
1357
+ logits_processor=logits_processor,
1358
+ )
1359
+
1360
+ stopping_criteria = self._get_stopping_criteria(
1361
+ generation_config=generation_config, stopping_criteria=stopping_criteria
1362
+ )
1363
+ logits_warper = self._get_logits_warper(generation_config)
1364
+
1365
+ unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)
1366
+ scores = None
1367
+ while True:
1368
+ model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
1369
+ # forward pass to get next token
1370
+ outputs = self(
1371
+ **model_inputs,
1372
+ return_dict=True,
1373
+ output_attentions=False,
1374
+ output_hidden_states=False,
1375
+ )
1376
+
1377
+ next_token_logits = outputs.logits[:, -1, :]
1378
+
1379
+ # pre-process distribution
1380
+ next_token_scores = logits_processor(input_ids, next_token_logits)
1381
+ next_token_scores = logits_warper(input_ids, next_token_scores)
1382
+
1383
+ # sample
1384
+ probs = nn.functional.softmax(next_token_scores, dim=-1)
1385
+ if generation_config.do_sample:
1386
+ next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
1387
+ else:
1388
+ next_tokens = torch.argmax(probs, dim=-1)
1389
+
1390
+ # update generated ids, model inputs, and length for next step
1391
+ input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
1392
+ model_kwargs = self._update_model_kwargs_for_generation(
1393
+ outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
1394
+ )
1395
+ unfinished_sequences = unfinished_sequences.mul((sum(next_tokens != i for i in eos_token_id)).long())
1396
+
1397
+ # stop when each sentence is finished, or if we exceed the maximum length
1398
+ if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):
1399
+ break
1400
+ yield input_ids
1401
+
1402
+ def quantize(self, bits: int, empty_init=False, **kwargs):
1403
+ if bits == 0:
1404
+ return
1405
+
1406
+ from .quantization import quantize
1407
+
1408
+ if self.quantized:
1409
+ logger.info("Already quantized.")
1410
+ return self
1411
+
1412
+ self.quantized = True
1413
+
1414
+ self.config.quantization_bit = bits
1415
+
1416
+ self.transformer = quantize(self.transformer, bits, empty_init=empty_init, **kwargs)
1417
+ return self
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4f62d72c97cb28762f2fcb9e9b00e1d23c7d546da79fb4cfde386231b9b8d956
3
+ size 4377310673