Aratako committed
Commit 1210357
1 Parent(s): 847d0ab

upload model

added_tokens.json ADDED
@@ -0,0 +1,5 @@
+{
+  "<|endoftext|>": 151643,
+  "<|im_end|>": 151645,
+  "<|im_start|>": 151644
+}
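
These three entries are the ChatML-style control tokens Qwen1.5 uses for chat formatting. A minimal sketch of the prompt layout they imply follows; the actual chat template ships with the tokenizer files (not shown in this excerpt), so the exact string is an assumption rather than something defined by this commit.

```python
# Hypothetical illustration of the ChatML layout implied by added_tokens.json.
# <|im_start|>/<|im_end|> delimit turns; <|endoftext|> (151643) is reused as the
# pad/bos token in config.json and generation_config.json below.
prompt = (
    "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
    "<|im_start|>user\nHello!<|im_end|>\n"
    "<|im_start|>assistant\n"
)
```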
config.json ADDED
@@ -0,0 +1,34 @@
+{
+  "_name_or_path": "C:\\Qwen1.5-72B-Chat",
+  "architectures": [
+    "Qwen2ForCausalLM"
+  ],
+  "attention_dropout": 0.0,
+  "auto_map": {
+    "AutoConfig": "configuration_qwen2.Qwen2Config",
+    "AutoModelForCausalLM": "modeling_qwen2.Qwen2ForCausalLM"
+  },
+  "bos_token_id": 151643,
+  "eos_token_id": 151645,
+  "hidden_act": "silu",
+  "hidden_size": 8192,
+  "initializer_range": 0.02,
+  "intermediate_size": 24576,
+  "max_position_embeddings": 32768,
+  "max_window_layers": 70,
+  "model_type": "qwen2",
+  "num_attention_heads": 64,
+  "num_experts_per_tok": 2,
+  "num_hidden_layers": 80,
+  "num_key_value_heads": 64,
+  "num_local_experts": 2,
+  "rms_norm_eps": 1e-06,
+  "rope_theta": 1000000.0,
+  "sliding_window": null,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.39.1",
+  "use_cache": true,
+  "use_sliding_window": false,
+  "vocab_size": 152064
+}
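
Because auto_map points at the configuration_qwen2.py and modeling_qwen2.py files added in this same commit, the checkpoint has to be loaded with trust_remote_code=True. A hedged loading sketch follows; the model path is a placeholder, and the memory figure is only what the shard sizes listed below suggest.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "./this-repo"  # placeholder: local clone or Hub id of this repository

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16"
    device_map="auto",           # the 25 bf16 shards below total roughly 240 GB
    trust_remote_code=True,      # lets auto_map resolve modeling_qwen2.Qwen2ForCausalLM
)
```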
configuration_qwen2.py ADDED
@@ -0,0 +1,147 @@
+# coding=utf-8
+# Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Qwen2 model configuration"""
+
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import logging
+
+logger = logging.get_logger(__name__)
+
+QWEN2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+    "Qwen/Qwen2-7B-beta": "https://huggingface.co/Qwen/Qwen2-7B-beta/resolve/main/config.json",
+}
+
+
+class Qwen2Config(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`Qwen2Model`]. It is used to instantiate a
+    Qwen2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
+    with the defaults will yield a similar configuration to that of
+    Qwen2-7B-beta [Qwen/Qwen2-7B-beta](https://huggingface.co/Qwen/Qwen2-7B-beta).
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 151936):
+            Vocabulary size of the Qwen2 model. Defines the number of different tokens that can be represented by the
+            `inputs_ids` passed when calling [`Qwen2Model`]
+        hidden_size (`int`, *optional*, defaults to 4096):
+            Dimension of the hidden representations.
+        intermediate_size (`int`, *optional*, defaults to 22016):
+            Dimension of the MLP representations.
+        num_hidden_layers (`int`, *optional*, defaults to 32):
+            Number of hidden layers in the Transformer encoder.
+        num_attention_heads (`int`, *optional*, defaults to 32):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        num_key_value_heads (`int`, *optional*, defaults to 32):
+            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+            `num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When
+            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+            by meanpooling all the original heads within that group. For more details checkout [this
+            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `32`.
+        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+            The non-linear activation function (function or string) in the decoder.
+        max_position_embeddings (`int`, *optional*, defaults to 32768):
+            The maximum sequence length that this model might ever be used with.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+            The epsilon used by the rms normalization layers.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models). Only
+            relevant if `config.is_decoder=True`.
+        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+            Whether the model's input and output word embeddings should be tied.
+        rope_theta (`float`, *optional*, defaults to 10000.0):
+            The base period of the RoPE embeddings.
+        use_sliding_window (`bool`, *optional*, defaults to `False`):
+            Whether to use sliding window attention.
+        sliding_window (`int`, *optional*, defaults to 4096):
+            Sliding window attention (SWA) window size. If not specified, will default to `4096`.
+        max_window_layers (`int`, *optional*, defaults to 28):
+            The number of layers that use SWA (Sliding Window Attention). The bottom layers use SWA while the top use full attention.
+        attention_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout ratio for the attention probabilities.
+
+    ```python
+    >>> from transformers import Qwen2Model, Qwen2Config
+
+    >>> # Initializing a Qwen2 style configuration
+    >>> configuration = Qwen2Config()
+
+    >>> # Initializing a model from the Qwen2-7B style configuration
+    >>> model = Qwen2Model(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "qwen2"
+    keys_to_ignore_at_inference = ["past_key_values"]
+
+    def __init__(
+        self,
+        vocab_size=151936,
+        hidden_size=4096,
+        intermediate_size=22016,
+        num_hidden_layers=32,
+        num_attention_heads=32,
+        num_key_value_heads=32,
+        hidden_act="silu",
+        max_position_embeddings=32768,
+        initializer_range=0.02,
+        rms_norm_eps=1e-6,
+        use_cache=True,
+        tie_word_embeddings=False,
+        rope_theta=10000.0,
+        use_sliding_window=False,
+        sliding_window=4096,
+        max_window_layers=28,
+        attention_dropout=0.0,
+        num_experts_per_tok=2,
+        num_local_experts=2,
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        self.max_position_embeddings = max_position_embeddings
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.use_sliding_window = use_sliding_window
+        self.sliding_window = sliding_window
+        self.max_window_layers = max_window_layers
+
+        # for backward compatibility
+        if num_key_value_heads is None:
+            num_key_value_heads = num_attention_heads
+
+        self.num_key_value_heads = num_key_value_heads
+        self.hidden_act = hidden_act
+        self.initializer_range = initializer_range
+        self.rms_norm_eps = rms_norm_eps
+        self.use_cache = use_cache
+        self.rope_theta = rope_theta
+        self.attention_dropout = attention_dropout
+        self.num_experts_per_tok = num_experts_per_tok
+        self.num_local_experts = num_local_experts
+
+        super().__init__(
+            tie_word_embeddings=tie_word_embeddings,
+            **kwargs,
+        )
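
The only departures from the stock Qwen2 configuration class are the num_experts_per_tok and num_local_experts arguments, which parameterize the Qwen2MoE block in modeling_qwen2.py. As a sketch, the configuration this checkpoint actually ships (see config.json above) could be rebuilt from the class like this; nothing beyond the values already listed in config.json is assumed.

```python
from configuration_qwen2 import Qwen2Config  # the file added above, importable from the repo directory

config = Qwen2Config(
    vocab_size=152064,
    hidden_size=8192,
    intermediate_size=24576,
    num_hidden_layers=80,
    num_attention_heads=64,
    num_key_value_heads=64,
    max_window_layers=70,
    rope_theta=1000000.0,
    num_local_experts=2,      # two Qwen1.5-72B-Chat-sized experts per layer
    num_experts_per_tok=2,    # both experts are routed for every token
)
```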
generation_config.json ADDED
@@ -0,0 +1,14 @@
+{
+  "bos_token_id": 151643,
+  "pad_token_id": 151643,
+  "do_sample": true,
+  "eos_token_id": [
+    151645,
+    151643
+  ],
+  "repetition_penalty": 1.05,
+  "temperature": 0.7,
+  "top_p": 0.8,
+  "top_k": 20,
+  "transformers_version": "4.37.0"
+}
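
These values become the defaults picked up by model.generate() when the repository's generation config is loaded. The sketch below spells them out as explicit arguments; model, tokenizer and prompt are assumed to come from the earlier loading example, and max_new_tokens is an illustrative addition, not part of this file.

```python
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(
    **inputs,
    do_sample=True,
    temperature=0.7,
    top_p=0.8,
    top_k=20,
    repetition_penalty=1.05,
    eos_token_id=[151645, 151643],  # <|im_end|> or <|endoftext|> ends generation
    pad_token_id=151643,
    max_new_tokens=512,             # illustrative; not set by generation_config.json
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```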
mergekit_moe_config.yml ADDED
@@ -0,0 +1,9 @@
+base_model: ./Qwen1.5-72B-Chat
+gate_mode: random
+dtype: bfloat16
+experts:
+  - source_model: ./Qwen1.5-72B-Chat
+    positive_prompts: []
+  - source_model: ./Liberated-Qwen1.5-72B
+    positive_prompts: []
+tokenizer_source: model:./Qwen1.5-72B-Chat
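
This mergekit recipe builds the checkpoint as a two-expert mixture over dense 72B models: the experts are Qwen1.5-72B-Chat itself and Liberated-Qwen1.5-72B, gate_mode: random leaves the per-layer router weights randomly initialized (consistent with config.json routing both experts for every token via num_experts_per_tok: 2), and the tokenizer is taken unchanged from Qwen1.5-72B-Chat. A config like this is normally fed to mergekit's MoE entry point, roughly along the lines of "mergekit-moe mergekit_moe_config.yml ./output-dir"; the exact invocation depends on the installed mergekit version and is an assumption here, not part of the commit.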
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d82bfa2cf8033765335a8594145b89697fe6f5642726e4a6bd567af5e7bf2968
+size 9949006568
model-00002-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1085a0f1eb0f1e5386b1a202234fc1c63c1d118d5a85bd91d03e02ac7a50aa28
+size 9798228240
model-00003-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4fab3a5cad9ebfd558c0d75a1b5c87c7962ec7cd8b574bca52fe9673ffedcf86
+size 9663927704
model-00004-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df1272bd42071830a41576791947f11a5fef844cf0213948981e4a7d482b608e
+size 9663927712
model-00005-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d2f5fc81b15ff05ff02ec181144d164deba4990790b367bb6abf8b4f921c27c5
+size 9932412928
model-00006-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c2b208fa93acb47ac8977bb16520c444f74c116c6c029de0759004c4ba9740dc
+size 9932396424
model-00007-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7c69f450121baf30677d9fd086612d39916df2c05b1f7d2b9235d04541be9995
+size 9663927752
model-00008-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ddf33c280305f5b06e1cfb38be356c3178918d97d0cba3b3fa0d62ecf4eed077
+size 9932412928
model-00009-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:29cc726ccd32a07cedb7f36531979493b88619a967b168881a901abfe854c8a5
+size 9932396424
model-00010-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cec453625fece050e0139fc4d77cb6ee4ab11dcf5941e71150a5858f780294d8
+size 9663927752
model-00011-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e90d8c678405f4c67d1a78baf0106f1da439e2f4aa559b4139bd50e3d8794f4e
+size 9932412928
model-00012-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:822e27a591db33aa76f333d2309add7605b9f2fb382308fafb4e386ba7e5d0d7
+size 9932396424
model-00013-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dcf7a8fd2b1096fd9757f4751ba59c230bedb138df22eb51a231516ac922f4ac
+size 9663927752
model-00014-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bca2d727acbf811cce222af2e9f7641f81945c2d79d61f4851240f16ee374d6b
+size 9932412928
model-00015-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5ba84a3b435eccd6df73f2e60ef0c081f5957e552db4882f176b848e979f0f0a
+size 9932396424
model-00016-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dca5743536d439882060230b3285ce9be8ee3c7850364faebde5856962c709eb
+size 9663927752
model-00017-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb22edf9d90c7196a2b8906680831911471e807ac5b522eef6fea78167d9259b
+size 9932412928
model-00018-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:56662f3626a9777e41d0e18501470968a00537941eba14962517a0c216d1c21e
+size 9932396424
model-00019-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f4cbba0bd0317313b6b255e1679141c239a8e6fc09470ce83c02918125e99ce1
+size 9663927752
model-00020-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ffd4785bf7f1a8e9e3c92eddd5fea073f2835e0f371ee2661147f0d93370aeab
+size 9932412928
model-00021-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:06f6744d8e2cf03ebc806f4b4759d2e6066e382e87ef645225a50930db3f1136
+size 9932396424
model-00022-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:415a42d67378e4761be95067b24c62320c0a536e8b3aabff437987162dd815a0
+size 9663927752
model-00023-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7411cba38f4ff53cd396cd08d8c3f7189f84db90b56c912bdbc2d0385d9c060
+size 9932412928
model-00024-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d8807775befed021ec8e95d0fbae35426fef1480dd1e519178e2aee5b67c734d
+size 9932396424
model-00025-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8397123e77e98ca075574d55cf7c87734c353079f137ac3c1a785e32eaeae044
+size 5103053792
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
modeling_qwen2.py ADDED
@@ -0,0 +1,1607 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """ PyTorch Qwen2 model."""
21
+ import inspect
22
+ import math
23
+ import warnings
24
+ from typing import List, Optional, Tuple, Union
25
+
26
+ import torch
27
+ import torch.nn.functional as F
28
+ import torch.utils.checkpoint
29
+ from torch import nn
30
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
31
+ from transformers.activations import ACT2FN
32
+ from transformers.cache_utils import Cache, DynamicCache
33
+ from transformers.modeling_attn_mask_utils import (
34
+ _prepare_4d_causal_attention_mask,
35
+ _prepare_4d_causal_attention_mask_for_sdpa,
36
+ )
37
+ from transformers.modeling_outputs import (
38
+ BaseModelOutputWithPast,
39
+ CausalLMOutputWithPast,
40
+ SequenceClassifierOutputWithPast,
41
+ )
42
+ from transformers.modeling_utils import PreTrainedModel
43
+ from transformers.utils import (
44
+ add_start_docstrings,
45
+ add_start_docstrings_to_model_forward,
46
+ is_flash_attn_2_available,
47
+ is_flash_attn_greater_or_equal_2_10,
48
+ logging,
49
+ replace_return_docstrings,
50
+ )
51
+
52
+ from .configuration_qwen2 import Qwen2Config
53
+
54
+ try:
55
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
56
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
57
+
58
+ _flash_supports_window_size = "window_size" in list(
59
+ inspect.signature(flash_attn_func).parameters
60
+ )
61
+ except:
62
+ pad_input, unpad_input = None, None
63
+ FlashRotaryEmbedding = None
64
+ FlashSelfAttention, FlashCrossAttention = None, None
65
+ FusedDense = None
66
+
67
+ logger = logging.get_logger(__name__)
68
+
69
+
70
+ _CHECKPOINT_FOR_DOC = "Qwen/Qwen2-7B-beta"
71
+ _CONFIG_FOR_DOC = "Qwen2Config"
72
+
73
+ QWEN2_PRETRAINED_MODEL_ARCHIVE_LIST = [
74
+ "Qwen/Qwen2-7B-beta",
75
+ # See all Qwen2 models at https://huggingface.co/models?filter=qwen2
76
+ ]
77
+
78
+
79
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
80
+ def _get_unpad_data(attention_mask):
81
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
82
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
83
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
84
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
85
+ return (
86
+ indices,
87
+ cu_seqlens,
88
+ max_seqlen_in_batch,
89
+ )
90
+
91
+
92
+ # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Qwen2
93
+ class Qwen2RMSNorm(nn.Module):
94
+ def __init__(self, hidden_size, eps=1e-6):
95
+ """
96
+ Qwen2RMSNorm is equivalent to T5LayerNorm
97
+ """
98
+ super().__init__()
99
+ self.weight = nn.Parameter(torch.ones(hidden_size))
100
+ self.variance_epsilon = eps
101
+
102
+ def forward(self, hidden_states):
103
+ input_dtype = hidden_states.dtype
104
+ hidden_states = hidden_states.to(torch.float32)
105
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
106
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
107
+ return self.weight * hidden_states.to(input_dtype)
108
+
109
+
110
+ # Copied from transformers.models.mistral.modeling_mistral.MistralRotaryEmbedding with Mistral->Qwen2
111
+ class Qwen2RotaryEmbedding(nn.Module):
112
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
113
+ super().__init__()
114
+
115
+ self.dim = dim
116
+ self.max_position_embeddings = max_position_embeddings
117
+ self.base = base
118
+ inv_freq = 1.0 / (
119
+ self.base
120
+ ** (
121
+ torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device)
122
+ / self.dim
123
+ )
124
+ )
125
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
126
+
127
+ # Build here to make `torch.jit.trace` work.
128
+ self._set_cos_sin_cache(
129
+ seq_len=max_position_embeddings,
130
+ device=self.inv_freq.device,
131
+ dtype=torch.get_default_dtype(),
132
+ )
133
+
134
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
135
+ self.max_seq_len_cached = seq_len
136
+ t = torch.arange(
137
+ self.max_seq_len_cached, device=device, dtype=torch.int64
138
+ ).type_as(self.inv_freq)
139
+
140
+ freqs = torch.outer(t, self.inv_freq)
141
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
142
+ emb = torch.cat((freqs, freqs), dim=-1)
143
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
144
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
145
+
146
+ def forward(self, x, seq_len=None):
147
+ # x: [bs, num_attention_heads, seq_len, head_size]
148
+ if seq_len > self.max_seq_len_cached:
149
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
150
+
151
+ return (
152
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
153
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
154
+ )
155
+
156
+
157
+ # Copied from transformers.models.llama.modeling_llama.rotate_half
158
+ def rotate_half(x):
159
+ """Rotates half the hidden dims of the input."""
160
+ x1 = x[..., : x.shape[-1] // 2]
161
+ x2 = x[..., x.shape[-1] // 2 :]
162
+ return torch.cat((-x2, x1), dim=-1)
163
+
164
+
165
+ # Copied from transformers.models.mistral.modeling_mistral.apply_rotary_pos_emb
166
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
167
+ """Applies Rotary Position Embedding to the query and key tensors.
168
+
169
+ Args:
170
+ q (`torch.Tensor`): The query tensor.
171
+ k (`torch.Tensor`): The key tensor.
172
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
173
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
174
+ position_ids (`torch.Tensor`):
175
+ The position indices of the tokens corresponding to the query and key tensors. For example, this can be
176
+ used to pass offsetted position ids when working with a KV-cache.
177
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
178
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
179
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
180
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
181
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
182
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
183
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
184
+ Returns:
185
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
186
+ """
187
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
188
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
189
+ q_embed = (q * cos) + (rotate_half(q) * sin)
190
+ k_embed = (k * cos) + (rotate_half(k) * sin)
191
+ return q_embed, k_embed
192
+
193
+
194
+ # Copied from transformers.models.mistral.modeling_mistral.MistralMLP with Mistral->Qwen2
195
+ class Qwen2MLP(nn.Module):
196
+ def __init__(self, config):
197
+ super().__init__()
198
+ self.config = config
199
+ self.hidden_size = config.hidden_size
200
+ self.intermediate_size = config.intermediate_size
201
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
202
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
203
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
204
+ self.act_fn = ACT2FN[config.hidden_act]
205
+
206
+ def forward(self, x):
207
+ return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
208
+
209
+
210
+ class Qwen2MoE(nn.Module):
211
+ def __init__(self, config):
212
+ super().__init__()
213
+ self.config = config
214
+ self.hidden_size = config.hidden_size
215
+ self.num_local_experts = config.num_local_experts
216
+ self.num_experts_per_tok = config.num_experts_per_tok
217
+ self.mlp = nn.ModuleList(
218
+ [Qwen2MLP(config) for i in range(self.num_local_experts)]
219
+ )
220
+ self.gate = nn.Linear(self.hidden_size, self.num_local_experts, bias=False)
221
+
222
+ def forward(self, x):
223
+ orig_shape = x.shape
224
+ x = x.view(-1, x.shape[-1])
225
+
226
+ scores = self.gate(x)
227
+ expert_weights, expert_indices = torch.topk(
228
+ scores, self.num_experts_per_tok, dim=-1
229
+ )
230
+ expert_weights = expert_weights.softmax(dim=-1)
231
+ flat_expert_indices = expert_indices.view(-1)
232
+
233
+ x = x.repeat_interleave(self.num_experts_per_tok, dim=0)
234
+ y = torch.empty_like(x)
235
+ for i, expert in enumerate(self.mlp):
236
+ y[flat_expert_indices == i] = expert(x[flat_expert_indices == i])
237
+ y = (y.view(*expert_weights.shape, -1) * expert_weights.unsqueeze(-1)).sum(
238
+ dim=1
239
+ )
240
+ return y.view(*orig_shape)
241
+
242
+
243
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv
244
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
245
+ """
246
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
247
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
248
+ """
249
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
250
+ if n_rep == 1:
251
+ return hidden_states
252
+ hidden_states = hidden_states[:, :, None, :, :].expand(
253
+ batch, num_key_value_heads, n_rep, slen, head_dim
254
+ )
255
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
256
+
257
+
258
+ class Qwen2Attention(nn.Module):
259
+ """
260
+ Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
261
+ and "Generating Long Sequences with Sparse Transformers".
262
+ """
263
+
264
+ def __init__(self, config: Qwen2Config, layer_idx: Optional[int] = None):
265
+ super().__init__()
266
+ self.config = config
267
+ self.layer_idx = layer_idx
268
+ if layer_idx is None:
269
+ logger.warning_once(
270
+ f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
271
+ "to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
272
+ "when creating this class."
273
+ )
274
+
275
+ self.hidden_size = config.hidden_size
276
+ self.num_heads = config.num_attention_heads
277
+ self.head_dim = self.hidden_size // self.num_heads
278
+ self.num_key_value_heads = config.num_key_value_heads
279
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
280
+ self.max_position_embeddings = config.max_position_embeddings
281
+ self.rope_theta = config.rope_theta
282
+ self.is_causal = True
283
+ self.attention_dropout = config.attention_dropout
284
+
285
+ if (self.head_dim * self.num_heads) != self.hidden_size:
286
+ raise ValueError(
287
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
288
+ f" and `num_heads`: {self.num_heads})."
289
+ )
290
+ self.q_proj = nn.Linear(
291
+ self.hidden_size, self.num_heads * self.head_dim, bias=True
292
+ )
293
+ self.k_proj = nn.Linear(
294
+ self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True
295
+ )
296
+ self.v_proj = nn.Linear(
297
+ self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True
298
+ )
299
+ self.o_proj = nn.Linear(
300
+ self.num_heads * self.head_dim, self.hidden_size, bias=False
301
+ )
302
+
303
+ self.rotary_emb = Qwen2RotaryEmbedding(
304
+ self.head_dim,
305
+ max_position_embeddings=self.max_position_embeddings,
306
+ base=self.rope_theta,
307
+ )
308
+
309
+ def forward(
310
+ self,
311
+ hidden_states: torch.Tensor,
312
+ attention_mask: Optional[torch.Tensor] = None,
313
+ position_ids: Optional[torch.LongTensor] = None,
314
+ past_key_value: Optional[Cache] = None,
315
+ output_attentions: bool = False,
316
+ use_cache: bool = False,
317
+ **kwargs,
318
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
319
+ if "padding_mask" in kwargs:
320
+ warnings.warn(
321
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
322
+ )
323
+ bsz, q_len, _ = hidden_states.size()
324
+
325
+ query_states = self.q_proj(hidden_states)
326
+ key_states = self.k_proj(hidden_states)
327
+ value_states = self.v_proj(hidden_states)
328
+
329
+ query_states = query_states.view(
330
+ bsz, q_len, self.num_heads, self.head_dim
331
+ ).transpose(1, 2)
332
+ key_states = key_states.view(
333
+ bsz, q_len, self.num_key_value_heads, self.head_dim
334
+ ).transpose(1, 2)
335
+ value_states = value_states.view(
336
+ bsz, q_len, self.num_key_value_heads, self.head_dim
337
+ ).transpose(1, 2)
338
+
339
+ kv_seq_len = key_states.shape[-2]
340
+ if past_key_value is not None:
341
+ if self.layer_idx is None:
342
+ raise ValueError(
343
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
344
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
345
+ "with a layer index."
346
+ )
347
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
348
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
349
+ query_states, key_states = apply_rotary_pos_emb(
350
+ query_states, key_states, cos, sin, position_ids
351
+ )
352
+
353
+ if past_key_value is not None:
354
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
355
+ key_states, value_states = past_key_value.update(
356
+ key_states, value_states, self.layer_idx, cache_kwargs
357
+ )
358
+
359
+ # repeat k/v heads if n_kv_heads < n_heads
360
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
361
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
362
+
363
+ attn_weights = torch.matmul(
364
+ query_states, key_states.transpose(2, 3)
365
+ ) / math.sqrt(self.head_dim)
366
+
367
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
368
+ raise ValueError(
369
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
370
+ f" {attn_weights.size()}"
371
+ )
372
+
373
+ if attention_mask is not None:
374
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
375
+ raise ValueError(
376
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
377
+ )
378
+
379
+ attn_weights = attn_weights + attention_mask
380
+
381
+ # upcast attention to fp32
382
+ attn_weights = nn.functional.softmax(
383
+ attn_weights, dim=-1, dtype=torch.float32
384
+ ).to(query_states.dtype)
385
+ attn_weights = nn.functional.dropout(
386
+ attn_weights, p=self.attention_dropout, training=self.training
387
+ )
388
+ attn_output = torch.matmul(attn_weights, value_states)
389
+
390
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
391
+ raise ValueError(
392
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
393
+ f" {attn_output.size()}"
394
+ )
395
+
396
+ attn_output = attn_output.transpose(1, 2).contiguous()
397
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
398
+
399
+ attn_output = self.o_proj(attn_output)
400
+
401
+ if not output_attentions:
402
+ attn_weights = None
403
+
404
+ return attn_output, attn_weights, past_key_value
405
+
406
+
407
+ class Qwen2FlashAttention2(Qwen2Attention):
408
+ """
409
+ Qwen2 flash attention module, following Qwen2 attention module. This module inherits from `Qwen2Attention`
410
+ as the weights of the module stays untouched. The only required change would be on the forward pass
411
+ where it needs to correctly call the public API of flash attention and deal with padding tokens
412
+ in case the input contains any of them. Additionally, for sliding window attention, we apply SWA only to the bottom
413
+ config.max_window_layers layers.
414
+ """
415
+
416
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
417
+ def __init__(self, *args, **kwargs):
418
+ super().__init__(*args, **kwargs)
419
+
420
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
421
+ # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
422
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
423
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
424
+
425
+ def forward(
426
+ self,
427
+ hidden_states: torch.Tensor,
428
+ attention_mask: Optional[torch.Tensor] = None,
429
+ position_ids: Optional[torch.LongTensor] = None,
430
+ past_key_value: Optional[Cache] = None,
431
+ output_attentions: bool = False,
432
+ use_cache: bool = False,
433
+ **kwargs,
434
+ ):
435
+ if "padding_mask" in kwargs:
436
+ warnings.warn(
437
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
438
+ )
439
+
440
+ # overwrite attention_mask with padding_mask
441
+ attention_mask = kwargs.pop("padding_mask")
442
+ bsz, q_len, _ = hidden_states.size()
443
+
444
+ query_states = self.q_proj(hidden_states)
445
+ key_states = self.k_proj(hidden_states)
446
+ value_states = self.v_proj(hidden_states)
447
+
448
+ query_states = query_states.view(
449
+ bsz, q_len, self.num_heads, self.head_dim
450
+ ).transpose(1, 2)
451
+ key_states = key_states.view(
452
+ bsz, q_len, self.num_key_value_heads, self.head_dim
453
+ ).transpose(1, 2)
454
+ value_states = value_states.view(
455
+ bsz, q_len, self.num_key_value_heads, self.head_dim
456
+ ).transpose(1, 2)
457
+
458
+ kv_seq_len = key_states.shape[-2]
459
+ if past_key_value is not None:
460
+ if self.layer_idx is None:
461
+ raise ValueError(
462
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
463
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
464
+ "with a layer index."
465
+ )
466
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
467
+
468
+ # Because the input can be padded, the absolute sequence length depends on the max position id.
469
+ rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1
470
+ cos, sin = self.rotary_emb(value_states, seq_len=rotary_seq_len)
471
+
472
+ query_states, key_states = apply_rotary_pos_emb(
473
+ query_states, key_states, cos, sin, position_ids
474
+ )
475
+
476
+ use_sliding_windows = (
477
+ _flash_supports_window_size
478
+ and getattr(self.config, "sliding_window", None) is not None
479
+ and kv_seq_len > self.config.sliding_window
480
+ and self.config.use_sliding_window
481
+ )
482
+
483
+ if not _flash_supports_window_size:
484
+ logger.warning_once(
485
+ "The current flash attention version does not support sliding window attention, for a more memory efficient implementation"
486
+ " make sure to upgrade flash-attn library."
487
+ )
488
+
489
+ if past_key_value is not None:
490
+ # Activate slicing cache only if the config has a `sliding_window` value
491
+ cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
492
+ if (
493
+ getattr(self.config, "sliding_window", None) is not None
494
+ and kv_seq_len > self.config.sliding_window
495
+ and cache_has_contents
496
+ ):
497
+ slicing_tokens = 1 - self.config.sliding_window
498
+
499
+ past_key = past_key_value[self.layer_idx][0]
500
+ past_value = past_key_value[self.layer_idx][1]
501
+
502
+ past_key = past_key[:, :, slicing_tokens:, :].contiguous()
503
+ past_value = past_value[:, :, slicing_tokens:, :].contiguous()
504
+
505
+ if past_key.shape[-2] != self.config.sliding_window - 1:
506
+ raise ValueError(
507
+ f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got"
508
+ f" {past_key.shape}"
509
+ )
510
+
511
+ if attention_mask is not None:
512
+ attention_mask = attention_mask[:, slicing_tokens:]
513
+ attention_mask = torch.cat(
514
+ [attention_mask, torch.ones_like(attention_mask[:, -1:])],
515
+ dim=-1,
516
+ )
517
+
518
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
519
+ key_states, value_states = past_key_value.update(
520
+ key_states, value_states, self.layer_idx, cache_kwargs
521
+ )
522
+
523
+ # repeat k/v heads if n_kv_heads < n_heads
524
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
525
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
526
+ dropout_rate = 0.0 if not self.training else self.attention_dropout
527
+
528
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
529
+ # therefore the input hidden states gets silently casted in float32. Hence, we need
530
+ # cast them back in float16 just to be sure everything works as expected.
531
+ input_dtype = query_states.dtype
532
+ if input_dtype == torch.float32:
533
+ if torch.is_autocast_enabled():
534
+ target_dtype = torch.get_autocast_gpu_dtype()
535
+ # Handle the case where the model is quantized
536
+ elif hasattr(self.config, "_pre_quantization_dtype"):
537
+ target_dtype = self.config._pre_quantization_dtype
538
+ else:
539
+ target_dtype = self.q_proj.weight.dtype
540
+
541
+ logger.warning_once(
542
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
543
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
544
+ f" {target_dtype}."
545
+ )
546
+
547
+ query_states = query_states.to(target_dtype)
548
+ key_states = key_states.to(target_dtype)
549
+ value_states = value_states.to(target_dtype)
550
+
551
+ # Reshape to the expected shape for Flash Attention
552
+ query_states = query_states.transpose(1, 2)
553
+ key_states = key_states.transpose(1, 2)
554
+ value_states = value_states.transpose(1, 2)
555
+
556
+ attn_output = self._flash_attention_forward(
557
+ query_states,
558
+ key_states,
559
+ value_states,
560
+ attention_mask,
561
+ q_len,
562
+ dropout=dropout_rate,
563
+ use_sliding_windows=use_sliding_windows,
564
+ )
565
+
566
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
567
+ attn_output = self.o_proj(attn_output)
568
+
569
+ if not output_attentions:
570
+ attn_weights = None
571
+
572
+ return attn_output, attn_weights, past_key_value
573
+
574
+ def _flash_attention_forward(
575
+ self,
576
+ query_states,
577
+ key_states,
578
+ value_states,
579
+ attention_mask,
580
+ query_length,
581
+ dropout=0.0,
582
+ softmax_scale=None,
583
+ use_sliding_windows=False,
584
+ ):
585
+ """
586
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
587
+ first unpad the input, then computes the attention scores and pad the final attention scores.
588
+
589
+ Args:
590
+ query_states (`torch.Tensor`):
591
+ Input query states to be passed to Flash Attention API
592
+ key_states (`torch.Tensor`):
593
+ Input key states to be passed to Flash Attention API
594
+ value_states (`torch.Tensor`):
595
+ Input value states to be passed to Flash Attention API
596
+ attention_mask (`torch.Tensor`):
597
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
598
+ position of padding tokens and 1 for the position of non-padding tokens.
599
+ dropout (`float`):
600
+ Attention dropout
601
+ softmax_scale (`float`, *optional*):
602
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
603
+ use_sliding_windows (`bool`, *optional*):
604
+ Whether to activate sliding window attention.
605
+ """
606
+ if not self._flash_attn_uses_top_left_mask:
607
+ causal = self.is_causal
608
+ else:
609
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
610
+ causal = self.is_causal and query_length != 1
611
+
612
+ # Decide whether to use SWA or not by layer index.
613
+ if use_sliding_windows and self.layer_idx >= self.config.max_window_layers:
614
+ use_sliding_windows = False
615
+
616
+ # Contains at least one padding token in the sequence
617
+ if attention_mask is not None:
618
+ batch_size = query_states.shape[0]
619
+ (
620
+ query_states,
621
+ key_states,
622
+ value_states,
623
+ indices_q,
624
+ cu_seq_lens,
625
+ max_seq_lens,
626
+ ) = self._upad_input(
627
+ query_states, key_states, value_states, attention_mask, query_length
628
+ )
629
+
630
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
631
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
632
+
633
+ if not use_sliding_windows:
634
+ attn_output_unpad = flash_attn_varlen_func(
635
+ query_states,
636
+ key_states,
637
+ value_states,
638
+ cu_seqlens_q=cu_seqlens_q,
639
+ cu_seqlens_k=cu_seqlens_k,
640
+ max_seqlen_q=max_seqlen_in_batch_q,
641
+ max_seqlen_k=max_seqlen_in_batch_k,
642
+ dropout_p=dropout,
643
+ softmax_scale=softmax_scale,
644
+ causal=causal,
645
+ )
646
+ else:
647
+ attn_output_unpad = flash_attn_varlen_func(
648
+ query_states,
649
+ key_states,
650
+ value_states,
651
+ cu_seqlens_q=cu_seqlens_q,
652
+ cu_seqlens_k=cu_seqlens_k,
653
+ max_seqlen_q=max_seqlen_in_batch_q,
654
+ max_seqlen_k=max_seqlen_in_batch_k,
655
+ dropout_p=dropout,
656
+ softmax_scale=softmax_scale,
657
+ causal=causal,
658
+ window_size=(
659
+ self.config.sliding_window,
660
+ self.config.sliding_window,
661
+ ),
662
+ )
663
+
664
+ attn_output = pad_input(
665
+ attn_output_unpad, indices_q, batch_size, query_length
666
+ )
667
+ else:
668
+ if not use_sliding_windows:
669
+ attn_output = flash_attn_func(
670
+ query_states,
671
+ key_states,
672
+ value_states,
673
+ dropout,
674
+ softmax_scale=softmax_scale,
675
+ causal=causal,
676
+ )
677
+ else:
678
+ attn_output = flash_attn_func(
679
+ query_states,
680
+ key_states,
681
+ value_states,
682
+ dropout,
683
+ softmax_scale=softmax_scale,
684
+ causal=causal,
685
+ window_size=(
686
+ self.config.sliding_window,
687
+ self.config.sliding_window,
688
+ ),
689
+ )
690
+
691
+ return attn_output
692
+
693
+ # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._upad_input
694
+ def _upad_input(
695
+ self, query_layer, key_layer, value_layer, attention_mask, query_length
696
+ ):
697
+ batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape
698
+
699
+ # On the first iteration we need to properly re-create the padding mask
700
+ # by slicing it on the proper place
701
+ if kv_seq_len != attention_mask.shape[-1]:
702
+ attention_mask_num_tokens = attention_mask.shape[-1]
703
+ attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]
704
+
705
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
706
+
707
+ key_layer = index_first_axis(
708
+ key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
709
+ )
710
+ value_layer = index_first_axis(
711
+ value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
712
+ )
713
+
714
+ if query_length == kv_seq_len:
715
+ query_layer = index_first_axis(
716
+ query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim),
717
+ indices_k,
718
+ )
719
+ cu_seqlens_q = cu_seqlens_k
720
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
721
+ indices_q = indices_k
722
+ elif query_length == 1:
723
+ max_seqlen_in_batch_q = 1
724
+ cu_seqlens_q = torch.arange(
725
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
726
+ ) # There is a memcpy here, that is very bad.
727
+ indices_q = cu_seqlens_q[:-1]
728
+ query_layer = query_layer.squeeze(1)
729
+ else:
730
+ # The -q_len: slice assumes left padding.
731
+ attention_mask = attention_mask[:, -query_length:]
732
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(
733
+ query_layer, attention_mask
734
+ )
735
+
736
+ return (
737
+ query_layer,
738
+ key_layer,
739
+ value_layer,
740
+ indices_q,
741
+ (cu_seqlens_q, cu_seqlens_k),
742
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
743
+ )
744
+
745
+
746
+ # Copied from transformers.models.mistral.modeling_mistral.MistralSdpaAttention with Mistral->Qwen2
747
+ class Qwen2SdpaAttention(Qwen2Attention):
748
+ """
749
+ Qwen2 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
750
+ `Qwen2Attention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to
751
+ SDPA API.
752
+ """
753
+
754
+ # Adapted from Qwen2Attention.forward
755
+ def forward(
756
+ self,
757
+ hidden_states: torch.Tensor,
758
+ attention_mask: Optional[torch.Tensor] = None,
759
+ position_ids: Optional[torch.LongTensor] = None,
760
+ past_key_value: Optional[Cache] = None,
761
+ output_attentions: bool = False,
762
+ use_cache: bool = False,
763
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
764
+ if output_attentions:
765
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
766
+ logger.warning_once(
767
+ "Qwen2Model is using Qwen2SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
768
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
769
+ )
770
+ return super().forward(
771
+ hidden_states=hidden_states,
772
+ attention_mask=attention_mask,
773
+ position_ids=position_ids,
774
+ past_key_value=past_key_value,
775
+ output_attentions=output_attentions,
776
+ use_cache=use_cache,
777
+ )
778
+
779
+ bsz, q_len, _ = hidden_states.size()
780
+
781
+ query_states = self.q_proj(hidden_states)
782
+ key_states = self.k_proj(hidden_states)
783
+ value_states = self.v_proj(hidden_states)
784
+
785
+ query_states = query_states.view(
786
+ bsz, q_len, self.num_heads, self.head_dim
787
+ ).transpose(1, 2)
788
+ key_states = key_states.view(
789
+ bsz, q_len, self.num_key_value_heads, self.head_dim
790
+ ).transpose(1, 2)
791
+ value_states = value_states.view(
792
+ bsz, q_len, self.num_key_value_heads, self.head_dim
793
+ ).transpose(1, 2)
794
+
795
+ kv_seq_len = key_states.shape[-2]
796
+ if past_key_value is not None:
797
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
798
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
799
+
800
+ query_states, key_states = apply_rotary_pos_emb(
801
+ query_states, key_states, cos, sin, position_ids
802
+ )
803
+
804
+ if past_key_value is not None:
805
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
806
+ key_states, value_states = past_key_value.update(
807
+ key_states, value_states, self.layer_idx, cache_kwargs
808
+ )
809
+
810
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
811
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
812
+
813
+ if attention_mask is not None:
814
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
815
+ raise ValueError(
816
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
817
+ )
818
+
819
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
820
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
821
+ if query_states.device.type == "cuda" and attention_mask is not None:
822
+ query_states = query_states.contiguous()
823
+ key_states = key_states.contiguous()
824
+ value_states = value_states.contiguous()
825
+
826
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
827
+ query_states,
828
+ key_states,
829
+ value_states,
830
+ attn_mask=attention_mask,
831
+ dropout_p=self.attention_dropout if self.training else 0.0,
832
+ # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
833
+ is_causal=self.is_causal and attention_mask is None and q_len > 1,
834
+ )
835
+
836
+ attn_output = attn_output.transpose(1, 2).contiguous()
837
+ attn_output = attn_output.view(bsz, q_len, self.hidden_size)
838
+
839
+ attn_output = self.o_proj(attn_output)
840
+
841
+ return attn_output, None, past_key_value
842
+
843
+
844
+ QWEN2_ATTENTION_CLASSES = {
845
+ "eager": Qwen2Attention,
846
+ "flash_attention_2": Qwen2FlashAttention2,
847
+ "sdpa": Qwen2SdpaAttention,
848
+ }
849
+
850
+
851
+ class Qwen2DecoderLayer(nn.Module):
852
+ def __init__(self, config: Qwen2Config, layer_idx: int):
853
+ super().__init__()
854
+ self.hidden_size = config.hidden_size
855
+
856
+ if (
857
+ config.use_sliding_window
858
+ and config._attn_implementation != "flash_attention_2"
859
+ ):
860
+ logger.warning_once(
861
+ f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; "
862
+ "unexpected results may be encountered."
863
+ )
864
+ self.self_attn = QWEN2_ATTENTION_CLASSES[config._attn_implementation](
865
+ config, layer_idx
866
+ )
867
+
868
+ self.moe = Qwen2MoE(config)
869
+ self.input_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
870
+ self.post_attention_layernorm = Qwen2RMSNorm(
871
+ config.hidden_size, eps=config.rms_norm_eps
872
+ )
873
+
874
+ def forward(
875
+ self,
876
+ hidden_states: torch.Tensor,
877
+ attention_mask: Optional[torch.Tensor] = None,
878
+ position_ids: Optional[torch.LongTensor] = None,
879
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
880
+ output_attentions: Optional[bool] = False,
881
+ use_cache: Optional[bool] = False,
882
+ **kwargs,
883
+ ) -> Tuple[
884
+ torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]
885
+ ]:
886
+ if "padding_mask" in kwargs:
887
+ warnings.warn(
888
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. "
889
+ "Please make sure use `attention_mask` instead.`"
890
+ )
891
+ """
892
+ Args:
893
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
894
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
895
+ `(batch, sequence_length)` where padding elements are indicated by 0.
896
+ output_attentions (`bool`, *optional*):
897
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
898
+ returned tensors for more detail.
899
+ use_cache (`bool`, *optional*):
900
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
901
+ (see `past_key_values`).
902
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
903
+ """
904
+
905
+ residual = hidden_states
906
+
907
+ hidden_states = self.input_layernorm(hidden_states)
908
+
909
+ # Self Attention
910
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
911
+ hidden_states=hidden_states,
912
+ attention_mask=attention_mask,
913
+ position_ids=position_ids,
914
+ past_key_value=past_key_value,
915
+ output_attentions=output_attentions,
916
+ use_cache=use_cache,
917
+ )
918
+ hidden_states = residual + hidden_states
919
+
920
+ # Fully Connected
921
+ residual = hidden_states
922
+ hidden_states = self.post_attention_layernorm(hidden_states)
923
+ hidden_states = self.moe(hidden_states)
924
+ hidden_states = residual + hidden_states
925
+
926
+ outputs = (hidden_states,)
927
+
928
+ if output_attentions:
929
+ outputs += (self_attn_weights,)
930
+
931
+ if use_cache:
932
+ outputs += (present_key_value,)
933
+
934
+ return outputs
935
+
936
+
937
+ QWEN2_START_DOCSTRING = r"""
938
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
939
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
940
+ etc.)
941
+
942
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
943
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
944
+ and behavior.
945
+
946
+ Parameters:
947
+ config ([`Qwen2Config`]):
948
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
949
+ load the weights associated with the model, only the configuration. Check out the
950
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
951
+ """
952
+
953
+
954
+ @add_start_docstrings(
955
+ "The bare Qwen2 Model outputting raw hidden-states without any specific head on top.",
956
+ QWEN2_START_DOCSTRING,
957
+ )
958
+ class Qwen2PreTrainedModel(PreTrainedModel):
959
+ config_class = Qwen2Config
960
+ base_model_prefix = "model"
961
+ supports_gradient_checkpointing = True
962
+ _no_split_modules = ["Qwen2DecoderLayer"]
963
+ _skip_keys_device_placement = "past_key_values"
964
+ _supports_flash_attn_2 = True
965
+ _supports_sdpa = True
966
+ _supports_cache_class = True
967
+
968
+ def _init_weights(self, module):
969
+ std = self.config.initializer_range
970
+ if isinstance(module, nn.Linear):
971
+ module.weight.data.normal_(mean=0.0, std=std)
972
+ if module.bias is not None:
973
+ module.bias.data.zero_()
974
+ elif isinstance(module, nn.Embedding):
975
+ module.weight.data.normal_(mean=0.0, std=std)
976
+ if module.padding_idx is not None:
977
+ module.weight.data[module.padding_idx].zero_()
978
+
979
+
980
+ QWEN2_INPUTS_DOCSTRING = r"""
981
+ Args:
982
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
983
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
984
+ it.
985
+
986
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
987
+ [`PreTrainedTokenizer.__call__`] for details.
988
+
989
+ [What are input IDs?](../glossary#input-ids)
990
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
991
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
992
+
993
+ - 1 for tokens that are **not masked**,
994
+ - 0 for tokens that are **masked**.
995
+
996
+ [What are attention masks?](../glossary#attention-mask)
997
+
998
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
999
+ [`PreTrainedTokenizer.__call__`] for details.
1000
+
1001
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
1002
+ `past_key_values`).
1003
+
1004
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
1005
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
1006
+ information on the default strategy.
1007
+
1008
+ - 1 indicates the head is **not masked**,
1009
+ - 0 indicates the head is **masked**.
1010
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1011
+ Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
1012
+ config.n_positions - 1]`.
1013
+
1014
+ [What are position IDs?](../glossary#position-ids)
1015
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
1016
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
1017
+ blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
1018
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
1019
+
1020
+ Two formats are allowed:
1021
+ - a [`~cache_utils.Cache`] instance;
1022
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
1023
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`. This is also known as the legacy
1024
+ cache format.
1025
+
1026
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
1027
+ legacy cache format will be returned.
1028
+
1029
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
1030
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
1031
+ of shape `(batch_size, sequence_length)`.
1032
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1033
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1034
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1035
+ model's internal embedding lookup matrix.
1036
+ use_cache (`bool`, *optional*):
1037
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1038
+ `past_key_values`).
1039
+ output_attentions (`bool`, *optional*):
1040
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1041
+ tensors for more detail.
1042
+ output_hidden_states (`bool`, *optional*):
1043
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1044
+ more detail.
1045
+ return_dict (`bool`, *optional*):
1046
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1047
+ """
1048
+
1049
+
1050
+ @add_start_docstrings(
1051
+ "The bare Qwen2 Model outputting raw hidden-states without any specific head on top.",
1052
+ QWEN2_START_DOCSTRING,
1053
+ )
1054
+ class Qwen2Model(Qwen2PreTrainedModel):
1055
+ """
1056
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Qwen2DecoderLayer`].
1057
+
1058
+ Args:
1059
+ config: Qwen2Config
1060
+ """
1061
+
1062
+ def __init__(self, config: Qwen2Config):
1063
+ super().__init__(config)
1064
+ self.padding_idx = config.pad_token_id
1065
+ self.vocab_size = config.vocab_size
1066
+
1067
+ self.embed_tokens = nn.Embedding(
1068
+ config.vocab_size, config.hidden_size, self.padding_idx
1069
+ )
1070
+ self.layers = nn.ModuleList(
1071
+ [
1072
+ Qwen2DecoderLayer(config, layer_idx)
1073
+ for layer_idx in range(config.num_hidden_layers)
1074
+ ]
1075
+ )
1076
+ self._attn_implementation = config._attn_implementation
1077
+ self.norm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
1078
+
1079
+ self.gradient_checkpointing = False
1080
+ # Initialize weights and apply final processing
1081
+ self.post_init()
1082
+
1083
+ def get_input_embeddings(self):
1084
+ return self.embed_tokens
1085
+
1086
+ def set_input_embeddings(self, value):
1087
+ self.embed_tokens = value
1088
+
1089
+ @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
1090
+ def forward(
1091
+ self,
1092
+ input_ids: torch.LongTensor = None,
1093
+ attention_mask: Optional[torch.Tensor] = None,
1094
+ position_ids: Optional[torch.LongTensor] = None,
1095
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1096
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1097
+ use_cache: Optional[bool] = None,
1098
+ output_attentions: Optional[bool] = None,
1099
+ output_hidden_states: Optional[bool] = None,
1100
+ return_dict: Optional[bool] = None,
1101
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
1102
+ output_attentions = (
1103
+ output_attentions
1104
+ if output_attentions is not None
1105
+ else self.config.output_attentions
1106
+ )
1107
+ output_hidden_states = (
1108
+ output_hidden_states
1109
+ if output_hidden_states is not None
1110
+ else self.config.output_hidden_states
1111
+ )
1112
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1113
+
1114
+ return_dict = (
1115
+ return_dict if return_dict is not None else self.config.use_return_dict
1116
+ )
1117
+
1118
+ # retrieve input_ids and inputs_embeds
1119
+ if input_ids is not None and inputs_embeds is not None:
1120
+ raise ValueError(
1121
+ "You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time"
1122
+ )
1123
+ elif input_ids is not None:
1124
+ batch_size, seq_length = input_ids.shape
1125
+ elif inputs_embeds is not None:
1126
+ batch_size, seq_length, _ = inputs_embeds.shape
1127
+ else:
1128
+ raise ValueError(
1129
+ "You have to specify either decoder_input_ids or decoder_inputs_embeds"
1130
+ )
1131
+
1132
+ if self.gradient_checkpointing and self.training:
1133
+ if use_cache:
1134
+ logger.warning_once(
1135
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
1136
+ )
1137
+ use_cache = False
1138
+
1139
+ past_key_values_length = 0
1140
+
1141
+ if use_cache:
1142
+ use_legacy_cache = not isinstance(past_key_values, Cache)
1143
+ if use_legacy_cache:
1144
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
1145
+ past_key_values_length = past_key_values.get_usable_length(seq_length)
1146
+
1147
+ if position_ids is None:
1148
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1149
+ position_ids = torch.arange(
1150
+ past_key_values_length,
1151
+ seq_length + past_key_values_length,
1152
+ dtype=torch.long,
1153
+ device=device,
1154
+ )
1155
+ position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
1156
+ else:
1157
+ position_ids = position_ids.view(-1, seq_length).long()
1158
+
1159
+ if inputs_embeds is None:
1160
+ inputs_embeds = self.embed_tokens(input_ids)
1161
+
1162
+ if (
1163
+ attention_mask is not None
1164
+ and self._attn_implementation == "flash_attention_2"
1165
+ and use_cache
1166
+ ):
1167
+ is_padding_right = attention_mask[:, -1].sum().item() != batch_size
1168
+ if is_padding_right:
1169
+ raise ValueError(
1170
+ "You are attempting to perform batched generation with padding_side='right'"
1171
+ " this may lead to unexpected behaviour for Flash Attention version of Qwen2. Make sure to "
1172
+ " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
1173
+ )
1174
+
1175
+ if self._attn_implementation == "flash_attention_2":
1176
+ # 2d mask is passed through the layers
1177
+ attention_mask = (
1178
+ attention_mask
1179
+ if (attention_mask is not None and 0 in attention_mask)
1180
+ else None
1181
+ )
1182
+ elif self._attn_implementation == "sdpa" and not output_attentions:
1183
+ # output_attentions=True can not be supported when using SDPA, and we fall back on
1184
+ # the manual implementation that requires a 4D causal mask in all cases.
1185
+ attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
1186
+ attention_mask,
1187
+ (batch_size, seq_length),
1188
+ inputs_embeds,
1189
+ past_key_values_length,
1190
+ )
1191
+ else:
1192
+ # 4d mask is passed through the layers
1193
+ attention_mask = _prepare_4d_causal_attention_mask(
1194
+ attention_mask,
1195
+ (batch_size, seq_length),
1196
+ inputs_embeds,
1197
+ past_key_values_length,
1198
+ sliding_window=self.config.sliding_window,
1199
+ )
1200
+
1201
+ hidden_states = inputs_embeds
1202
+
1203
+ # decoder layers
1204
+ all_hidden_states = () if output_hidden_states else None
1205
+ all_self_attns = () if output_attentions else None
1206
+ next_decoder_cache = None
1207
+
1208
+ for decoder_layer in self.layers:
1209
+ if output_hidden_states:
1210
+ all_hidden_states += (hidden_states,)
1211
+
1212
+ if self.gradient_checkpointing and self.training:
1213
+ layer_outputs = self._gradient_checkpointing_func(
1214
+ decoder_layer.__call__,
1215
+ hidden_states,
1216
+ attention_mask,
1217
+ position_ids,
1218
+ past_key_values,
1219
+ output_attentions,
1220
+ use_cache,
1221
+ )
1222
+ else:
1223
+ layer_outputs = decoder_layer(
1224
+ hidden_states,
1225
+ attention_mask=attention_mask,
1226
+ position_ids=position_ids,
1227
+ past_key_value=past_key_values,
1228
+ output_attentions=output_attentions,
1229
+ use_cache=use_cache,
1230
+ )
1231
+
1232
+ hidden_states = layer_outputs[0]
1233
+
1234
+ if use_cache:
1235
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
1236
+
1237
+ if output_attentions:
1238
+ all_self_attns += (layer_outputs[1],)
1239
+
1240
+ hidden_states = self.norm(hidden_states)
1241
+
1242
+ # add hidden states from the last decoder layer
1243
+ if output_hidden_states:
1244
+ all_hidden_states += (hidden_states,)
1245
+
1246
+ next_cache = None
1247
+ if use_cache:
1248
+ next_cache = (
1249
+ next_decoder_cache.to_legacy_cache()
1250
+ if use_legacy_cache
1251
+ else next_decoder_cache
1252
+ )
1253
+
1254
+ if not return_dict:
1255
+ return tuple(
1256
+ v
1257
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns]
1258
+ if v is not None
1259
+ )
1260
+ return BaseModelOutputWithPast(
1261
+ last_hidden_state=hidden_states,
1262
+ past_key_values=next_cache,
1263
+ hidden_states=all_hidden_states,
1264
+ attentions=all_self_attns,
1265
+ )
1266
+
1267
+
1268
+ class Qwen2ForCausalLM(Qwen2PreTrainedModel):
1269
+ _tied_weights_keys = ["lm_head.weight"]
1270
+
1271
+ def __init__(self, config):
1272
+ super().__init__(config)
1273
+ self.model = Qwen2Model(config)
1274
+ self.vocab_size = config.vocab_size
1275
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1276
+
1277
+ # Initialize weights and apply final processing
1278
+ self.post_init()
1279
+
1280
+ def get_input_embeddings(self):
1281
+ return self.model.embed_tokens
1282
+
1283
+ def set_input_embeddings(self, value):
1284
+ self.model.embed_tokens = value
1285
+
1286
+ def get_output_embeddings(self):
1287
+ return self.lm_head
1288
+
1289
+ def set_output_embeddings(self, new_embeddings):
1290
+ self.lm_head = new_embeddings
1291
+
1292
+ def set_decoder(self, decoder):
1293
+ self.model = decoder
1294
+
1295
+ def get_decoder(self):
1296
+ return self.model
1297
+
1298
+ @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
1299
+ @replace_return_docstrings(
1300
+ output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC
1301
+ )
1302
+ def forward(
1303
+ self,
1304
+ input_ids: torch.LongTensor = None,
1305
+ attention_mask: Optional[torch.Tensor] = None,
1306
+ position_ids: Optional[torch.LongTensor] = None,
1307
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1308
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1309
+ labels: Optional[torch.LongTensor] = None,
1310
+ use_cache: Optional[bool] = None,
1311
+ output_attentions: Optional[bool] = None,
1312
+ output_hidden_states: Optional[bool] = None,
1313
+ return_dict: Optional[bool] = None,
1314
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1315
+ r"""
1316
+ Args:
1317
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1318
+ Labels for computing the language modeling loss. Indices should either be in `[0, ...,
1319
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1320
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1321
+
1322
+ Returns:
1323
+
1324
+ Example:
1325
+
1326
+ ```python
1327
+ >>> from transformers import AutoTokenizer, Qwen2ForCausalLM
1328
+
1329
+ >>> model = Qwen2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
1330
+ >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
1331
+
1332
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1333
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1334
+
1335
+ >>> # Generate
1336
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1337
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1338
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1339
+ ```"""
1340
+
1341
+ output_attentions = (
1342
+ output_attentions
1343
+ if output_attentions is not None
1344
+ else self.config.output_attentions
1345
+ )
1346
+ output_hidden_states = (
1347
+ output_hidden_states
1348
+ if output_hidden_states is not None
1349
+ else self.config.output_hidden_states
1350
+ )
1351
+ return_dict = (
1352
+ return_dict if return_dict is not None else self.config.use_return_dict
1353
+ )
1354
+
1355
+ # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
1356
+ outputs = self.model(
1357
+ input_ids=input_ids,
1358
+ attention_mask=attention_mask,
1359
+ position_ids=position_ids,
1360
+ past_key_values=past_key_values,
1361
+ inputs_embeds=inputs_embeds,
1362
+ use_cache=use_cache,
1363
+ output_attentions=output_attentions,
1364
+ output_hidden_states=output_hidden_states,
1365
+ return_dict=return_dict,
1366
+ )
1367
+
1368
+ hidden_states = outputs[0]
1369
+ logits = self.lm_head(hidden_states)
1370
+ logits = logits.float()
1371
+
1372
+ loss = None
1373
+ if labels is not None:
1374
+ # Shift so that tokens < n predict n
1375
+ shift_logits = logits[..., :-1, :].contiguous()
1376
+ shift_labels = labels[..., 1:].contiguous()
1377
+ # Flatten the tokens
1378
+ loss_fct = CrossEntropyLoss()
1379
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1380
+ shift_labels = shift_labels.view(-1)
1381
+ # Enable model parallelism
1382
+ shift_labels = shift_labels.to(shift_logits.device)
1383
+ loss = loss_fct(shift_logits, shift_labels)
1384
+
1385
+ if not return_dict:
1386
+ output = (logits,) + outputs[1:]
1387
+ return (loss,) + output if loss is not None else output
1388
+
1389
+ return CausalLMOutputWithPast(
1390
+ loss=loss,
1391
+ logits=logits,
1392
+ past_key_values=outputs.past_key_values,
1393
+ hidden_states=outputs.hidden_states,
1394
+ attentions=outputs.attentions,
1395
+ )
1396
+
1397
+ def prepare_inputs_for_generation(
1398
+ self,
1399
+ input_ids,
1400
+ past_key_values=None,
1401
+ attention_mask=None,
1402
+ inputs_embeds=None,
1403
+ **kwargs,
1404
+ ):
1405
+ # Omit tokens covered by past_key_values
1406
+ if past_key_values is not None:
1407
+ if isinstance(past_key_values, Cache):
1408
+ cache_length = past_key_values.get_seq_length()
1409
+ past_length = past_key_values.seen_tokens
1410
+ max_cache_length = past_key_values.get_max_length()
1411
+ else:
1412
+ cache_length = past_length = past_key_values[0][0].shape[2]
1413
+ max_cache_length = None
1414
+
1415
+ # Keep only the unprocessed tokens:
1416
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
1417
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing inputs_embeds as
1418
+ # input)
1419
+ if (
1420
+ attention_mask is not None
1421
+ and attention_mask.shape[1] > input_ids.shape[1]
1422
+ ):
1423
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
1424
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
1425
+ # input_ids based on the past_length.
1426
+ elif past_length < input_ids.shape[1]:
1427
+ input_ids = input_ids[:, past_length:]
1428
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
1429
+
1430
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
1431
+ if (
1432
+ max_cache_length is not None
1433
+ and attention_mask is not None
1434
+ and cache_length + input_ids.shape[1] > max_cache_length
1435
+ ):
1436
+ attention_mask = attention_mask[:, -max_cache_length:]
1437
+
1438
+ position_ids = kwargs.get("position_ids", None)
1439
+ if attention_mask is not None and position_ids is None:
1440
+ # create position_ids on the fly for batch generation
1441
+ position_ids = attention_mask.long().cumsum(-1) - 1
1442
+ position_ids.masked_fill_(attention_mask == 0, 1)
1443
+ if past_key_values:
1444
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1445
+
1446
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1447
+ if inputs_embeds is not None and past_key_values is None:
1448
+ model_inputs = {"inputs_embeds": inputs_embeds}
1449
+ else:
1450
+ model_inputs = {"input_ids": input_ids}
1451
+
1452
+ model_inputs.update(
1453
+ {
1454
+ "position_ids": position_ids,
1455
+ "past_key_values": past_key_values,
1456
+ "use_cache": kwargs.get("use_cache"),
1457
+ "attention_mask": attention_mask,
1458
+ }
1459
+ )
1460
+ return model_inputs
1461
+
1462
+ @staticmethod
1463
+ def _reorder_cache(past_key_values, beam_idx):
1464
+ reordered_past = ()
1465
+ for layer_past in past_key_values:
1466
+ reordered_past += (
1467
+ tuple(
1468
+ past_state.index_select(0, beam_idx.to(past_state.device))
1469
+ for past_state in layer_past
1470
+ ),
1471
+ )
1472
+ return reordered_past
1473
+
1474
+
1475
+ @add_start_docstrings(
1476
+ """
1477
+ The Qwen2 Model transformer with a sequence classification head on top (linear layer).
1478
+
1479
+ [`Qwen2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1480
+ (e.g. GPT-2) do.
1481
+
1482
+ Since it does classification on the last token, it needs to know the position of the last token. If a
1483
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1484
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1485
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1486
+ each row of the batch).
1487
+ """,
1488
+ QWEN2_START_DOCSTRING,
1489
+ )
1490
+ class Qwen2ForSequenceClassification(Qwen2PreTrainedModel):
1491
+ def __init__(self, config):
1492
+ super().__init__(config)
1493
+ self.num_labels = config.num_labels
1494
+ self.model = Qwen2Model(config)
1495
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1496
+
1497
+ # Initialize weights and apply final processing
1498
+ self.post_init()
1499
+
1500
+ def get_input_embeddings(self):
1501
+ return self.model.embed_tokens
1502
+
1503
+ def set_input_embeddings(self, value):
1504
+ self.model.embed_tokens = value
1505
+
1506
+ @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
1507
+ def forward(
1508
+ self,
1509
+ input_ids: torch.LongTensor = None,
1510
+ attention_mask: Optional[torch.Tensor] = None,
1511
+ position_ids: Optional[torch.LongTensor] = None,
1512
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1513
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1514
+ labels: Optional[torch.LongTensor] = None,
1515
+ use_cache: Optional[bool] = None,
1516
+ output_attentions: Optional[bool] = None,
1517
+ output_hidden_states: Optional[bool] = None,
1518
+ return_dict: Optional[bool] = None,
1519
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1520
+ r"""
1521
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1522
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1523
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1524
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1525
+ """
1526
+ return_dict = (
1527
+ return_dict if return_dict is not None else self.config.use_return_dict
1528
+ )
1529
+
1530
+ transformer_outputs = self.model(
1531
+ input_ids,
1532
+ attention_mask=attention_mask,
1533
+ position_ids=position_ids,
1534
+ past_key_values=past_key_values,
1535
+ inputs_embeds=inputs_embeds,
1536
+ use_cache=use_cache,
1537
+ output_attentions=output_attentions,
1538
+ output_hidden_states=output_hidden_states,
1539
+ return_dict=return_dict,
1540
+ )
1541
+ hidden_states = transformer_outputs[0]
1542
+ logits = self.score(hidden_states)
1543
+
1544
+ if input_ids is not None:
1545
+ batch_size = input_ids.shape[0]
1546
+ else:
1547
+ batch_size = inputs_embeds.shape[0]
1548
+
1549
+ if self.config.pad_token_id is None and batch_size != 1:
1550
+ raise ValueError(
1551
+ "Cannot handle batch sizes > 1 if no padding token is defined."
1552
+ )
1553
+ if self.config.pad_token_id is None:
1554
+ sequence_lengths = -1
1555
+ else:
1556
+ if input_ids is not None:
1557
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1558
+ sequence_lengths = (
1559
+ torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1560
+ )
1561
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1562
+ sequence_lengths = sequence_lengths.to(logits.device)
1563
+ else:
1564
+ sequence_lengths = -1
1565
+
1566
+ pooled_logits = logits[
1567
+ torch.arange(batch_size, device=logits.device), sequence_lengths
1568
+ ]
1569
+
1570
+ loss = None
1571
+ if labels is not None:
1572
+ labels = labels.to(logits.device)
1573
+ if self.config.problem_type is None:
1574
+ if self.num_labels == 1:
1575
+ self.config.problem_type = "regression"
1576
+ elif self.num_labels > 1 and (
1577
+ labels.dtype == torch.long or labels.dtype == torch.int
1578
+ ):
1579
+ self.config.problem_type = "single_label_classification"
1580
+ else:
1581
+ self.config.problem_type = "multi_label_classification"
1582
+
1583
+ if self.config.problem_type == "regression":
1584
+ loss_fct = MSELoss()
1585
+ if self.num_labels == 1:
1586
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1587
+ else:
1588
+ loss = loss_fct(pooled_logits, labels)
1589
+ elif self.config.problem_type == "single_label_classification":
1590
+ loss_fct = CrossEntropyLoss()
1591
+ loss = loss_fct(
1592
+ pooled_logits.view(-1, self.num_labels), labels.view(-1)
1593
+ )
1594
+ elif self.config.problem_type == "multi_label_classification":
1595
+ loss_fct = BCEWithLogitsLoss()
1596
+ loss = loss_fct(pooled_logits, labels)
1597
+ if not return_dict:
1598
+ output = (pooled_logits,) + transformer_outputs[1:]
1599
+ return ((loss,) + output) if loss is not None else output
1600
+
1601
+ return SequenceClassifierOutputWithPast(
1602
+ loss=loss,
1603
+ logits=pooled_logits,
1604
+ past_key_values=transformer_outputs.past_key_values,
1605
+ hidden_states=transformer_outputs.hidden_states,
1606
+ attentions=transformer_outputs.attentions,
1607
+ )
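
Note: the sequence-classification head above pools the logits of the last non-padding token in each row, which is why a `pad_token_id` must be defined for batch sizes larger than one (the tokenizer files below set `pad_token` to `<|im_end|>`). A minimal usage sketch follows, assuming a local copy of this repository at a placeholder path and an arbitrary `num_labels=2`; the `score` head is newly initialized here, so its predictions are only meaningful after fine-tuning.

```python
import torch
from transformers import AutoTokenizer

# Hypothetical: import the class defined in modeling_qwen2.py above.
from modeling_qwen2 import Qwen2ForSequenceClassification

path = "path/to/this/repo"  # placeholder, not a real checkpoint name
tokenizer = AutoTokenizer.from_pretrained(path)
model = Qwen2ForSequenceClassification.from_pretrained(
    path, num_labels=2, torch_dtype=torch.bfloat16
)
# Make sure the model config knows the pad token so batches > 1 are accepted.
model.config.pad_token_id = tokenizer.pad_token_id

# Padded positions are skipped when the head looks for the last real token of each row.
inputs = tokenizer(["I enjoyed this.", "This was terrible."], padding=True, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape: (batch_size, num_labels)
print(logits.argmax(dim=-1))
```
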
special_tokens_map.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>"
5
+ ],
6
+ "eos_token": {
7
+ "content": "<|im_end|>",
8
+ "lstrip": false,
9
+ "normalized": false,
10
+ "rstrip": false,
11
+ "single_word": false
12
+ },
13
+ "pad_token": "<|im_end|>"
14
+ }
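
Because `<|im_end|>` serves as both the end-of-sequence and padding token, batched generation only needs the padding side fixed to the left, matching the check in `Qwen2Model.forward` for the Flash Attention path. A small sketch, assuming a placeholder local path and that the custom modeling code above is picked up via `trust_remote_code=True`:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "path/to/this/repo"  # placeholder, not a real checkpoint name
tokenizer = AutoTokenizer.from_pretrained(path)
tokenizer.padding_side = "left"  # required for batched generation with flash_attention_2

model = AutoModelForCausalLM.from_pretrained(
    path, torch_dtype=torch.bfloat16, device_map="auto", trust_remote_code=True
)

prompts = ["1 + 1 =", "The capital of France is"]
# <|im_end|> is the pad token, so padded positions are masked out via attention_mask.
inputs = tokenizer(prompts, padding=True, return_tensors="pt").to(model.device)
output_ids = model.generate(**inputs, max_new_tokens=16)
print(tokenizer.batch_decode(output_ids, skip_special_tokens=True))
```
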
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "151643": {
5
+ "content": "<|endoftext|>",
6
+ "lstrip": false,
7
+ "normalized": false,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "151644": {
13
+ "content": "<|im_start|>",
14
+ "lstrip": false,
15
+ "normalized": false,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "151645": {
21
+ "content": "<|im_end|>",
22
+ "lstrip": false,
23
+ "normalized": false,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ }
28
+ },
29
+ "additional_special_tokens": [
30
+ "<|im_start|>",
31
+ "<|im_end|>"
32
+ ],
33
+ "bos_token": null,
34
+ "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content']}}{% if (loop.last and add_generation_prompt) or not loop.last %}{{ '<|im_end|>' + '\n'}}{% endif %}{% endfor %}{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}{{ '<|im_start|>assistant\n' }}{% endif %}",
35
+ "clean_up_tokenization_spaces": false,
36
+ "eos_token": "<|im_end|>",
37
+ "errors": "replace",
38
+ "model_max_length": 32768,
39
+ "pad_token": "<|im_end|>",
40
+ "split_special_tokens": false,
41
+ "tokenizer_class": "Qwen2Tokenizer",
42
+ "unk_token": null
43
+ }
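
The `chat_template` above implements the ChatML format built on the `<|im_start|>` / `<|im_end|>` special tokens: it prepends a default system prompt when none is supplied and, with `add_generation_prompt=True`, ends the prompt with an opening assistant turn. A short sketch, assuming a placeholder local path containing the tokenizer files in this repository:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this/repo")  # placeholder path

messages = [{"role": "user", "content": "Hello!"}]
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)
# Per the template above, this renders as:
# <|im_start|>system
# You are a helpful assistant<|im_end|>
# <|im_start|>user
# Hello!<|im_end|>
# <|im_start|>assistant
```
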
vocab.json ADDED
The diff for this file is too large to render. See raw diff