Question Answering
Transformers
Safetensors
English
doge
text-generation
custom_code
JingzeShi committed
Commit 8a569b4 (verified) · 1 parent: 711e752

Upload DogeForCausalLM

Files changed (4)
  1. config.json +48 -48
  2. configuration_doge.py +64 -62
  3. generation_config.json +1 -1
  4. modeling_doge.py +120 -196
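
The four files in this commit can be pinned to the exact revision shown above when reproducing the change locally. A minimal sketch using `huggingface_hub` follows; the repository id `SmallDoge/Doge-60M-Instruct` is an assumption taken from the `_name_or_path` field in the new config.json, and `8a569b4` is the commit hash shown above.

```python
# Hypothetical sketch: fetch the files touched by this commit at the pinned revision.
# The repo id below is assumed from the new "_name_or_path" value; adjust it if the
# upload actually lives under a different namespace.
from huggingface_hub import hf_hub_download

REPO_ID = "SmallDoge/Doge-60M-Instruct"  # assumed repository id
REVISION = "8a569b4"                     # commit shown in this diff

for filename in ("config.json", "configuration_doge.py", "generation_config.json", "modeling_doge.py"):
    local_path = hf_hub_download(repo_id=REPO_ID, filename=filename, revision=REVISION)
    print(filename, "->", local_path)
```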
config.json CHANGED
@@ -1,48 +1,48 @@
- {
-   "_name_or_path": "./results/Doge-60M-Instruct-DPO",
-   "architectures": [
-     "DogeForCausalLM"
-   ],
-   "attention_dropout": 0.0,
-   "auto_map": {
-     "AutoConfig": "configuration_doge.DogeConfig",
-     "AutoModelForCausalLM": "modeling_doge.DogeForCausalLM"
-   },
-   "bos_token_id": 0,
-   "dynamic_mask_ratio": 0.0,
-   "eos_token_id": 1,
-   "expert_retrieval_size": 256,
-   "hidden_act": "silu",
-   "hidden_bias": false,
-   "hidden_dropout": 0.0,
-   "hidden_size": 512,
-   "initializer_range": 0.02,
-   "intermediate_size": 1024,
-   "is_moe": false,
-   "max_position_embeddings": 2048,
-   "model_type": "doge",
-   "num_attention_heads": 4,
-   "num_cdmmoe_experts": 2048,
-   "num_cdmmoe_experts_per_head": 8,
-   "num_cdmmoe_heads": 4,
-   "num_cdmoe_experts": 16348,
-   "num_cdmoe_experts_per_head": 8,
-   "num_cdmoe_heads": 4,
-   "num_channels": 3,
-   "num_hidden_layers": 16,
-   "num_key_value_heads": 2,
-   "pad_token_id": 2,
-   "patch_size": 16,
-   "rms_norm_eps": 1e-06,
-   "rope_scaling": {
-     "factor": 4.0,
-     "original_max_position_embeddings": 2048,
-     "rope_type": "dynamic"
-   },
-   "rope_theta": 10000.0,
-   "tie_word_embeddings": true,
-   "torch_dtype": "float32",
-   "transformers_version": "4.48.1",
-   "use_cache": true,
-   "vocab_size": 32768
- }

+ {
+   "_name_or_path": "SmallDoge/Doge-60M-Instruct",
+   "architectures": [
+     "DogeForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "auto_map": {
+     "AutoConfig": "configuration_doge.DogeConfig",
+     "AutoModelForCausalLM": "modeling_doge.DogeForCausalLM"
+   },
+   "bos_token_id": 0,
+   "dynamic_mask_ratio": 0.0,
+   "eos_token_id": 1,
+   "expert_retrieval_size": 256,
+   "hidden_act": "silu",
+   "hidden_bias": false,
+   "hidden_dropout": 0.0,
+   "hidden_size": 512,
+   "initializer_range": 0.02,
+   "intermediate_size": 1024,
+   "is_moe": false,
+   "max_position_embeddings": 2048,
+   "model_type": "doge",
+   "num_attention_heads": 4,
+   "num_cdmmoe_experts": 2048,
+   "num_cdmmoe_experts_per_head": 8,
+   "num_cdmmoe_heads": 4,
+   "num_cdmoe_experts": 16348,
+   "num_cdmoe_experts_per_head": 8,
+   "num_cdmoe_heads": 4,
+   "num_channels": 3,
+   "num_hidden_layers": 16,
+   "num_key_value_heads": 2,
+   "pad_token_id": 2,
+   "patch_size": 16,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": {
+     "factor": 4.0,
+     "original_max_position_embeddings": 2048,
+     "rope_type": "dynamic"
+   },
+   "rope_theta": 10000.0,
+   "tie_word_embeddings": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.48.2",
+   "use_cache": true,
+   "vocab_size": 32768
+ }
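
Because `auto_map` in config.json points at the bundled `configuration_doge.py` and `modeling_doge.py`, loading this checkpoint goes through the Hub's custom-code path and needs `trust_remote_code=True`. A hedged sketch of loading and generating (the repository id is assumed from `_name_or_path`; the prompt is borrowed from the docstring example in modeling_doge.py):

```python
# Sketch of loading the checkpoint described by this config.json.
# The repo id is an assumption based on "_name_or_path"; trust_remote_code=True
# is required because auto_map resolves to the custom Doge classes in this repo.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "SmallDoge/Doge-60M-Instruct"  # assumed
tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype=torch.float32,  # matches "torch_dtype": "float32" above
    trust_remote_code=True,
)

inputs = tokenizer("Hey, are you conscious? Can you talk to me?", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```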
configuration_doge.py CHANGED
@@ -1,9 +1,14 @@
  # coding=utf-8
  # Copyright 2024 Jingze Shi and the HuggingFace Inc. team. All rights reserved.
  #
  # This code is based on the Wonderful Matrices paper implementation.
- #
- # https://arxiv.org/abs/2412.11834
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -16,8 +21,6 @@
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  # See the License for the specific language governing permissions and
  # limitations under the License.
- """PyTorch Doge model configuration"""
-
  from transformers.configuration_utils import PretrainedConfig
  from transformers.modeling_rope_utils import rope_config_validation

@@ -25,7 +28,7 @@ from transformers.modeling_rope_utils import rope_config_validation
  class DogeConfig(PretrainedConfig):
      r"""
      This is the configuration class to store the configuration of a [`DogeModel`]. It is used to instantiate an Doge
-     model according to the specified arguments, defining the model architecture like [JingzeShi/Doge-20M](https://huggingface.co/JingzeShi/Doge-20M).

      Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
      documentation from [`PretrainedConfig`] for more information.
@@ -33,10 +36,6 @@ class DogeConfig(PretrainedConfig):
      Args:
          vocab_size (`int`, *optional*, defaults to 32768):
              Vocabulary size of the Doge model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`DogeModel`]
-         num_channels (`int`, *optional*, defaults to 3):
-             Number of channels in the input image.
-         patch_size (`int`, *optional*, defaults to 16):
-             Patch size of Vision Transformer Embeddings.
          hidden_size (`int`, *optional*, defaults to 1024):
              Dimension of the hidden representations.
          intermediate_size (`int`, *optional*, defaults to 2048):
@@ -49,25 +48,41 @@ class DogeConfig(PretrainedConfig):
              Dropout probability for each sequence transformation and state transformation module.
          hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
              The non-linear activation function (function or string) in the decoder.
          max_position_embeddings (`int`, *optional*, defaults to 2048):
              The maximum sequence length that this model might ever be used with.
          rope_theta (`float`, *optional*, defaults to 10000.0):
              The base period of the RoPE embeddings.
          rope_scaling (`Dict`, *optional*):
-             Dictionary containing the scaling configuration for the RoPE embeddings.
              NOTE: if you apply new rope type and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value accordingly.
              Expected contents:
                  `rope_type` (`str`):
                      The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope', 'llama3'], with 'default' being the original RoPE implementation.
                  `factor` (`float`, *optional*):
-                     Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings.
                      In most scaling types, a `factor` of x will enable the model to handle sequences of length x * original maximum pre-trained length.
                  `original_max_position_embeddings` (`int`, *optional*):
-                     Used with 'dynamic', 'longrope' and 'llama3'.
                      The original max position embeddings used during pretraining.
                  `attention_factor` (`float`, *optional*):
                      Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
-                     computation.
                      If unspecified, it defaults to value recommended by the implementation, using the `factor` field to infer the suggested value.
                  `beta_fast` (`float`, *optional*):
                      Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
@@ -76,54 +91,51 @@ class DogeConfig(PretrainedConfig):
                      Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
                      ramp function. If unspecified, it defaults to 1.
                  `short_factor` (`List[float]`, *optional*):
-                     Only used with 'longrope'. The scaling factor to be applied to short contexts (<`original_max_position_embeddings`).
                      Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2
                  `long_factor` (`List[float]`, *optional*):
-                     Only used with 'longrope'. The scaling factor to be applied to long contexts (<`original_max_position_embeddings`).
                      Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2
                  `low_freq_factor` (`float`, *optional*):
                      Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
                  `high_freq_factor` (`float`, *optional*):
                      Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
-         initializer_range (`float`, *optional*, defaults to 0.02):
-             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
-         rms_norm_eps (`float`, *optional*, defaults to 1e-06):
-             The epsilon used by the rms normalization layers.
-         use_cache (`bool`, *optional*, defaults to `True`):
-             Whether or not the model should return the last key/values attentions (not used by all models). Only
-             relevant if `config.is_decoder=True`.
-         pad_token_id (`int`, *optional*, defaults to 0):
-             Padding token id.
-         bos_token_id (`int`, *optional*, defaults to 1):
-             Beginning of stream token id.
-         eos_token_id (`int`, *optional*, defaults to 2):
-             End of stream token id.
-         tie_word_embeddings (`bool`, *optional*, defaults to `True`):
-             Whether to tie weight embeddings
          num_attention_heads (`int`, *optional*, defaults to 8):
              Number of attention heads for each attention layer in the Transformer decoder.
-         num_key_value_heads (`int`, *optional*, defaults to `None`):
-             This is the number of key_value heads that should be used to implement Grouped Query Attention.
              If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
-             `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used.
-             When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group.
-             For more details checkout [this paper](https://arxiv.org/pdf/2305.13245.pdf).
              If it is not specified, will default to `num_attention_heads`.
          attention_dropout (`float`, *optional*, defaults to 0.0):
              The dropout ratio for the attention probabilities.
-         dynamic_mask_ratio (`float`, *optional*, defaults to 0.0, range [0, 1]):
-             The ratio to control the proportion of the dynamic mask filled with the minimum value.
          is_moe (`bool`, *optional*, defaults to `False`):
-             Whether to use the Cross Domain Mixture of Experts, if `True`, the MoE will inherit the MLP to initialize
          num_cdmoe_experts (`int`, *optional*, defaults to 16348):
-             Number of Private Experts for the Cross Domain Mixture of Experts. calculation formula: :math:`\text{num_cdmoe_experts} = (32 \times \text{num_cdmoe_heads})^2`
          num_cdmoe_heads (`int`, *optional*, defaults to 4):
-             Number of heads of Private Experts for the Cross Domain Mixture of Experts.
          num_cdmoe_experts_per_head (`int`, *optional*, defaults to 8):
-             Number of Private Experts per head for the Cross Domain Mixture of Experts.
          expert_retrieval_size (`int`, *optional*, defaults to 64):
-             Dimension of the Expert retrieval states for the Cross Domain Mixture of Experts.
-     """

      model_type = "doge"
      keys_to_ignore_at_inference = ["past_key_values"]
@@ -142,28 +154,22 @@
      def __init__(
          self,
          vocab_size=32768,
-         num_channels=3,
-         patch_size=16,
          hidden_size=1024,
          intermediate_size=2048,
          num_hidden_layers=32,
          hidden_bias=False,
          hidden_dropout=0.0,
          hidden_act="silu",
-         max_position_embeddings=2048,
-         rope_theta=10000.0,
-         rope_scaling={
-             "rope_type": "dynamic",
-             "factor": 4.0,
-             "original_max_position_embeddings": 2048,
-         },
          initializer_range=0.02,
          rms_norm_eps=1e-06,
          use_cache=True,
          bos_token_id=0,
          eos_token_id=1,
          pad_token_id=2,
-         tie_word_embeddings=True,
          num_attention_heads=8,
          num_key_value_heads=None,
          attention_dropout=0.0,
@@ -176,24 +182,20 @@
          **kwargs,
      ):
          self.vocab_size = vocab_size
-         self.num_channels = num_channels
-         self.patch_size = patch_size
          self.hidden_size = hidden_size
          self.intermediate_size = intermediate_size
          self.num_hidden_layers = num_hidden_layers

          self.hidden_bias = hidden_bias
          self.hidden_dropout = hidden_dropout
          self.hidden_act = hidden_act
-         self.max_position_embeddings = max_position_embeddings
-         self.rope_theta = rope_theta
-         self.rope_scaling = rope_scaling
          self.initializer_range = initializer_range
          self.rms_norm_eps = rms_norm_eps
          self.use_cache = use_cache
-         self.bos_token_id = bos_token_id
-         self.eos_token_id = eos_token_id
-         self.pad_token_id = pad_token_id
-         self.tie_word_embeddings = tie_word_embeddings
          self.num_attention_heads = num_attention_heads
          self.num_key_value_heads = num_key_value_heads
          self.attention_dropout = attention_dropout

+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+ # This file was automatically generated from src/transformers/models/doge/modular_doge.py.
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
+ # the file from the modular. If any change should be done, please apply the change to the
+ # modular_doge.py file directly. One of our CI enforces this.
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
  # coding=utf-8
  # Copyright 2024 Jingze Shi and the HuggingFace Inc. team. All rights reserved.
  #
  # This code is based on the Wonderful Matrices paper implementation.
+ # The Doge family of small language models is trained by Jingze Shi.
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.

  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  # See the License for the specific language governing permissions and
  # limitations under the License.
  from transformers.configuration_utils import PretrainedConfig
  from transformers.modeling_rope_utils import rope_config_validation

  class DogeConfig(PretrainedConfig):
      r"""
      This is the configuration class to store the configuration of a [`DogeModel`]. It is used to instantiate an Doge
+     model according to the specified arguments, defining the model architecture like [SmallDoge/Doge-20M](https://huggingface.co/SmallDoge/Doge-20M).

      Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
      documentation from [`PretrainedConfig`] for more information.

      Args:
          vocab_size (`int`, *optional*, defaults to 32768):
              Vocabulary size of the Doge model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`DogeModel`]
          hidden_size (`int`, *optional*, defaults to 1024):
              Dimension of the hidden representations.
          intermediate_size (`int`, *optional*, defaults to 2048):

              Dropout probability for each sequence transformation and state transformation module.
          hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
              The non-linear activation function (function or string) in the decoder.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         bos_token_id (`int`, *optional*, defaults to 0):
+             Beginning of stream token id.
+         eos_token_id (`int`, *optional*, defaults to 1):
+             End of stream token id.
+         pad_token_id (`int`, *optional*, defaults to 2):
+             Padding token id.
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether to tie weight embeddings
          max_position_embeddings (`int`, *optional*, defaults to 2048):
              The maximum sequence length that this model might ever be used with.
          rope_theta (`float`, *optional*, defaults to 10000.0):
              The base period of the RoPE embeddings.
          rope_scaling (`Dict`, *optional*):
+             Dictionary containing the scaling configuration for the RoPE embeddings.
              NOTE: if you apply new rope type and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value accordingly.
+             Doge family of small models use `{ 'rope_type': 'dynamic', 'factor': 4.0, 'original_max_position_embeddings': 2048 }` as the default value.
              Expected contents:
                  `rope_type` (`str`):
                      The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope', 'llama3'], with 'default' being the original RoPE implementation.
                  `factor` (`float`, *optional*):
+                     Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings.
                      In most scaling types, a `factor` of x will enable the model to handle sequences of length x * original maximum pre-trained length.
                  `original_max_position_embeddings` (`int`, *optional*):
+                     Used with 'dynamic', 'longrope' and 'llama3'.
                      The original max position embeddings used during pretraining.
                  `attention_factor` (`float`, *optional*):
                      Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
+                     computation.
                      If unspecified, it defaults to value recommended by the implementation, using the `factor` field to infer the suggested value.
                  `beta_fast` (`float`, *optional*):
                      Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear

                      Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
                      ramp function. If unspecified, it defaults to 1.
                  `short_factor` (`List[float]`, *optional*):
+                     Only used with 'longrope'. The scaling factor to be applied to short contexts (<`original_max_position_embeddings`).
                      Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2
                  `long_factor` (`List[float]`, *optional*):
+                     Only used with 'longrope'. The scaling factor to be applied to long contexts (<`original_max_position_embeddings`).
                      Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2
                  `low_freq_factor` (`float`, *optional*):
                      Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
                  `high_freq_factor` (`float`, *optional*):
                      Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
          num_attention_heads (`int`, *optional*, defaults to 8):
              Number of attention heads for each attention layer in the Transformer decoder.
+         num_key_value_heads (`int`, *optional*):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention.
              If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+             `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used.
+             When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group.
+             For more details checkout [this paper](https://arxiv.org/pdf/2305.13245.pdf).
              If it is not specified, will default to `num_attention_heads`.
          attention_dropout (`float`, *optional*, defaults to 0.0):
              The dropout ratio for the attention probabilities.
+         dynamic_mask_ratio (`float`, *optional*, defaults to 0.0):
+             The ratio to control the proportion of the dynamic mask filled with the minimum value. For more details checkout [this paper](https://arxiv.org/pdf/2412.11834).
          is_moe (`bool`, *optional*, defaults to `False`):
+             Whether to use the Cross Domain Mixture of Experts, if `True`, the MoE will inherit the MLP to initialize. For more details checkout [this paper](https://arxiv.org/pdf/2412.11834).
          num_cdmoe_experts (`int`, *optional*, defaults to 16348):
+             Number of Experts for the Cross Domain Mixture of Experts.
          num_cdmoe_heads (`int`, *optional*, defaults to 4):
+             Number of retrieval heads, used to mix multi-head experts.
          num_cdmoe_experts_per_head (`int`, *optional*, defaults to 8):
+             Number of Experts per retrieval head, used to mix multi-head experts.
          expert_retrieval_size (`int`, *optional*, defaults to 64):
+             Dimension of the Expert retrieval states for calculating the dot product of query and key to determine the expert index.
+
+     ```python
+     >>> from transformers import DogeConfig, DogeModel
+
+     >>> # Initializing a Doge-320M style configuration
+     >>> configuration = DogeConfig()
+
+     >>> # Initializing a model from the Doge-320M style configuration
+     >>> model = DogeModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""

      model_type = "doge"
      keys_to_ignore_at_inference = ["past_key_values"]

      def __init__(
          self,
          vocab_size=32768,
          hidden_size=1024,
          intermediate_size=2048,
          num_hidden_layers=32,
          hidden_bias=False,
          hidden_dropout=0.0,
          hidden_act="silu",
          initializer_range=0.02,
          rms_norm_eps=1e-06,
          use_cache=True,
          bos_token_id=0,
          eos_token_id=1,
          pad_token_id=2,
+         tie_word_embeddings=False,
+         max_position_embeddings=2048,
+         rope_theta=10000.0,
+         rope_scaling=None,
          num_attention_heads=8,
          num_key_value_heads=None,
          attention_dropout=0.0,

          **kwargs,
      ):
          self.vocab_size = vocab_size
          self.hidden_size = hidden_size
          self.intermediate_size = intermediate_size
          self.num_hidden_layers = num_hidden_layers
+
          self.hidden_bias = hidden_bias
          self.hidden_dropout = hidden_dropout
          self.hidden_act = hidden_act
          self.initializer_range = initializer_range
          self.rms_norm_eps = rms_norm_eps
          self.use_cache = use_cache
+
+         self.max_position_embeddings = max_position_embeddings
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
          self.num_attention_heads = num_attention_heads
          self.num_key_value_heads = num_key_value_heads
          self.attention_dropout = attention_dropout
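
The visible effect of this refactor is that RoPE and token-embedding settings are now explicit `__init__` arguments with new defaults (`rope_scaling=None`, `tie_word_embeddings=False`) and the vision-related `num_channels`/`patch_size` arguments are gone. A sketch of rebuilding the 60M checkpoint's configuration with the updated signature, using the values from config.json above (importing `configuration_doge` directly assumes the file has been downloaded next to the script; it is normally resolved via `trust_remote_code`):

```python
# Sketch: reconstruct this checkpoint's configuration with the updated DogeConfig
# signature. All values come from the new config.json shown earlier; the import
# path is an assumption (the file usually ships inside the Hub repo).
from configuration_doge import DogeConfig

config = DogeConfig(
    vocab_size=32768,
    hidden_size=512,
    intermediate_size=1024,
    num_hidden_layers=16,
    num_attention_heads=4,
    num_key_value_heads=2,
    max_position_embeddings=2048,
    rope_theta=10000.0,
    # rope_scaling now defaults to None, so the Doge checkpoints pass it explicitly.
    rope_scaling={"rope_type": "dynamic", "factor": 4.0, "original_max_position_embeddings": 2048},
    tie_word_embeddings=True,  # the checkpoint overrides the new False default
    bos_token_id=0,
    eos_token_id=1,
    pad_token_id=2,
    is_moe=False,
)
print(config.rope_scaling, config.tie_word_embeddings)
```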
generation_config.json CHANGED
@@ -3,5 +3,5 @@
    "bos_token_id": 0,
    "eos_token_id": 1,
    "pad_token_id": 2,
-   "transformers_version": "4.48.1"
  }

    "bos_token_id": 0,
    "eos_token_id": 1,
    "pad_token_id": 2,
+   "transformers_version": "4.48.2"
  }
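
Only the `transformers_version` stamp changes here; the special-token ids stay aligned with config.json. A quick, hedged way to check them after the upload (repository id assumed as above):

```python
# Sketch: read back the generation defaults recorded in generation_config.json.
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("SmallDoge/Doge-60M-Instruct")  # assumed repo id
print(gen_config.bos_token_id, gen_config.eos_token_id, gen_config.pad_token_id)  # expected: 0 1 2
```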
modeling_doge.py CHANGED
@@ -1,9 +1,14 @@
  # coding=utf-8
  # Copyright 2024 Jingze Shi and the HuggingFace Inc. team. All rights reserved.
  #
  # This code is based on the Wonderful Matrices paper implementation.
- #
- # https://arxiv.org/abs/2412.11834
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -16,16 +21,13 @@
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  # See the License for the specific language governing permissions and
  # limitations under the License.
- """PyTorch Doge model."""

  import math
  from typing import Callable, List, Optional, Tuple, Union

  import torch
  import torch.nn.functional as F
- import torch.utils.checkpoint
  from torch import nn
-
  from transformers.activations import ACT2FN
  from transformers.cache_utils import Cache, DynamicCache, StaticCache
  from transformers.generation import GenerationMixin
@@ -41,18 +43,16 @@ from transformers.utils import (
      LossKwargs,
      add_start_docstrings,
      add_start_docstrings_to_model_forward,
-     is_torch_greater_or_equal,
      logging,
      replace_return_docstrings,
  )
  from .configuration_doge import DogeConfig

- try:
-     from einx import add as einx_add
- except ImportError:
-     einx_add = None

- if is_torch_greater_or_equal("2.5"):
      from torch.nn.attention.flex_attention import flex_attention


@@ -94,22 +94,20 @@ class Residual(nn.Module):


  class RotaryEmbedding(nn.Module):
-     def __init__(self, config: Optional[DogeConfig] = None):
          super().__init__()
-         self.rope_kwargs = {}
-
-         if config.rope_scaling is not None:
              self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
          else:
              self.rope_type = "default"
          self.max_seq_len_cached = config.max_position_embeddings
          self.original_max_seq_len = config.max_position_embeddings
-         self.base = config.rope_theta

          self.config = config
          self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

-         inv_freq, self.attention_scaling = self.rope_init_fn(self.config, **self.rope_kwargs)
          self.register_buffer("inv_freq", inv_freq, persistent=False)
          self.original_inv_freq = self.inv_freq

@@ -121,13 +119,14 @@ class RotaryEmbedding(nn.Module):
          """
          seq_len = torch.max(position_ids) + 1
          if seq_len > self.max_seq_len_cached:  # growth
-             inv_freq, self.attention_scaling = self.rope_init_fn(
-                 self.config, device, seq_len=seq_len, **self.rope_kwargs
-             )
              self.register_buffer("inv_freq", inv_freq, persistent=False)  # TODO joao: may break with compilation
              self.max_seq_len_cached = seq_len

          if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len:  # reset
              self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
              self.max_seq_len_cached = self.original_max_seq_len

@@ -136,7 +135,7 @@
          if "dynamic" in self.rope_type:
              self._dynamic_frequency_update(position_ids, device=x.device)

-         # core RoPE block
          inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
          position_ids_expanded = position_ids[:, None, :].float()
          # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
@@ -164,7 +163,7 @@ def rotate_half(x):
      return torch.cat((-x2, x1), dim=-1)


- def apply_QK_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
      """Applies Rotary Position Embedding to the query and key tensors.

      Args:
@@ -176,8 +175,8 @@ def apply_QK_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
              Deprecated and unused.
          unsqueeze_dim (`int`, *optional*, defaults to 1):
              The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
-             sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k.
-             For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim].
              Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k.
              Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
      Returns:
@@ -192,7 +191,7 @@ def apply_QK_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):

  def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
      """
-     This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep).
      The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
      """
      batch, num_key_value_heads, slen, head_dim = hidden_states.shape
@@ -211,45 +210,33 @@ class DogeDynamicMaskAttention(nn.Module):
          self.layer_idx = layer_idx
          self.head_dim = config.hidden_size // config.num_attention_heads
          self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
-         self.scaling = self.head_dim ** -0.5
          self.attention_dropout = config.attention_dropout
          self.dynamic_mask_ratio = config.dynamic_mask_ratio

          self.ALL_ATTENTION_FUNCTIONS = {
              "eager": self.eager_attention_forward,
-             "sdpa": self.sdpa_attention_forward,
              "flex_attention": self.flex_attention_forward,
          }

          # Q K V O projections
          self.q_proj = nn.Linear(
-             config.hidden_size,
-             config.num_attention_heads * self.head_dim,
-             bias=config.hidden_bias
          )
          self.k_proj = nn.Linear(
-             config.hidden_size,
-             config.num_key_value_heads * self.head_dim,
-             bias=config.hidden_bias
          )
          self.v_proj = nn.Linear(
-             config.hidden_size,
-             config.num_key_value_heads * self.head_dim,
-             bias=config.hidden_bias
          )
          # dynamic mask for the QK^T attention score matrix
-         self.A = nn.Parameter(
-             torch.ones(config.num_attention_heads)
-         )
          self.dt_proj = nn.Linear(
-             config.num_key_value_heads * self.head_dim,
-             config.num_attention_heads,
-             bias=config.hidden_bias
          )
          self.o_proj = nn.Linear(
-             config.num_attention_heads * self.head_dim,
-             config.hidden_size,
-             bias=config.hidden_bias
          )

      def forward(
@@ -269,7 +256,7 @@ class DogeDynamicMaskAttention(nn.Module):
          value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

          cos, sin = position_embeddings
-         query_states, key_states = apply_QK_rotary_pos_emb(query_states, key_states, cos, sin)

          if past_key_value is not None:
              # sin and cos are specific to RoPE models; cache_position needed for the static cache
@@ -277,7 +264,9 @@ class DogeDynamicMaskAttention(nn.Module):
              key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

          # calculate dynamic mask from value_states
-         dt_states = self.dt_proj(value_states.transpose(1, 2).reshape(value_states.shape[0], value_states.shape[-2], -1))
          dynamic_mask = torch.exp(self.A * F.softplus(dt_states)).transpose(-1, -2)
          attn_mask = self.prepare_dynamic_mask(
              hidden_states=hidden_states,
@@ -289,7 +278,7 @@ class DogeDynamicMaskAttention(nn.Module):
          attention_interface: Callable = self.eager_attention_forward
          if self.config._attn_implementation != "eager":
              attention_interface = self.ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
-
          attn_output = attention_interface(
              query_states,
              key_states,
@@ -320,17 +309,22 @@ class DogeDynamicMaskAttention(nn.Module):
              dynamic_mask_ratio (`float`, *optional*): Ratio from 0.0 to 1.0 used to control the proportion of the dynamic mask filled with the minimum value.
              attention_mask (`torch.Tensor`, *optional*): attention mask of shape `(batch_size, 1, query_sequence_length, key_sequence_length)`.
          """
-         min_type = torch.finfo(hidden_states.dtype).min
-         attn_mask = dynamic_mask[:, :, None, :]
-         if 0.0 < dynamic_mask_ratio < 1.0:
-             num_dynamic_mask = int(attn_mask.shape[-1] * dynamic_mask_ratio)
-             if num_dynamic_mask > 0:
-                 rate_value = torch.kthvalue(attn_mask, num_dynamic_mask, dim=-1, keepdim=True).values
-                 attn_mask = attn_mask.masked_fill(attn_mask < rate_value, min_type)
-         if attention_mask is not None:
-             attn_mask = attn_mask.masked_fill(attention_mask[:, :, :, : hidden_states.shape[-2]] == min_type, min_type)
          return attn_mask
-
      def eager_attention_forward(
          self,
          query: torch.Tensor,
@@ -349,7 +343,7 @@ class DogeDynamicMaskAttention(nn.Module):
          if attention_mask is not None:
              causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
              attn_weights = attn_weights + causal_mask
-
          # upcast attention scores to fp32
          attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
          attn_weights = F.dropout(attn_weights, p=dropout, training=self.training)
@@ -358,7 +352,7 @@ class DogeDynamicMaskAttention(nn.Module):
          attn_output = torch.matmul(attn_weights, value_states)
          attn_output = attn_output.transpose(1, 2).contiguous()
          return attn_output
-
      def sdpa_attention_forward(
          self,
          query: torch.Tensor,
@@ -369,6 +363,9 @@ class DogeDynamicMaskAttention(nn.Module):
          dropout: float = 0.0,
          **kwargs,
      ) -> torch.Tensor:
          causal_mask = attention_mask
          if attention_mask is not None:
              causal_mask = causal_mask[:, :, :, : key.shape[-2]]
@@ -388,11 +385,10 @@ class DogeDynamicMaskAttention(nn.Module):
              attn_mask=causal_mask,
              dropout_p=dropout,
              scale=scaling,
-             enable_gqa=True,
          )
          attn_output = attn_output.transpose(1, 2).contiguous()
          return attn_output
-
      def flex_attention_forward(
          self,
          query: torch.Tensor,
@@ -403,30 +399,37 @@ class DogeDynamicMaskAttention(nn.Module):
          dropout: float = 0.0,
          **kwargs,
      ) -> torch.Tensor:
          causal_mask = attention_mask
          if attention_mask is not None:
              causal_mask = causal_mask[:, :, :, : key.shape[-2]]

-         # TODO: flex_attention: Captured buffers that require grad are not yet supported.
          # NOTE: So we only use flex_attention in inference mode.
-         def mask_mod(score, batch, head, q_idx, kv_idx):
              score = score + causal_mask[batch][head][q_idx][kv_idx]
              return score
-
          attn_output = flex_attention(
              query,
              key,
              value,
              score_mod=mask_mod,
              scale=scaling,
-             enable_gqa=True,
          )
          attn_output = attn_output.transpose(1, 2).contiguous()
          return attn_output


  class DogeMLP(nn.Module):
-
      def __init__(self, config: DogeConfig):
          super().__init__()
          self.hidden_dim = config.hidden_size
@@ -465,7 +468,7 @@ class DogeCDMoE(DogeMLP):
          self.keys = nn.Parameter(torch.zeros(self.num_cdmoe_heads, self.num_keys, 2, self.expert_retrieval_dim // 2))

          # experts
-         self.down_embed = nn.Embedding(self.num_cdmoe_experts, self.hidden_dim)
          self.up_embed = nn.Embedding(self.num_cdmoe_experts, self.hidden_dim)

      def forward(
@@ -482,14 +485,10 @@ class DogeCDMoE(DogeMLP):

          # get experts with the highest similarity
          (scores_x, scores_y), (indices_x, indices_y) = sim.topk(self.num_cdmoe_experts_per_head, dim=-1)
-         if einx_add is not None:
-             all_scores = einx_add("... i, ... j -> ... (i j)", scores_x, scores_y)
-             all_indices = einx_add("... i, ... j -> ... (i j)", indices_x * self.num_keys, indices_y)
-         else:
-             all_scores = scores_x.unsqueeze(-1) + scores_y.unsqueeze(-2)
-             all_scores = all_scores.view(*scores_x.shape[:-1], -1)
-             all_indices = (indices_x.unsqueeze(-1) * self.num_keys) + indices_y.unsqueeze(-2)
-             all_indices = all_indices.view(*indices_x.shape[:-1], -1)
          scores, pk_indices = all_scores.topk(self.num_cdmoe_experts_per_head, dim=-1)
          indices = all_indices.gather(-1, pk_indices)
          down_embed = self.down_embed(indices)
@@ -514,7 +513,7 @@ class DogeDecoderLayer(nn.Module):
          self.pre_residual = Residual(config.hidden_size)

          self.post_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
-         self.feed_forward = DogeMLP(config) if config.is_moe == False else DogeCDMoE(config)
          self.post_residual = Residual(config.hidden_size)

      def forward(
@@ -529,7 +528,6 @@ class DogeDecoderLayer(nn.Module):
          position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # necessary, but kept here for BC
          **kwargs,
      ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
-
          # sequence transformation
          residual = hidden_states
          hidden_states = self.pre_layernorm(hidden_states)
@@ -575,6 +573,8 @@ DOGE_START_DOCSTRING = r"""
          load the weights associated with the model, only the configuration. Check out the
          [`~PreTrainedModel.from_pretrained`] method to load the model weights.
  """
  @add_start_docstrings(
      "The bare Doge Model outputting raw hidden-states without any specific head on top.",
      DOGE_START_DOCSTRING,
@@ -854,7 +854,7 @@ class DogeModel(DogePreTrainedModel):
          )

          return causal_mask
-
      @staticmethod
      def _prepare_4d_causal_attention_mask_with_cache_position(
          attention_mask: torch.Tensor = None,
@@ -895,7 +895,9 @@ class DogeModel(DogePreTrainedModel):
          min_dtype = torch.finfo(dtype).min
          causal_mask = torch.full(
              (sequence_length, target_length),
-             fill_value=min_dtype, dtype=dtype, device=device,
          )
          if sequence_length != 1:
              causal_mask = torch.triu(causal_mask, diagonal=1)
@@ -941,13 +943,14 @@ class DogeForCausalLM(DogePreTrainedModel, GenerationMixin):

      def set_output_embeddings(self, new_embeddings):
          self.lm_head = new_embeddings
-
      def get_decoder(self):
          return self.model

      def set_decoder(self, decoder):
          self.model = decoder

      @add_start_docstrings_to_model_forward(DOGE_INPUTS_DOCSTRING)
      @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
      def forward(
@@ -963,7 +966,7 @@ class DogeForCausalLM(DogePreTrainedModel, GenerationMixin):
          output_hidden_states: Optional[bool] = None,
          return_dict: Optional[bool] = None,
          cache_position: Optional[torch.LongTensor] = None,
-         num_logits_to_keep: int = 0,
          **kwargs: Unpack[KwargsForCausalLM],
      ) -> Union[Tuple, CausalLMOutputWithPast]:
          r"""
@@ -973,10 +976,12 @@ class DogeForCausalLM(DogePreTrainedModel, GenerationMixin):
              config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
              (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

-         num_logits_to_keep (`int`, *optional*):
-             Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
              `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
              token can save memory, which becomes pretty significant for long sequences or large vocabulary size.

          Returns:

@@ -985,8 +990,8 @@ class DogeForCausalLM(DogePreTrainedModel, GenerationMixin):
          ```python
          >>> from transformers import AutoTokenizer, AutoModelForCausalLM

-         >>> model = AutoModelForCausalLM.from_pretrained("JingzeShi/Doge-20M-Instruct")
-         >>> tokenizer = AutoTokenizer.from_pretrained("JingzeShi/Doge-20M-Instruct")

          >>> prompt = "Hey, are you conscious? Can you talk to me?"
          >>> inputs = tokenizer(prompt, return_tensors="pt")
@@ -1018,9 +1023,9 @@ class DogeForCausalLM(DogePreTrainedModel, GenerationMixin):
          )

          hidden_states = outputs[0]
-
          # only compute necessary logits, and do not upcast them to float if we are not computing the loss
-         logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :])

          loss = None
          if labels is not None:
@@ -1039,111 +1044,32 @@ class DogeForCausalLM(DogePreTrainedModel, GenerationMixin):
          )


- class DogePatchEmbedding(nn.Module):
-     """
-     This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` of shape `(batch_size, seq_len, hidden_size)` to be consumed by a Transformer.
-     """
-
-     def __init__(self, config: DogeConfig):
-         super().__init__()
-
-         self.num_channels = config.num_channels
-         self.patch_size = config.patch_size
-         self.hidden_dim = config.hidden_size
-
-         self.sequence_proj = nn.Conv2d(self.num_channels, self.hidden_dim, kernel_size=self.patch_size, stride=self.patch_size)
-         self.state_proj = nn.Linear(self.hidden_dim, self.hidden_dim, bias=config.hidden_bias)
-
-     def forward(
-         self,
-         pixel_values: torch.Tensor,
-     ) -> torch.Tensor:
-         image_embedding = self.sequence_proj(pixel_values).flatten(2).transpose(1, 2)
-         image_embedding = self.state_proj(image_embedding)
-         return image_embedding
-
-
- class DogeForCausalVLM(DogeForCausalLM):
-     _tied_weights_keys = ["lm_head.weight"]
-
-     def __init__(self, config: DogeConfig):
-         super().__init__(config)
-         self.config = config
-         self.pixel_embed = DogePatchEmbedding(config)
-
-         # Initialize weights and apply final processing
-         self.post_init()
-
-     def forward(
-         self,
-         input_ids: torch.LongTensor = None,
-         pixel_values: torch.FloatTensor = None,
-         attention_mask: Optional[torch.Tensor] = None,
-         position_ids: Optional[torch.LongTensor] = None,
-         past_key_values: Optional[torch.Tensor] = None,
-         inputs_embeds: Optional[torch.FloatTensor] = None,
-         labels: Optional[torch.LongTensor] = None,
-         use_cache: Optional[bool] = None,
-         output_attentions: Optional[bool] = None,
-         output_hidden_states: Optional[bool] = None,
-         return_dict: Optional[bool] = None,
-         cache_position: Optional[torch.LongTensor] = None,
-         num_logits_to_keep: int = 0,
-         **loss_kwargs,
-     ) -> Union[Tuple, CausalLMOutputWithPast]:
-         # TODO: @wubingheng111: refer to Llava for implementating the forward method
-         ...
-
-     def prepare_inputs_for_generation(
-         self,
-         input_ids=None,
-         pixel_values=None,
-         past_key_values=None,
-         input_embeds=None,
-         attention_mask=None,
-         cache_position=None,
-         num_logits_to_keep=None,
-         **kwargs,
-     ):
-         model_inputs = self.model.prepare_inputs_for_generation(
-             input_ids,
-             past_key_values=past_key_values,
-             inputs_embeds=input_embeds,
-             attention_mask=attention_mask,
-             cache_position=cache_position,
-             num_logits_to_keep=num_logits_to_keep,
-             **kwargs,
-         )
-
-         if cache_position[0] == 0:
-             model_inputs["pixel_values"] = pixel_values
-
-         return model_inputs
-
-
  @add_start_docstrings(
      """
      The Doge Model transformer with a sequence classification head on top (linear layer).

-     [`DogeForSequenceClassification`] uses the last token in order to do the classification, as other causal models (e.g. GPT-2) do.

-     Since it does classification on the last token, it requires to know the position of the last token.
-     If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row.
-     If no `pad_token_id` is defined, it simply takes the last value in each row of the batch.
-     Since it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in each row of the batch).
-     """
  )
  class DogeForSequenceClassification(DogePreTrainedModel):
      def __init__(self, config: DogeConfig):
          super().__init__(config)
-         self.config = config
          self.num_labels = config.num_labels

          self.model = DogeModel(config)
-         self.classifier = nn.Linear(config.hidden_size, self.num_labels, bias=False)

          # Initialize weights and apply final processing
-         self.init_weights()

      def get_input_embeddings(self):
          return self.model.word_embed
@@ -1167,14 +1093,14 @@ class DogeForSequenceClassification(DogePreTrainedModel):
      ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
          r"""
          labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
-             Labels for computing the sequence classification/regression loss.
-             Indices should be in `[0, ..., config.num_labels - 1]`.
-             If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
          """
          return_dict = return_dict if return_dict is not None else self.config.use_return_dict

-         outputs = self.model(
-             input_ids=input_ids,
              attention_mask=attention_mask,
              position_ids=position_ids,
              past_key_values=past_key_values,
@@ -1184,8 +1110,8 @@ class DogeForSequenceClassification(DogePreTrainedModel):
              output_hidden_states=output_hidden_states,
              return_dict=return_dict,
          )
-         hidden_states = outputs[0]
-         logits = self.classifier(hidden_states)

          if input_ids is not None:
              batch_size = input_ids.shape[0]
@@ -1209,21 +1135,19 @@ class DogeForSequenceClassification(DogePreTrainedModel):

          loss = None
          if labels is not None:
-             loss = self.loss_function(
-                 logits=logits,
-                 labels=labels,
-                 pooled_logits=pooled_logits,
-                 config=self.config,
-             )

          if not return_dict:
-             output = (pooled_logits,) + outputs[1:]
              return ((loss,) + output) if loss is not None else output

          return SequenceClassifierOutputWithPast(
              loss=loss,
              logits=pooled_logits,
-             past_key_values=outputs.past_key_values,
-             hidden_states=outputs.hidden_states,
-             attentions=outputs.attentions,
          )

1
+ # 馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃
2
+ # This file was automatically generated from src/transformers/models/doge/modular_doge.py.
3
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
4
+ # the file from the modular. If any change should be done, please apply the change to the
5
+ # modular_doge.py file directly. One of our CI enforces this.
6
+ # 馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃馃毃
7
  # coding=utf-8
8
  # Copyright 2024 Jingze Shi and the HuggingFace Inc. team. All rights reserved.
9
  #
10
  # This code is based on the Wonderful Matrices paper implementation.
11
+ # The Doge family of small language models is trained by Jingze Shi.
 
12
  #
13
  # Licensed under the Apache License, Version 2.0 (the "License");
14
  # you may not use this file except in compliance with the License.
 
21
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
22
  # See the License for the specific language governing permissions and
23
  # limitations under the License.
 
24
 
25
  import math
26
  from typing import Callable, List, Optional, Tuple, Union
27
 
28
  import torch
29
  import torch.nn.functional as F
 
30
  from torch import nn
 
31
  from transformers.activations import ACT2FN
32
  from transformers.cache_utils import Cache, DynamicCache, StaticCache
33
  from transformers.generation import GenerationMixin
 
43
  LossKwargs,
44
  add_start_docstrings,
45
  add_start_docstrings_to_model_forward,
46
+ is_torch_flex_attn_available,
47
  logging,
48
  replace_return_docstrings,
49
  )
50
+ from transformers.utils.deprecation import deprecate_kwarg
51
+
52
  from .configuration_doge import DogeConfig
53
 
 
 
 
 
54
 
55
+ if is_torch_flex_attn_available():
56
  from torch.nn.attention.flex_attention import flex_attention
57
 
58
 
 
94
 
95
 
96
  class RotaryEmbedding(nn.Module):
97
+ def __init__(self, config: Optional[DogeConfig] = None, device=None):
98
  super().__init__()
99
+ # BC: "rope_type" was originally "type"
100
+ if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
 
101
  self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
102
  else:
103
  self.rope_type = "default"
104
  self.max_seq_len_cached = config.max_position_embeddings
105
  self.original_max_seq_len = config.max_position_embeddings
 
106
 
107
  self.config = config
108
  self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
109
 
110
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
111
  self.register_buffer("inv_freq", inv_freq, persistent=False)
112
  self.original_inv_freq = self.inv_freq
113
 
 
119
  """
120
  seq_len = torch.max(position_ids) + 1
121
  if seq_len > self.max_seq_len_cached: # growth
122
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len)
 
 
123
  self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation
124
  self.max_seq_len_cached = seq_len
125
 
126
  if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset
127
+ # This .to() is needed if the model has been moved to a device after being initialized (because
128
+ # the buffer is automatically moved, but not the original copy)
129
+ self.original_inv_freq = self.original_inv_freq.to(device)
130
  self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
131
  self.max_seq_len_cached = self.original_max_seq_len
132
 
 
135
  if "dynamic" in self.rope_type:
136
  self._dynamic_frequency_update(position_ids, device=x.device)
137
 
138
+ # Core RoPE block
139
  inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
140
  position_ids_expanded = position_ids[:, None, :].float()
141
  # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
 
163
  return torch.cat((-x2, x1), dim=-1)
164
 
165
 
166
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
167
  """Applies Rotary Position Embedding to the query and key tensors.
168
 
169
  Args:
 
175
  Deprecated and unused.
176
  unsqueeze_dim (`int`, *optional*, defaults to 1):
177
  The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
178
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k.
179
+ For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim].
180
  Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k.
181
  Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
182
  Returns:
 
191
 
192
  def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
193
  """
194
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep).
195
  The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
196
  """
197
  batch, num_key_value_heads, slen, head_dim = hidden_states.shape
 
210
  self.layer_idx = layer_idx
211
  self.head_dim = config.hidden_size // config.num_attention_heads
212
  self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
213
+ self.scaling = self.head_dim**-0.5
214
  self.attention_dropout = config.attention_dropout
215
  self.dynamic_mask_ratio = config.dynamic_mask_ratio
216
 
217
  self.ALL_ATTENTION_FUNCTIONS = {
218
  "eager": self.eager_attention_forward,
 
219
  "flex_attention": self.flex_attention_forward,
220
+ "sdpa": self.sdpa_attention_forward,
221
  }
222
 
223
  # Q K V O projections
224
  self.q_proj = nn.Linear(
225
+ config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.hidden_bias
 
 
226
  )
227
  self.k_proj = nn.Linear(
228
+ config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.hidden_bias
 
 
229
  )
230
  self.v_proj = nn.Linear(
231
+ config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.hidden_bias
 
 
232
  )
233
  # dynamic mask for the QK^T attention score matrix
234
+ self.A = nn.Parameter(torch.zeros(config.num_attention_heads))
 
 
235
  self.dt_proj = nn.Linear(
236
+ config.num_key_value_heads * self.head_dim, config.num_attention_heads, bias=config.hidden_bias
 
 
237
  )
238
  self.o_proj = nn.Linear(
239
+ config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.hidden_bias
 
 
240
  )
241
 
242
  def forward(
 
256
  value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
257
 
258
  cos, sin = position_embeddings
259
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
260
 
261
  if past_key_value is not None:
262
  # sin and cos are specific to RoPE models; cache_position needed for the static cache
 
264
  key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
265
 
266
  # calculate dynamic mask from value_states
267
+ dt_states = self.dt_proj(
268
+ value_states.transpose(1, 2).reshape(value_states.shape[0], value_states.shape[-2], -1)
269
+ )
270
  dynamic_mask = torch.exp(self.A * F.softplus(dt_states)).transpose(-1, -2)
271
  attn_mask = self.prepare_dynamic_mask(
272
  hidden_states=hidden_states,
 
278
  attention_interface: Callable = self.eager_attention_forward
279
  if self.config._attn_implementation != "eager":
280
  attention_interface = self.ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
281
+
282
  attn_output = attention_interface(
283
  query_states,
284
  key_states,
 
309
  dynamic_mask_ratio (`float`, *optional*): Ratio from 0.0 to 1.0 used to control the proportion of the dynamic mask filled with the minimum value.
310
  attention_mask (`torch.Tensor`, *optional*): attention mask of shape `(batch_size, 1, query_sequence_length, key_sequence_length)`.
311
  """
312
+ attn_mask = None
313
+ if dynamic_mask is not None:
314
+ attn_mask = dynamic_mask[:, :, None, :]
315
+ if 0.0 < dynamic_mask_ratio < 1.0:
316
+ min_type = torch.finfo(hidden_states.dtype).min
317
+ num_dynamic_mask = int(attn_mask.shape[-1] * dynamic_mask_ratio)
318
+ if num_dynamic_mask > 0:
319
+ rate_value = torch.kthvalue(attn_mask, num_dynamic_mask, dim=-1, keepdim=True).values
320
+ attn_mask = attn_mask.masked_fill(attn_mask < rate_value, min_type)
321
+ if attention_mask is not None:
322
+ attn_mask = attn_mask + attention_mask[:, :, :, : attn_mask.shape[-1]]
323
+ else:
324
+ attn_mask = attention_mask
325
+
326
  return attn_mask
327
+
328
  def eager_attention_forward(
329
  self,
330
  query: torch.Tensor,
 
343
  if attention_mask is not None:
344
  causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
345
  attn_weights = attn_weights + causal_mask
346
+
347
  # upcast attention scores to fp32
348
  attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
349
  attn_weights = F.dropout(attn_weights, p=dropout, training=self.training)
 
352
  attn_output = torch.matmul(attn_weights, value_states)
353
  attn_output = attn_output.transpose(1, 2).contiguous()
354
  return attn_output
355
+
356
  def sdpa_attention_forward(
357
  self,
358
  query: torch.Tensor,
 
363
  dropout: float = 0.0,
364
  **kwargs,
365
  ) -> torch.Tensor:
366
+ key = repeat_kv(key, self.num_key_value_groups)
367
+ value = repeat_kv(value, self.num_key_value_groups)
368
+
369
  causal_mask = attention_mask
370
  if attention_mask is not None:
371
  causal_mask = causal_mask[:, :, :, : key.shape[-2]]
 
385
  attn_mask=causal_mask,
386
  dropout_p=dropout,
387
  scale=scaling,
 
388
  )
389
  attn_output = attn_output.transpose(1, 2).contiguous()
390
  return attn_output
391
+
392
  def flex_attention_forward(
393
  self,
394
  query: torch.Tensor,
 
399
  dropout: float = 0.0,
400
  **kwargs,
401
  ) -> torch.Tensor:
402
+ key = repeat_kv(key, self.num_key_value_groups)
403
+ value = repeat_kv(value, self.num_key_value_groups)
404
+
405
  causal_mask = attention_mask
406
  if attention_mask is not None:
407
  causal_mask = causal_mask[:, :, :, : key.shape[-2]]
408
 
409
+ # TODO: flex_attention: As of pytorch 2.5.1, captured buffers that require grad are not yet supported.
410
  # NOTE: So we only use flex_attention in inference mode.
411
+ def causal_mod(score, batch, head, q_idx, kv_idx):
412
+ score = score + causal_mask[batch][0][q_idx][kv_idx]
413
+ return score
414
+
415
+ def dynamic_mod(score, batch, head, q_idx, kv_idx):
416
  score = score + causal_mask[batch][head][q_idx][kv_idx]
417
  return score
418
+
419
+ mask_mod = causal_mod if self.is_causal else dynamic_mod
420
+
421
  attn_output = flex_attention(
422
  query,
423
  key,
424
  value,
425
  score_mod=mask_mod,
426
  scale=scaling,
 
427
  )
428
  attn_output = attn_output.transpose(1, 2).contiguous()
429
  return attn_output
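# Minimal sketch of the score_mod pattern used above (assumes PyTorch >= 2.5, where
# flex_attention is available; it is normally wrapped in torch.compile for speed).
# The bias tensor is a toy stand-in for the sliced causal/dynamic mask.
import torch
from torch.nn.attention.flex_attention import flex_attention

B, H, S, D = 1, 2, 8, 16
q, k, v = (torch.randn(B, H, S, D) for _ in range(3))
bias = torch.zeros(B, H, S, S)

def score_mod(score, batch, head, q_idx, kv_idx):
    # mirrors dynamic_mod above: add the precomputed mask value for this position
    return score + bias[batch][head][q_idx][kv_idx]

attn_output = flex_attention(q, k, v, score_mod=score_mod)  # (B, H, S, D)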
430
 
431
 
432
  class DogeMLP(nn.Module):
 
433
  def __init__(self, config: DogeConfig):
434
  super().__init__()
435
  self.hidden_dim = config.hidden_size
 
468
  self.keys = nn.Parameter(torch.zeros(self.num_cdmoe_heads, self.num_keys, 2, self.expert_retrieval_dim // 2))
469
 
470
  # experts
471
+ self.down_embed = nn.Embedding(self.num_cdmoe_experts, self.hidden_dim)
472
  self.up_embed = nn.Embedding(self.num_cdmoe_experts, self.hidden_dim)
473
 
474
  def forward(
 
485
 
486
  # get experts with the highest similarity
487
  (scores_x, scores_y), (indices_x, indices_y) = sim.topk(self.num_cdmoe_experts_per_head, dim=-1)
488
+ all_scores = scores_x.unsqueeze(-1) + scores_y.unsqueeze(-2)
489
+ all_scores = all_scores.view(*scores_x.shape[:-1], -1)
490
+ all_indices = (indices_x.unsqueeze(-1) * self.num_keys) + indices_y.unsqueeze(-2)
491
+ all_indices = all_indices.view(*indices_x.shape[:-1], -1)
492
  scores, pk_indices = all_scores.topk(self.num_cdmoe_experts_per_head, dim=-1)
493
  indices = all_indices.gather(-1, pk_indices)
494
  down_embed = self.down_embed(indices)
 
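# Worked illustration of the product-key retrieval above (hypothetical tiny sizes):
# two top-k searches over num_keys sub-keys replace one top-k over num_keys**2 experts,
# and the flat expert id is recovered as index_x * num_keys + index_y.
import torch

num_keys, k = 4, 2                                         # num_keys**2 = 16 experts
sx, ix = torch.tensor([[0.9, 0.5, 0.1, 0.0]]).topk(k, dim=-1)    # -> [0.9, 0.5], [0, 1]
sy, iy = torch.tensor([[0.8, 0.2, 0.7, 0.3]]).topk(k, dim=-1)    # -> [0.8, 0.7], [0, 2]

all_scores = (sx.unsqueeze(-1) + sy.unsqueeze(-2)).view(1, -1)               # [1.7, 1.6, 1.3, 1.2]
all_indices = (ix.unsqueeze(-1) * num_keys + iy.unsqueeze(-2)).view(1, -1)   # [0, 2, 4, 6]
scores, pos = all_scores.topk(k, dim=-1)
expert_ids = all_indices.gather(-1, pos)                   # tensor([[0, 2]])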
513
  self.pre_residual = Residual(config.hidden_size)
514
 
515
  self.post_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
516
+ self.feed_forward = DogeMLP(config) if not config.is_moe else DogeCDMoE(config)
517
  self.post_residual = Residual(config.hidden_size)
518
 
519
  def forward(
 
528
  position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC
529
  **kwargs,
530
  ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
 
531
  # sequence transformation
532
  residual = hidden_states
533
  hidden_states = self.pre_layernorm(hidden_states)
 
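# Minimal sketch of the pre-norm residual pattern the layer above follows. A plain additive
# residual and nn.LayerNorm are assumed here as stand-ins; the file's own Residual and
# RMSNorm modules may weight or normalize differently.
import torch
import torch.nn as nn

hidden = 8
norm = nn.LayerNorm(hidden)                                # stand-in for RMSNorm(hidden_size)
sublayer = nn.Linear(hidden, hidden)                       # stand-in for attention / feed-forward

x = torch.randn(2, 4, hidden)
residual = x
x = norm(x)                                                # normalize before the sublayer
x = residual + sublayer(x)                                 # add back onto the untouched residual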
573
  load the weights associated with the model, only the configuration. Check out the
574
  [`~PreTrainedModel.from_pretrained`] method to load the model weights.
575
  """
576
+
577
+
578
  @add_start_docstrings(
579
  "The bare Doge Model outputting raw hidden-states without any specific head on top.",
580
  DOGE_START_DOCSTRING,
 
854
  )
855
 
856
  return causal_mask
857
+
858
  @staticmethod
859
  def _prepare_4d_causal_attention_mask_with_cache_position(
860
  attention_mask: torch.Tensor = None,
 
895
  min_dtype = torch.finfo(dtype).min
896
  causal_mask = torch.full(
897
  (sequence_length, target_length),
898
+ fill_value=min_dtype,
899
+ dtype=dtype,
900
+ device=device,
901
  )
902
  if sequence_length != 1:
903
  causal_mask = torch.triu(causal_mask, diagonal=1)
 
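# Minimal sketch of the causal-mask construction above (tiny sizes, float32 assumed); the
# surrounding function additionally folds in cache positions and the padding mask, which
# are elided in this hunk.
import torch

sequence_length, target_length = 3, 5
min_dtype = torch.finfo(torch.float32).min
causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype)
causal_mask = torch.triu(causal_mask, diagonal=1)          # 0 on/below the diagonal, min above
# row i keeps positions 0..i; the mask is later broadcast to (batch, 1, seq, target)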
943
 
944
  def set_output_embeddings(self, new_embeddings):
945
  self.lm_head = new_embeddings
946
+
947
  def get_decoder(self):
948
  return self.model
949
 
950
  def set_decoder(self, decoder):
951
  self.model = decoder
952
 
953
+ @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
954
  @add_start_docstrings_to_model_forward(DOGE_INPUTS_DOCSTRING)
955
  @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
956
  def forward(
 
966
  output_hidden_states: Optional[bool] = None,
967
  return_dict: Optional[bool] = None,
968
  cache_position: Optional[torch.LongTensor] = None,
969
+ logits_to_keep: int = 0,
970
  **kwargs: Unpack[KwargsForCausalLM],
971
  ) -> Union[Tuple, CausalLMOutputWithPast]:
972
  r"""
 
976
  config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
977
  (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
978
 
979
+ logits_to_keep (`int` or `torch.Tensor`, *optional*):
980
+ If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
981
  `input_ids` (special case). Only the last token's logits are needed for generation, and calculating them only for that
982
  token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
983
+ If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
984
+ This is useful when using packed tensor format (single dimension for batch and sequence length).
985
 
986
  Returns:
987
 
 
990
  ```python
991
  >>> from transformers import AutoTokenizer, AutoModelForCausalLM
992
 
993
+ >>> model = AutoModelForCausalLM.from_pretrained("SmallDoge/Doge-20M")
994
+ >>> tokenizer = AutoTokenizer.from_pretrained("SmallDoge/Doge-20M")
995
 
996
  >>> prompt = "Hey, are you conscious? Can you talk to me?"
997
  >>> inputs = tokenizer(prompt, return_tensors="pt")
 
1023
  )
1024
 
1025
  hidden_states = outputs[0]
 
1026
  # only compute necessary logits, and do not upcast them to float if we are not computing the loss
1027
+ slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
1028
+ logits = self.lm_head(hidden_states[:, slice_indices, :])
1029
 
1030
  loss = None
1031
  if labels is not None:
 
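# Worked illustration of the logits_to_keep slicing above: slice(-k, None) keeps the last k
# positions, and k == 0 degenerates to slice(0, None), i.e. logits for every position.
import torch

hidden_states = torch.randn(2, 7, 4)                       # (batch, seq, hidden) toy tensor
for logits_to_keep in (0, 1, 3):
    slice_indices = slice(-logits_to_keep, None)
    print(logits_to_keep, hidden_states[:, slice_indices, :].shape[1])  # 7, 1, 3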
1044
  )
1045
 
1046
 
1047
  @add_start_docstrings(
1048
  """
1049
  The Doge Model transformer with a sequence classification head on top (linear layer).
1050
 
1051
+ [`DogeForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1052
+ (e.g. GPT-2) do.
1053
 
1054
+ Since it does classification on the last token, it needs to know the position of the last token. If a
1055
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1056
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1057
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1058
+ each row of the batch).
1059
+ """,
1060
+ DOGE_START_DOCSTRING,
1061
  )
1062
  class DogeForSequenceClassification(DogePreTrainedModel):
1063
  def __init__(self, config: DogeConfig):
1064
  super().__init__(config)
 
1065
  self.num_labels = config.num_labels
1066
 
1067
  self.model = DogeModel(config)
1068
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1069
+ self.config = config
1070
 
1071
  # Initialize weights and apply final processing
1072
+ self.post_init()
1073
 
1074
  def get_input_embeddings(self):
1075
  return self.model.word_embed
 
1093
  ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1094
  r"""
1095
  labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1096
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1097
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1098
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1099
  """
1100
  return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1101
 
1102
+ transformer_outputs = self.model(
1103
+ input_ids,
1104
  attention_mask=attention_mask,
1105
  position_ids=position_ids,
1106
  past_key_values=past_key_values,
 
1110
  output_hidden_states=output_hidden_states,
1111
  return_dict=return_dict,
1112
  )
1113
+ hidden_states = transformer_outputs[0]
1114
+ logits = self.score(hidden_states)
1115
 
1116
  if input_ids is not None:
1117
  batch_size = input_ids.shape[0]
 
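# Sketch of the last-non-padding-token pooling described in the class docstring above
# (a hypothetical reimplementation; the exact pooling code is elided in this hunk):
import torch

pad_token_id = 2
input_ids = torch.tensor([[5, 6, 7, 2, 2],
                          [8, 9, 2, 2, 2]])                # right-padded batch
logits = torch.randn(2, 5, 3)                              # (batch, seq, num_labels)

non_pad = (input_ids != pad_token_id).int()
last_token_idx = non_pad.cumsum(dim=-1).argmax(dim=-1)     # index of the last real token per row
pooled_logits = logits[torch.arange(input_ids.shape[0]), last_token_idx]  # (batch, num_labels)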
1135
 
1136
  loss = None
1137
  if labels is not None:
1138
+ loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)
1139
 
1140
  if not return_dict:
1141
+ output = (pooled_logits,) + transformer_outputs[1:]
1142
  return ((loss,) + output) if loss is not None else output
1143
 
1144
  return SequenceClassifierOutputWithPast(
1145
  loss=loss,
1146
  logits=pooled_logits,
1147
+ past_key_values=transformer_outputs.past_key_values,
1148
+ hidden_states=transformer_outputs.hidden_states,
1149
+ attentions=transformer_outputs.attentions,
1150
  )
1151
+
1152
+
1153
+ __all__ = ["DogeForCausalLM", "DogeModel", "DogePreTrainedModel", "DogeForSequenceClassification"]