Maxwell-Jia committed
Commit 9f16b22
Parent: 57bc71e

Upload model

Files changed (4)
  1. config.json +25 -0
  2. configuration_spect.py +123 -0
  3. model.safetensors +3 -0
  4. modeling_spect.py +635 -0
config.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "architectures": [
+     "SpecTModel"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "auto_map": {
+     "AutoConfig": "configuration_spect.SpecTConfig",
+     "AutoModel": "modeling_spect.SpecTModel"
+   },
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 64,
+   "initializer_range": 0.02,
+   "intermediate_size": 128,
+   "layer_norm_eps": 1e-12,
+   "model_type": "spect",
+   "num_attention_heads": 2,
+   "num_channels": 1,
+   "num_hidden_layers": 2,
+   "patch_size": 64,
+   "qkv_bias": true,
+   "spectral_length": 4096,
+   "torch_dtype": "float32",
+   "transformers_version": "4.36.2"
+ }
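
Since `auto_map` points `AutoConfig` and `AutoModel` at the custom classes in this repo, the checkpoint loads through the standard auto classes with `trust_remote_code=True`. A minimal loading sketch (the repo id is the checkpoint named in `modeling_spect.py`; the output shape follows from `spectral_length`, `patch_size`, and `hidden_size` in this config):

```python
import torch
from transformers import AutoConfig, AutoModel

repo_id = "Maxwell-Jia/spect-base-patch64-4096-lamost"

# trust_remote_code=True lets transformers resolve the auto_map entries above
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModel.from_pretrained(repo_id, trust_remote_code=True)

# A batch of two spectra, each with spectral_length = 4096 flux values
flux = torch.randn(2, config.spectral_length)
with torch.no_grad():
    outputs = model(flux_values=flux)

# 4096 / 64 = 64 patches, plus one [CLS] token, at hidden_size = 64
print(outputs.last_hidden_state.shape)  # torch.Size([2, 65, 64])
```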
configuration_spect.py ADDED
@@ -0,0 +1,123 @@
+ # coding=utf-8
+ # Copyright 2021 Google AI and The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """SpecT model configuration"""
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class SpecTConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`SpecTModel`]. It is used to instantiate a SpecT
+     model according to the specified arguments, defining the model architecture. Instantiating a configuration with
+     the defaults will yield a similar configuration to that of the SpecT architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         hidden_size (`int`, *optional*, defaults to 768):
+             Dimensionality of the encoder layers and the pooler layer.
+         num_hidden_layers (`int`, *optional*, defaults to 12):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 12):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         intermediate_size (`int`, *optional*, defaults to 3072):
+             Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+         hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+             The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+             `"relu"`, `"selu"` and `"gelu_new"` are supported.
+         hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
+             The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+         attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+             The epsilon used by the layer normalization layers.
+         spectral_length (`int`, *optional*, defaults to 4096):
+             The length of each input spectrum.
+         patch_size (`int`, *optional*, defaults to 64):
+             The size (resolution) of each patch.
+         num_channels (`int`, *optional*, defaults to 1):
+             The number of input channels.
+         qkv_bias (`bool`, *optional*, defaults to `True`):
+             Whether to add a bias to the queries, keys and values.
+
+     Example:
+
+     ```python
+     >>> from transformers import AutoConfig, AutoModel
+
+     >>> # Initializing a SpecT spect-base-patch64-4096-lamost style configuration
+     >>> configuration = AutoConfig.from_pretrained("Maxwell-Jia/spect-base-patch64-4096-lamost", trust_remote_code=True)
+
+     >>> # Initializing a model (with random weights) from that configuration
+     >>> model = AutoModel.from_config(configuration, trust_remote_code=True)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "spect"
+
+     def __init__(
+         self,
+         hidden_size=768,
+         num_hidden_layers=12,
+         num_attention_heads=12,
+         intermediate_size=3072,
+         hidden_act="gelu",
+         hidden_dropout_prob=0.0,
+         attention_probs_dropout_prob=0.0,
+         initializer_range=0.02,
+         layer_norm_eps=1e-12,
+         spectral_length=4096,
+         patch_size=64,
+         num_channels=1,
+         qkv_bias=True,
+         **kwargs,
+     ):
+         super().__init__(**kwargs)
+
+         self.hidden_size = hidden_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.intermediate_size = intermediate_size
+         self.hidden_act = hidden_act
+         self.hidden_dropout_prob = hidden_dropout_prob
+         self.attention_probs_dropout_prob = attention_probs_dropout_prob
+         self.initializer_range = initializer_range
+         self.layer_norm_eps = layer_norm_eps
+         self.spectral_length = spectral_length
+         self.patch_size = patch_size
+         self.num_channels = num_channels
+         self.qkv_bias = qkv_bias
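
As a sanity check on the geometry these arguments imply, a short sketch (values taken from the uploaded config.json, assuming the file above is importable locally):

```python
from configuration_spect import SpecTConfig

# Hyperparameters of the uploaded checkpoint (see config.json above)
config = SpecTConfig(
    hidden_size=64,
    num_hidden_layers=2,
    num_attention_heads=2,
    intermediate_size=128,
    spectral_length=4096,
    patch_size=64,
)

# Each spectrum is cut into non-overlapping patches; the model prepends a [CLS] token
num_patches = config.spectral_length // config.patch_size
print(num_patches, num_patches + 1)  # 64 65
```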
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:441471a885417c5f0f51562b584dd68a22afc54202bf57e0e7e5b3f8c0ee2f98
+ size 323264
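
The weights are stored as a Git LFS pointer, so the blob fetched at download time can be checked against the pointer's oid and size. A minimal verification sketch (the local path is a placeholder):

```python
import hashlib
import os

path = "model.safetensors"  # after `git lfs pull` or a hub download

assert os.path.getsize(path) == 323264
digest = hashlib.sha256(open(path, "rb").read()).hexdigest()
assert digest == "441471a885417c5f0f51562b584dd68a22afc54202bf57e0e7e5b3f8c0ee2f98"
```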
modeling_spect.py ADDED
@@ -0,0 +1,635 @@
+ import math
+ from typing import Dict, List, Optional, Set, Tuple, Union
+
+ import torch
+ import torch.nn as nn
+
+ from transformers import PreTrainedModel
+ from transformers.activations import ACT2FN
+ from transformers.modeling_outputs import (
+     BaseModelOutput,
+     BaseModelOutputWithPooling,
+     SequenceClassifierOutput,
+ )
+ from transformers.pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
+ from transformers.utils import (
+     add_code_sample_docstrings,
+     add_start_docstrings,
+     add_start_docstrings_to_model_forward,
+     logging,
+ )
+
+ # Relative import so the Hub's dynamic module loader can resolve it (see "auto_map" in config.json)
+ from .configuration_spect import SpecTConfig
+
+
+ logger = logging.get_logger(__name__)
+
+ # General docstring
+ _CONFIG_FOR_DOC = "SpecTConfig"
+
+ # Base docstring
+ _CHECKPOINT_FOR_DOC = "Maxwell-Jia/spect-base-patch64-4096-lamost"
+ _EXPECTED_OUTPUT_SHAPE = [1, 65, 64]  # 4096 / 64 = 64 patches + [CLS]; hidden_size = 64
+
+ SPECT_PRETRAINED_MODEL_ARCHIVE_LIST = [
+     "Maxwell-Jia/spect-base-patch64-4096-lamost",
+     # See all SpecT models on the Hugging Face Hub
+ ]
+
+
+ class SpecTPatchEmbeddings(nn.Module):
+     """
+     This class turns `spectral_values` of shape `(batch_size, sequence_length)` into
+     `hidden_states` (segment embeddings) of shape `(batch_size, num_segments, hidden_size)`
+     for a Transformer.
+     """
+
+     def __init__(self, config: SpecTConfig) -> None:
+         super().__init__()
+         spectral_length, patch_size = config.spectral_length, config.patch_size
+         num_channels, hidden_size = config.num_channels, config.hidden_size
+
+         # Spectral data is 1D, so the patch grid is simply length // patch_size
+         num_patches = spectral_length // patch_size
+         self.spectral_length = spectral_length
+         self.patch_size = patch_size
+         self.num_channels = num_channels
+         self.num_patches = num_patches
+
+         # Use Conv1d to cut the spectral sequence into non-overlapping patches and project them
+         self.projection = nn.Conv1d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
+
+     def forward(self, spectral_values: torch.Tensor) -> torch.Tensor:
+         batch_size, spectral_length = spectral_values.shape
+         if spectral_length != self.spectral_length:
+             raise ValueError(
+                 f"Spectral sequence length ({spectral_length}) doesn't match model"
+                 f" ({self.spectral_length})."
+             )
+         # Add a channel dimension, then project each spectral segment to an embedding
+         spectral_values = spectral_values.unsqueeze(1)
+         embeddings = self.projection(spectral_values).transpose(1, 2)
+         return embeddings
+
+
+ class SpecTEmbeddings(nn.Module):
+     """
+     Construct the CLS token and position embeddings for spectral data. Optionally, also the mask token.
+     """
+
+     def __init__(self, config: SpecTConfig, use_mask_token: bool = False) -> None:
+         super().__init__()
+
+         self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size))
+         self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) if use_mask_token else None
+         self.patch_embeddings = SpecTPatchEmbeddings(config)
+         num_patches = self.patch_embeddings.num_patches
+         self.position_embeddings = nn.Parameter(torch.randn(1, num_patches + 1, config.hidden_size))
+
+         # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+         # any TensorFlow checkpoint file
+         self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+         self.dropout = nn.Dropout(config.hidden_dropout_prob)
+         self.config = config
+
+     def forward(
+         self,
+         flux_values: torch.Tensor,
+         bool_masked_pos: Optional[torch.BoolTensor] = None,
+     ) -> torch.Tensor:
+         batch_size = flux_values.shape[0]
+         embeddings = self.patch_embeddings(flux_values)
+
+         if bool_masked_pos is not None:
+             seq_length = embeddings.shape[1]
+             mask_tokens = self.mask_token.expand(batch_size, seq_length, -1)
+             # replace the masked spectral tokens by mask_tokens
+             mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
+             embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
+
+         # add the [CLS] token to the embedded patch tokens
+         cls_tokens = self.cls_token.expand(batch_size, -1, -1)
+         embeddings = torch.cat((cls_tokens, embeddings), dim=1)
+
+         # add positional encoding to each token
+         embeddings = embeddings + self.position_embeddings
+         embeddings = self.LayerNorm(embeddings)
+         embeddings = self.dropout(embeddings)
+
+         return embeddings
+
+
+ class SpecTSelfAttention(nn.Module):
+     def __init__(self, config: SpecTConfig) -> None:
+         super().__init__()
+         if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+             raise ValueError(
+                 f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
+                 f"heads {config.num_attention_heads}."
+             )
+
+         self.num_attention_heads = config.num_attention_heads
+         self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+         self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+         self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
+         self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
+         self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
+
+         self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+     def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
+         new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+         x = x.view(new_x_shape)
+         return x.permute(0, 2, 1, 3)
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         head_mask: Optional[torch.Tensor] = None,
+         output_attentions: bool = False,
+     ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
+         mixed_query_layer = self.query(hidden_states)
+
+         key_layer = self.transpose_for_scores(self.key(hidden_states))
+         value_layer = self.transpose_for_scores(self.value(hidden_states))
+         query_layer = self.transpose_for_scores(mixed_query_layer)
+
+         # Take the dot product between "query" and "key" to get the raw attention scores.
+         attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+         attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+
+         # Normalize the attention scores to probabilities.
+         attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+         # This is actually dropping out entire tokens to attend to, which might
+         # seem a bit unusual, but is taken from the original Transformer paper.
+         attention_probs = self.dropout(attention_probs)
+
+         # Mask heads if we want to
+         if head_mask is not None:
+             attention_probs = attention_probs * head_mask
+
+         context_layer = torch.matmul(attention_probs, value_layer)
+
+         context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+         new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+         context_layer = context_layer.view(new_context_layer_shape)
+
+         outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+         return outputs
+
+
+ class SpecTSelfOutput(nn.Module):
+     """
+     The residual connection is defined in SpecTLayer instead of here (as is the case with other models), due to the
+     layernorm applied before each block.
+     """
+
+     def __init__(self, config: SpecTConfig) -> None:
+         super().__init__()
+         self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+         self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+     def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+         hidden_states = self.dense(hidden_states)
+         hidden_states = self.dropout(hidden_states)
+
+         return hidden_states
+
+
+ class SpecTAttention(nn.Module):
+     def __init__(self, config: SpecTConfig) -> None:
+         super().__init__()
+         self.attention = SpecTSelfAttention(config)
+         self.output = SpecTSelfOutput(config)
+         self.pruned_heads = set()
+
+     def prune_heads(self, heads: Set[int]) -> None:
+         if len(heads) == 0:
+             return
+         heads, index = find_pruneable_heads_and_indices(
+             heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
+         )
+
+         # Prune linear layers
+         self.attention.query = prune_linear_layer(self.attention.query, index)
+         self.attention.key = prune_linear_layer(self.attention.key, index)
+         self.attention.value = prune_linear_layer(self.attention.value, index)
+         self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+         # Update hyper params and store pruned heads
+         self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
+         self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
+         self.pruned_heads = self.pruned_heads.union(heads)
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         head_mask: Optional[torch.Tensor] = None,
+         output_attentions: bool = False,
+     ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
+         self_outputs = self.attention(hidden_states, head_mask, output_attentions)
+
+         attention_output = self.output(self_outputs[0], hidden_states)
+
+         outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
+         return outputs
+
+
+ class SpecTIntermediate(nn.Module):
+     def __init__(self, config: SpecTConfig) -> None:
+         super().__init__()
+         self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+         if isinstance(config.hidden_act, str):
+             self.intermediate_act_fn = ACT2FN[config.hidden_act]
+         else:
+             self.intermediate_act_fn = config.hidden_act
+
+     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+         hidden_states = self.dense(hidden_states)
+         hidden_states = self.intermediate_act_fn(hidden_states)
+
+         return hidden_states
+
+
+ class SpecTOutput(nn.Module):
+     def __init__(self, config: SpecTConfig) -> None:
+         super().__init__()
+         self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+         self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+     def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+         hidden_states = self.dense(hidden_states)
+         hidden_states = self.dropout(hidden_states)
+
+         hidden_states = hidden_states + input_tensor
+
+         return hidden_states
+
+
+ class SpecTLayer(nn.Module):
+     def __init__(self, config: SpecTConfig) -> None:
+         super().__init__()
+         self.chunk_size_feed_forward = config.chunk_size_feed_forward
+         self.seq_len_dim = 1
+         self.attention = SpecTAttention(config)
+         self.intermediate = SpecTIntermediate(config)
+         self.output = SpecTOutput(config)
+         self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+         self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         head_mask: Optional[torch.Tensor] = None,
+         output_attentions: bool = False,
+     ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
+         self_attention_outputs = self.attention(
+             self.layernorm_before(hidden_states),  # in ViT, layernorm is applied before self-attention
+             head_mask,
+             output_attentions=output_attentions,
+         )
+         attention_output = self_attention_outputs[0]
+         outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights
+
+         # first residual connection
+         hidden_states = attention_output + hidden_states
+
+         # in ViT, layernorm is also applied after self-attention
+         layer_output = self.layernorm_after(hidden_states)
+         layer_output = self.intermediate(layer_output)
+
+         # second residual connection is done here
+         layer_output = self.output(layer_output, hidden_states)
+
+         outputs = (layer_output,) + outputs
+
+         return outputs
+
+
+ class SpecTEncoder(nn.Module):
+     def __init__(self, config: SpecTConfig) -> None:
+         super().__init__()
+         self.config = config
+         self.layer = nn.ModuleList([SpecTLayer(config) for _ in range(config.num_hidden_layers)])
+         self.gradient_checkpointing = False
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         head_mask: Optional[torch.Tensor] = None,
+         output_attentions: bool = False,
+         output_hidden_states: bool = False,
+         return_dict: bool = True,
+     ) -> Union[tuple, BaseModelOutput]:
+         all_hidden_states = () if output_hidden_states else None
+         all_self_attentions = () if output_attentions else None
+
+         for i, layer_module in enumerate(self.layer):
+             if output_hidden_states:
+                 all_hidden_states = all_hidden_states + (hidden_states,)
+
+             layer_head_mask = head_mask[i] if head_mask is not None else None
+
+             if self.gradient_checkpointing and self.training:
+                 layer_outputs = self._gradient_checkpointing_func(
+                     layer_module.__call__,
+                     hidden_states,
+                     layer_head_mask,
+                     output_attentions,
+                 )
+             else:
+                 layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions)
+
+             hidden_states = layer_outputs[0]
+
+             if output_attentions:
+                 all_self_attentions = all_self_attentions + (layer_outputs[1],)
+
+         if output_hidden_states:
+             all_hidden_states = all_hidden_states + (hidden_states,)
+
+         if not return_dict:
+             return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+         return BaseModelOutput(
+             last_hidden_state=hidden_states,
+             hidden_states=all_hidden_states,
+             attentions=all_self_attentions,
+         )
+
+
+ class SpecTPreTrainedModel(PreTrainedModel):
+     """
+     An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+     models.
+     """
+
+     config_class = SpecTConfig
+     base_model_prefix = "spect"
+     main_input_name = "flux_values"  # matches the forward signatures below
+     supports_gradient_checkpointing = True
+     _no_split_modules = ["SpecTEmbeddings", "SpecTLayer"]
+
+     def _init_weights(self, module: Union[nn.Linear, nn.Conv1d, nn.LayerNorm]) -> None:
+         """Initialize the weights"""
+         # the patch projection is a Conv1d, so it is initialized here alongside the linear layers
+         if isinstance(module, (nn.Linear, nn.Conv1d)):
+             # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid
+             # `trunc_normal_cpu` not implemented in `half` issues
+             module.weight.data = nn.init.trunc_normal_(
+                 module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range
+             ).to(module.weight.dtype)
+             if module.bias is not None:
+                 module.bias.data.zero_()
+         elif isinstance(module, nn.LayerNorm):
+             module.bias.data.zero_()
+             module.weight.data.fill_(1.0)
+         elif isinstance(module, SpecTEmbeddings):
+             module.position_embeddings.data = nn.init.trunc_normal_(
+                 module.position_embeddings.data.to(torch.float32),
+                 mean=0.0,
+                 std=self.config.initializer_range,
+             ).to(module.position_embeddings.dtype)
+
+             module.cls_token.data = nn.init.trunc_normal_(
+                 module.cls_token.data.to(torch.float32),
+                 mean=0.0,
+                 std=self.config.initializer_range,
+             ).to(module.cls_token.dtype)
+
+
+ SPECT_START_DOCSTRING = r"""
+     This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass
+     designed for spectral data analysis. Use it as a regular PyTorch Module and refer to the PyTorch documentation
+     for all matters related to general usage and behavior.
+
+     Parameters:
+         config ([`SpecTConfig`]): Model configuration class with all the parameters of the model specific to spectral
+             data analysis. Initializing with a config file does not load the weights associated with the model, only
+             the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+ """
+
+
+ SPECT_INPUTS_DOCSTRING = r"""
+     Args:
+         flux_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
+             Spectral flux values across wavelengths for each sequence in the batch.
+             Represents the input spectral data to be processed by the model.
+
+         head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+             Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+             - 1 indicates the head is **not masked**,
+             - 0 indicates the head is **masked**.
+
+         output_attentions (`bool`, *optional*):
+             Whether or not to return the attentions tensors of all attention layers.
+             See `attentions` under returned tensors for more detail.
+
+         output_hidden_states (`bool`, *optional*):
+             Whether or not to return the hidden states of all layers.
+             See `hidden_states` under returned tensors for more detail.
+
+         bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
+             Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
+             Relevant for models that incorporate some form of masked or self-supervised learning on spectral data.
+
+         return_dict (`bool`, *optional*):
+             Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+
+
+ @add_start_docstrings(
+     "The bare SpecT Model transformer outputting raw hidden-states without any specific head on top.",
+     SPECT_START_DOCSTRING,
+ )
+ class SpecTModel(SpecTPreTrainedModel):
+     config_class = SpecTConfig
+
+     def __init__(self, config: SpecTConfig, add_pooling_layer: bool = True, use_mask_token: bool = False):
+         super().__init__(config)
+         self.config = config
+
+         self.embeddings = SpecTEmbeddings(config, use_mask_token=use_mask_token)
+         self.encoder = SpecTEncoder(config)
+
+         self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+         self.pooler = SpecTPooler(config) if add_pooling_layer else None
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self) -> SpecTPatchEmbeddings:
+         return self.embeddings.patch_embeddings
+
+     def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
+         """
+         Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See
+         base class PreTrainedModel.
+         """
+         for layer, heads in heads_to_prune.items():
+             self.encoder.layer[layer].attention.prune_heads(heads)
+
+     @add_start_docstrings_to_model_forward(SPECT_INPUTS_DOCSTRING)
+     @add_code_sample_docstrings(
+         checkpoint=_CHECKPOINT_FOR_DOC,
+         output_type=BaseModelOutputWithPooling,
+         config_class=_CONFIG_FOR_DOC,
+         modality="vision",
+         expected_output=_EXPECTED_OUTPUT_SHAPE,
+     )
+     def forward(
+         self,
+         flux_values: Optional[torch.Tensor] = None,
+         bool_masked_pos: Optional[torch.BoolTensor] = None,
+         head_mask: Optional[torch.Tensor] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple, BaseModelOutputWithPooling]:
+         r"""
+         bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
+             Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
+         """
+         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         if flux_values is None:
+             raise ValueError("You have to specify flux_values")
+
+         # Prepare head mask if needed
+         # 1.0 in head_mask indicate we keep the head
+         # attention_probs has shape bsz x n_heads x N x N
+         # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+         # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+         head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+         # TODO: maybe have a cleaner way to cast the input (from `ImageProcessor` side?)
+         expected_dtype = self.embeddings.patch_embeddings.projection.weight.dtype
+         if flux_values.dtype != expected_dtype:
+             flux_values = flux_values.to(expected_dtype)
+
+         embedding_output = self.embeddings(
+             flux_values, bool_masked_pos=bool_masked_pos
+         )
+
+         encoder_outputs = self.encoder(
+             embedding_output,
+             head_mask=head_mask,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+         sequence_output = encoder_outputs[0]
+         sequence_output = self.layernorm(sequence_output)
+         pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
+
+         if not return_dict:
+             head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,)
+             return head_outputs + encoder_outputs[1:]
+
+         return BaseModelOutputWithPooling(
+             last_hidden_state=sequence_output,
+             pooler_output=pooled_output,
+             hidden_states=encoder_outputs.hidden_states,
+             attentions=encoder_outputs.attentions,
+         )
+
+
+ class SpecTPooler(nn.Module):
+     def __init__(self, config: SpecTConfig):
+         super().__init__()
+         self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+         self.activation = nn.Tanh()
+
+     def forward(self, hidden_states):
+         # We "pool" the model by simply taking the hidden state corresponding
+         # to the first token.
+         first_token_tensor = hidden_states[:, 0]
+         pooled_output = self.dense(first_token_tensor)
+         pooled_output = self.activation(pooled_output)
+         return pooled_output
+
+
+ @add_start_docstrings(
+     """
+     SpecT Model transformer with a sequence classification head on top (a linear layer on top of the final hidden
+     state of the [CLS] token).
+     """,
+     SPECT_START_DOCSTRING,
+ )
+ class SpecTForSequenceClassification(SpecTPreTrainedModel):
+     """
+     This model is a modification of the SpecTModel for sequence classification tasks. It adds a classification head
+     on top of the SpecTModel, making it suitable for tasks like spectral type classification from stellar spectra.
+     """
+     config_class = SpecTConfig
+
+     def __init__(self, config: SpecTConfig):
+         super().__init__(config)
+         self.num_labels = config.num_labels
+
+         # The base SpecTModel
+         self.spect = SpecTModel(config, add_pooling_layer=False)
+
+         # Classification head
+         self.classifier = nn.Linear(config.hidden_size, self.num_labels)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def forward(
+         self,
+         flux_values: Optional[torch.Tensor] = None,
+         labels: Optional[torch.Tensor] = None,
+         head_mask: Optional[torch.Tensor] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ):
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         outputs = self.spect(
+             flux_values,
+             head_mask=head_mask,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+
+         sequence_output = outputs[0]
+         pooled_output = sequence_output[:, 0, :]  # final hidden state of the [CLS] token
+
+         logits = self.classifier(pooled_output)
+
+         loss = None
+         if labels is not None:
+             if self.num_labels == 1:  # binary classification with a single logit
+                 loss_fct = nn.BCEWithLogitsLoss()
+                 loss = loss_fct(logits.view(-1), labels.float().view(-1))  # BCE expects float targets
+             else:
+                 loss_fct = nn.CrossEntropyLoss()
+                 loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+         if not return_dict:
+             # with add_pooling_layer=False the extra outputs start at index 1
+             output = (logits,) + outputs[1:]
+             return ((loss,) + output) if loss is not None else output
+
+         return SequenceClassifierOutput(
+             loss=loss,
+             logits=logits,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
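
A minimal sketch of the classification head defined above. It assumes the two files sit in an importable package (the Hub's dynamic module loader arranges this at load time); `num_labels=3` and the package name `spect_pkg` are illustrative, not part of the upload:

```python
import torch

from spect_pkg.configuration_spect import SpecTConfig  # hypothetical package layout
from spect_pkg.modeling_spect import SpecTForSequenceClassification

config = SpecTConfig(
    hidden_size=64, num_hidden_layers=2, num_attention_heads=2,
    intermediate_size=128, spectral_length=4096, patch_size=64,
    num_labels=3,  # arbitrary number of classes for illustration
)
model = SpecTForSequenceClassification(config)

flux = torch.randn(4, config.spectral_length)        # (batch, spectral_length)
labels = torch.randint(0, config.num_labels, (4,))   # integer class ids
outputs = model(flux_values=flux, labels=labels)
print(outputs.loss, outputs.logits.shape)            # scalar loss, torch.Size([4, 3])
```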