valhalla committed
Commit: 8eaba8f
1 Parent(s): b9781cd

Update configuration_ldmbert.py

Files changed (1)
  1. configuration_ldmbert.py +150 -0
configuration_ldmbert.py CHANGED
@@ -0,0 +1,150 @@
+ # coding=utf-8
+ # Copyright 2022 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ LDMBERT model configuration"""
+ import warnings
+ from collections import OrderedDict
+ from typing import Any, Mapping, Optional
+
+ from transformers import PreTrainedTokenizer
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import TensorType, is_torch_available, logging
+
+
+ logger = logging.get_logger(__name__)
+
+ LDMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+     "ldm-bert": "https://huggingface.co/ldm-bert/resolve/main/config.json",
+ }
+
+
+ class LDMBertConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`LDMBertModel`]. It is used to instantiate a
+     LDMBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration
+     with the defaults will yield a similar configuration to that of the LDMBERT
+     [facebook/ldmbert-large](https://huggingface.co/facebook/ldmbert-large) architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 30522):
+             Vocabulary size of the LDMBERT model. Defines the number of different tokens that can be represented by
+             the `input_ids` passed when calling [`LDMBertModel`] or [`TFLDMBertModel`].
+         d_model (`int`, *optional*, defaults to 1280):
+             Dimensionality of the layers and the pooler layer.
+         encoder_layers (`int`, *optional*, defaults to 32):
+             Number of encoder layers.
+         decoder_layers (`int`, *optional*, defaults to 12):
+             Number of decoder layers.
+         encoder_attention_heads (`int`, *optional*, defaults to 8):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         decoder_attention_heads (`int`, *optional*, defaults to 16):
+             Number of attention heads for each attention layer in the Transformer decoder.
+         decoder_ffn_dim (`int`, *optional*, defaults to 4096):
+             Dimensionality of the "intermediate" (often named feed-forward) layer in the decoder.
+         encoder_ffn_dim (`int`, *optional*, defaults to 5120):
+             Dimensionality of the "intermediate" (often named feed-forward) layer in the encoder.
+         activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
+             The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+             `"relu"`, `"silu"` and `"gelu_new"` are supported.
+         dropout (`float`, *optional*, defaults to 0.1):
+             The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+         activation_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for activations inside the fully connected layer.
+         classifier_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the classifier.
+         max_position_embeddings (`int`, *optional*, defaults to 77):
+             The maximum sequence length that this model might ever be used with. Typically set this to something large
+             just in case (e.g., 512 or 1024 or 2048).
+         init_std (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         encoder_layerdrop (`float`, *optional*, defaults to 0.0):
+             The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+             for more details.
+         decoder_layerdrop (`float`, *optional*, defaults to 0.0):
+             The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+             for more details.
+         scale_embedding (`bool`, *optional*, defaults to `False`):
+             Scale embeddings by dividing by sqrt(d_model).
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models).
+         num_labels (`int`, *optional*, defaults to 3):
+             The number of labels to use in [`LDMBertForSequenceClassification`].
+         forced_eos_token_id (`int`, *optional*, defaults to 2):
+             The id of the token to force as the last generated token when `max_length` is reached. Usually set to
+             `eos_token_id`.
+
+     Example:
+
+     ```python
+     >>> from transformers import LDMBertModel, LDMBertConfig
+
+     >>> # Initializing an LDMBERT facebook/ldmbert-large style configuration
+     >>> configuration = LDMBertConfig()
+
+     >>> # Initializing a model from the facebook/ldmbert-large style configuration
+     >>> model = LDMBertModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+     model_type = "ldmbert"
+     keys_to_ignore_at_inference = ["past_key_values"]
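+     # attribute_map lets the generic names `num_attention_heads` and `hidden_size` act as aliases
+     # for the LDMBERT-specific attributes `encoder_attention_heads` and `d_model`.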
+     attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
+
+     def __init__(
+         self,
+         vocab_size=30522,
+         max_position_embeddings=77,
+         encoder_layers=32,
+         encoder_ffn_dim=5120,
+         encoder_attention_heads=8,
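+         # head_dim is the per-attention-head size, set explicitly rather than derived as d_model // encoder_attention_heads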
+         head_dim=64,
+         encoder_layerdrop=0.0,
+         activation_function="gelu",
+         d_model=1280,
+         dropout=0.1,
+         attention_dropout=0.0,
+         activation_dropout=0.0,
+         init_std=0.02,
+         classifier_dropout=0.0,
+         scale_embedding=False,
+         use_cache=True,
+         pad_token_id=0,
+         **kwargs
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.d_model = d_model
+         self.encoder_ffn_dim = encoder_ffn_dim
+         self.encoder_layers = encoder_layers
+         self.encoder_attention_heads = encoder_attention_heads
+         self.head_dim = head_dim
+         self.dropout = dropout
+         self.attention_dropout = attention_dropout
+         self.activation_dropout = activation_dropout
+         self.activation_function = activation_function
+         self.init_std = init_std
+         self.encoder_layerdrop = encoder_layerdrop
+         self.classifier_dropout = classifier_dropout
+         self.use_cache = use_cache
+         self.num_hidden_layers = encoder_layers
+         self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
+
+         super().__init__(pad_token_id=pad_token_id, **kwargs)
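
A minimal usage sketch of the configuration class above, assuming `transformers` is installed and this file is importable as `configuration_ldmbert` (the import path is an assumption, not part of the commit):

```python
# Hypothetical usage of LDMBertConfig; only the import path is assumed.
from configuration_ldmbert import LDMBertConfig

# Default configuration: 32 encoder layers, d_model=1280, 8 attention heads, 77 positions.
config = LDMBertConfig()

# attribute_map exposes the generic names as aliases for the LDMBERT-specific attributes.
assert config.hidden_size == config.d_model == 1280
assert config.num_attention_heads == config.encoder_attention_heads == 8

# Any field can be overridden at construction time, e.g. for a smaller text encoder.
small = LDMBertConfig(encoder_layers=12, d_model=768, encoder_attention_heads=12)
print(small.num_hidden_layers)  # 12, mirrored from encoder_layers in __init__
```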