""" LddBERT model configuration""" |
|
from transformers.configuration_utils import PretrainedConfig |
|
from transformers.utils import logging |
|
|
|
|
|
logger = logging.get_logger(__name__) |
|
|
|
LDDBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = { |
|
|
|
} |
|
|
|
|
|
class LddBertConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`LddBertModel`] or a [`TFLddBertModel`]. It is
    used to instantiate a LddBERT model according to the specified arguments, defining the model architecture.
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the LddBERT model. Defines the number of different tokens that can be represented by
            the `input_ids` passed when calling [`LddBertModel`] or [`TFLddBertModel`].
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        sinusoidal_pos_embds (`boolean`, *optional*, defaults to `False`):
            Whether to use sinusoidal positional embeddings.
        n_layers (`int`, *optional*, defaults to 6):
            Number of hidden layers in the Transformer encoder.
        n_gru_layers (`int`, *optional*, defaults to 6):
            Number of GRU layers.
        n_cnn_layers (`int`, *optional*, defaults to 6):
            Number of convolutional layers.
        cnn_kernel_size (`int`, *optional*, defaults to 5):
            Kernel size of the convolutional layers.
        n_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        dim (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        hidden_dim (`int`, *optional*, defaults to 3072):
            The size of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        activation (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`LddBertModel`] or [`TFLddBertModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        qa_dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability used in the question answering model [`LddBertForQuestionAnswering`].
        seq_classif_dropout (`float`, *optional*, defaults to 0.2):
            The dropout probability used in the sequence classification and the multiple choice model
            [`LddBertForSequenceClassification`].

    Examples:

    ```python
    >>> from transformers import LddBertModel, LddBertConfig

    >>> # Initializing a LddBERT configuration
    >>> configuration = LddBertConfig()

    >>> # Initializing a model from the configuration
    >>> model = LddBertModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
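
    >>> # A non-default setup (sketch only): the Transformer/GRU/CNN depths and the kernel
    >>> # size below are illustrative values, not recommended settings
    >>> custom_configuration = LddBertConfig(n_layers=4, n_gru_layers=2, n_cnn_layers=2, cnn_kernel_size=3)
    >>> custom_model = LddBertModel(custom_configuration)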
    ```"""

    model_type = "lddbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }
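    # Note: `attribute_map` above lets code that uses the standard Transformers attribute names
    # keep working, e.g. `config.hidden_size` resolves to `config.dim` and
    # `config.num_hidden_layers` to `config.n_layers`.
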
    def __init__(
        self,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        activation="gelu",
        initializer_range=0.02,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        pad_token_id=0,
        type_vocab_size=2,
        dropout=0.1,
        attention_dropout=0.1,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        n_gru_layers=6,
        n_cnn_layers=6,
        cnn_kernel_size=5,
        **kwargs
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_gru_layers = n_gru_layers
        self.n_cnn_layers = n_cnn_layers
        self.cnn_kernel_size = cnn_kernel_size
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)