EricLam committed on
Commit
2f8cb54
1 Parent(s): 0436c9b

Update requirements.txt.file

Files changed (1)
  1. requirements.txt.file +2 -198
requirements.txt.file CHANGED
@@ -1,198 +1,2 @@
- # coding=utf-8
- # Copyright 2020, Microsoft and the HuggingFace Inc. team.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """ DeBERTa model configuration"""
- from collections import OrderedDict
- from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
-
- from ...configuration_utils import PretrainedConfig
- from ...onnx import OnnxConfig
- from ...utils import logging
-
-
- if TYPE_CHECKING:
-     from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
-
-
- logger = logging.get_logger(__name__)
-
- DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
-     "microsoft/deberta-base": "https://huggingface.co/microsoft/deberta-base/resolve/main/config.json",
-     "microsoft/deberta-large": "https://huggingface.co/microsoft/deberta-large/resolve/main/config.json",
-     "microsoft/deberta-xlarge": "https://huggingface.co/microsoft/deberta-xlarge/resolve/main/config.json",
-     "microsoft/deberta-base-mnli": "https://huggingface.co/microsoft/deberta-base-mnli/resolve/main/config.json",
-     "microsoft/deberta-large-mnli": "https://huggingface.co/microsoft/deberta-large-mnli/resolve/main/config.json",
-     "microsoft/deberta-xlarge-mnli": "https://huggingface.co/microsoft/deberta-xlarge-mnli/resolve/main/config.json",
- }
-
-
- class DebertaConfig(PretrainedConfig):
-     r"""
-     This is the configuration class to store the configuration of a [`DebertaModel`] or a [`TFDebertaModel`]. It is
-     used to instantiate a DeBERTa model according to the specified arguments, defining the model architecture.
-     Instantiating a configuration with the defaults will yield a similar configuration to that of the DeBERTa
-     [microsoft/deberta-base](https://huggingface.co/microsoft/deberta-base) architecture.
-
-     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
-     documentation from [`PretrainedConfig`] for more information.
-
-     Arguments:
-         vocab_size (`int`, *optional*, defaults to 30522):
-             Vocabulary size of the DeBERTa model. Defines the number of different tokens that can be represented by the
-             `inputs_ids` passed when calling [`DebertaModel`] or [`TFDebertaModel`].
-         hidden_size (`int`, *optional*, defaults to 768):
-             Dimensionality of the encoder layers and the pooler layer.
-         num_hidden_layers (`int`, *optional*, defaults to 12):
-             Number of hidden layers in the Transformer encoder.
-         num_attention_heads (`int`, *optional*, defaults to 12):
-             Number of attention heads for each attention layer in the Transformer encoder.
-         intermediate_size (`int`, *optional*, defaults to 3072):
-             Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
-         hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
-             The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
-             `"relu"`, `"silu"`, `"gelu"`, `"tanh"`, `"gelu_fast"`, `"mish"`, `"linear"`, `"sigmoid"` and `"gelu_new"`
-             are supported.
-         hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
-             The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
-         attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
-             The dropout ratio for the attention probabilities.
-         max_position_embeddings (`int`, *optional*, defaults to 512):
-             The maximum sequence length that this model might ever be used with. Typically set this to something large
-             just in case (e.g., 512 or 1024 or 2048).
-         type_vocab_size (`int`, *optional*, defaults to 2):
-             The vocabulary size of the `token_type_ids` passed when calling [`DebertaModel`] or [`TFDebertaModel`].
-         initializer_range (`float`, *optional*, defaults to 0.02):
-             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
-         layer_norm_eps (`float`, *optional*, defaults to 1e-12):
-             The epsilon used by the layer normalization layers.
-         relative_attention (`bool`, *optional*, defaults to `False`):
-             Whether use relative position encoding.
-         max_relative_positions (`int`, *optional*, defaults to 1):
-             The range of relative positions `[-max_position_embeddings, max_position_embeddings]`. Use the same value
-             as `max_position_embeddings`.
-         pad_token_id (`int`, *optional*, defaults to 0):
-             The value used to pad input_ids.
-         position_biased_input (`bool`, *optional*, defaults to `True`):
-             Whether add absolute position embedding to content embedding.
-         pos_att_type (`List[str]`, *optional*):
-             The type of relative position attention, it can be a combination of `["p2c", "c2p"]`, e.g. `["p2c"]`,
-             `["p2c", "c2p"]`.
-         layer_norm_eps (`float`, optional, defaults to 1e-12):
-             The epsilon used by the layer normalization layers.
-
-     Example:
-
-     ```python
-     >>> from transformers import DebertaConfig, DebertaModel
-
-     >>> # Initializing a DeBERTa microsoft/deberta-base style configuration
-     >>> configuration = DebertaConfig()
-
-     >>> # Initializing a model (with random weights) from the microsoft/deberta-base style configuration
-     >>> model = DebertaModel(configuration)
-
-     >>> # Accessing the model configuration
-     >>> configuration = model.config
-     ```"""
-     model_type = "deberta"
-
-     def __init__(
-         self,
-         vocab_size=50265,
-         hidden_size=768,
-         num_hidden_layers=12,
-         num_attention_heads=12,
-         intermediate_size=3072,
-         hidden_act="gelu",
-         hidden_dropout_prob=0.1,
-         attention_probs_dropout_prob=0.1,
-         max_position_embeddings=512,
-         type_vocab_size=0,
-         initializer_range=0.02,
-         layer_norm_eps=1e-7,
-         relative_attention=False,
-         max_relative_positions=-1,
-         pad_token_id=0,
-         position_biased_input=True,
-         pos_att_type=None,
-         pooler_dropout=0,
-         pooler_hidden_act="gelu",
-         **kwargs
-     ):
-         super().__init__(**kwargs)
-
-         self.hidden_size = hidden_size
-         self.num_hidden_layers = num_hidden_layers
-         self.num_attention_heads = num_attention_heads
-         self.intermediate_size = intermediate_size
-         self.hidden_act = hidden_act
-         self.hidden_dropout_prob = hidden_dropout_prob
-         self.attention_probs_dropout_prob = attention_probs_dropout_prob
-         self.max_position_embeddings = max_position_embeddings
-         self.type_vocab_size = type_vocab_size
-         self.initializer_range = initializer_range
-         self.relative_attention = relative_attention
-         self.max_relative_positions = max_relative_positions
-         self.pad_token_id = pad_token_id
-         self.position_biased_input = position_biased_input
-
-         # Backwards compatibility
-         if type(pos_att_type) == str:
-             pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]
-
-         self.pos_att_type = pos_att_type
-         self.vocab_size = vocab_size
-         self.layer_norm_eps = layer_norm_eps
-
-         self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
-         self.pooler_dropout = pooler_dropout
-         self.pooler_hidden_act = pooler_hidden_act
-
-
- # Copied from transformers.models.deberta_v2.configuration_deberta_v2.DebertaV2OnnxConfig
- class DebertaOnnxConfig(OnnxConfig):
-     @property
-     def inputs(self) -> Mapping[str, Mapping[int, str]]:
-         if self.task == "multiple-choice":
-             dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
-         else:
-             dynamic_axis = {0: "batch", 1: "sequence"}
-         if self._config.type_vocab_size > 0:
-             return OrderedDict(
-                 [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
-             )
-         else:
-             return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])
-
-     @property
-     def default_onnx_opset(self) -> int:
-         return 12
-
-     def generate_dummy_inputs(
-         self,
-         preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
-         batch_size: int = -1,
-         seq_length: int = -1,
-         num_choices: int = -1,
-         is_pair: bool = False,
-         framework: Optional["TensorType"] = None,
-         num_channels: int = 3,
-         image_width: int = 40,
-         image_height: int = 40,
-         tokenizer: "PreTrainedTokenizerBase" = None,
-     ) -> Mapping[str, Any]:
-         dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
-         if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
-             del dummy_inputs["token_type_ids"]
-         return dummy_inputs
+ transformers
+ torch
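
With the file reduced to a plain dependency list, the DeBERTa configuration code removed above is provided by the installed `transformers` package instead. Below is a minimal sketch (not part of this commit, and assuming `transformers` and `torch` have been installed from this requirements file) showing the same configuration and model being instantiated from the package, mirroring the example in the removed docstring:

```python
# Sketch only: assumes the two packages listed above (transformers, torch)
# are installed, e.g. via `pip install -r requirements.txt.file`.
import torch
from transformers import DebertaConfig, DebertaModel

# microsoft/deberta-base style configuration with randomly initialized weights.
configuration = DebertaConfig()
model = DebertaModel(configuration)

# Quick smoke test with a dummy batch of token ids.
input_ids = torch.randint(0, configuration.vocab_size, (1, 8))
outputs = model(input_ids=input_ids)
print(outputs.last_hidden_state.shape)  # torch.Size([1, 8, 768])
```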