nicholasKluge committed on
Commit dbbb76e
1 parent: 5ac5a85

Delete toxicity_model_config.py

Files changed (1)
  1. toxicity_model_config.py +0 -46
toxicity_model_config.py DELETED
@@ -1,46 +0,0 @@
-from transformers import PretrainedConfig
-
-class ToxicityModelConfig(PretrainedConfig):
-    model_type="bert-toxic"
-
-    def __init__(
-        self,
-        vocab_size=29794,
-        hidden_size=768,
-        num_hidden_layers=12,
-        num_attention_heads=12,
-        intermediate_size=3072,
-        hidden_act="gelu",
-        hidden_dropout_prob=0.1,
-        attention_probs_dropout_prob=0.1,
-        max_position_embeddings=512,
-        type_vocab_size=2,
-        initializer_range=0.02,
-        layer_norm_eps=1e-12,
-        pad_token_id=0,
-        position_embedding_type="absolute",
-        use_cache=True,
-        classifier_dropout=None,
-        linear_layer=128,
-        linear_layer_output=1,
-        **kwargs,
-    ):
-        super().__init__(pad_token_id=pad_token_id, **kwargs)
-
-        self.vocab_size = vocab_size
-        self.hidden_size = hidden_size
-        self.num_hidden_layers = num_hidden_layers
-        self.num_attention_heads = num_attention_heads
-        self.hidden_act = hidden_act
-        self.intermediate_size = intermediate_size
-        self.hidden_dropout_prob = hidden_dropout_prob
-        self.attention_probs_dropout_prob = attention_probs_dropout_prob
-        self.max_position_embeddings = max_position_embeddings
-        self.type_vocab_size = type_vocab_size
-        self.initializer_range = initializer_range
-        self.layer_norm_eps = layer_norm_eps
-        self.position_embedding_type = position_embedding_type
-        self.use_cache = use_cache
-        self.classifier_dropout = classifier_dropout
-        self.linear_layer = linear_layer
-        self.linear_layer_output = linear_layer_output
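
For reference, the deleted file defined a custom PretrainedConfig subclass that extends the standard BERT hyperparameters with two extra fields (linear_layer, linear_layer_output) sizing a classification head. Below is a minimal usage sketch, not part of this commit: it assumes the deleted module is still importable locally, and the ./toxicity-model path is hypothetical. The AutoConfig.register call is the standard transformers mechanism for mapping a custom model_type like "bert-toxic" back to its config class.

from transformers import AutoConfig
from toxicity_model_config import ToxicityModelConfig  # the deleted module; illustrative only

# Register the custom model_type so AutoConfig can resolve "bert-toxic".
AutoConfig.register("bert-toxic", ToxicityModelConfig)

# Instantiate with the defaults from the deleted file; linear_layer and
# linear_layer_output size the classifier head on top of the encoder.
config = ToxicityModelConfig(linear_layer=128, linear_layer_output=1)

# save_pretrained writes config.json; from_pretrained reads model_type
# from it and returns a ToxicityModelConfig thanks to the registration.
config.save_pretrained("./toxicity-model")  # hypothetical local path
config = AutoConfig.from_pretrained("./toxicity-model")
print(config.model_type)  # "bert-toxic"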