vilsonrodrigues committed on
Commit b0adcaf
1 Parent(s): e14c52f

deprecated

Files changed (1)
  1. configuration_RW.py +0 -79
configuration_RW.py DELETED
@@ -1,79 +0,0 @@
- # coding=utf-8
- # Copyright 2022 the Big Science Workshop and HuggingFace Inc. team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """ Bloom configuration"""
- from transformers.configuration_utils import PretrainedConfig
- from transformers.utils import logging
-
-
- logger = logging.get_logger(__name__)
-
-
- class RWConfig(PretrainedConfig):
-     model_type = "RefinedWebModel"
-     keys_to_ignore_at_inference = ["past_key_values"]
-     attribute_map = {
-         "num_hidden_layers": "n_layer",
-         "num_attention_heads": "n_head",
-     }
-
-     def __init__(
-         self,
-         vocab_size=250880,
-         hidden_size=64,
-         n_layer=2,
-         n_head=8,
-         layer_norm_epsilon=1e-5,
-         initializer_range=0.02,
-         use_cache=True,
-         bos_token_id=1,
-         eos_token_id=2,
-         apply_residual_connection_post_layernorm=False,
-         hidden_dropout=0.0,
-         attention_dropout=0.0,
-         multi_query=False,
-         alibi=False,
-         bias=False,
-         parallel_attn=False,
-         **kwargs,
-     ):
-         self.vocab_size = vocab_size
-         # Backward compatibility with n_embed kwarg
-         n_embed = kwargs.pop("n_embed", None)
-         self.hidden_size = hidden_size if n_embed is None else n_embed
-         self.n_layer = n_layer
-         self.n_head = n_head
-         self.layer_norm_epsilon = layer_norm_epsilon
-         self.initializer_range = initializer_range
-         self.use_cache = use_cache
-         self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
-         self.hidden_dropout = hidden_dropout
-         self.attention_dropout = attention_dropout
-
-         self.bos_token_id = bos_token_id
-         self.eos_token_id = eos_token_id
-         self.multi_query = multi_query
-         self.alibi = alibi
-         self.bias = bias
-         self.parallel_attn = parallel_attn
-
-         super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
-
-     @property
-     def head_dim(self):
-         return self.hidden_size // self.n_head
-
-     @property
-     def rotary(self):
-         return not self.alibi
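
For reference, below is a minimal sketch of how the removed RWConfig class was instantiated and queried. It assumes configuration_RW.py is still importable locally (for example, from an earlier revision of this repository); the hyperparameter values are illustrative only and are not the ones shipped with this model.

# Minimal sketch, assuming the deleted configuration_RW.py is on the Python path.
from configuration_RW import RWConfig

# Illustrative hyperparameters, not this repository's actual values.
config = RWConfig(
    vocab_size=65024,
    hidden_size=512,
    n_layer=4,
    n_head=8,
    multi_query=True,
    parallel_attn=True,
    alibi=False,
)

# Derived properties exposed by the deleted class.
print(config.head_dim)  # 512 // 8 == 64
print(config.rotary)    # True, since alibi is False

Since the commit marks this custom code as deprecated, the model is presumably meant to be loaded without it going forward; the sketch only documents what the removed file provided.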