imranshah committed
Commit eb7c879
1 Parent(s): 318c118

Upload configuration_RW.py

Files changed (1)
  1. configuration_RW.py +75 -0
configuration_RW.py ADDED
@@ -0,0 +1,75 @@
+ # coding=utf-8
+ # Copyright 2022 the Big Science Workshop and HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ RW configuration"""
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class RWConfig(PretrainedConfig):
+     model_type = "RefinedWeb"
+     keys_to_ignore_at_inference = ["past_key_values"]
+     attribute_map = {
+         "num_hidden_layers": "n_layer",
+         "num_attention_heads": "n_head",
+     }
+
+     def __init__(
+         self,
+         vocab_size=250880,
+         hidden_size=64,
+         n_layer=2,
+         n_head=8,
+         layer_norm_epsilon=1e-5,
+         initializer_range=0.02,
+         use_cache=True,
+         bos_token_id=1,
+         eos_token_id=2,
+         apply_residual_connection_post_layernorm=False,
+         hidden_dropout=0.0,
+         attention_dropout=0.0,
+         n_head_kv=None,
+         alibi=False,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         # Backward compatibility with n_embed kwarg
+         n_embed = kwargs.pop("n_embed", None)
+         self.hidden_size = hidden_size if n_embed is None else n_embed
+         self.n_layer = n_layer
+         self.n_head = n_head
+         self.layer_norm_epsilon = layer_norm_epsilon
+         self.initializer_range = initializer_range
+         self.use_cache = use_cache
+         self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
+         self.hidden_dropout = hidden_dropout
+         self.attention_dropout = attention_dropout
+
+         self.bos_token_id = bos_token_id
+         self.eos_token_id = eos_token_id
+         self.n_head_kv = n_head if n_head_kv is None else n_head_kv
+         self.alibi = alibi
+
+         super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+
+     @property
+     def head_dim(self):
+         return self.hidden_size // self.n_head
+
+     @property
+     def rotary(self):
+         return not self.alibi
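
For reference, a minimal sketch of how this config class might be exercised once the uploaded file is importable. The import path and all numeric values below are illustrative assumptions, not part of this commit:

# Minimal usage sketch (assumes configuration_RW.py is on the Python path;
# the values passed in are only examples).
from configuration_RW import RWConfig

config = RWConfig(
    vocab_size=65024,
    hidden_size=4544,
    n_layer=32,
    n_head=71,
    n_head_kv=1,   # a single shared key/value head, i.e. multi-query attention
    alibi=False,   # with ALiBi disabled, the rotary property reports True
)

print(config.head_dim)           # 64 (hidden_size // n_head)
print(config.rotary)             # True (not alibi)
print(config.num_hidden_layers)  # 32, resolved to n_layer via attribute_map
config.save_pretrained("rw-config-demo")  # writes config.json with model_type "RefinedWeb"

On the Hub, a repository would typically wire this class up through an auto_map entry in its config.json so that AutoConfig.from_pretrained(..., trust_remote_code=True) can resolve it; that wiring lives in the repository's config rather than in this file.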