Abhaykoul committed
Commit a874ac3
1 Parent(s): 84b91f7

Update configuration_HelpingAI.py

Files changed (1)
  1. configuration_HelpingAI.py +60 -25
configuration_HelpingAI.py CHANGED
@@ -6,55 +6,90 @@ from transformers.utils import logging
 
 logger = logging.get_logger(__name__)
 
+
 class HelpingAIConfig(PretrainedConfig):
-    keys_to_ignore_at_inference = ["past_key_values"]
     model_type = "HelpingAI"
+    keys_to_ignore_at_inference = ["past_key_values"]
+
     def __init__(
         self,
-        vocab_size=50304,
+        vocab_size=50281,
         hidden_size=2560,
-        intermediate_size=6912,
         num_hidden_layers=32,
         num_attention_heads=32,
-        num_key_value_heads=32,
         head_dim=256,
+        num_local_experts=8,
+        num_experts_per_tok=2,
+        intermediate_size=6912,
         hidden_act="silu",
+        hidden_dropout=0.0,
+        attention_dropout=0.0,
+        classifier_dropout=0.1,
         max_position_embeddings=4096,
         initializer_range=0.02,
         rms_norm_eps=1e-6,
-        use_cache=True,
-        hidden_activation=None,
-        rope_theta=10000,
+        layer_norm_eps=1e-5,
+        use_cache=False,
+        bos_token_id=50278,
+        eos_token_id=50279,
+        pad_token_id=50279,
+        tie_word_embeddings=False,
         rope_pct=0.25,
-        attention_bias=False,
-        attention_dropout=0.0,
-        num_experts_per_tok=2,
-        num_local_experts=8,
-        router_aux_loss_coef=0.02,
+        rope_theta=10000,
+        partial_rotary_factor=0.25,
+        use_qkv_bias=False,
         output_router_logits=False,
-        norm_eps=1.0e-5,
+        router_aux_loss_coef=0.02,
         **kwargs,
     ):
+        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
         self.vocab_size = vocab_size
-        self.max_position_embeddings = max_position_embeddings
+        self.max_position_embeddings = max_position_embeddings
         self.hidden_size = hidden_size
-        self.intermediate_size = intermediate_size
         self.num_hidden_layers = num_hidden_layers
         self.num_attention_heads = num_attention_heads
         self.head_dim = head_dim
+        self.num_local_experts = num_local_experts
+        self.num_experts_per_tok = num_experts_per_tok
+        self.intermediate_size = intermediate_size
         self.hidden_act = hidden_act
-        self.hidden_activation = hidden_activation
-        self.num_key_value_heads = num_key_value_heads
+        self.hidden_dropout = hidden_dropout
+        self.attention_dropout = attention_dropout
+        self.classifier_dropout = classifier_dropout
         self.initializer_range = initializer_range
         self.rms_norm_eps = rms_norm_eps
+        self.layer_norm_eps = layer_norm_eps
         self.use_cache = use_cache
+        self.tie_word_embeddings = tie_word_embeddings
+        self.rope_pct = rope_pct
         self.rope_theta = rope_theta
-        self.attention_bias = attention_bias
-        self.attention_dropout = attention_dropout
-        self.num_experts_per_tok = num_experts_per_tok
-        self.num_local_experts = num_local_experts
-        self.router_aux_loss_coef = router_aux_loss_coef
+        self.partial_rotary_factor = partial_rotary_factor
+        self.use_qkv_bias = use_qkv_bias
         self.output_router_logits = output_router_logits
-        self.rope_pct = rope_pct
-        self.norm_eps = norm_eps
-        super().__init__(**kwargs)
+        self.router_aux_loss_coef = router_aux_loss_coef
+
+        if self.hidden_size % self.num_attention_heads != 0:
+            raise ValueError(
+                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
+            )
+
+    # Copied from transformers.models.llama.configuration_llama.LlamaConfig._rope_scaling_validation
+    def _rope_scaling_validation(self):
+        """
+        Validate the `rope_scaling` configuration.
+        """
+        if self.rope_scaling is None:
+            return
+
+        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+            raise ValueError(
+                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
+            )
+        rope_scaling_type = self.rope_scaling.get("type", None)
+        rope_scaling_factor = self.rope_scaling.get("factor", None)
+        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
+            raise ValueError(
+                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+            )
+        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
+            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
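For orientation, a minimal usage sketch of the updated class. HelpingAIConfig and every parameter name below come from the diff; the import path, sample values, and the try/except harness are illustrative assumptions, not part of the commit.

# Hedged sketch: assumes configuration_HelpingAI.py is importable and that
# transformers (for PretrainedConfig) is installed.
from configuration_HelpingAI import HelpingAIConfig

# Defaults after this commit: vocab_size=50281, use_cache=False,
# bos_token_id=50278, eos_token_id=50279.
config = HelpingAIConfig()
print(config.vocab_size, config.use_cache)  # 50281 False

# __init__ now rejects head counts that do not divide hidden_size evenly.
try:
    HelpingAIConfig(hidden_size=2560, num_attention_heads=33)  # 2560 % 33 != 0
except ValueError as err:
    print(err)

# Caveat visible in the diff: pad_token_id is accepted in the signature but,
# as written, is neither stored on the config nor forwarded to super().__init__.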
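The _rope_scaling_validation helper is copied from LlamaConfig, but nothing in this __init__ sets self.rope_scaling or calls the validator. In the sketch below (illustrative, not from the commit) the value arrives via **kwargs, which PretrainedConfig stores as an attribute, and the check is invoked by hand:

# A valid rope_scaling dict has exactly two fields:
#   type   -> "linear" or "dynamic"
#   factor -> a float strictly greater than 1
config = HelpingAIConfig(rope_scaling={"type": "linear", "factor": 2.0})
config._rope_scaling_validation()  # passes silently

config = HelpingAIConfig(rope_scaling={"type": "dynamic", "factor": 1.0})
try:
    config._rope_scaling_validation()  # factor <= 1.0 -> ValueError
except ValueError as err:
    print(err)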