Recag committed
Commit 088503c
1 Parent(s): eb81c91

Upload config

Files changed (2)
  1. config.json +26 -0
  2. config.py +158 -0
config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "auto_map": {
+     "AutoConfig": "config.BharataiConfig"
+   },
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 11008,
+   "max_position_embeddings": 16384,
+   "model_type": "Bharatai",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 32,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "tie_word_embeddings": false,
+   "transformers_version": "4.36.0.dev0",
+   "use_cache": true,
+   "vocab_size": 32000
+ }
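
Because `auto_map` maps `AutoConfig` to `config.BharataiConfig`, loading this configuration through `transformers` requires `trust_remote_code=True` so that `config.py` from the repo is executed. A minimal sketch, assuming the two files above sit in a local checkout or Hub repo (the path below is a placeholder):

    from transformers import AutoConfig

    # trust_remote_code=True lets transformers import config.py and resolve
    # the "config.BharataiConfig" class named in auto_map.
    config = AutoConfig.from_pretrained(
        "path/to/this/repo",  # placeholder: local checkout or Hub repo id
        trust_remote_code=True,
    )
    print(config.model_type)           # "Bharatai"
    print(config.num_key_value_heads)  # 32 == num_attention_heads, i.e. plain MHA
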
config.py ADDED
@@ -0,0 +1,158 @@
+ # coding=utf-8
+ # Copyright 2022 BharatTech Tech Ecosystem Pvt. Ltd. and the HuggingFace Inc. team. All rights reserved.
+
+ """ Bharatai model configuration"""
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+ Bharatai_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+
+
+ class BharataiConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`BharataiModel`].
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 32000):
+             Vocabulary size of the Bharatai model. Defines the number of different tokens that can be represented
+             by the `inputs_ids` passed when calling [`BharataiModel`].
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 11008):
+             Dimension of the MLP representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer decoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer decoder.
+         num_key_value_heads (`int`, *optional*):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+             `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be
+             constructed by meanpooling all the original heads within that group. If it is not specified, it will
+             default to `num_attention_heads`.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 16384):
+             The maximum sequence length that this model might ever be used with. Bharatai supports up to 16384.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         pad_token_id (`int`, *optional*):
+             Padding token id.
+         bos_token_id (`int`, *optional*, defaults to 1):
+             Beginning of stream token id.
+         eos_token_id (`int`, *optional*, defaults to 2):
+             End of stream token id.
+         pretraining_tp (`int`, *optional*, defaults to 1):
+             Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
+             document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value
+             is necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
+             issue](https://github.com/pytorch/pytorch/issues/76232).
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether to tie weight embeddings.
+         rope_theta (`float`, *optional*, defaults to 10000.0):
+             The base period of the RoPE embeddings.
+         rope_scaling (`Dict`, *optional*):
+             Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
+             strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format
+             is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
+             `max_position_embeddings` to the expected new maximum.
+         attention_bias (`bool`, *optional*, defaults to `False`):
+             Whether to use a bias in the query, key, value and output projection layers during self-attention.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+     """
+
+     model_type = "Bharatai"
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     def __init__(
+         self,
+         vocab_size=32000,
+         hidden_size=4096,
+         intermediate_size=11008,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         num_key_value_heads=None,
+         hidden_act="silu",
+         max_position_embeddings=16384,
+         initializer_range=0.02,
+         rms_norm_eps=1e-6,
+         use_cache=True,
+         pad_token_id=None,
+         bos_token_id=1,
+         eos_token_id=2,
+         pretraining_tp=1,
+         tie_word_embeddings=False,
+         rope_theta=10000.0,
+         rope_scaling=None,
+         attention_bias=False,
+         attention_dropout=0.0,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+
+         # for backward compatibility
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+
+         self.num_key_value_heads = num_key_value_heads
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.pretraining_tp = pretraining_tp
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+         self._rope_scaling_validation()
+         self.attention_bias = attention_bias
+         self.attention_dropout = attention_dropout
+
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
+
+     def _rope_scaling_validation(self):
+         """
+         Validate the `rope_scaling` configuration.
+         """
+         if self.rope_scaling is None:
+             return
+
+         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+             raise ValueError(
+                 "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
+                 f"got {self.rope_scaling}"
+             )
+         rope_scaling_type = self.rope_scaling.get("type", None)
+         rope_scaling_factor = self.rope_scaling.get("factor", None)
+         if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
+             raise ValueError(
+                 f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+             )
+         if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
+             raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
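
For reference, a short sketch of using the class directly. The defaults reproduce config.json above, and the last call only demonstrates that `_rope_scaling_validation` runs at construction time; it assumes config.py is importable (e.g. run from the repo root):

    from config import BharataiConfig

    # Defaults mirror config.json; num_key_value_heads falls back to
    # num_attention_heads, i.e. Multi Head Attention rather than GQA/MQA.
    cfg = BharataiConfig()
    assert cfg.num_key_value_heads == cfg.num_attention_heads == 32

    # Valid rope_scaling: exactly two fields, type "linear" or "dynamic",
    # factor a float strictly greater than 1.
    cfg = BharataiConfig(rope_scaling={"type": "linear", "factor": 2.0})

    # Anything else raises ValueError, e.g. an int factor:
    try:
        BharataiConfig(rope_scaling={"type": "linear", "factor": 2})
    except ValueError as err:
        print(err)  # `rope_scaling`'s factor field must be a float > 1, got 2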