Xenova HF staff committed on
Commit
8f724ed
1 Parent(s): f5ba37d

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ onnx/model.onnx_data filter=lfs diff=lfs merge=lfs -text
config.json ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "stabilityai/stablelm-2-1_6b",
3
+ "architectures": [
4
+ "StableLMEpochForCausalLM"
5
+ ],
6
+ "attention_dropout": 0.0,
7
+ "auto_map": {
8
+ "AutoConfig": "configuration_stablelm_epoch.StableLMEpochConfig",
9
+ "AutoModelForCausalLM": "stabilityai/stablelm-2-1_6b--modeling_stablelm_epoch.StableLMEpochForCausalLM"
10
+ },
11
+ "bos_token_id": 100257,
12
+ "eos_token_id": 100257,
13
+ "hidden_act": "silu",
14
+ "hidden_size": 2048,
15
+ "initializer_range": 0.02,
16
+ "intermediate_size": 5632,
17
+ "max_position_embeddings": 4096,
18
+ "model_type": "stablelm_epoch",
19
+ "norm_eps": 1e-05,
20
+ "num_attention_heads": 32,
21
+ "num_heads": 32,
22
+ "num_hidden_layers": 24,
23
+ "num_key_value_heads": 32,
24
+ "rope_pct": 0.25,
25
+ "rope_theta": 10000,
26
+ "rotary_scaling_factor": 1.0,
27
+ "tie_word_embeddings": false,
28
+ "transformers_version": "4.37.2",
29
+ "use_cache": true,
30
+ "use_qkv_bias": true,
31
+ "vocab_size": 100352
32
+ }
configuration_stablelm_epoch.py ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023 Stability and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """ StableLM Epoch model configuration"""
15
+ from transformers import PretrainedConfig
16
+ from transformers.utils import logging
17
+
18
+
19
+ logger = logging.get_logger(__name__)
20
+
21
+
22
class StableLMEpochConfig(PretrainedConfig):
    r"""
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50_304):
            Vocabulary size of the StableLM model. Defines the number of different tokens that
            can be represented by the `inputs_ids` passed when calling [`StableLMEpochModel`].
        intermediate_size (`int`, *optional*, defaults to 6912):
            Dimension of the MLP representations.
        hidden_size (`int`, *optional*, defaults to 2560):
            Dimension of the decoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*, defaults to 32):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details checkout [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
            `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string).
        rope_pct (`float`, *optional*, defaults to 0.25):
            Percentage of hidden dimensions to allocate to rotary embeddings.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        max_position_embeddings (`int`, *optional*, defaults to 4096):
            The maximum sequence length that this model might ever be used with.
            Typically set this to something large just in case (e.g., 512 or 1024 or 2048).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing
            all weight matrices.
        norm_eps (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions
            (not used by all models). Only relevant if `config.is_decoder=True`.
        use_qkv_bias (`bool`, *optional*, defaults to `True`):
            Whether or not the model should use bias for qkv layers.
        bos_token_id (`int`, *optional*, defaults to 0):
            Token id used as the beginning-of-sequence token.
        eos_token_id (`int`, *optional*, defaults to 2):
            Token id used as the end-of-sequence token.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
    """
    model_type = "stablelm_epoch"
    # Cached key/values are an inference artifact, not part of the config hash.
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=50_304,
        intermediate_size=6912,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=32,
        hidden_act="silu",
        rope_pct=0.25,
        rope_theta=10_000,
        max_position_embeddings=4096,
        initializer_range=0.02,
        norm_eps=1.0e-5,
        use_cache=True,
        use_qkv_bias=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        attention_dropout: float = 0.0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.intermediate_size = intermediate_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.rope_pct = rope_pct
        self.rope_theta = rope_theta
        self.initializer_range = initializer_range
        self.norm_eps = norm_eps
        self.use_cache = use_cache
        self.use_qkv_bias = use_qkv_bias
        self.tie_word_embeddings = tie_word_embeddings
        self.attention_dropout = attention_dropout
        # Token ids and embedding tying are handled by the PretrainedConfig base,
        # which also consumes any remaining **kwargs (e.g. auto_map, architectures).
        super().__init__(
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
generation_config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 100257,
4
+ "eos_token_id": 100257,
5
+ "transformers_version": "4.37.2"
6
+ }
onnx/model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f1bbe7c22692872fa055cbe8b69c07f17d1ae034b018c952bc5c06662e20c627
3
+ size 1373206
onnx/model.onnx_data ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e9d581cb6e29ffd814df5c6025382aefc8e5b6e0b26857f93301f2299910decf
3
+ size 6577676288
onnx/model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:55768447a380e2b6cba9904c5d8009f811dfa0797ad450e84ee680d534fc5c90
3
+ size 1646968724
quantize_config.json ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "per_channel": false,
3
+ "reduce_range": false,
4
+ "per_model_config": {
5
+ "model": {
6
+ "op_types": [
7
+ "Cast",
8
+ "Identity",
9
+ "Range",
10
+ "Transpose",
11
+ "Div",
12
+ "Sub",
13
+ "ReduceMean",
14
+ "Equal",
15
+ "Expand",
16
+ "Pow",
17
+ "Squeeze",
18
+ "ConstantOfShape",
19
+ "Sigmoid",
20
+ "Concat",
21
+ "MatMul",
22
+ "Softmax",
23
+ "Reshape",
24
+ "Where",
25
+ "Constant",
26
+ "Add",
27
+ "Neg",
28
+ "Unsqueeze",
29
+ "Shape",
30
+ "Slice",
31
+ "Less",
32
+ "Mul",
33
+ "Gather",
34
+ "Sqrt"
35
+ ],
36
+ "weight_type": "QInt8"
37
+ }
38
+ }
39
+ }