v2ray committed on
Commit
cf0f89d
1 Parent(s): 9ec2bd8

Upload folder using huggingface_hub

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full list.
Files changed (50)
  1. .gitattributes +35 -0
  2. README.md +8 -0
  3. __init__.py +2 -0
  4. config.json +37 -0
  5. configuration_grok.py +151 -0
  6. generation_config.json +7 -0
  7. model-00001-of-00129.safetensors +3 -0
  8. model-00002-of-00129.safetensors +3 -0
  9. model-00003-of-00129.safetensors +3 -0
  10. model-00004-of-00129.safetensors +3 -0
  11. model-00005-of-00129.safetensors +3 -0
  12. model-00006-of-00129.safetensors +3 -0
  13. model-00007-of-00129.safetensors +3 -0
  14. model-00008-of-00129.safetensors +3 -0
  15. model-00009-of-00129.safetensors +3 -0
  16. model-00010-of-00129.safetensors +3 -0
  17. model-00011-of-00129.safetensors +3 -0
  18. model-00012-of-00129.safetensors +3 -0
  19. model-00013-of-00129.safetensors +3 -0
  20. model-00014-of-00129.safetensors +3 -0
  21. model-00015-of-00129.safetensors +3 -0
  22. model-00016-of-00129.safetensors +3 -0
  23. model-00017-of-00129.safetensors +3 -0
  24. model-00018-of-00129.safetensors +3 -0
  25. model-00019-of-00129.safetensors +3 -0
  26. model-00020-of-00129.safetensors +3 -0
  27. model-00021-of-00129.safetensors +3 -0
  28. model-00022-of-00129.safetensors +3 -0
  29. model-00023-of-00129.safetensors +3 -0
  30. model-00024-of-00129.safetensors +3 -0
  31. model-00025-of-00129.safetensors +3 -0
  32. model-00026-of-00129.safetensors +3 -0
  33. model-00027-of-00129.safetensors +3 -0
  34. model-00028-of-00129.safetensors +3 -0
  35. model-00029-of-00129.safetensors +3 -0
  36. model-00030-of-00129.safetensors +3 -0
  37. model-00031-of-00129.safetensors +3 -0
  38. model-00032-of-00129.safetensors +3 -0
  39. model-00033-of-00129.safetensors +3 -0
  40. model-00034-of-00129.safetensors +3 -0
  41. model-00035-of-00129.safetensors +3 -0
  42. model-00036-of-00129.safetensors +3 -0
  43. model-00037-of-00129.safetensors +3 -0
  44. model-00038-of-00129.safetensors +3 -0
  45. model-00039-of-00129.safetensors +3 -0
  46. model-00040-of-00129.safetensors +3 -0
  47. model-00041-of-00129.safetensors +3 -0
  48. model-00042-of-00129.safetensors +3 -0
  49. model-00043-of-00129.safetensors +3 -0
  50. model-00044-of-00129.safetensors +3 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,8 @@
+ ---
+ license: apache-2.0
+ library_name: transformers
+ ---
+
+ Unofficial dequantized weights of [grok-1](https://huggingface.co/xai-org/grok-1) in HF Transformers format.
+
+ The weights were converted with the [script here](https://gist.github.com/chu-tianxiang/ec310e15d56949fd0f351cb5f65ee7a1), run inside the [grok-1 repo](https://github.com/xai-org/grok-1). Since the dequantized weights take roughly twice as long to download as the original ones, it is recommended to download the original weights and convert them yourself.
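Since `config.json` routes `AutoModelForCausalLM` to the custom `modeling_grok.GrokForCausalLM` via `auto_map`, loading requires `trust_remote_code=True`. A minimal loading sketch, with a placeholder repo id standing in for this upload:

```python
# Minimal loading sketch. The repo id below is an assumption; substitute the
# actual Hugging Face repo this commit belongs to. The bf16 shards need on
# the order of 600 GB, so device_map/offloading is effectively mandatory.
import torch
from transformers import AutoModelForCausalLM

repo_id = "<user>/grok-1-hf"  # hypothetical repo id

# trust_remote_code is required because config.json maps AutoModelForCausalLM
# to the custom modeling_grok.GrokForCausalLM shipped with the weights.
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
    trust_remote_code=True,
)
```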
__init__.py ADDED
@@ -0,0 +1,2 @@
+ from .configuration_grok import *
+ from .modeling_grok import *
config.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "_name_or_path": "grok-ztmp",
+   "architectures": [
+     "GrokForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "attn_output_multiplier": 0.08838834764831845,
+   "auto_map": {
+     "AutoConfig": "configuration_grok.GrokConfig",
+     "AutoModelForCausalLM": "modeling_grok.GrokForCausalLM"
+   },
+   "bos_token_id": 1,
+   "embedding_multiplier_scale": 78.38367176906169,
+   "eos_token_id": 2,
+   "hidden_act": "gelu_new",
+   "hidden_size": 6144,
+   "initializer_range": 0.02,
+   "intermediate_size": 32768,
+   "max_position_embeddings": 8192,
+   "model_type": "grok",
+   "num_attention_heads": 48,
+   "num_experts_per_tok": 2,
+   "num_hidden_layers": 64,
+   "num_key_value_heads": 8,
+   "num_local_experts": 8,
+   "output_multiplier_scale": 0.5773502691896257,
+   "output_router_logits": false,
+   "pad_token_id": 0,
+   "rms_norm_eps": 1e-05,
+   "rope_theta": 10000.0,
+   "router_aux_loss_coef": 0.02,
+   "sliding_window": null,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.38.2",
+   "use_cache": true,
+   "vocab_size": 131072
+ }
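As an aside, the three scaling constants in `config.json` appear to be simple closed forms of the model dimensions; a quick check (an observation, not something the config documents):

```python
# Sanity check: the scaling constants in config.json match closed forms
# derived from the model dimensions (an observation, not documented).
import math

hidden_size, num_heads = 6144, 48
head_dim = hidden_size // num_heads  # 128

assert math.isclose(0.08838834764831845, 1 / math.sqrt(head_dim))  # attn_output_multiplier
assert math.isclose(78.38367176906169, math.sqrt(hidden_size))     # embedding_multiplier_scale
assert math.isclose(0.5773502691896257, 1 / math.sqrt(3))          # output_multiplier_scale
```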
configuration_grok.py ADDED
@@ -0,0 +1,151 @@
+ # coding=utf-8
+ # Copyright 2023 Mixtral AI and the HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ Grok model configuration"""
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class GrokConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`GrokModel`].
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 131072):
+             Vocabulary size of the Grok model. Defines the number of different tokens that can be represented by the
+             `inputs_ids` passed when calling [`GrokModel`].
+         hidden_size (`int`, *optional*, defaults to 6144):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 32768):
+             Dimension of the MLP representations.
+         num_hidden_layers (`int`, *optional*, defaults to 64):
+             Number of hidden layers in the Transformer decoder.
+         num_attention_heads (`int`, *optional*, defaults to 48):
+             Number of attention heads for each attention layer in the Transformer decoder.
+         num_key_value_heads (`int`, *optional*, defaults to 8):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+             `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+             by mean-pooling all the original heads within that group. For more details, check out [this
+             paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `8`.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 4096):
+             The maximum sequence length that this model might ever be used with.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-05):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         pad_token_id (`int`, *optional*):
+             The id of the padding token.
+         bos_token_id (`int`, *optional*, defaults to 1):
+             The id of the "beginning-of-sequence" token.
+         eos_token_id (`int`, *optional*, defaults to 2):
+             The id of the "end-of-sequence" token.
+         tie_word_embeddings (`bool`, *optional*, defaults to `True`):
+             Whether the model's input and output word embeddings should be tied.
+         rope_theta (`float`, *optional*, defaults to 100000.0):
+             The base period of the RoPE embeddings.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+         num_experts_per_tok (`int`, *optional*, defaults to 2):
+             The number of experts to route per token; can also be interpreted as the `top-k` routing
+             parameter.
+         num_local_experts (`int`, *optional*, defaults to 8):
+             Number of experts per Sparse MLP layer.
+         output_router_logits (`bool`, *optional*, defaults to `False`):
+             Whether or not the router logits should be returned by the model. Enabling this will also
+             allow the model to output the auxiliary loss. See [here]() for more details.
+         router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
+             The aux loss factor for the total loss.
+
+     """
+
+     model_type = "grok"
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     def __init__(
+         self,
+         vocab_size=131072,
+         hidden_size=6144,
+         intermediate_size=32768,
+         num_hidden_layers=64,
+         num_attention_heads=48,
+         num_key_value_heads=8,
+         hidden_act="silu",
+         max_position_embeddings=4096,
+         initializer_range=0.02,
+         rms_norm_eps=1e-5,
+         use_cache=True,
+         pad_token_id=0,
+         bos_token_id=1,
+         eos_token_id=2,
+         tie_word_embeddings=True,
+         rope_theta=1e5,
+         attention_dropout=0.0,
+         num_experts_per_tok=2,
+         num_local_experts=8,
+         output_router_logits=False,
+         router_aux_loss_coef=0.001,
+         output_multiplier_scale=0.5773502691896257,
+         embedding_multiplier_scale=78.38367176906169,
+         attn_output_multiplier=0.08838834764831845,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+
+         # for backward compatibility
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+
+         self.num_key_value_heads = num_key_value_heads
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.attention_dropout = attention_dropout
+
+         self.num_experts_per_tok = num_experts_per_tok
+         self.num_local_experts = num_local_experts
+         self.output_router_logits = output_router_logits
+         self.router_aux_loss_coef = router_aux_loss_coef
+         self.output_multiplier_scale = output_multiplier_scale
+         self.embedding_multiplier_scale = embedding_multiplier_scale
+         self.attn_output_multiplier = attn_output_multiplier
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
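A short usage sketch for `GrokConfig`. Note that the shipped `config.json` overrides several class defaults (`hidden_act`, `max_position_embeddings`, `rope_theta`), so the checkpoint's config should be loaded rather than relying on `GrokConfig()` defaults:

```python
# Usage sketch for GrokConfig defined above. The shipped config.json
# overrides hidden_act ("gelu_new" vs "silu"), max_position_embeddings
# (8192 vs 4096) and rope_theta (10000.0 vs 1e5).
from configuration_grok import GrokConfig

config = GrokConfig(
    hidden_act="gelu_new",
    max_position_embeddings=8192,
    rope_theta=10000.0,
)
assert config.hidden_size // config.num_attention_heads == 128  # head_dim
```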
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "pad_token_id": 0,
+   "transformers_version": "4.38.2"
+ }
model-00001-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:739afdc4fe7c736298f1056131da2a27d5e08e2d9d3654470866d11339e71d08
+ size 4605544112
model-00002-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2da2697d8179839e2c4eab0df9f9af6bfe1ac0f022db6ee942e8e472d3b72e2b
+ size 4831839776
model-00003-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c619d81a1e82abfaf498cd6cb4fefd509efccaca6e927fc62cc5060d9f6d6361
+ size 4605643240
model-00004-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1f77124ab7ec6a9fce4a42eef6908136fd6ef684a78bb157819d6215bdee9790
+ size 4831839776
model-00005-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:34babd1fbb246f160ee65e8d66963fa6dabe7d67bd6442f6a3b7e1120908d5e8
+ size 4605643248
model-00006-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c9741b2416238bac11d893af8c13e5f7bd824cfbbda7e53806e2e7432a42bd7c
+ size 4831839776
model-00007-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b119ac6352864065f756bdd989b87a365dcedbfbd2285b306b2a81ef0a194901
+ size 4605643240
model-00008-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:099d554f67f605c85f3a9e90cc6d781b252c46b2a4cc6c13fa3573ac13dd5671
+ size 4831839776
model-00009-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:11a8593548b70f975b3b08faec2c0a51ee2426a40e3590dcc20dd3b103075b39
+ size 4605643240
model-00010-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5e450cb6a0a6d23b2cc29cba9b0bdcdff2bd088fad8a5dc7064276640f87de16
+ size 4831839776
model-00011-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f43f5d81e191360353b909503bb5257872884dc810c09331ad3ffcdd3e896356
+ size 4605643248
model-00012-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b4e3e6ecd078f12184f9b9d2369215cbaf6d9dbcd47f556f9e0c720efb1c848b
+ size 4831839776
model-00013-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eac368881aedb951ee3eae29809f3c2bd8440f9ce7e76d74f2ff43c06377a1e7
+ size 4605643240
model-00014-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:16ff28e12db93b60248c80d47703b75a8ae8cc99fdf6dc28e2972ccda76aab8c
+ size 4831839776
model-00015-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:772707d8d2b363e9fe2ece49d26b099f57d7d3be0d0c63b85d7d399317391af6
+ size 4605643240
model-00016-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c8d31ef102baf09a2b05f466eb411333070070c8cc6e0e878c503ad53bb3a26d
+ size 4831839776
model-00017-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:14b32ffd8b1fb73af472141f2144b8c2832a0d05bd8c3c7719268082d3126648
+ size 4932602224
model-00018-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4e16a2317e13ff084cf5db91213c073d1228aff1c05eb5e05a5073602eee1b75
+ size 4907534104
model-00019-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e4e05340708865242537b03a520a934f91dac42cadca279420d1510ffbbbf714
+ size 4932602224
model-00020-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3f0469775fc7cff1e86c415d04a130ee6e5d279ac40ff65971b8062328036cad
+ size 4907534104
model-00021-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1c3bcac033018cd66c6b61f4c1694f5bb3f8f82908482c1d531ccb4e7176a48c
+ size 4932602216
model-00022-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f39177735f9e11cc0547eaf56af0b7d2977864d7caae927bbb29aca14aa63ed
+ size 4907534120
model-00023-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:13661a056515aaada2be4620a02784080d7fbbbc79c190b19b6aebe7613951b1
+ size 4932602248
model-00024-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:755229210823b40a3241880975d6fc51771b7cc3c36e54219fb44dbed48cb2c2
+ size 4907534120
model-00025-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a8119c0587696df0b7a9c74758df30587657bd1cd766d105020c590c9bd035c0
+ size 4932602248
model-00026-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c99121391ec30bb015010fbefc321f5fb7ca3bb58327261fb51e92edde042198
+ size 4907534120
model-00027-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e6d3e3effb57f725f3d078e2fffdca481bcd8bd9f0719bd88fc6e00ff60db9de
+ size 4932602248
model-00028-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a6924fd54f346d241b7e747d6118fac999acb8ed4253b5d156555ce28df9c9e2
+ size 4907534120
model-00029-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:54c8fcf7dfe74ffe480cc2673485a1ffe3c21dea79d109a27294118d511221db
+ size 4932602248
model-00030-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6a5da644297e20c4d3789b83ad856b0529940352e29f5b53999aaa5ad420b780
+ size 4907534120
model-00031-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3f0263c63393d929a3088eafaf071e30d53e4786c23e37af955e910693f0a482
+ size 4932602248
model-00032-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f25b07be35d6b22d2cefddef443f020ecdb6f68a29fa0d0821c68b5ba471f126
+ size 4907534120
model-00033-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b0ff57efd777bf516e8501f2fe41afac4f5b7cce723c246586238c33b9ee8f0
+ size 4932602248
model-00034-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dd742e0639c16b9e79b1360a23d8823e661e1e36ac21581dc4da9bca1f482e03
+ size 4907534120
model-00035-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a8646bc069c02ce86375f50572bcbcb65299a8f5a7c34be1164d983e9478e5b3
+ size 4932602248
model-00036-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e376e4fb2c76cf03c335a7904872306935c76b3b69af8e6d4f6ae9a2fd87b6cf
+ size 4907534120
model-00037-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:55c188265cc7cc166ef111d1e92a4392cc2ab993d84046bae390bfdc5be28fb2
+ size 4932602248
model-00038-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:08309696ef312a305b8f25bb19f2781f086d1bd9b847a5232e1357482fb015bd
+ size 4907534120
model-00039-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:25bf348553f9229c981a48155bcccece55e9d7e7771de6ead7e512173c19a8bb
+ size 4932602248
model-00040-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:08c4f2b7b66e2b58060f3e42bca18ca397e61d806137f88448219e924f5b3a97
+ size 4907534120
model-00041-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:26c6502621af02d5354b715f0694021e26d9691a40b410d4a64899619993db4a
+ size 4932602248
model-00042-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f61c9740cc478a478ce13122d71a91c426128d61b10effa0d7272b0b025a969a
+ size 4907534120
model-00043-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f6f9b9088299b99cd7fda366c803886bebcd58157d74456a0ae907c96199073c
+ size 4932602248
model-00044-of-00129.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cd1e334af06cab85e057b1be0ff963070991c36742a1ae3787c8465a9d22bce6
+ size 4907534120
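Each `*.safetensors` entry above is a Git LFS pointer, not the weights themselves; the 129 bf16 shards together come to roughly 600 GB. A hedged download sketch using `huggingface_hub` (the repo id is a placeholder):

```python
# Download sketch using huggingface_hub. The repo id is a placeholder;
# substitute the repo this commit was pushed to. snapshot_download
# resolves each LFS pointer above to its actual shard content.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="<user>/grok-1-hf",  # hypothetical repo id
    allow_patterns=["*.json", "*.py", "*.safetensors"],
)
print(local_dir)
```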