megatron
config.json
ADDED
@@ -0,0 +1,51 @@
+{
+    "_from_model_config": true,
+    "architectures": [
+        "YuanForCausalLM"
+    ],
+    "auto_map": {
+        "AutoConfig": "configuration_yuan.YuanConfig",
+        "AutoModelForCausalLM": "yuan_hf_model.YuanForCausalLM"
+    },
+    "tokenizer_class": "YuanTokenizer",
+    "hidden_act": "silu",
+    "hidden_size": 2048,
+    "initializer_range": 0.02,
+    "intermediate_size": 8192,
+    "max_position_embeddings": 4096,
+    "model_type": "yuan",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 24,
+    "rms_norm_eps": 1e-06,
+    "dropout": 0,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "transformers_version": "4.30.0.dev0",
+    "use_cache": true,
+    "causal_mask": true,
+    "use_flash_attention": true,
+    "reset_attention_mask": true,
+    "reset_position_ids": true,
+    "use_loss_mask": false,
+    "use_moe": true,
+    "moe_config": {
+        "moe_num_experts": 32,
+        "moe_top_k": 2,
+        "ffn_hidden_size": 8192,
+        "norm_topk_prob": true,
+        "gated_linear_unit": true
+    },
+    "eod_token": 77185,
+    "sep_token": 77187,
+    "eod_token_id": 77185,
+    "sep_token_id": 77185,
+    "pad_token_id": 77185,
+    "bos_token_id": 77185,
+    "eos_token_id": 77185,
+    "mask_token_id": 77185,
+    "vocab_size": 135040,
+    "attention_projection_size": 4096,
+    "reset_attention_mask": false,
+    "output_router_logits": true
+}
+
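A note on loading: the auto_map entries point transformers at the custom classes shipped in this repo, so the config only resolves with trust_remote_code=True. A minimal loading sketch, assuming the files above are checked out locally (the ./yuan-megatron path is hypothetical):

# Minimal loading sketch -- assumes the files above sit in ./yuan-megatron
# (hypothetical local path) with yuan_hf_model.py alongside them.
from transformers import AutoConfig

# trust_remote_code lets transformers follow auto_map to
# configuration_yuan.YuanConfig instead of a built-in config class.
config = AutoConfig.from_pretrained("./yuan-megatron", trust_remote_code=True)

print(config.model_type)               # "yuan"
print(config.moe_config["moe_top_k"])  # 2 -- extra JSON keys land as plain attributes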
configuration_yuan.py
ADDED
@@ -0,0 +1,44 @@
+
+from transformers.configuration_utils import PretrainedConfig
+
+
+class YuanConfig(PretrainedConfig):
+    model_type = "yuan"
+    keys_to_ignore_at_inference = ["past_key_values"]
+
+    def __init__(
+        self,
+        vocab_size=135040,
+        hidden_size=2048,
+        intermediate_size=8192,
+        num_hidden_layers=24,
+        num_attention_heads=32,
+        hidden_act="silu",
+        model_max_length=8192,
+        initializer_range=0.02,
+        rms_norm_eps=1e-6,
+        use_cache=True,
+        pad_token_id=77185,
+        bos_token_id=77185,
+        eos_token_id=77185,
+        tie_word_embeddings=True,
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        self.model_max_length = model_max_length
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.hidden_act = hidden_act
+        self.initializer_range = initializer_range
+        self.rms_norm_eps = rms_norm_eps
+        self.use_cache = use_cache
+        super().__init__(
+            pad_token_id=pad_token_id,
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            tie_word_embeddings=tie_word_embeddings,
+            **kwargs,
+        )
+
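Two defaults here are worth flagging: num_attention_heads=32 and model_max_length=8192 differ from the 16 heads and 4096 positions in config.json; the JSON values win at load time. Everything the signature does not name (use_moe, moe_config, use_flash_attention, ...) falls through **kwargs to PretrainedConfig, which stores unknown keys as plain attributes. A construction sketch:

# Construction sketch: YuanConfig forwards unknown keys to PretrainedConfig
# via **kwargs, so MoE settings become attributes without being declared.
from configuration_yuan import YuanConfig

cfg = YuanConfig(
    num_attention_heads=16,  # config.json overrides the default of 32
    use_moe=True,            # absorbed by **kwargs
    moe_config={"moe_num_experts": 32, "moe_top_k": 2},
)
assert cfg.use_moe and cfg.moe_config["moe_top_k"] == 2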
iter_0001477/mp_rank_00_000/model_optim_rng.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e26d1367a3a5eae075bad849d8d651b8cb4a61f7831b9c376ddf409d7989762
+size 10469760007
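What got committed here is not the weights but a Git LFS pointer: three lines giving the spec version, the blob's sha256, and its byte size (~10.5 GB); git-lfs swaps in the real file on checkout. A sketch of reading such a pointer:

# Sketch of parsing a Git LFS pointer file like the one above; the actual
# blob is fetched separately by the git-lfs smudge filter.
def parse_lfs_pointer(path):
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = parse_lfs_pointer("iter_0001477/mp_rank_00_000/model_optim_rng.pt")
print(ptr["oid"])   # sha256:2e26d136...
print(ptr["size"])  # 10469760007 bytes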
iter_0001477/mp_rank_00_001/model_optim_rng.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:067dff491248a5c43434fe329632d0c31d73322e0617549d063183549b3b7c27
+size 9916635714
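The two directory names follow Megatron-LM's checkpoint layout: mp_rank_{tp:02d} when only tensor parallelism is used, mp_rank_{tp:02d}_{pp:03d} once pipeline parallelism is on, so mp_rank_00_000 and mp_rank_00_001 suggest tensor-parallel rank 0 at pipeline stages 0 and 1. A sketch of that convention (the function name and signature are illustrative, not Megatron's actual API):

import os

# Illustrative reconstruction of Megatron-LM's checkpoint path convention.
def checkpoint_name(root, iteration, tp_rank, pp_rank=None):
    directory = f"iter_{iteration:07d}"
    if pp_rank is None:
        sub = f"mp_rank_{tp_rank:02d}"          # no pipeline parallelism
    else:
        sub = f"mp_rank_{tp_rank:02d}_{pp_rank:03d}"  # with pipeline stages
    return os.path.join(root, directory, sub, "model_optim_rng.pt")

# Reproduces the two paths added in this commit:
print(checkpoint_name(".", 1477, 0, 0))  # ./iter_0001477/mp_rank_00_000/model_optim_rng.pt
print(checkpoint_name(".", 1477, 0, 1))  # ./iter_0001477/mp_rank_00_001/model_optim_rng.pt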
latest_checkpointed_iteration.txt
ADDED
@@ -0,0 +1 @@
+1477
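This tracker file is how Megatron-LM resumes training: at startup it reads the file, parses the iteration (or the literal string "release" for a converted release checkpoint), and formats it back into the iter_* directory name. A resume sketch under that assumption:

# Resume sketch: read the tracker, then derive the checkpoint directory.
with open("latest_checkpointed_iteration.txt") as f:
    tag = f.read().strip()

iteration = 0 if tag == "release" else int(tag)
ckpt_dir = "release" if tag == "release" else f"iter_{iteration:07d}"
print(ckpt_dir)  # iter_0001477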