Text Generation
scaling
GregorZiegltrumAA committed
Commit 890a986
1 Parent(s): 8e83520
config.yml ADDED
@@ -0,0 +1,74 @@
+ optimizer:
+   allreduce_bucket_size: 500000000
+   beta1: 0.9
+   beta2: 0.95
+   debug_log: false
+   eps: 1e-08
+   gradient_clipping: 1.0
+   zero: true
+   zero_save_static: false
+ topology:
+   activation_checkpointing_type: disabled
+   global_batch_size: 1024
+   gradient_accumulation_steps: 4
+   micro_batch_size: 2
+   model_parallel_size: 1
+   pipe_parallel_size: 1
+   pipe_partition_method: balanced
+   pipe_partition_overwrite: null
+   sequence_parallel: false
+ trainer:
+   seed: 42
+   train_iterations: 72000
+ training:
+   allow_missing_params_in_optimizer: true
+   training_groups:
+   - group_name: param_group
+     independent_weight_decay: false
+     learning_rate_scheduler:
+       learning_rate: 0.0006
+       learning_rate_decay_iters: 72000
+       learning_rate_decay_style: cosine
+       learning_rate_minimum: 6e-05
+       learning_rate_warmup_steps: 500
+     parameters_exclude: null
+     weight_decay: 0.1
+ transformer_architecture:
+   attention_bias: false
+   attention_num_kv_heads: null
+   attention_qkv_in_one: true
+   dropout_after_attention: 0.0
+   dropout_after_mlp: 0.0
+   dropout_attention_probs: 0.0
+   dropout_embedding: 0.0
+   dropout_image_encoder: 0.0
+   hidden_size: 2048
+   image_encoder: false
+   key_query_norm: false
+   layernorm:
+     layernorm_epsilon: 1e-05
+     optimization_type: torch
+   local_attention_window_size: null
+   masked_softmax:
+     kernel: flash_attention
+     scale: 1.0
+     softmax_in_fp32: false
+   mlp_bias: false
+   mlp_factor: 2.6640625
+   mlp_type: swiglu
+   norm_type: rms
+   num_attention_heads: 16
+   num_layers: 16
+   num_local_attention_heads: 0
+   precision: bfloat16
+   relative_position_embedding_type: rotary_complex
+   reset_attention_mask: false
+   reset_position_ids: false
+   rotary_embedding_base: 10000
+   rotary_percentage: 1.0
+   sequence_length: 4096
+   umup:
+     enable: false
+   vocab_file: null
+   vocab_size: 65536
+   weight_tying: false
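The topology block pins down the data-parallel layout implicitly: with global_batch_size 1024, micro_batch_size 2, and gradient_accumulation_steps 4 (and model and pipe parallelism both 1), the run requires 1024 / (2 * 4) = 128 data-parallel ranks, and at sequence_length 4096 the full 72000-iteration schedule covers roughly 3.0e11 tokens. A minimal sketch deriving both numbers from the file (PyYAML assumed):

```python
import yaml  # PyYAML, assumed available

with open("config.yml") as f:
    cfg = yaml.safe_load(f)

topo = cfg["topology"]
# global batch = micro batch * gradient-accumulation steps * data-parallel size,
# so the data-parallel degree follows from the other three values.
dp_size = topo["global_batch_size"] // (
    topo["micro_batch_size"] * topo["gradient_accumulation_steps"]
)

seq_len = cfg["transformer_architecture"]["sequence_length"]
tokens = cfg["trainer"]["train_iterations"] * topo["global_batch_size"] * seq_len

print(dp_size)          # 128 data-parallel ranks
print(f"{tokens:.2e}")  # ~3.02e+11 tokens over the full schedule
```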
model_state_layer_0_EmbeddingInput.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:38dc92b46f631e35a25c3a9dc8659ddf42f0e4ec463a3da960f3f69fd158efcd
+ size 268436939
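Each weight shard is tracked with Git LFS, so the repository itself stores only a three-line pointer file (spec version, SHA-256 object id, byte size) rather than the tensor data. A sketch of checking a fetched shard against its pointer, with illustrative local paths:

```python
import hashlib

def verify_lfs_object(pointer_path: str, blob_path: str) -> bool:
    """Check a downloaded blob against its Git LFS pointer file."""
    # Pointer files are "key value" lines: version, oid sha256:<hex>, size <bytes>.
    with open(pointer_path) as f:
        fields = dict(line.strip().split(" ", 1) for line in f)
    expected_oid = fields["oid"].split(":", 1)[1]
    expected_size = int(fields["size"])

    sha, size = hashlib.sha256(), 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)
            size += len(chunk)
    return sha.hexdigest() == expected_oid and size == expected_size
```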
model_state_layer_10_TransformerLayer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bbe7c937fa24e0863cb6b11e4afcb13377db26e6f052caf6210f2f5d9482952c
+ size 100609197
model_state_layer_11_TransformerLayer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ec6fb819ec1593dfaf937353fc849d343a6dcc80683af02afdcc8f3540dbb87a
+ size 100609197
model_state_layer_12_TransformerLayer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ee2589abc2ee8df96798b0fdf180b7a030b9568203e4e4ef1f0e1952435915ec
+ size 100609197
model_state_layer_13_TransformerLayer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b9a199abe2b89afc587dc45caa3b0bd79d94a62d51c298ff35bb82fc9d0fa55b
+ size 100609197
model_state_layer_14_TransformerLayer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:91ba987139155d09851fe6ac3836d81363ab84f36dd19e0c5f5130f32f5ecafe
+ size 100609197
model_state_layer_15_TransformerLayer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bae1f18a72445940016cb5b2a28da0a837b07b17a6410790c8a4191e34ed90e4
+ size 100609197
model_state_layer_16_TransformerLayer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fccd055ba578d8058e124898c705a63a525a2cb4ef72a6c1026fdc5e21243d5e
+ size 100609197
model_state_layer_17_LayerNormWrapper.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:61d831059922884b7a50bba9ad22bf3dc6962ecd5e98f16a44f34d91d9d1b6ba
+ size 5554
model_state_layer_18_TransformerLMHead.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c953d8a14a5bd34d08654ee537b23abe8be51fbff8302eff385a1f6e6eaad001
+ size 268436904
model_state_layer_1_TransformerLayer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d04c7a0bc2da96f8b8be9784cf81ca64d3852a8a35be1e393efaf14d73c69206
+ size 100609186
model_state_layer_2_TransformerLayer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4c66177c579445486748bd252c394edc81b7611963cfd8c8a314c6ae0ce90a5b
+ size 100609186
model_state_layer_3_TransformerLayer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c3c19e34736a28015e9740d95bb685c669cfc0d27d1c18aaae05246271f348eb
+ size 100609186
model_state_layer_4_TransformerLayer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4cd33519bb34f2710962ccc5cf059c11b413b5a02929d73c774f5980496114ca
+ size 100609186
model_state_layer_5_TransformerLayer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:31689cdeb3e79910865397da417912678c62dedbc1c9aae9934102c21a13f3c8
+ size 100609186
model_state_layer_6_TransformerLayer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2d80982db4b2630d3df3841bffcc65c8a2d0c9f864511e2f16b72305ffd1dcd4
+ size 100609186
model_state_layer_7_TransformerLayer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8e842df68b415519a5032f9da049147ec256c0ab86376740d38e300b7a8c299c
+ size 100609186
model_state_layer_8_TransformerLayer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:21415a8cbd331fbaa107d2dea33e0c8320dfa75faa1fbd161ac0ba6dabcec677
+ size 100609186
model_state_layer_9_TransformerLayer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:99e2fff19fe5c121c6aba331f57718fd696fe4e4e5b182b6ed7bc0da06d663c9
+ size 100609186
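The checkpoint is sharded one file per pipeline module: layer 0 is the input embedding, layers 1 through 16 are the transformer blocks (matching num_layers: 16 in config.yml), layer 17 is the final norm, and layer 18 the LM head. Once the LFS objects are fetched, the shards can be collected in index order with a sketch like this (PyTorch assumed):

```python
import re
from pathlib import Path

import torch

def load_shards(ckpt_dir: str) -> dict[int, dict]:
    """Map pipeline-layer index to its state dict, e.g. 0 -> EmbeddingInput."""
    shards = {}
    for path in Path(ckpt_dir).glob("model_state_layer_*.pt"):
        index = int(re.match(r"model_state_layer_(\d+)_", path.name).group(1))
        shards[index] = torch.load(path, map_location="cpu")
    return dict(sorted(shards.items()))  # indices 0..18 for this checkpoint
```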
vocab.json ADDED
The diff for this file is too large to render.