rabiulawal committed
Commit b4b31a2 · verified · 1 Parent(s): cafd0bb

Add files using upload-large-folder tool
config.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "architectures": [
+     "Emu3ForCausalLM"
+   ],
+   "attention_dropout": 0.1,
+   "auto_map": {
+     "AutoConfig": "BAAI/Emu3-Stage1--configuration_emu3.Emu3Config",
+     "AutoModelForCausalLM": "BAAI/Emu3-Stage1--modeling_emu3.Emu3ForCausalLM"
+   },
+   "boi_token_id": 151852,
+   "bos_token_id": 151849,
+   "eof_token_id": 151847,
+   "eoi_token_id": 151853,
+   "eol_token_id": 151846,
+   "eos_token_id": 151850,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "image_area": 65536,
+   "img_token_id": 151851,
+   "initializer_range": 0.02,
+   "intermediate_size": 14336,
+   "max_position_embeddings": 4300,
+   "model_type": "Emu3",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 8,
+   "pad_token_id": 151643,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 1000000.0,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.51.3",
+   "use_cache": false,
+   "vocab_size": 184622
+ }
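The auto_map above resolves the config and model classes from remote code hosted under BAAI/Emu3-Stage1, so loading this checkpoint requires trust_remote_code=True. A minimal loading sketch, assuming these files sit in a local directory (the "./emu3-stage1-sft" path is illustrative, not part of this repo):

# Sketch: load the checkpoint described by config.json.
# Assumption: the files from this commit are in "./emu3-stage1-sft" (name is illustrative).
import torch
from transformers import AutoConfig, AutoModelForCausalLM

ckpt = "./emu3-stage1-sft"

# auto_map points at configuration_emu3.py / modeling_emu3.py under BAAI/Emu3-Stage1,
# so trust_remote_code=True is needed for AutoConfig / AutoModelForCausalLM to resolve them.
config = AutoConfig.from_pretrained(ckpt, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    ckpt,
    config=config,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" above
    trust_remote_code=True,
)
model.eval()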
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 151849,
+   "eos_token_id": 151850,
+   "pad_token_id": 151643,
+   "transformers_version": "4.51.3"
+ }
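generation_config.json only pins the special-token ids; model.generate() picks it up automatically when the model is loaded from this directory, but it can also be inspected explicitly. A short sketch under the same assumed local path:

# Sketch: inspect the generation defaults shipped with this commit
# (the "./emu3-stage1-sft" path is illustrative).
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("./emu3-stage1-sft")
print(gen_cfg.bos_token_id, gen_cfg.eos_token_id, gen_cfg.pad_token_id)
# 151849 151850 151643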
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0bcf7ee3ce4748103b6f82cd71aacc9afddf3d2ed332d7e902b6f1c5cfc0cbd8
+ size 4884766656
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:170bf2fc78db49c910245e236ca318210d897be320a6ae89fad10ff5038180c4
+ size 4999819320
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:044e446c4bfd1cf59b5419ce627c2d857650737ef280ef0e8dfa34b6a2500838
+ size 4915916184
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:69896f7c15d49e10155971654a23ac5c695e2366c764a8135ddb693b81897aa4
+ size 2183554760
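The four entries above are Git LFS pointer files (spec version, sha256 oid, byte size); the actual shard bytes are fetched by LFS. A sketch for checking a downloaded shard against its pointer, assuming the shard file is in the current directory:

# Sketch: verify a downloaded shard against its LFS pointer (oid and size taken
# from the pointer for model-00001-of-00004.safetensors above).
import hashlib
import os

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    # Stream the file so multi-GB shards do not need to fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

path = "model-00001-of-00004.safetensors"
expected_oid = "0bcf7ee3ce4748103b6f82cd71aacc9afddf3d2ed332d7e902b6f1c5cfc0cbd8"
expected_size = 4884766656

assert os.path.getsize(path) == expected_size, "size mismatch"
assert sha256_of(path) == expected_oid, "sha256 mismatch"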
model.safetensors.index.json ADDED
@@ -0,0 +1,298 @@
+ {
+   "metadata": {
+     "total_size": 16984023040
+   },
+   "weight_map": {
+     "lm_head.weight": "model-00004-of-00004.safetensors",
+     "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.18.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.18.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.28.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.28.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.29.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.29.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.29.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.30.input_layernorm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.30.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.30.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.30.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.31.input_layernorm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.31.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.31.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.31.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.31.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.31.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.31.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.31.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.31.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.7.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.norm.weight": "model-00004-of-00004.safetensors"
+   }
+ }
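The index maps every parameter name to the shard that stores it; transformers consults it when loading the sharded checkpoint, and it can also be used directly to read a single tensor without materializing the whole model. A sketch assuming the shards and index sit in the same illustrative local directory as above:

# Sketch: look up one tensor via the weight map and read it from its shard
# (assumption: files from this commit are in "./emu3-stage1-sft").
import json
import os
from safetensors import safe_open

ckpt = "./emu3-stage1-sft"
with open(os.path.join(ckpt, "model.safetensors.index.json")) as f:
    index = json.load(f)

name = "lm_head.weight"
shard = index["weight_map"][name]  # -> "model-00004-of-00004.safetensors"
with safe_open(os.path.join(ckpt, shard), framework="pt") as sf:
    tensor = sf.get_tensor(name)
print(tensor.shape)  # expected (vocab_size, hidden_size) = (184622, 4096) per config.json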
rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0a8fd8f8fa25dc9c72ffe5c33017ac5bd1c7f0e3eab2ffa0afd2a70dfb67181a
+ size 15984
rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f6506b9dac8338674b040d0855b6626c7912ca1febae648f81a6d00bddc22b59
+ size 15984
rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:802fd73a5ba8c9b04ca3ff466d5f6c33709ceadbfd94e5be69773677565f60b4
+ size 15984
rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eccedd7855b66a982c31c5faaa21b552862e3b52da2db1c07e7151d7d433533c
+ size 15984
rng_state_4.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:381af05376596ba601d462616703e8e9f83ef675a567988f3675a2493e7c5762
+ size 15984
rng_state_5.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8457eab8e7f7ec705a74ddf5df53ebbdf0b137549fbeb9032711b54b076d32f5
+ size 15984
rng_state_6.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:02cc7f48ad42c221aae2dec8d063b6b29c574a23a3d8a6896b76ae9842672415
+ size 15984
rng_state_7.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2be3b63e5e7b67b8fe620dce983474903e105bc6bb95efff0690e65f0e7ab4d8
+ size 15984
trainer_state.json ADDED
@@ -0,0 +1,1266 @@
1
+ {
2
+ "best_global_step": null,
3
+ "best_metric": null,
4
+ "best_model_checkpoint": null,
5
+ "epoch": 1.9496127404055348,
6
+ "eval_steps": 100,
7
+ "global_step": 5600,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.017404925593943087,
14
+ "grad_norm": 4.2433902126025425,
15
+ "learning_rate": 9.8e-05,
16
+ "loss": 3.6204,
17
+ "step": 50
18
+ },
19
+ {
20
+ "epoch": 0.034809851187886175,
21
+ "grad_norm": 3.0786203091123565,
22
+ "learning_rate": 9.999928647255986e-05,
23
+ "loss": 2.5602,
24
+ "step": 100
25
+ },
26
+ {
27
+ "epoch": 0.034809851187886175,
28
+ "eval_loss": 2.363542079925537,
29
+ "eval_runtime": 14.092,
30
+ "eval_samples_per_second": 70.962,
31
+ "eval_steps_per_second": 2.271,
32
+ "step": 100
33
+ },
34
+ {
35
+ "epoch": 0.05221477678182926,
36
+ "grad_norm": 2.489798826862787,
37
+ "learning_rate": 9.999708736748881e-05,
38
+ "loss": 2.2577,
39
+ "step": 150
40
+ },
41
+ {
42
+ "epoch": 0.06961970237577235,
43
+ "grad_norm": 4.227397206704295,
44
+ "learning_rate": 9.999340245361986e-05,
45
+ "loss": 2.065,
46
+ "step": 200
47
+ },
48
+ {
49
+ "epoch": 0.06961970237577235,
50
+ "eval_loss": 2.0204052925109863,
51
+ "eval_runtime": 14.0707,
52
+ "eval_samples_per_second": 71.07,
53
+ "eval_steps_per_second": 2.274,
54
+ "step": 200
55
+ },
56
+ {
57
+ "epoch": 0.08702462796971543,
58
+ "grad_norm": 1.5627603609182088,
59
+ "learning_rate": 9.998823184156712e-05,
60
+ "loss": 1.9504,
61
+ "step": 250
62
+ },
63
+ {
64
+ "epoch": 0.10442955356365852,
65
+ "grad_norm": 2.0849274528531834,
66
+ "learning_rate": 9.998157568654259e-05,
67
+ "loss": 1.9106,
68
+ "step": 300
69
+ },
70
+ {
71
+ "epoch": 0.10442955356365852,
72
+ "eval_loss": 1.8868601322174072,
73
+ "eval_runtime": 14.0214,
74
+ "eval_samples_per_second": 71.319,
75
+ "eval_steps_per_second": 2.282,
76
+ "step": 300
77
+ },
78
+ {
79
+ "epoch": 0.12183447915760161,
80
+ "grad_norm": 1.2760571472974125,
81
+ "learning_rate": 9.997343418835142e-05,
82
+ "loss": 1.8861,
83
+ "step": 350
84
+ },
85
+ {
86
+ "epoch": 0.1392394047515447,
87
+ "grad_norm": 3.7430921365305005,
88
+ "learning_rate": 9.996380759138595e-05,
89
+ "loss": 1.8622,
90
+ "step": 400
91
+ },
92
+ {
93
+ "epoch": 0.1392394047515447,
94
+ "eval_loss": 1.8383088111877441,
95
+ "eval_runtime": 14.0403,
96
+ "eval_samples_per_second": 71.224,
97
+ "eval_steps_per_second": 2.279,
98
+ "step": 400
99
+ },
100
+ {
101
+ "epoch": 0.15664433034548778,
102
+ "grad_norm": 0.9586199143769504,
103
+ "learning_rate": 9.995269618461844e-05,
104
+ "loss": 1.8478,
105
+ "step": 450
106
+ },
107
+ {
108
+ "epoch": 0.17404925593943085,
109
+ "grad_norm": 1.5061363604288809,
110
+ "learning_rate": 9.99401003015922e-05,
111
+ "loss": 1.8117,
112
+ "step": 500
113
+ },
114
+ {
115
+ "epoch": 0.17404925593943085,
116
+ "eval_loss": 1.7942754030227661,
117
+ "eval_runtime": 14.0551,
118
+ "eval_samples_per_second": 71.149,
119
+ "eval_steps_per_second": 2.277,
120
+ "step": 500
121
+ },
122
+ {
123
+ "epoch": 0.19145418153337396,
124
+ "grad_norm": 2.433096036143497,
125
+ "learning_rate": 9.992602032041181e-05,
126
+ "loss": 1.8071,
127
+ "step": 550
128
+ },
129
+ {
130
+ "epoch": 0.20885910712731703,
131
+ "grad_norm": 3.0865325369733796,
132
+ "learning_rate": 9.991045666373163e-05,
133
+ "loss": 1.7895,
134
+ "step": 600
135
+ },
136
+ {
137
+ "epoch": 0.20885910712731703,
138
+ "eval_loss": 1.7960834503173828,
139
+ "eval_runtime": 14.0575,
140
+ "eval_samples_per_second": 71.136,
141
+ "eval_steps_per_second": 2.276,
142
+ "step": 600
143
+ },
144
+ {
145
+ "epoch": 0.2262640327212601,
146
+ "grad_norm": 1.0844936845986464,
147
+ "learning_rate": 9.989340979874317e-05,
148
+ "loss": 1.7954,
149
+ "step": 650
150
+ },
151
+ {
152
+ "epoch": 0.24366895831520322,
153
+ "grad_norm": 1.5841141135500758,
154
+ "learning_rate": 9.987488023716102e-05,
155
+ "loss": 1.7827,
156
+ "step": 700
157
+ },
158
+ {
159
+ "epoch": 0.24366895831520322,
160
+ "eval_loss": 1.7635940313339233,
161
+ "eval_runtime": 14.0192,
162
+ "eval_samples_per_second": 71.331,
163
+ "eval_steps_per_second": 2.283,
164
+ "step": 700
165
+ },
166
+ {
167
+ "epoch": 0.26107388390914626,
168
+ "grad_norm": 1.0358659820491174,
169
+ "learning_rate": 9.985486853520748e-05,
170
+ "loss": 1.7755,
171
+ "step": 750
172
+ },
173
+ {
174
+ "epoch": 0.2784788095030894,
175
+ "grad_norm": 2.921637768400008,
176
+ "learning_rate": 9.983337529359597e-05,
177
+ "loss": 1.7689,
178
+ "step": 800
179
+ },
180
+ {
181
+ "epoch": 0.2784788095030894,
182
+ "eval_loss": 1.7601885795593262,
183
+ "eval_runtime": 14.0539,
184
+ "eval_samples_per_second": 71.155,
185
+ "eval_steps_per_second": 2.277,
186
+ "step": 800
187
+ },
188
+ {
189
+ "epoch": 0.2958837350970325,
190
+ "grad_norm": 1.2109225200496343,
191
+ "learning_rate": 9.981040115751287e-05,
192
+ "loss": 1.7642,
193
+ "step": 850
194
+ },
195
+ {
196
+ "epoch": 0.31328866069097555,
197
+ "grad_norm": 0.9168156411457995,
198
+ "learning_rate": 9.978594681659822e-05,
199
+ "loss": 1.7584,
200
+ "step": 900
201
+ },
202
+ {
203
+ "epoch": 0.31328866069097555,
204
+ "eval_loss": 1.733883023262024,
205
+ "eval_runtime": 14.0773,
206
+ "eval_samples_per_second": 71.036,
207
+ "eval_steps_per_second": 2.273,
208
+ "step": 900
209
+ },
210
+ {
211
+ "epoch": 0.33069358628491863,
212
+ "grad_norm": 1.231514474527335,
213
+ "learning_rate": 9.976001300492505e-05,
214
+ "loss": 1.7476,
215
+ "step": 950
216
+ },
217
+ {
218
+ "epoch": 0.3480985118788617,
219
+ "grad_norm": 1.3345795373547282,
220
+ "learning_rate": 9.97326005009772e-05,
221
+ "loss": 1.7529,
222
+ "step": 1000
223
+ },
224
+ {
225
+ "epoch": 0.3480985118788617,
226
+ "eval_loss": 1.7277562618255615,
227
+ "eval_runtime": 14.0558,
228
+ "eval_samples_per_second": 71.145,
229
+ "eval_steps_per_second": 2.277,
230
+ "step": 1000
231
+ },
232
+ {
233
+ "epoch": 0.3655034374728048,
234
+ "grad_norm": 1.3848562250830305,
235
+ "learning_rate": 9.970371012762615e-05,
236
+ "loss": 1.7383,
237
+ "step": 1050
238
+ },
239
+ {
240
+ "epoch": 0.3829083630667479,
241
+ "grad_norm": 0.9102870237217918,
242
+ "learning_rate": 9.967334275210616e-05,
243
+ "loss": 1.7312,
244
+ "step": 1100
245
+ },
246
+ {
247
+ "epoch": 0.3829083630667479,
248
+ "eval_loss": 1.7197346687316895,
249
+ "eval_runtime": 14.1392,
250
+ "eval_samples_per_second": 70.725,
251
+ "eval_steps_per_second": 2.263,
252
+ "step": 1100
253
+ },
254
+ {
255
+ "epoch": 0.400313288660691,
256
+ "grad_norm": 0.9403631790293067,
257
+ "learning_rate": 9.964149928598834e-05,
258
+ "loss": 1.7354,
259
+ "step": 1150
260
+ },
261
+ {
262
+ "epoch": 0.41771821425463407,
263
+ "grad_norm": 1.5743784967989016,
264
+ "learning_rate": 9.96081806851532e-05,
265
+ "loss": 1.7384,
266
+ "step": 1200
267
+ },
268
+ {
269
+ "epoch": 0.41771821425463407,
270
+ "eval_loss": 1.7184182405471802,
271
+ "eval_runtime": 14.0812,
272
+ "eval_samples_per_second": 71.017,
273
+ "eval_steps_per_second": 2.273,
274
+ "step": 1200
275
+ },
276
+ {
277
+ "epoch": 0.43512313984857715,
278
+ "grad_norm": 2.0859941497191743,
279
+ "learning_rate": 9.957338794976201e-05,
280
+ "loss": 1.7389,
281
+ "step": 1250
282
+ },
283
+ {
284
+ "epoch": 0.4525280654425202,
285
+ "grad_norm": 1.347628827751819,
286
+ "learning_rate": 9.953712212422681e-05,
287
+ "loss": 1.7267,
288
+ "step": 1300
289
+ },
290
+ {
291
+ "epoch": 0.4525280654425202,
292
+ "eval_loss": 1.7116867303848267,
293
+ "eval_runtime": 14.0496,
294
+ "eval_samples_per_second": 71.177,
295
+ "eval_steps_per_second": 2.278,
296
+ "step": 1300
297
+ },
298
+ {
299
+ "epoch": 0.4699329910364633,
300
+ "grad_norm": 0.7775683989797394,
301
+ "learning_rate": 9.949938429717895e-05,
302
+ "loss": 1.7136,
303
+ "step": 1350
304
+ },
305
+ {
306
+ "epoch": 0.48733791663040643,
307
+ "grad_norm": 0.7955090270012505,
308
+ "learning_rate": 9.946017560143651e-05,
309
+ "loss": 1.7188,
310
+ "step": 1400
311
+ },
312
+ {
313
+ "epoch": 0.48733791663040643,
314
+ "eval_loss": 1.703679084777832,
315
+ "eval_runtime": 14.077,
316
+ "eval_samples_per_second": 71.038,
317
+ "eval_steps_per_second": 2.273,
318
+ "step": 1400
319
+ },
320
+ {
321
+ "epoch": 0.5047428422243495,
322
+ "grad_norm": 1.2332760111187542,
323
+ "learning_rate": 9.941949721397028e-05,
324
+ "loss": 1.7169,
325
+ "step": 1450
326
+ },
327
+ {
328
+ "epoch": 0.5221477678182925,
329
+ "grad_norm": 1.81355607393607,
330
+ "learning_rate": 9.93773503558684e-05,
331
+ "loss": 1.7157,
332
+ "step": 1500
333
+ },
334
+ {
335
+ "epoch": 0.5221477678182925,
336
+ "eval_loss": 1.7078830003738403,
337
+ "eval_runtime": 14.09,
338
+ "eval_samples_per_second": 70.972,
339
+ "eval_steps_per_second": 2.271,
340
+ "step": 1500
341
+ },
342
+ {
343
+ "epoch": 0.5395526934122357,
344
+ "grad_norm": 0.9053542478773059,
345
+ "learning_rate": 9.933373629229969e-05,
346
+ "loss": 1.7102,
347
+ "step": 1550
348
+ },
349
+ {
350
+ "epoch": 0.5569576190061788,
351
+ "grad_norm": 0.6503277295238644,
352
+ "learning_rate": 9.928865633247573e-05,
353
+ "loss": 1.7033,
354
+ "step": 1600
355
+ },
356
+ {
357
+ "epoch": 0.5569576190061788,
358
+ "eval_loss": 1.6917779445648193,
359
+ "eval_runtime": 14.0698,
360
+ "eval_samples_per_second": 71.074,
361
+ "eval_steps_per_second": 2.274,
362
+ "step": 1600
363
+ },
364
+ {
365
+ "epoch": 0.5743625446001218,
366
+ "grad_norm": 1.4224490096345375,
367
+ "learning_rate": 9.92421118296115e-05,
368
+ "loss": 1.6997,
369
+ "step": 1650
370
+ },
371
+ {
372
+ "epoch": 0.591767470194065,
373
+ "grad_norm": 0.7864420926166752,
374
+ "learning_rate": 9.919410418088481e-05,
375
+ "loss": 1.7102,
376
+ "step": 1700
377
+ },
378
+ {
379
+ "epoch": 0.591767470194065,
380
+ "eval_loss": 1.690305233001709,
381
+ "eval_runtime": 14.1062,
382
+ "eval_samples_per_second": 70.891,
383
+ "eval_steps_per_second": 2.269,
384
+ "step": 1700
385
+ },
386
+ {
387
+ "epoch": 0.609172395788008,
388
+ "grad_norm": 0.5663590518834145,
389
+ "learning_rate": 9.914463482739435e-05,
390
+ "loss": 1.7046,
391
+ "step": 1750
392
+ },
393
+ {
394
+ "epoch": 0.6265773213819511,
395
+ "grad_norm": 1.1145025421986445,
396
+ "learning_rate": 9.909370525411637e-05,
397
+ "loss": 1.6905,
398
+ "step": 1800
399
+ },
400
+ {
401
+ "epoch": 0.6265773213819511,
402
+ "eval_loss": 1.6856919527053833,
403
+ "eval_runtime": 14.0345,
404
+ "eval_samples_per_second": 71.253,
405
+ "eval_steps_per_second": 2.28,
406
+ "step": 1800
407
+ },
408
+ {
409
+ "epoch": 0.6439822469758941,
410
+ "grad_norm": 1.079593642429848,
411
+ "learning_rate": 9.90413169898602e-05,
412
+ "loss": 1.6973,
413
+ "step": 1850
414
+ },
415
+ {
416
+ "epoch": 0.6613871725698373,
417
+ "grad_norm": 0.8794305699903086,
418
+ "learning_rate": 9.898747160722229e-05,
419
+ "loss": 1.6821,
420
+ "step": 1900
421
+ },
422
+ {
423
+ "epoch": 0.6613871725698373,
424
+ "eval_loss": 1.680002212524414,
425
+ "eval_runtime": 14.0923,
426
+ "eval_samples_per_second": 70.961,
427
+ "eval_steps_per_second": 2.271,
428
+ "step": 1900
429
+ },
430
+ {
431
+ "epoch": 0.6787920981637804,
432
+ "grad_norm": 1.3664190261530837,
433
+ "learning_rate": 9.893217072253903e-05,
434
+ "loss": 1.6909,
435
+ "step": 1950
436
+ },
437
+ {
438
+ "epoch": 0.6961970237577234,
439
+ "grad_norm": 0.9268231360918758,
440
+ "learning_rate": 9.88754159958382e-05,
441
+ "loss": 1.6901,
442
+ "step": 2000
443
+ },
444
+ {
445
+ "epoch": 0.6961970237577234,
446
+ "eval_loss": 1.6765377521514893,
447
+ "eval_runtime": 14.0942,
448
+ "eval_samples_per_second": 70.951,
449
+ "eval_steps_per_second": 2.27,
450
+ "step": 2000
451
+ },
452
+ {
453
+ "epoch": 0.7136019493516665,
454
+ "grad_norm": 0.9864416812238661,
455
+ "learning_rate": 9.881720913078921e-05,
456
+ "loss": 1.6911,
457
+ "step": 2050
458
+ },
459
+ {
460
+ "epoch": 0.7310068749456096,
461
+ "grad_norm": 0.8706035984933645,
462
+ "learning_rate": 9.875755187465186e-05,
463
+ "loss": 1.6866,
464
+ "step": 2100
465
+ },
466
+ {
467
+ "epoch": 0.7310068749456096,
468
+ "eval_loss": 1.675471305847168,
469
+ "eval_runtime": 14.0392,
470
+ "eval_samples_per_second": 71.229,
471
+ "eval_steps_per_second": 2.279,
472
+ "step": 2100
473
+ },
474
+ {
475
+ "epoch": 0.7484118005395527,
476
+ "grad_norm": 0.9954026204157976,
477
+ "learning_rate": 9.869644601822396e-05,
478
+ "loss": 1.6764,
479
+ "step": 2150
480
+ },
481
+ {
482
+ "epoch": 0.7658167261334958,
483
+ "grad_norm": 0.9859776473729975,
484
+ "learning_rate": 9.863389339578761e-05,
485
+ "loss": 1.6772,
486
+ "step": 2200
487
+ },
488
+ {
489
+ "epoch": 0.7658167261334958,
490
+ "eval_loss": 1.6698520183563232,
491
+ "eval_runtime": 14.0605,
492
+ "eval_samples_per_second": 71.121,
493
+ "eval_steps_per_second": 2.276,
494
+ "step": 2200
495
+ },
496
+ {
497
+ "epoch": 0.7832216517274389,
498
+ "grad_norm": 0.9106273220771831,
499
+ "learning_rate": 9.856989588505399e-05,
500
+ "loss": 1.6796,
501
+ "step": 2250
502
+ },
503
+ {
504
+ "epoch": 0.800626577321382,
505
+ "grad_norm": 1.1219788313484198,
506
+ "learning_rate": 9.850445540710714e-05,
507
+ "loss": 1.6742,
508
+ "step": 2300
509
+ },
510
+ {
511
+ "epoch": 0.800626577321382,
512
+ "eval_loss": 1.663262963294983,
513
+ "eval_runtime": 14.0171,
514
+ "eval_samples_per_second": 71.341,
515
+ "eval_steps_per_second": 2.283,
516
+ "step": 2300
517
+ },
518
+ {
519
+ "epoch": 0.818031502915325,
520
+ "grad_norm": 0.7584108888049894,
521
+ "learning_rate": 9.843757392634629e-05,
522
+ "loss": 1.6773,
523
+ "step": 2350
524
+ },
525
+ {
526
+ "epoch": 0.8354364285092681,
527
+ "grad_norm": 0.8524680066268957,
528
+ "learning_rate": 9.836925345042675e-05,
529
+ "loss": 1.6802,
530
+ "step": 2400
531
+ },
532
+ {
533
+ "epoch": 0.8354364285092681,
534
+ "eval_loss": 1.6637836694717407,
535
+ "eval_runtime": 14.0523,
536
+ "eval_samples_per_second": 71.163,
537
+ "eval_steps_per_second": 2.277,
538
+ "step": 2400
539
+ },
540
+ {
541
+ "epoch": 0.8528413541032112,
542
+ "grad_norm": 0.5830225213698085,
543
+ "learning_rate": 9.82994960301998e-05,
544
+ "loss": 1.6774,
545
+ "step": 2450
546
+ },
547
+ {
548
+ "epoch": 0.8702462796971543,
549
+ "grad_norm": 0.8294538008262156,
550
+ "learning_rate": 9.822830375965103e-05,
551
+ "loss": 1.6702,
552
+ "step": 2500
553
+ },
554
+ {
555
+ "epoch": 0.8702462796971543,
556
+ "eval_loss": 1.6600449085235596,
557
+ "eval_runtime": 14.0902,
558
+ "eval_samples_per_second": 70.972,
559
+ "eval_steps_per_second": 2.271,
560
+ "step": 2500
561
+ },
562
+ {
563
+ "epoch": 0.8876512052910974,
564
+ "grad_norm": 1.0515224700476833,
565
+ "learning_rate": 9.815567877583758e-05,
566
+ "loss": 1.6758,
567
+ "step": 2550
568
+ },
569
+ {
570
+ "epoch": 0.9050561308850404,
571
+ "grad_norm": 0.7866141842693181,
572
+ "learning_rate": 9.808162325882385e-05,
573
+ "loss": 1.6645,
574
+ "step": 2600
575
+ },
576
+ {
577
+ "epoch": 0.9050561308850404,
578
+ "eval_loss": 1.657778263092041,
579
+ "eval_runtime": 14.0817,
580
+ "eval_samples_per_second": 71.014,
581
+ "eval_steps_per_second": 2.272,
582
+ "step": 2600
583
+ },
584
+ {
585
+ "epoch": 0.9224610564789836,
586
+ "grad_norm": 0.9909101251943951,
587
+ "learning_rate": 9.800613943161619e-05,
588
+ "loss": 1.6629,
589
+ "step": 2650
590
+ },
591
+ {
592
+ "epoch": 0.9398659820729266,
593
+ "grad_norm": 0.9534991636209588,
594
+ "learning_rate": 9.79292295600961e-05,
595
+ "loss": 1.6523,
596
+ "step": 2700
597
+ },
598
+ {
599
+ "epoch": 0.9398659820729266,
600
+ "eval_loss": 1.6576528549194336,
601
+ "eval_runtime": 14.0341,
602
+ "eval_samples_per_second": 71.255,
603
+ "eval_steps_per_second": 2.28,
604
+ "step": 2700
605
+ },
606
+ {
607
+ "epoch": 0.9572709076668697,
608
+ "grad_norm": 0.7620610436511178,
609
+ "learning_rate": 9.785089595295222e-05,
610
+ "loss": 1.6573,
611
+ "step": 2750
612
+ },
613
+ {
614
+ "epoch": 0.9746758332608129,
615
+ "grad_norm": 1.5752171211110084,
616
+ "learning_rate": 9.777114096161105e-05,
617
+ "loss": 1.6583,
618
+ "step": 2800
619
+ },
620
+ {
621
+ "epoch": 0.9746758332608129,
622
+ "eval_loss": 1.6622099876403809,
623
+ "eval_runtime": 14.0927,
624
+ "eval_samples_per_second": 70.959,
625
+ "eval_steps_per_second": 2.271,
626
+ "step": 2800
627
+ },
628
+ {
629
+ "epoch": 0.9920807588547559,
630
+ "grad_norm": 0.5970970379963504,
631
+ "learning_rate": 9.768996698016636e-05,
632
+ "loss": 1.6625,
633
+ "step": 2850
634
+ },
635
+ {
636
+ "epoch": 1.009746758332608,
637
+ "grad_norm": 0.7470976369983713,
638
+ "learning_rate": 9.760737644530726e-05,
639
+ "loss": 1.6597,
640
+ "step": 2900
641
+ },
642
+ {
643
+ "epoch": 1.009746758332608,
644
+ "eval_loss": 1.647603988647461,
645
+ "eval_runtime": 14.12,
646
+ "eval_samples_per_second": 70.822,
647
+ "eval_steps_per_second": 2.266,
648
+ "step": 2900
649
+ },
650
+ {
651
+ "epoch": 1.0271516839265513,
652
+ "grad_norm": 0.962160586071795,
653
+ "learning_rate": 9.75233718362452e-05,
654
+ "loss": 1.611,
655
+ "step": 2950
656
+ },
657
+ {
658
+ "epoch": 1.0445566095204943,
659
+ "grad_norm": 0.6386050774526276,
660
+ "learning_rate": 9.74379556746394e-05,
661
+ "loss": 1.619,
662
+ "step": 3000
663
+ },
664
+ {
665
+ "epoch": 1.0445566095204943,
666
+ "eval_loss": 1.6434565782546997,
667
+ "eval_runtime": 14.0132,
668
+ "eval_samples_per_second": 71.361,
669
+ "eval_steps_per_second": 2.284,
670
+ "step": 3000
671
+ },
672
+ {
673
+ "epoch": 1.0619615351144374,
674
+ "grad_norm": 1.5569952795665942,
675
+ "learning_rate": 9.735113052452119e-05,
676
+ "loss": 1.6108,
677
+ "step": 3050
678
+ },
679
+ {
680
+ "epoch": 1.0793664607083804,
681
+ "grad_norm": 1.223444554102184,
682
+ "learning_rate": 9.726289899221713e-05,
683
+ "loss": 1.6242,
684
+ "step": 3100
685
+ },
686
+ {
687
+ "epoch": 1.0793664607083804,
688
+ "eval_loss": 1.6534233093261719,
689
+ "eval_runtime": 14.0914,
690
+ "eval_samples_per_second": 70.965,
691
+ "eval_steps_per_second": 2.271,
692
+ "step": 3100
693
+ },
694
+ {
695
+ "epoch": 1.0967713863023236,
696
+ "grad_norm": 0.6055563672851731,
697
+ "learning_rate": 9.717326372627065e-05,
698
+ "loss": 1.6165,
699
+ "step": 3150
700
+ },
701
+ {
702
+ "epoch": 1.1141763118962666,
703
+ "grad_norm": 0.7125630072846985,
704
+ "learning_rate": 9.708222741736268e-05,
705
+ "loss": 1.6137,
706
+ "step": 3200
707
+ },
708
+ {
709
+ "epoch": 1.1141763118962666,
710
+ "eval_loss": 1.6405473947525024,
711
+ "eval_runtime": 14.0433,
712
+ "eval_samples_per_second": 71.208,
713
+ "eval_steps_per_second": 2.279,
714
+ "step": 3200
715
+ },
716
+ {
717
+ "epoch": 1.1315812374902097,
718
+ "grad_norm": 0.6828372843368237,
719
+ "learning_rate": 9.698979279823071e-05,
720
+ "loss": 1.6178,
721
+ "step": 3250
722
+ },
723
+ {
724
+ "epoch": 1.148986163084153,
725
+ "grad_norm": 0.6458088716811551,
726
+ "learning_rate": 9.689596264358694e-05,
727
+ "loss": 1.6057,
728
+ "step": 3300
729
+ },
730
+ {
731
+ "epoch": 1.148986163084153,
732
+ "eval_loss": 1.6405302286148071,
733
+ "eval_runtime": 14.0715,
734
+ "eval_samples_per_second": 71.065,
735
+ "eval_steps_per_second": 2.274,
736
+ "step": 3300
737
+ },
738
+ {
739
+ "epoch": 1.166391088678096,
740
+ "grad_norm": 0.7900271544609745,
741
+ "learning_rate": 9.680073977003483e-05,
742
+ "loss": 1.6031,
743
+ "step": 3350
744
+ },
745
+ {
746
+ "epoch": 1.183796014272039,
747
+ "grad_norm": 1.0152045530917768,
748
+ "learning_rate": 9.670412703598469e-05,
749
+ "loss": 1.6117,
750
+ "step": 3400
751
+ },
752
+ {
753
+ "epoch": 1.183796014272039,
754
+ "eval_loss": 1.639701247215271,
755
+ "eval_runtime": 14.0316,
756
+ "eval_samples_per_second": 71.268,
757
+ "eval_steps_per_second": 2.281,
758
+ "step": 3400
759
+ },
760
+ {
761
+ "epoch": 1.201200939865982,
762
+ "grad_norm": 0.7302281809291777,
763
+ "learning_rate": 9.660612734156777e-05,
764
+ "loss": 1.6027,
765
+ "step": 3450
766
+ },
767
+ {
768
+ "epoch": 1.2186058654599252,
769
+ "grad_norm": 0.6755389511951054,
770
+ "learning_rate": 9.650674362854923e-05,
771
+ "loss": 1.6227,
772
+ "step": 3500
773
+ },
774
+ {
775
+ "epoch": 1.2186058654599252,
776
+ "eval_loss": 1.633447289466858,
777
+ "eval_runtime": 14.0553,
778
+ "eval_samples_per_second": 71.148,
779
+ "eval_steps_per_second": 2.277,
780
+ "step": 3500
781
+ },
782
+ {
783
+ "epoch": 1.2360107910538682,
784
+ "grad_norm": 0.4958097178118167,
785
+ "learning_rate": 9.640597888023988e-05,
786
+ "loss": 1.6039,
787
+ "step": 3550
788
+ },
789
+ {
790
+ "epoch": 1.2534157166478113,
791
+ "grad_norm": 0.842473221384133,
792
+ "learning_rate": 9.630383612140661e-05,
793
+ "loss": 1.6105,
794
+ "step": 3600
795
+ },
796
+ {
797
+ "epoch": 1.2534157166478113,
798
+ "eval_loss": 1.6299790143966675,
799
+ "eval_runtime": 14.0661,
800
+ "eval_samples_per_second": 71.093,
801
+ "eval_steps_per_second": 2.275,
802
+ "step": 3600
803
+ },
804
+ {
805
+ "epoch": 1.2708206422417545,
806
+ "grad_norm": 0.5547865266670734,
807
+ "learning_rate": 9.62003184181815e-05,
808
+ "loss": 1.608,
809
+ "step": 3650
810
+ },
811
+ {
812
+ "epoch": 1.2882255678356975,
813
+ "grad_norm": 0.5625620088465835,
814
+ "learning_rate": 9.609542887796993e-05,
815
+ "loss": 1.6141,
816
+ "step": 3700
817
+ },
818
+ {
819
+ "epoch": 1.2882255678356975,
820
+ "eval_loss": 1.6269824504852295,
821
+ "eval_runtime": 14.0355,
822
+ "eval_samples_per_second": 71.248,
823
+ "eval_steps_per_second": 2.28,
824
+ "step": 3700
825
+ },
826
+ {
827
+ "epoch": 1.3056304934296405,
828
+ "grad_norm": 0.9870933875943105,
829
+ "learning_rate": 9.598917064935719e-05,
830
+ "loss": 1.6045,
831
+ "step": 3750
832
+ },
833
+ {
834
+ "epoch": 1.3230354190235838,
835
+ "grad_norm": 0.6538637454488698,
836
+ "learning_rate": 9.5881546922014e-05,
837
+ "loss": 1.601,
838
+ "step": 3800
839
+ },
840
+ {
841
+ "epoch": 1.3230354190235838,
842
+ "eval_loss": 1.626142144203186,
843
+ "eval_runtime": 14.1089,
844
+ "eval_samples_per_second": 70.877,
845
+ "eval_steps_per_second": 2.268,
846
+ "step": 3800
847
+ },
848
+ {
849
+ "epoch": 1.3404403446175268,
850
+ "grad_norm": 0.6547060258929419,
851
+ "learning_rate": 9.57725609266008e-05,
852
+ "loss": 1.6066,
853
+ "step": 3850
854
+ },
855
+ {
856
+ "epoch": 1.3578452702114698,
857
+ "grad_norm": 0.9358458562600437,
858
+ "learning_rate": 9.566221593467069e-05,
859
+ "loss": 1.6221,
860
+ "step": 3900
861
+ },
862
+ {
863
+ "epoch": 1.3578452702114698,
864
+ "eval_loss": 1.627410888671875,
865
+ "eval_runtime": 14.088,
866
+ "eval_samples_per_second": 70.982,
867
+ "eval_steps_per_second": 2.271,
868
+ "step": 3900
869
+ },
870
+ {
871
+ "epoch": 1.3752501958054129,
872
+ "grad_norm": 0.8129191474694835,
873
+ "learning_rate": 9.555051525857134e-05,
874
+ "loss": 1.5996,
875
+ "step": 3950
876
+ },
877
+ {
878
+ "epoch": 1.392655121399356,
879
+ "grad_norm": 0.6824919031119797,
880
+ "learning_rate": 9.54374622513454e-05,
881
+ "loss": 1.6101,
882
+ "step": 4000
883
+ },
884
+ {
885
+ "epoch": 1.392655121399356,
886
+ "eval_loss": 1.6165417432785034,
887
+ "eval_runtime": 14.0492,
888
+ "eval_samples_per_second": 71.179,
889
+ "eval_steps_per_second": 2.278,
890
+ "step": 4000
891
+ },
892
+ {
893
+ "epoch": 1.4100600469932991,
894
+ "grad_norm": 0.9330542502271321,
895
+ "learning_rate": 9.532306030663e-05,
896
+ "loss": 1.5958,
897
+ "step": 4050
898
+ },
899
+ {
900
+ "epoch": 1.4274649725872421,
901
+ "grad_norm": 0.6438330837104954,
902
+ "learning_rate": 9.520731285855482e-05,
903
+ "loss": 1.599,
904
+ "step": 4100
905
+ },
906
+ {
907
+ "epoch": 1.4274649725872421,
908
+ "eval_loss": 1.6210800409317017,
909
+ "eval_runtime": 14.0932,
910
+ "eval_samples_per_second": 70.956,
911
+ "eval_steps_per_second": 2.271,
912
+ "step": 4100
913
+ },
914
+ {
915
+ "epoch": 1.4448698981811852,
916
+ "grad_norm": 0.9636631744898069,
917
+ "learning_rate": 9.509022338163896e-05,
918
+ "loss": 1.5955,
919
+ "step": 4150
920
+ },
921
+ {
922
+ "epoch": 1.4622748237751284,
923
+ "grad_norm": 0.5569273625801461,
924
+ "learning_rate": 9.497179539068673e-05,
925
+ "loss": 1.6007,
926
+ "step": 4200
927
+ },
928
+ {
929
+ "epoch": 1.4622748237751284,
930
+ "eval_loss": 1.6149400472640991,
931
+ "eval_runtime": 14.0717,
932
+ "eval_samples_per_second": 71.064,
933
+ "eval_steps_per_second": 2.274,
934
+ "step": 4200
935
+ },
936
+ {
937
+ "epoch": 1.4796797493690714,
938
+ "grad_norm": 0.5160141243848255,
939
+ "learning_rate": 9.485203244068202e-05,
940
+ "loss": 1.5926,
941
+ "step": 4250
942
+ },
943
+ {
944
+ "epoch": 1.4970846749630145,
945
+ "grad_norm": 0.48151772986247815,
946
+ "learning_rate": 9.473093812668182e-05,
947
+ "loss": 1.5936,
948
+ "step": 4300
949
+ },
950
+ {
951
+ "epoch": 1.4970846749630145,
952
+ "eval_loss": 1.6123466491699219,
953
+ "eval_runtime": 14.0881,
954
+ "eval_samples_per_second": 70.982,
955
+ "eval_steps_per_second": 2.271,
956
+ "step": 4300
957
+ },
958
+ {
959
+ "epoch": 1.5144896005569577,
960
+ "grad_norm": 1.1271863223922003,
961
+ "learning_rate": 9.460851608370794e-05,
962
+ "loss": 1.6012,
963
+ "step": 4350
964
+ },
965
+ {
966
+ "epoch": 1.5318945261509007,
967
+ "grad_norm": 0.8558669669849335,
968
+ "learning_rate": 9.448476998663825e-05,
969
+ "loss": 1.605,
970
+ "step": 4400
971
+ },
972
+ {
973
+ "epoch": 1.5318945261509007,
974
+ "eval_loss": 1.6140981912612915,
975
+ "eval_runtime": 14.1256,
976
+ "eval_samples_per_second": 70.793,
977
+ "eval_steps_per_second": 2.265,
978
+ "step": 4400
979
+ },
980
+ {
981
+ "epoch": 1.5492994517448437,
982
+ "grad_norm": 0.7276127450869437,
983
+ "learning_rate": 9.435970355009615e-05,
984
+ "loss": 1.5938,
985
+ "step": 4450
986
+ },
987
+ {
988
+ "epoch": 1.566704377338787,
989
+ "grad_norm": 0.6065688198096086,
990
+ "learning_rate": 9.423332052833916e-05,
991
+ "loss": 1.5946,
992
+ "step": 4500
993
+ },
994
+ {
995
+ "epoch": 1.566704377338787,
996
+ "eval_loss": 1.611683964729309,
997
+ "eval_runtime": 14.0436,
998
+ "eval_samples_per_second": 71.207,
999
+ "eval_steps_per_second": 2.279,
1000
+ "step": 4500
1001
+ },
1002
+ {
1003
+ "epoch": 1.58410930293273,
1004
+ "grad_norm": 0.7748024258482299,
1005
+ "learning_rate": 9.410562471514616e-05,
1006
+ "loss": 1.5894,
1007
+ "step": 4550
1008
+ },
1009
+ {
1010
+ "epoch": 1.601514228526673,
1011
+ "grad_norm": 0.48917881847751543,
1012
+ "learning_rate": 9.397661994370357e-05,
1013
+ "loss": 1.5877,
1014
+ "step": 4600
1015
+ },
1016
+ {
1017
+ "epoch": 1.601514228526673,
1018
+ "eval_loss": 1.6069624423980713,
1019
+ "eval_runtime": 14.0735,
1020
+ "eval_samples_per_second": 71.056,
1021
+ "eval_steps_per_second": 2.274,
1022
+ "step": 4600
1023
+ },
1024
+ {
1025
+ "epoch": 1.6189191541206163,
1026
+ "grad_norm": 0.8166564830453485,
1027
+ "learning_rate": 9.384631008649027e-05,
1028
+ "loss": 1.5875,
1029
+ "step": 4650
1030
+ },
1031
+ {
1032
+ "epoch": 1.636324079714559,
1033
+ "grad_norm": 0.9485787011897893,
1034
+ "learning_rate": 9.371469905516128e-05,
1035
+ "loss": 1.5926,
1036
+ "step": 4700
1037
+ },
1038
+ {
1039
+ "epoch": 1.636324079714559,
1040
+ "eval_loss": 1.6103551387786865,
1041
+ "eval_runtime": 14.0489,
1042
+ "eval_samples_per_second": 71.18,
1043
+ "eval_steps_per_second": 2.278,
1044
+ "step": 4700
1045
+ },
1046
+ {
1047
+ "epoch": 1.6537290053085023,
1048
+ "grad_norm": 0.6608190035209371,
1049
+ "learning_rate": 9.358179080043047e-05,
1050
+ "loss": 1.5852,
1051
+ "step": 4750
1052
+ },
1053
+ {
1054
+ "epoch": 1.6711339309024456,
1055
+ "grad_norm": 0.5091041850584289,
1056
+ "learning_rate": 9.344758931195186e-05,
1057
+ "loss": 1.5818,
1058
+ "step": 4800
1059
+ },
1060
+ {
1061
+ "epoch": 1.6711339309024456,
1062
+ "eval_loss": 1.6055699586868286,
1063
+ "eval_runtime": 14.0386,
1064
+ "eval_samples_per_second": 71.232,
1065
+ "eval_steps_per_second": 2.279,
1066
+ "step": 4800
1067
+ },
1068
+ {
1069
+ "epoch": 1.6885388564963884,
1070
+ "grad_norm": 0.4809752811498165,
1071
+ "learning_rate": 9.331209861819991e-05,
1072
+ "loss": 1.5945,
1073
+ "step": 4850
1074
+ },
1075
+ {
1076
+ "epoch": 1.7059437820903316,
1077
+ "grad_norm": 1.16696044120828,
1078
+ "learning_rate": 9.31753227863486e-05,
1079
+ "loss": 1.5906,
1080
+ "step": 4900
1081
+ },
1082
+ {
1083
+ "epoch": 1.7059437820903316,
1084
+ "eval_loss": 1.602495551109314,
1085
+ "eval_runtime": 14.0638,
1086
+ "eval_samples_per_second": 71.104,
1087
+ "eval_steps_per_second": 2.275,
1088
+ "step": 4900
1089
+ },
1090
+ {
1091
+ "epoch": 1.7233487076842746,
1092
+ "grad_norm": 0.7703478252526429,
1093
+ "learning_rate": 9.303726592214927e-05,
1094
+ "loss": 1.5759,
1095
+ "step": 4950
1096
+ },
1097
+ {
1098
+ "epoch": 1.7407536332782176,
1099
+ "grad_norm": 0.4326591794595183,
1100
+ "learning_rate": 9.289793216980748e-05,
1101
+ "loss": 1.589,
1102
+ "step": 5000
1103
+ },
1104
+ {
1105
+ "epoch": 1.7407536332782176,
1106
+ "eval_loss": 1.598211646080017,
1107
+ "eval_runtime": 14.05,
1108
+ "eval_samples_per_second": 71.174,
1109
+ "eval_steps_per_second": 2.278,
1110
+ "step": 5000
1111
+ },
1112
+ {
1113
+ "epoch": 1.7581585588721609,
1114
+ "grad_norm": 0.9150661442715593,
1115
+ "learning_rate": 9.275732571185852e-05,
1116
+ "loss": 1.5925,
1117
+ "step": 5050
1118
+ },
1119
+ {
1120
+ "epoch": 1.775563484466104,
1121
+ "grad_norm": 0.4835138015080412,
1122
+ "learning_rate": 9.261545076904189e-05,
1123
+ "loss": 1.587,
1124
+ "step": 5100
1125
+ },
1126
+ {
1127
+ "epoch": 1.775563484466104,
1128
+ "eval_loss": 1.5962464809417725,
1129
+ "eval_runtime": 14.0435,
1130
+ "eval_samples_per_second": 71.207,
1131
+ "eval_steps_per_second": 2.279,
1132
+ "step": 5100
1133
+ },
1134
+ {
1135
+ "epoch": 1.792968410060047,
1136
+ "grad_norm": 0.8246740616874354,
1137
+ "learning_rate": 9.247231160017462e-05,
1138
+ "loss": 1.5845,
1139
+ "step": 5150
1140
+ },
1141
+ {
1142
+ "epoch": 1.8103733356539902,
1143
+ "grad_norm": 0.7636936218440887,
1144
+ "learning_rate": 9.232791250202342e-05,
1145
+ "loss": 1.5789,
1146
+ "step": 5200
1147
+ },
1148
+ {
1149
+ "epoch": 1.8103733356539902,
1150
+ "eval_loss": 1.5962697267532349,
1151
+ "eval_runtime": 14.0724,
1152
+ "eval_samples_per_second": 71.061,
1153
+ "eval_steps_per_second": 2.274,
1154
+ "step": 5200
1155
+ },
1156
+ {
1157
+ "epoch": 1.8277782612479332,
1158
+ "grad_norm": 0.5278061111679693,
1159
+ "learning_rate": 9.218225780917564e-05,
1160
+ "loss": 1.5784,
1161
+ "step": 5250
1162
+ },
1163
+ {
1164
+ "epoch": 1.8451831868418762,
1165
+ "grad_norm": 0.5521436007234811,
1166
+ "learning_rate": 9.203535189390927e-05,
1167
+ "loss": 1.5859,
1168
+ "step": 5300
1169
+ },
1170
+ {
1171
+ "epoch": 1.8451831868418762,
1172
+ "eval_loss": 1.589383840560913,
1173
+ "eval_runtime": 14.0972,
1174
+ "eval_samples_per_second": 70.936,
1175
+ "eval_steps_per_second": 2.27,
1176
+ "step": 5300
1177
+ },
1178
+ {
1179
+ "epoch": 1.8625881124358195,
1180
+ "grad_norm": 0.9153838912238841,
1181
+ "learning_rate": 9.188719916606157e-05,
1182
+ "loss": 1.5767,
1183
+ "step": 5350
1184
+ },
1185
+ {
1186
+ "epoch": 1.8799930380297625,
1187
+ "grad_norm": 0.5869179835862129,
1188
+ "learning_rate": 9.17378040728968e-05,
1189
+ "loss": 1.5771,
1190
+ "step": 5400
1191
+ },
1192
+ {
1193
+ "epoch": 1.8799930380297625,
1194
+ "eval_loss": 1.5878838300704956,
1195
+ "eval_runtime": 14.0586,
1196
+ "eval_samples_per_second": 71.131,
1197
+ "eval_steps_per_second": 2.276,
1198
+ "step": 5400
1199
+ },
1200
+ {
1201
+ "epoch": 1.8973979636237055,
1202
+ "grad_norm": 0.8157168714834181,
1203
+ "learning_rate": 9.158717109897263e-05,
1204
+ "loss": 1.5626,
1205
+ "step": 5450
1206
+ },
1207
+ {
1208
+ "epoch": 1.9148028892176487,
1209
+ "grad_norm": 0.7455391308200009,
1210
+ "learning_rate": 9.14353047660056e-05,
1211
+ "loss": 1.5651,
1212
+ "step": 5500
1213
+ },
1214
+ {
1215
+ "epoch": 1.9148028892176487,
1216
+ "eval_loss": 1.5843595266342163,
1217
+ "eval_runtime": 14.0141,
1218
+ "eval_samples_per_second": 71.356,
1219
+ "eval_steps_per_second": 2.283,
1220
+ "step": 5500
1221
+ },
1222
+ {
1223
+ "epoch": 1.9322078148115915,
1224
+ "grad_norm": 0.48742202866618534,
1225
+ "learning_rate": 9.128220963273532e-05,
1226
+ "loss": 1.5806,
1227
+ "step": 5550
1228
+ },
1229
+ {
1230
+ "epoch": 1.9496127404055348,
1231
+ "grad_norm": 0.49018002201797567,
1232
+ "learning_rate": 9.112789029478769e-05,
1233
+ "loss": 1.5715,
1234
+ "step": 5600
1235
+ },
1236
+ {
1237
+ "epoch": 1.9496127404055348,
1238
+ "eval_loss": 1.583487868309021,
1239
+ "eval_runtime": 14.076,
1240
+ "eval_samples_per_second": 71.043,
1241
+ "eval_steps_per_second": 2.273,
1242
+ "step": 5600
1243
+ }
1244
+ ],
1245
+ "logging_steps": 50,
1246
+ "max_steps": 28720,
1247
+ "num_input_tokens_seen": 0,
1248
+ "num_train_epochs": 10,
1249
+ "save_steps": 800,
1250
+ "stateful_callbacks": {
1251
+ "TrainerControl": {
1252
+ "args": {
1253
+ "should_epoch_stop": false,
1254
+ "should_evaluate": false,
1255
+ "should_log": false,
1256
+ "should_save": true,
1257
+ "should_training_stop": false
1258
+ },
1259
+ "attributes": {}
1260
+ }
1261
+ },
1262
+ "total_flos": 5432367833415680.0,
1263
+ "train_batch_size": 4,
1264
+ "trial_name": null,
1265
+ "trial_params": null
1266
+ }
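
The log entries above are the tail of the Trainer's log_history: training records carry loss, grad_norm, and learning_rate at each logging_steps=50 interval, while evaluation records carry eval_loss and throughput figures at each eval step. A minimal sketch of how one might plot both curves from this checkpoint, assuming the file is downloaded locally as trainer_state.json (the path and the use of matplotlib are assumptions, not part of this commit):

import json
import matplotlib.pyplot as plt

# Assumed local copy of the trainer_state.json shown in this diff.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training records have a plain "loss" key; evaluation records have "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

plt.plot([e["step"] for e in train_logs], [e["loss"] for e in train_logs], label="train loss")
plt.plot([e["step"] for e in eval_logs], [e["eval_loss"] for e in eval_logs], label="eval loss")
plt.xlabel("step")
plt.ylabel("loss")
plt.legend()
plt.show()
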
training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:50c0acabb981dfa7ab41f4379309e8fe958f9a7dc2fea40dc8e04a65181552bf
3
+ size 7416
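
training_args.bin is uploaded as a Git LFS pointer (about 7.4 kB object), and the underlying binary is the pickled TrainingArguments used for this run. A minimal sketch of how one might inspect it after fetching the real file, assuming a local download path and a torch version that accepts the weights_only argument (both are assumptions about the reader's setup, not something this commit specifies):

import torch

# The .bin file is a pickled transformers.TrainingArguments object, so full
# unpickling (weights_only=False) is required rather than a tensor-only load.
args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs, args.save_steps, args.logging_steps)
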