adarshxs committed
Commit 5842a6b
1 Parent(s): b08c569
README.md ADDED
@@ -0,0 +1,59 @@
+ ---
+ license: other
+ base_model: google/gemma-2b
+ tags:
+ - llama-factory
+ - full
+ - generated_from_trainer
+ model-index:
+ - name: final_trained
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # final_trained
+
+ This model is a fine-tuned version of [google/gemma-2b](https://huggingface.co/google/gemma-2b) on the samvaad-hi-v1 dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 4
+ - eval_batch_size: 8
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 4
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 64
+ - total_eval_batch_size: 32
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - num_epochs: 1.0
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.39.0.dev0
+ - Pytorch 2.0.1+cu118
+ - Datasets 2.16.1
+ - Tokenizers 0.15.0
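The hyperparameters in the card are internally consistent: total_train_batch_size = train_batch_size × num_devices × gradient_accumulation_steps = 4 × 4 × 4 = 64. A minimal inference sketch for this checkpoint, assuming the weights are published under the committer's namespace (the repo id below is hypothetical, and samvaad-hi-v1 appears to be a Hindi dialogue dataset):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumption: substitute the real Hub path or a local checkout of this repo.
model_id = "adarshxs/final_trained"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

inputs = tokenizer("नमस्ते, आप कैसे हैं?", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```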
all_results.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "epoch": 1.0,
+   "train_loss": 1.5383612829627413,
+   "train_runtime": 4681.1872,
+   "train_samples_per_second": 21.666,
+   "train_steps_per_second": 0.338
+ }
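A quick consistency check on these numbers against the card: at an effective batch size of 64, the reported rates imply roughly 1584 optimizer steps and about 101k samples for the single epoch (the small gaps come from rounding in the reported per-second rates).

```python
# Sanity-check all_results.json against the card's hyperparameters.
train_runtime = 4681.1872          # seconds
train_steps_per_second = 0.338
train_samples_per_second = 21.666
effective_batch = 64               # 4 per device * 4 GPUs * 4 grad-accum steps

print(train_runtime * train_steps_per_second)    # ~1582 steps (max_steps is 1584)
print(train_runtime * train_samples_per_second)  # ~101,423 samples
print(effective_batch * 1584)                    # 101,376 samples in one epoch
```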
checkpoint-1000/config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "_name_or_path": "google/gemma-2b",
+   "architectures": [
+     "GemmaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 2,
+   "eos_token_id": 1,
+   "head_dim": 256,
+   "hidden_act": "gelu",
+   "hidden_size": 2048,
+   "initializer_range": 0.02,
+   "intermediate_size": 16384,
+   "max_position_embeddings": 8192,
+   "model_type": "gemma",
+   "num_attention_heads": 8,
+   "num_hidden_layers": 18,
+   "num_key_value_heads": 1,
+   "pad_token_id": 0,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "torch_dtype": "float32",
+   "transformers_version": "4.39.0.dev0",
+   "use_cache": false,
+   "vocab_size": 256000
+ }
checkpoint-1000/generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 2,
+   "eos_token_id": 1,
+   "pad_token_id": 0,
+   "transformers_version": "4.39.0.dev0"
+ }
checkpoint-1000/model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0368dc4194ee2fdbe49909e4846ccc81d0b9fb3d1a1fd9c9d3b26a72c7895653
+ size 4911635192
checkpoint-1000/model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f92fd384fe15130d9e13c2dc3394517b4fd642d7af96d5e734699919ff39a365
+ size 4978830584
checkpoint-1000/model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:585b62412105f82990495f02d7c75c28b5e3c5777ce25a5021ffb17ee4c151e5
+ size 134242760
checkpoint-1000/model.safetensors.index.json ADDED
@@ -0,0 +1,171 @@
+ {
+   "metadata": {
+     "total_size": 10024689664
+   },
+   "weight_map": {
+     "model.embed_tokens.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.17.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.6.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.6.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.7.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.7.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.7.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.7.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.7.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.7.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.7.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.8.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.8.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.8.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.8.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.8.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.9.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.9.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.9.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.norm.weight": "model-00003-of-00003.safetensors"
+   }
+ }
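The `metadata.total_size` above can be cross-checked against the config: with `torch_dtype: float32` each parameter takes 4 bytes, and recomputing the parameter count from `checkpoint-1000/config.json` reproduces the byte count exactly. A sketch of that arithmetic; it assumes tied input/output embeddings, which matches the absence of any `lm_head` entry in the weight map:

```python
# Values copied from checkpoint-1000/config.json above.
vocab_size, hidden, inter, n_layers = 256000, 2048, 16384, 18
n_heads, n_kv_heads, head_dim = 8, 1, 256

embed = vocab_size * hidden                  # tied embedding, no separate lm_head
attn = (2 * hidden * n_heads * head_dim      # q_proj and o_proj
        + 2 * hidden * n_kv_heads * head_dim)  # k_proj and v_proj (1 KV head)
mlp = 3 * hidden * inter                     # gate, up, down projections
norms = 2 * hidden                           # two RMSNorms per layer

params = embed + n_layers * (attn + mlp + norms) + hidden  # + final model.norm
print(params)      # 2,506,172,416 parameters
print(params * 4)  # 10,024,689,664 bytes == metadata.total_size
```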
checkpoint-1000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7015da0998d2376a75ff258403df7cc2e7ccd73d5263d78ab3e8265f255037cc
+ size 20049522541
checkpoint-1000/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1eafe3d5e0585dde8c5033613de99a5d4f23df4284a488f4007b3944580c0b97
+ size 17655
checkpoint-1000/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7e34eb456d2d003a2839f2daa9425e99bdd79ed7e24a1de9fc7d5738476bfb4b
+ size 17655
checkpoint-1000/rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b374af4a2765d8771cee7a72921d3c2e438b9bee34f0b2d098ce6071afeb65e4
+ size 17655
checkpoint-1000/rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5df75d8477fcc69c7abb03025313915ebfe3ac18c54a7c57aaa455c0099e13e5
+ size 17655
checkpoint-1000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:20cd9ebcbc893aaeed7a3e32a2a0eeb344f999c91a3ede9cd26c28f6dd8957bb
+ size 627
checkpoint-1000/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<bos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<eos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
checkpoint-1000/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:61a7b147390c64585d6c3543dd6fc636906c9af3865a5548f27f31aee1d4c8e2
+ size 4241003
checkpoint-1000/tokenizer_config.json ADDED
@@ -0,0 +1,51 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<eos>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "<bos>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<bos>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<eos>",
+   "legacy": null,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<pad>",
+   "padding_side": "right",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "split_special_tokens": false,
+   "tokenizer_class": "GemmaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
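Per this config, the tokenizer prepends `<bos>` (id 2) but does not append `<eos>` (id 1). A small sketch of the expected behavior, assuming the stock `google/gemma-2b` tokenizer these files were saved from:

```python
from transformers import AutoTokenizer

# This repo ships the same tokenizer files as the base model.
tok = AutoTokenizer.from_pretrained("google/gemma-2b")
ids = tok("namaste").input_ids
print(ids[0] == tok.bos_token_id)   # True:  add_bos_token is true
print(ids[-1] == tok.eos_token_id)  # False: add_eos_token is false
```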
checkpoint-1000/trainer_state.json ADDED
@@ -0,0 +1,721 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.6310143555765894,
+   "eval_steps": 500,
+   "global_step": 1000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.01,
+       "grad_norm": 3.7190074920654297,
+       "learning_rate": 4.9995083170283816e-05,
+       "loss": 2.9245,
+       "step": 10
+     },
+     {
+       "epoch": 0.01,
+       "grad_norm": 3.431870222091675,
+       "learning_rate": 4.998033461515242e-05,
+       "loss": 2.0053,
+       "step": 20
+     },
+     {
+       "epoch": 0.02,
+       "grad_norm": 2.3315682411193848,
+       "learning_rate": 4.9955760135896534e-05,
+       "loss": 1.888,
+       "step": 30
+     },
+     {
+       "epoch": 0.03,
+       "grad_norm": 3.2937276363372803,
+       "learning_rate": 4.992136939879856e-05,
+       "loss": 1.8447,
+       "step": 40
+     },
+     {
+       "epoch": 0.03,
+       "grad_norm": 2.7375714778900146,
+       "learning_rate": 4.9877175931330346e-05,
+       "loss": 1.8212,
+       "step": 50
+     },
+     {
+       "epoch": 0.04,
+       "grad_norm": 2.15061092376709,
+       "learning_rate": 4.982319711683221e-05,
+       "loss": 1.793,
+       "step": 60
+     },
+     {
+       "epoch": 0.04,
+       "grad_norm": 2.0427424907684326,
+       "learning_rate": 4.975945418767529e-05,
+       "loss": 1.756,
+       "step": 70
+     },
+     {
+       "epoch": 0.05,
+       "grad_norm": 2.107785224914551,
+       "learning_rate": 4.968597221690986e-05,
+       "loss": 1.7285,
+       "step": 80
+     },
+     {
+       "epoch": 0.06,
+       "grad_norm": 2.100552558898926,
+       "learning_rate": 4.96027801084029e-05,
+       "loss": 1.7297,
+       "step": 90
+     },
+     {
+       "epoch": 0.06,
+       "grad_norm": 2.2227377891540527,
+       "learning_rate": 4.950991058546893e-05,
+       "loss": 1.7602,
+       "step": 100
+     },
+     {
+       "epoch": 0.07,
+       "grad_norm": 1.535144567489624,
+       "learning_rate": 4.940740017799833e-05,
+       "loss": 1.7433,
+       "step": 110
+     },
+     {
+       "epoch": 0.08,
+       "grad_norm": 1.6522979736328125,
+       "learning_rate": 4.929528920808854e-05,
+       "loss": 1.7363,
+       "step": 120
+     },
+     {
+       "epoch": 0.08,
+       "grad_norm": 2.8091869354248047,
+       "learning_rate": 4.917362177418342e-05,
+       "loss": 1.6872,
+       "step": 130
+     },
+     {
+       "epoch": 0.09,
+       "grad_norm": 2.1017510890960693,
+       "learning_rate": 4.904244573372733e-05,
+       "loss": 1.7084,
+       "step": 140
+     },
+     {
+       "epoch": 0.09,
+       "grad_norm": 1.6424258947372437,
+       "learning_rate": 4.8901812684340564e-05,
+       "loss": 1.6997,
+       "step": 150
+     },
+     {
+       "epoch": 0.1,
+       "grad_norm": 1.4547488689422607,
+       "learning_rate": 4.8751777943523634e-05,
+       "loss": 1.6747,
+       "step": 160
+     },
+     {
+       "epoch": 0.11,
+       "grad_norm": 1.6251146793365479,
+       "learning_rate": 4.8592400526898314e-05,
+       "loss": 1.6836,
+       "step": 170
+     },
+     {
+       "epoch": 0.11,
+       "grad_norm": 2.098386526107788,
+       "learning_rate": 4.842374312499405e-05,
+       "loss": 1.6552,
+       "step": 180
+     },
+     {
+       "epoch": 0.12,
+       "grad_norm": 2.2387640476226807,
+       "learning_rate": 4.824587207858888e-05,
+       "loss": 1.6489,
+       "step": 190
+     },
+     {
+       "epoch": 0.13,
+       "grad_norm": 1.7299611568450928,
+       "learning_rate": 4.805885735261454e-05,
+       "loss": 1.6576,
+       "step": 200
+     },
+     {
+       "epoch": 0.13,
+       "grad_norm": 1.5701665878295898,
+       "learning_rate": 4.786277250863599e-05,
+       "loss": 1.6533,
+       "step": 210
+     },
+     {
+       "epoch": 0.14,
+       "grad_norm": 2.417296886444092,
+       "learning_rate": 4.765769467591625e-05,
+       "loss": 1.6356,
+       "step": 220
+     },
+     {
+       "epoch": 0.15,
+       "grad_norm": 1.2636029720306396,
+       "learning_rate": 4.744370452107789e-05,
+       "loss": 1.6389,
+       "step": 230
+     },
+     {
+       "epoch": 0.15,
+       "grad_norm": 1.576324224472046,
+       "learning_rate": 4.722088621637309e-05,
+       "loss": 1.6546,
+       "step": 240
+     },
+     {
+       "epoch": 0.16,
+       "grad_norm": 1.9720542430877686,
+       "learning_rate": 4.698932740657479e-05,
+       "loss": 1.6354,
+       "step": 250
+     },
+     {
+       "epoch": 0.16,
+       "grad_norm": 1.5250279903411865,
+       "learning_rate": 4.6749119174501975e-05,
+       "loss": 1.6342,
+       "step": 260
+     },
+     {
+       "epoch": 0.17,
+       "grad_norm": 2.4737966060638428,
+       "learning_rate": 4.6500356005192514e-05,
+       "loss": 1.6407,
+       "step": 270
+     },
+     {
+       "epoch": 0.18,
+       "grad_norm": 1.2792372703552246,
+       "learning_rate": 4.6243135748737864e-05,
+       "loss": 1.6339,
+       "step": 280
+     },
+     {
+       "epoch": 0.18,
+       "grad_norm": 1.5593037605285645,
+       "learning_rate": 4.597755958179406e-05,
+       "loss": 1.6095,
+       "step": 290
+     },
+     {
+       "epoch": 0.19,
+       "grad_norm": 1.3141404390335083,
+       "learning_rate": 4.570373196778427e-05,
+       "loss": 1.6036,
+       "step": 300
+     },
+     {
+       "epoch": 0.2,
+       "grad_norm": 1.2617065906524658,
+       "learning_rate": 4.5421760615808474e-05,
+       "loss": 1.6244,
+       "step": 310
+     },
+     {
+       "epoch": 0.2,
+       "grad_norm": 1.64117431640625,
+       "learning_rate": 4.513175643827647e-05,
+       "loss": 1.6449,
+       "step": 320
+     },
+     {
+       "epoch": 0.21,
+       "grad_norm": 1.7132749557495117,
+       "learning_rate": 4.4833833507280884e-05,
+       "loss": 1.5948,
+       "step": 330
+     },
+     {
+       "epoch": 0.21,
+       "grad_norm": 2.1323654651641846,
+       "learning_rate": 4.4528109009727336e-05,
+       "loss": 1.627,
+       "step": 340
+     },
+     {
+       "epoch": 0.22,
+       "grad_norm": 2.253115653991699,
+       "learning_rate": 4.42147032012394e-05,
+       "loss": 1.6151,
+       "step": 350
+     },
+     {
+       "epoch": 0.23,
+       "grad_norm": 1.6143097877502441,
+       "learning_rate": 4.389373935885646e-05,
+       "loss": 1.5838,
+       "step": 360
+     },
+     {
+       "epoch": 0.23,
+       "grad_norm": 1.3353707790374756,
+       "learning_rate": 4.356534373254316e-05,
+       "loss": 1.5935,
+       "step": 370
+     },
+     {
+       "epoch": 0.24,
+       "grad_norm": 1.283742904663086,
+       "learning_rate": 4.322964549552943e-05,
+       "loss": 1.6015,
+       "step": 380
+     },
+     {
+       "epoch": 0.25,
+       "grad_norm": 1.437249779701233,
+       "learning_rate": 4.288677669350066e-05,
+       "loss": 1.577,
+       "step": 390
+     },
+     {
+       "epoch": 0.25,
+       "grad_norm": 1.5190638303756714,
+       "learning_rate": 4.2536872192658036e-05,
+       "loss": 1.5843,
+       "step": 400
+     },
+     {
+       "epoch": 0.26,
+       "grad_norm": 2.1320886611938477,
+       "learning_rate": 4.218006962666934e-05,
+       "loss": 1.6145,
+       "step": 410
+     },
+     {
+       "epoch": 0.27,
+       "grad_norm": 1.0696591138839722,
+       "learning_rate": 4.181650934253132e-05,
+       "loss": 1.5601,
+       "step": 420
+     },
+     {
+       "epoch": 0.27,
+       "grad_norm": 1.3149545192718506,
+       "learning_rate": 4.144633434536467e-05,
+       "loss": 1.5664,
+       "step": 430
+     },
+     {
+       "epoch": 0.28,
+       "grad_norm": 1.3661577701568604,
+       "learning_rate": 4.1069690242163484e-05,
+       "loss": 1.6002,
+       "step": 440
+     },
+     {
+       "epoch": 0.28,
+       "grad_norm": 1.6984481811523438,
+       "learning_rate": 4.06867251845213e-05,
+       "loss": 1.576,
+       "step": 450
+     },
+     {
+       "epoch": 0.29,
+       "grad_norm": 1.2728784084320068,
+       "learning_rate": 4.0297589810356165e-05,
+       "loss": 1.5448,
+       "step": 460
+     },
+     {
+       "epoch": 0.3,
+       "grad_norm": 1.4147616624832153,
+       "learning_rate": 3.9902437184657784e-05,
+       "loss": 1.5595,
+       "step": 470
+     },
+     {
+       "epoch": 0.3,
+       "grad_norm": 1.2289011478424072,
+       "learning_rate": 3.9501422739279956e-05,
+       "loss": 1.5628,
+       "step": 480
+     },
+     {
+       "epoch": 0.31,
+       "grad_norm": 1.5690233707427979,
+       "learning_rate": 3.909470421180201e-05,
+       "loss": 1.5731,
+       "step": 490
+     },
+     {
+       "epoch": 0.32,
+       "grad_norm": 1.4935098886489868,
+       "learning_rate": 3.8682441583483314e-05,
+       "loss": 1.545,
+       "step": 500
+     },
+     {
+       "epoch": 0.32,
+       "grad_norm": 1.2939772605895996,
+       "learning_rate": 3.8264797016335205e-05,
+       "loss": 1.5793,
+       "step": 510
+     },
+     {
+       "epoch": 0.33,
+       "grad_norm": 1.2150651216506958,
+       "learning_rate": 3.7841934789335164e-05,
+       "loss": 1.5378,
+       "step": 520
+     },
+     {
+       "epoch": 0.33,
+       "grad_norm": 1.2153139114379883,
+       "learning_rate": 3.741402123380828e-05,
+       "loss": 1.5345,
+       "step": 530
+     },
+     {
+       "epoch": 0.34,
+       "grad_norm": 1.290591835975647,
+       "learning_rate": 3.6981224668001424e-05,
+       "loss": 1.5517,
+       "step": 540
+     },
+     {
+       "epoch": 0.35,
+       "grad_norm": 1.1924967765808105,
+       "learning_rate": 3.654371533087586e-05,
+       "loss": 1.5472,
+       "step": 550
+     },
+     {
+       "epoch": 0.35,
+       "grad_norm": 1.6345056295394897,
+       "learning_rate": 3.610166531514436e-05,
+       "loss": 1.5564,
+       "step": 560
+     },
+     {
+       "epoch": 0.36,
+       "grad_norm": 2.185119867324829,
+       "learning_rate": 3.565524849957921e-05,
+       "loss": 1.5574,
+       "step": 570
+     },
+     {
+       "epoch": 0.37,
+       "grad_norm": 1.3646321296691895,
+       "learning_rate": 3.520464048061758e-05,
+       "loss": 1.5584,
+       "step": 580
+     },
+     {
+       "epoch": 0.37,
+       "grad_norm": 1.2333228588104248,
+       "learning_rate": 3.47500185032913e-05,
+       "loss": 1.518,
+       "step": 590
+     },
+     {
+       "epoch": 0.38,
+       "grad_norm": 1.3945318460464478,
+       "learning_rate": 3.4291561391508185e-05,
+       "loss": 1.5339,
+       "step": 600
+     },
+     {
+       "epoch": 0.38,
+       "grad_norm": 1.304306149482727,
+       "learning_rate": 3.3829449477712324e-05,
+       "loss": 1.5339,
+       "step": 610
+     },
+     {
+       "epoch": 0.39,
+       "grad_norm": 1.6393932104110718,
+       "learning_rate": 3.336386453195088e-05,
+       "loss": 1.5399,
+       "step": 620
+     },
+     {
+       "epoch": 0.4,
+       "grad_norm": 1.2000635862350464,
+       "learning_rate": 3.2894989690375626e-05,
+       "loss": 1.5233,
+       "step": 630
+     },
+     {
+       "epoch": 0.4,
+       "grad_norm": 1.1479601860046387,
+       "learning_rate": 3.2423009383206876e-05,
+       "loss": 1.538,
+       "step": 640
+     },
+     {
+       "epoch": 0.41,
+       "grad_norm": 1.1483389139175415,
+       "learning_rate": 3.194810926218861e-05,
+       "loss": 1.528,
+       "step": 650
+     },
+     {
+       "epoch": 0.42,
+       "grad_norm": 1.2403253316879272,
+       "learning_rate": 3.147047612756302e-05,
+       "loss": 1.5307,
+       "step": 660
+     },
+     {
+       "epoch": 0.42,
+       "grad_norm": 1.3997712135314941,
+       "learning_rate": 3.099029785459328e-05,
+       "loss": 1.4915,
+       "step": 670
+     },
+     {
+       "epoch": 0.43,
+       "grad_norm": 1.2010352611541748,
+       "learning_rate": 3.0507763319663517e-05,
+       "loss": 1.5268,
+       "step": 680
+     },
+     {
+       "epoch": 0.44,
+       "grad_norm": 1.0670932531356812,
+       "learning_rate": 3.002306232598497e-05,
+       "loss": 1.5273,
+       "step": 690
+     },
+     {
+       "epoch": 0.44,
+       "grad_norm": 1.2283655405044556,
+       "learning_rate": 2.9536385528937567e-05,
+       "loss": 1.5273,
+       "step": 700
+     },
+     {
+       "epoch": 0.45,
+       "grad_norm": 1.1306476593017578,
+       "learning_rate": 2.9047924361076345e-05,
+       "loss": 1.5072,
+       "step": 710
+     },
+     {
+       "epoch": 0.45,
+       "grad_norm": 1.1699943542480469,
+       "learning_rate": 2.8557870956832132e-05,
+       "loss": 1.4856,
+       "step": 720
+     },
+     {
+       "epoch": 0.46,
+       "grad_norm": 1.2550854682922363,
+       "learning_rate": 2.8066418076936167e-05,
+       "loss": 1.4983,
+       "step": 730
+     },
+     {
+       "epoch": 0.47,
+       "grad_norm": 1.0610970258712769,
+       "learning_rate": 2.7573759032598366e-05,
+       "loss": 1.5518,
+       "step": 740
+     },
+     {
+       "epoch": 0.47,
+       "grad_norm": 1.1754754781723022,
+       "learning_rate": 2.7080087609469062e-05,
+       "loss": 1.4998,
+       "step": 750
+     },
+     {
+       "epoch": 0.48,
+       "grad_norm": 1.1955766677856445,
+       "learning_rate": 2.6585597991414114e-05,
+       "loss": 1.5109,
+       "step": 760
+     },
+     {
+       "epoch": 0.49,
+       "grad_norm": 1.0891656875610352,
+       "learning_rate": 2.6090484684133404e-05,
+       "loss": 1.5007,
+       "step": 770
+     },
+     {
+       "epoch": 0.49,
+       "grad_norm": 1.0880335569381714,
+       "learning_rate": 2.5594942438652688e-05,
+       "loss": 1.5049,
+       "step": 780
+     },
+     {
+       "epoch": 0.5,
+       "grad_norm": 1.345954418182373,
+       "learning_rate": 2.509916617471903e-05,
+       "loss": 1.5154,
+       "step": 790
+     },
+     {
+       "epoch": 0.5,
+       "grad_norm": 1.1668224334716797,
+       "learning_rate": 2.46033509041298e-05,
+       "loss": 1.4883,
+       "step": 800
+     },
+     {
+       "epoch": 0.51,
+       "grad_norm": 1.055127501487732,
+       "learning_rate": 2.410769165402549e-05,
+       "loss": 1.5053,
+       "step": 810
+     },
+     {
+       "epoch": 0.52,
+       "grad_norm": 1.0528500080108643,
+       "learning_rate": 2.3612383390176503e-05,
+       "loss": 1.4871,
+       "step": 820
+     },
+     {
+       "epoch": 0.52,
+       "grad_norm": 1.328258991241455,
+       "learning_rate": 2.3117620940294048e-05,
+       "loss": 1.5037,
+       "step": 830
+     },
+     {
+       "epoch": 0.53,
+       "grad_norm": 1.0326772928237915,
+       "learning_rate": 2.2623598917395438e-05,
+       "loss": 1.4525,
+       "step": 840
+     },
+     {
+       "epoch": 0.54,
+       "grad_norm": 3.057058811187744,
+       "learning_rate": 2.213051164325366e-05,
+       "loss": 1.4898,
+       "step": 850
+     },
+     {
+       "epoch": 0.54,
+       "grad_norm": 1.1190940141677856,
+       "learning_rate": 2.1638553071961708e-05,
+       "loss": 1.488,
+       "step": 860
+     },
+     {
+       "epoch": 0.55,
+       "grad_norm": 1.1501041650772095,
+       "learning_rate": 2.1147916713641367e-05,
+       "loss": 1.4711,
+       "step": 870
+     },
+     {
+       "epoch": 0.56,
+       "grad_norm": 1.090022325515747,
+       "learning_rate": 2.0658795558326743e-05,
+       "loss": 1.488,
+       "step": 880
+     },
+     {
+       "epoch": 0.56,
+       "grad_norm": 1.0642565488815308,
+       "learning_rate": 2.017138200005236e-05,
+       "loss": 1.4791,
+       "step": 890
+     },
+     {
+       "epoch": 0.57,
+       "grad_norm": 1.3562296628952026,
+       "learning_rate": 1.9685867761175584e-05,
+       "loss": 1.4956,
+       "step": 900
+     },
+     {
+       "epoch": 0.57,
+       "grad_norm": 1.2069261074066162,
+       "learning_rate": 1.9202443816963425e-05,
+       "loss": 1.4918,
+       "step": 910
+     },
+     {
+       "epoch": 0.58,
+       "grad_norm": 1.3227437734603882,
+       "learning_rate": 1.872130032047302e-05,
+       "loss": 1.4577,
+       "step": 920
+     },
+     {
+       "epoch": 0.59,
+       "grad_norm": 1.0784181356430054,
+       "learning_rate": 1.824262652775568e-05,
+       "loss": 1.4888,
+       "step": 930
+     },
+     {
+       "epoch": 0.59,
+       "grad_norm": 1.000135898590088,
+       "learning_rate": 1.7766610723413684e-05,
+       "loss": 1.4673,
+       "step": 940
+     },
+     {
+       "epoch": 0.6,
+       "grad_norm": 1.136026382446289,
+       "learning_rate": 1.7293440146539196e-05,
+       "loss": 1.4779,
+       "step": 950
+     },
+     {
+       "epoch": 0.61,
+       "grad_norm": 1.123252272605896,
+       "learning_rate": 1.682330091706446e-05,
+       "loss": 1.4583,
+       "step": 960
+     },
+     {
+       "epoch": 0.61,
+       "grad_norm": 1.0559343099594116,
+       "learning_rate": 1.6356377962552238e-05,
+       "loss": 1.4471,
+       "step": 970
+     },
+     {
+       "epoch": 0.62,
+       "grad_norm": 1.0266658067703247,
+       "learning_rate": 1.589285494545514e-05,
+       "loss": 1.4632,
+       "step": 980
+     },
+     {
+       "epoch": 0.62,
+       "grad_norm": 1.1371444463729858,
+       "learning_rate": 1.5432914190872757e-05,
+       "loss": 1.4732,
+       "step": 990
+     },
+     {
+       "epoch": 0.63,
+       "grad_norm": 1.1203784942626953,
+       "learning_rate": 1.4976736614834664e-05,
+       "loss": 1.452,
+       "step": 1000
+     }
+   ],
+   "logging_steps": 10,
+   "max_steps": 1584,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 1000,
+   "total_flos": 7.003073868034212e+17,
+   "train_batch_size": 4,
+   "trial_name": null,
+   "trial_params": null
+ }
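The logged learning rates follow transformers' cosine schedule with no warmup, lr(step) = 0.5 · lr0 · (1 + cos(π · step / max_steps)), with lr0 = 5e-05 and max_steps = 1584 from the card. A quick check against three logged values:

```python
import math

# Cosine decay with no warmup, as configured in the model card.
lr0, max_steps = 5e-05, 1584

for step in (10, 500, 1000):
    lr = 0.5 * lr0 * (1 + math.cos(math.pi * step / max_steps))
    print(step, lr)
# 10   -> ~4.99951e-05 (logged 4.9995083170283816e-05)
# 500  -> ~3.86824e-05 (logged 3.8682441583483314e-05)
# 1000 -> ~1.49767e-05 (logged 1.4976736614834664e-05)
```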
checkpoint-1000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f5f270ab83bb6b8b6198abab8938484e33528253ff0e05545853f6357e16e105
+ size 4603
config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "_name_or_path": "google/gemma-2b",
+   "architectures": [
+     "GemmaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 2,
+   "eos_token_id": 1,
+   "head_dim": 256,
+   "hidden_act": "gelu",
+   "hidden_size": 2048,
+   "initializer_range": 0.02,
+   "intermediate_size": 16384,
+   "max_position_embeddings": 8192,
+   "model_type": "gemma",
+   "num_attention_heads": 8,
+   "num_hidden_layers": 18,
+   "num_key_value_heads": 1,
+   "pad_token_id": 0,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "torch_dtype": "float32",
+   "transformers_version": "4.39.0.dev0",
+   "use_cache": false,
+   "vocab_size": 256000
+ }
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 2,
+   "eos_token_id": 1,
+   "pad_token_id": 0,
+   "transformers_version": "4.39.0.dev0"
+ }
model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5828c23c231714f0c3a5adfbeaa34e33ca63a3fa9b43cec046bb63f6105b8070
+ size 4911635192
model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c18d66a22c4354915b02d45a604f4985c6dee807860bf3a62648f1d90989741
+ size 4978830584
model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:24ea177b26459a76af2bc31a814a448e76fa4b497980a84cf3a9e333378a07c0
+ size 134242760
model.safetensors.index.json ADDED
@@ -0,0 +1,171 @@
+ {
+   "metadata": {
+     "total_size": 10024689664
+   },
+   "weight_map": {
+     "model.embed_tokens.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.17.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.6.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.6.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.7.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.7.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.7.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.7.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.7.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.7.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.7.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.8.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.8.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.8.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.8.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.8.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.9.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.9.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.9.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.norm.weight": "model-00003-of-00003.safetensors"
+   }
+ }
runs/Feb22_05-59-21_e8792c30e3aa/events.out.tfevents.1708582264.e8792c30e3aa.257.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8179b2643fcee37e93feebb21bf0d7dfa8569a8cca50d5cf3a4eaeb9a81016e2
+ size 38398
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<bos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<eos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:61a7b147390c64585d6c3543dd6fc636906c9af3865a5548f27f31aee1d4c8e2
+ size 4241003
tokenizer_config.json ADDED
@@ -0,0 +1,51 @@
1
+ {
2
+ "add_bos_token": true,
3
+ "add_eos_token": false,
4
+ "added_tokens_decoder": {
5
+ "0": {
6
+ "content": "<pad>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "1": {
14
+ "content": "<eos>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "2": {
22
+ "content": "<bos>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "3": {
30
+ "content": "<unk>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ }
37
+ },
38
+ "bos_token": "<bos>",
39
+ "clean_up_tokenization_spaces": false,
40
+ "eos_token": "<eos>",
41
+ "legacy": null,
42
+ "model_max_length": 1000000000000000019884624838656,
43
+ "pad_token": "<pad>",
44
+ "padding_side": "right",
45
+ "sp_model_kwargs": {},
46
+ "spaces_between_special_tokens": false,
47
+ "split_special_tokens": false,
48
+ "tokenizer_class": "GemmaTokenizer",
49
+ "unk_token": "<unk>",
50
+ "use_default_system_prompt": false
51
+ }
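With add_bos_token true and add_eos_token false, every encoded sequence is prepended with <bos> (id 2) and gets no trailing <eos>, the usual setup for causal-LM fine-tuning where EOS is appended by the training pipeline instead. A quick check, under the same local-path assumption as above:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./final_trained")
ids = tok("नमस्ते").input_ids
print(ids[0] == tok.bos_token_id)   # True: BOS is prepended
print(ids[-1] == tok.eos_token_id)  # False: no EOS is appended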
train_results.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "epoch": 1.0,
+ "train_loss": 1.5383612829627413,
+ "train_runtime": 4681.1872,
+ "train_samples_per_second": 21.666,
+ "train_steps_per_second": 0.338
+ }
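Consistency check for the numbers above: 1584 optimizer steps over 4681.1872 s gives 1584 / 4681.1872 ≈ 0.338 steps/s, and with the total train batch size of 64 that is 0.338 × 64 ≈ 21.7 samples/s, matching the reported throughput. It also implies roughly 1584 × 64 ≈ 101k samples seen, i.e. about one pass over the dataset (trainer_state.json below records the epoch as 0.9995).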
trainer_log.jsonl ADDED
@@ -0,0 +1,159 @@
+ {"current_steps": 10, "total_steps": 1584, "loss": 2.9245, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9995083170283816e-05, "epoch": 0.01, "percentage": 0.63, "elapsed_time": "0:00:31", "remaining_time": "1:22:22"}
+ {"current_steps": 20, "total_steps": 1584, "loss": 2.0053, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.998033461515242e-05, "epoch": 0.01, "percentage": 1.26, "elapsed_time": "0:01:00", "remaining_time": "1:18:25"}
+ {"current_steps": 30, "total_steps": 1584, "loss": 1.888, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9955760135896534e-05, "epoch": 0.02, "percentage": 1.89, "elapsed_time": "0:01:29", "remaining_time": "1:16:52"}
+ {"current_steps": 40, "total_steps": 1584, "loss": 1.8447, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.992136939879856e-05, "epoch": 0.03, "percentage": 2.53, "elapsed_time": "0:01:58", "remaining_time": "1:16:07"}
+ {"current_steps": 50, "total_steps": 1584, "loss": 1.8212, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9877175931330346e-05, "epoch": 0.03, "percentage": 3.16, "elapsed_time": "0:02:28", "remaining_time": "1:15:43"}
+ {"current_steps": 60, "total_steps": 1584, "loss": 1.793, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.982319711683221e-05, "epoch": 0.04, "percentage": 3.79, "elapsed_time": "0:02:57", "remaining_time": "1:15:10"}
+ {"current_steps": 70, "total_steps": 1584, "loss": 1.756, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.975945418767529e-05, "epoch": 0.04, "percentage": 4.42, "elapsed_time": "0:03:27", "remaining_time": "1:14:37"}
+ {"current_steps": 80, "total_steps": 1584, "loss": 1.7285, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.968597221690986e-05, "epoch": 0.05, "percentage": 5.05, "elapsed_time": "0:03:56", "remaining_time": "1:14:01"}
+ {"current_steps": 90, "total_steps": 1584, "loss": 1.7297, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.96027801084029e-05, "epoch": 0.06, "percentage": 5.68, "elapsed_time": "0:04:25", "remaining_time": "1:13:25"}
+ {"current_steps": 100, "total_steps": 1584, "loss": 1.7602, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.950991058546893e-05, "epoch": 0.06, "percentage": 6.31, "elapsed_time": "0:04:55", "remaining_time": "1:12:57"}
+ {"current_steps": 110, "total_steps": 1584, "loss": 1.7433, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.940740017799833e-05, "epoch": 0.07, "percentage": 6.94, "elapsed_time": "0:05:24", "remaining_time": "1:12:26"}
+ {"current_steps": 120, "total_steps": 1584, "loss": 1.7363, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.929528920808854e-05, "epoch": 0.08, "percentage": 7.58, "elapsed_time": "0:05:53", "remaining_time": "1:11:54"}
+ {"current_steps": 130, "total_steps": 1584, "loss": 1.6872, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.917362177418342e-05, "epoch": 0.08, "percentage": 8.21, "elapsed_time": "0:06:22", "remaining_time": "1:11:21"}
+ {"current_steps": 140, "total_steps": 1584, "loss": 1.7084, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.904244573372733e-05, "epoch": 0.09, "percentage": 8.84, "elapsed_time": "0:06:52", "remaining_time": "1:10:53"}
+ {"current_steps": 150, "total_steps": 1584, "loss": 1.6997, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.8901812684340564e-05, "epoch": 0.09, "percentage": 9.47, "elapsed_time": "0:07:21", "remaining_time": "1:10:22"}
+ {"current_steps": 160, "total_steps": 1584, "loss": 1.6747, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.8751777943523634e-05, "epoch": 0.1, "percentage": 10.1, "elapsed_time": "0:07:50", "remaining_time": "1:09:50"}
+ {"current_steps": 170, "total_steps": 1584, "loss": 1.6836, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.8592400526898314e-05, "epoch": 0.11, "percentage": 10.73, "elapsed_time": "0:08:20", "remaining_time": "1:09:22"}
+ {"current_steps": 180, "total_steps": 1584, "loss": 1.6552, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.842374312499405e-05, "epoch": 0.11, "percentage": 11.36, "elapsed_time": "0:08:49", "remaining_time": "1:08:51"}
+ {"current_steps": 190, "total_steps": 1584, "loss": 1.6489, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.824587207858888e-05, "epoch": 0.12, "percentage": 11.99, "elapsed_time": "0:09:18", "remaining_time": "1:08:20"}
+ {"current_steps": 200, "total_steps": 1584, "loss": 1.6576, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.805885735261454e-05, "epoch": 0.13, "percentage": 12.63, "elapsed_time": "0:09:48", "remaining_time": "1:07:51"}
+ {"current_steps": 210, "total_steps": 1584, "loss": 1.6533, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.786277250863599e-05, "epoch": 0.13, "percentage": 13.26, "elapsed_time": "0:10:17", "remaining_time": "1:07:21"}
+ {"current_steps": 220, "total_steps": 1584, "loss": 1.6356, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.765769467591625e-05, "epoch": 0.14, "percentage": 13.89, "elapsed_time": "0:10:46", "remaining_time": "1:06:50"}
+ {"current_steps": 230, "total_steps": 1584, "loss": 1.6389, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.744370452107789e-05, "epoch": 0.15, "percentage": 14.52, "elapsed_time": "0:11:16", "remaining_time": "1:06:20"}
+ {"current_steps": 240, "total_steps": 1584, "loss": 1.6546, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.722088621637309e-05, "epoch": 0.15, "percentage": 15.15, "elapsed_time": "0:11:45", "remaining_time": "1:05:50"}
+ {"current_steps": 250, "total_steps": 1584, "loss": 1.6354, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.698932740657479e-05, "epoch": 0.16, "percentage": 15.78, "elapsed_time": "0:12:14", "remaining_time": "1:05:20"}
+ {"current_steps": 260, "total_steps": 1584, "loss": 1.6342, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.6749119174501975e-05, "epoch": 0.16, "percentage": 16.41, "elapsed_time": "0:12:43", "remaining_time": "1:04:49"}
+ {"current_steps": 270, "total_steps": 1584, "loss": 1.6407, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.6500356005192514e-05, "epoch": 0.17, "percentage": 17.05, "elapsed_time": "0:13:13", "remaining_time": "1:04:19"}
+ {"current_steps": 280, "total_steps": 1584, "loss": 1.6339, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.6243135748737864e-05, "epoch": 0.18, "percentage": 17.68, "elapsed_time": "0:13:42", "remaining_time": "1:03:50"}
+ {"current_steps": 290, "total_steps": 1584, "loss": 1.6095, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.597755958179406e-05, "epoch": 0.18, "percentage": 18.31, "elapsed_time": "0:14:11", "remaining_time": "1:03:19"}
+ {"current_steps": 300, "total_steps": 1584, "loss": 1.6036, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.570373196778427e-05, "epoch": 0.19, "percentage": 18.94, "elapsed_time": "0:14:40", "remaining_time": "1:02:48"}
+ {"current_steps": 310, "total_steps": 1584, "loss": 1.6244, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.5421760615808474e-05, "epoch": 0.2, "percentage": 19.57, "elapsed_time": "0:15:09", "remaining_time": "1:02:17"}
+ {"current_steps": 320, "total_steps": 1584, "loss": 1.6449, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.513175643827647e-05, "epoch": 0.2, "percentage": 20.2, "elapsed_time": "0:15:38", "remaining_time": "1:01:47"}
+ {"current_steps": 330, "total_steps": 1584, "loss": 1.5948, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.4833833507280884e-05, "epoch": 0.21, "percentage": 20.83, "elapsed_time": "0:16:07", "remaining_time": "1:01:18"}
+ {"current_steps": 340, "total_steps": 1584, "loss": 1.627, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.4528109009727336e-05, "epoch": 0.21, "percentage": 21.46, "elapsed_time": "0:16:37", "remaining_time": "1:00:48"}
+ {"current_steps": 350, "total_steps": 1584, "loss": 1.6151, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.42147032012394e-05, "epoch": 0.22, "percentage": 22.1, "elapsed_time": "0:17:06", "remaining_time": "1:00:20"}
+ {"current_steps": 360, "total_steps": 1584, "loss": 1.5838, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.389373935885646e-05, "epoch": 0.23, "percentage": 22.73, "elapsed_time": "0:17:36", "remaining_time": "0:59:51"}
+ {"current_steps": 370, "total_steps": 1584, "loss": 1.5935, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.356534373254316e-05, "epoch": 0.23, "percentage": 23.36, "elapsed_time": "0:18:05", "remaining_time": "0:59:20"}
+ {"current_steps": 380, "total_steps": 1584, "loss": 1.6015, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.322964549552943e-05, "epoch": 0.24, "percentage": 23.99, "elapsed_time": "0:18:34", "remaining_time": "0:58:52"}
+ {"current_steps": 390, "total_steps": 1584, "loss": 1.577, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.288677669350066e-05, "epoch": 0.25, "percentage": 24.62, "elapsed_time": "0:19:04", "remaining_time": "0:58:22"}
+ {"current_steps": 400, "total_steps": 1584, "loss": 1.5843, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.2536872192658036e-05, "epoch": 0.25, "percentage": 25.25, "elapsed_time": "0:19:33", "remaining_time": "0:57:53"}
+ {"current_steps": 410, "total_steps": 1584, "loss": 1.6145, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.218006962666934e-05, "epoch": 0.26, "percentage": 25.88, "elapsed_time": "0:20:02", "remaining_time": "0:57:23"}
+ {"current_steps": 420, "total_steps": 1584, "loss": 1.5601, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.181650934253132e-05, "epoch": 0.27, "percentage": 26.52, "elapsed_time": "0:20:32", "remaining_time": "0:56:54"}
+ {"current_steps": 430, "total_steps": 1584, "loss": 1.5664, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.144633434536467e-05, "epoch": 0.27, "percentage": 27.15, "elapsed_time": "0:21:00", "remaining_time": "0:56:24"}
+ {"current_steps": 440, "total_steps": 1584, "loss": 1.6002, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.1069690242163484e-05, "epoch": 0.28, "percentage": 27.78, "elapsed_time": "0:21:30", "remaining_time": "0:55:54"}
+ {"current_steps": 450, "total_steps": 1584, "loss": 1.576, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.06867251845213e-05, "epoch": 0.28, "percentage": 28.41, "elapsed_time": "0:21:59", "remaining_time": "0:55:24"}
+ {"current_steps": 460, "total_steps": 1584, "loss": 1.5448, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.0297589810356165e-05, "epoch": 0.29, "percentage": 29.04, "elapsed_time": "0:22:28", "remaining_time": "0:54:55"}
+ {"current_steps": 470, "total_steps": 1584, "loss": 1.5595, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.9902437184657784e-05, "epoch": 0.3, "percentage": 29.67, "elapsed_time": "0:22:58", "remaining_time": "0:54:26"}
+ {"current_steps": 480, "total_steps": 1584, "loss": 1.5628, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.9501422739279956e-05, "epoch": 0.3, "percentage": 30.3, "elapsed_time": "0:23:27", "remaining_time": "0:53:57"}
+ {"current_steps": 490, "total_steps": 1584, "loss": 1.5731, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.909470421180201e-05, "epoch": 0.31, "percentage": 30.93, "elapsed_time": "0:23:57", "remaining_time": "0:53:28"}
+ {"current_steps": 500, "total_steps": 1584, "loss": 1.545, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.8682441583483314e-05, "epoch": 0.32, "percentage": 31.57, "elapsed_time": "0:24:26", "remaining_time": "0:52:59"}
+ {"current_steps": 510, "total_steps": 1584, "loss": 1.5793, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.8264797016335205e-05, "epoch": 0.32, "percentage": 32.2, "elapsed_time": "0:24:55", "remaining_time": "0:52:30"}
+ {"current_steps": 520, "total_steps": 1584, "loss": 1.5378, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.7841934789335164e-05, "epoch": 0.33, "percentage": 32.83, "elapsed_time": "0:25:25", "remaining_time": "0:52:01"}
+ {"current_steps": 530, "total_steps": 1584, "loss": 1.5345, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.741402123380828e-05, "epoch": 0.33, "percentage": 33.46, "elapsed_time": "0:25:54", "remaining_time": "0:51:31"}
+ {"current_steps": 540, "total_steps": 1584, "loss": 1.5517, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.6981224668001424e-05, "epoch": 0.34, "percentage": 34.09, "elapsed_time": "0:26:23", "remaining_time": "0:51:02"}
+ {"current_steps": 550, "total_steps": 1584, "loss": 1.5472, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.654371533087586e-05, "epoch": 0.35, "percentage": 34.72, "elapsed_time": "0:26:53", "remaining_time": "0:50:33"}
+ {"current_steps": 560, "total_steps": 1584, "loss": 1.5564, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.610166531514436e-05, "epoch": 0.35, "percentage": 35.35, "elapsed_time": "0:27:22", "remaining_time": "0:50:03"}
+ {"current_steps": 570, "total_steps": 1584, "loss": 1.5574, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.565524849957921e-05, "epoch": 0.36, "percentage": 35.98, "elapsed_time": "0:27:51", "remaining_time": "0:49:33"}
+ {"current_steps": 580, "total_steps": 1584, "loss": 1.5584, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.520464048061758e-05, "epoch": 0.37, "percentage": 36.62, "elapsed_time": "0:28:20", "remaining_time": "0:49:03"}
+ {"current_steps": 590, "total_steps": 1584, "loss": 1.518, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.47500185032913e-05, "epoch": 0.37, "percentage": 37.25, "elapsed_time": "0:28:50", "remaining_time": "0:48:35"}
+ {"current_steps": 600, "total_steps": 1584, "loss": 1.5339, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.4291561391508185e-05, "epoch": 0.38, "percentage": 37.88, "elapsed_time": "0:29:19", "remaining_time": "0:48:05"}
+ {"current_steps": 610, "total_steps": 1584, "loss": 1.5339, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.3829449477712324e-05, "epoch": 0.38, "percentage": 38.51, "elapsed_time": "0:29:48", "remaining_time": "0:47:36"}
+ {"current_steps": 620, "total_steps": 1584, "loss": 1.5399, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.336386453195088e-05, "epoch": 0.39, "percentage": 39.14, "elapsed_time": "0:30:18", "remaining_time": "0:47:07"}
+ {"current_steps": 630, "total_steps": 1584, "loss": 1.5233, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.2894989690375626e-05, "epoch": 0.4, "percentage": 39.77, "elapsed_time": "0:30:47", "remaining_time": "0:46:38"}
+ {"current_steps": 640, "total_steps": 1584, "loss": 1.538, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.2423009383206876e-05, "epoch": 0.4, "percentage": 40.4, "elapsed_time": "0:31:17", "remaining_time": "0:46:08"}
+ {"current_steps": 650, "total_steps": 1584, "loss": 1.528, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.194810926218861e-05, "epoch": 0.41, "percentage": 41.04, "elapsed_time": "0:31:46", "remaining_time": "0:45:39"}
+ {"current_steps": 660, "total_steps": 1584, "loss": 1.5307, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.147047612756302e-05, "epoch": 0.42, "percentage": 41.67, "elapsed_time": "0:32:15", "remaining_time": "0:45:09"}
+ {"current_steps": 670, "total_steps": 1584, "loss": 1.4915, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.099029785459328e-05, "epoch": 0.42, "percentage": 42.3, "elapsed_time": "0:32:45", "remaining_time": "0:44:40"}
+ {"current_steps": 680, "total_steps": 1584, "loss": 1.5268, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.0507763319663517e-05, "epoch": 0.43, "percentage": 42.93, "elapsed_time": "0:33:14", "remaining_time": "0:44:11"}
+ {"current_steps": 690, "total_steps": 1584, "loss": 1.5273, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.002306232598497e-05, "epoch": 0.44, "percentage": 43.56, "elapsed_time": "0:33:43", "remaining_time": "0:43:42"}
+ {"current_steps": 700, "total_steps": 1584, "loss": 1.5273, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.9536385528937567e-05, "epoch": 0.44, "percentage": 44.19, "elapsed_time": "0:34:13", "remaining_time": "0:43:12"}
+ {"current_steps": 710, "total_steps": 1584, "loss": 1.5072, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.9047924361076345e-05, "epoch": 0.45, "percentage": 44.82, "elapsed_time": "0:34:42", "remaining_time": "0:42:43"}
+ {"current_steps": 720, "total_steps": 1584, "loss": 1.4856, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.8557870956832132e-05, "epoch": 0.45, "percentage": 45.45, "elapsed_time": "0:35:12", "remaining_time": "0:42:14"}
+ {"current_steps": 730, "total_steps": 1584, "loss": 1.4983, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.8066418076936167e-05, "epoch": 0.46, "percentage": 46.09, "elapsed_time": "0:35:41", "remaining_time": "0:41:44"}
+ {"current_steps": 740, "total_steps": 1584, "loss": 1.5518, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.7573759032598366e-05, "epoch": 0.47, "percentage": 46.72, "elapsed_time": "0:36:10", "remaining_time": "0:41:15"}
+ {"current_steps": 750, "total_steps": 1584, "loss": 1.4998, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.7080087609469062e-05, "epoch": 0.47, "percentage": 47.35, "elapsed_time": "0:36:39", "remaining_time": "0:40:46"}
+ {"current_steps": 760, "total_steps": 1584, "loss": 1.5109, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.6585597991414114e-05, "epoch": 0.48, "percentage": 47.98, "elapsed_time": "0:37:08", "remaining_time": "0:40:16"}
+ {"current_steps": 770, "total_steps": 1584, "loss": 1.5007, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.6090484684133404e-05, "epoch": 0.49, "percentage": 48.61, "elapsed_time": "0:37:38", "remaining_time": "0:39:47"}
+ {"current_steps": 780, "total_steps": 1584, "loss": 1.5049, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.5594942438652688e-05, "epoch": 0.49, "percentage": 49.24, "elapsed_time": "0:38:07", "remaining_time": "0:39:18"}
+ {"current_steps": 790, "total_steps": 1584, "loss": 1.5154, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.509916617471903e-05, "epoch": 0.5, "percentage": 49.87, "elapsed_time": "0:38:37", "remaining_time": "0:38:49"}
+ {"current_steps": 800, "total_steps": 1584, "loss": 1.4883, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.46033509041298e-05, "epoch": 0.5, "percentage": 50.51, "elapsed_time": "0:39:06", "remaining_time": "0:38:19"}
+ {"current_steps": 810, "total_steps": 1584, "loss": 1.5053, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.410769165402549e-05, "epoch": 0.51, "percentage": 51.14, "elapsed_time": "0:39:36", "remaining_time": "0:37:50"}
+ {"current_steps": 820, "total_steps": 1584, "loss": 1.4871, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.3612383390176503e-05, "epoch": 0.52, "percentage": 51.77, "elapsed_time": "0:40:05", "remaining_time": "0:37:21"}
+ {"current_steps": 830, "total_steps": 1584, "loss": 1.5037, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.3117620940294048e-05, "epoch": 0.52, "percentage": 52.4, "elapsed_time": "0:40:35", "remaining_time": "0:36:52"}
+ {"current_steps": 840, "total_steps": 1584, "loss": 1.4525, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.2623598917395438e-05, "epoch": 0.53, "percentage": 53.03, "elapsed_time": "0:41:04", "remaining_time": "0:36:22"}
+ {"current_steps": 850, "total_steps": 1584, "loss": 1.4898, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.213051164325366e-05, "epoch": 0.54, "percentage": 53.66, "elapsed_time": "0:41:33", "remaining_time": "0:35:53"}
+ {"current_steps": 860, "total_steps": 1584, "loss": 1.488, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.1638553071961708e-05, "epoch": 0.54, "percentage": 54.29, "elapsed_time": "0:42:03", "remaining_time": "0:35:24"}
+ {"current_steps": 870, "total_steps": 1584, "loss": 1.4711, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.1147916713641367e-05, "epoch": 0.55, "percentage": 54.92, "elapsed_time": "0:42:32", "remaining_time": "0:34:54"}
+ {"current_steps": 880, "total_steps": 1584, "loss": 1.488, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.0658795558326743e-05, "epoch": 0.56, "percentage": 55.56, "elapsed_time": "0:43:01", "remaining_time": "0:34:25"}
+ {"current_steps": 890, "total_steps": 1584, "loss": 1.4791, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.017138200005236e-05, "epoch": 0.56, "percentage": 56.19, "elapsed_time": "0:43:31", "remaining_time": "0:33:56"}
+ {"current_steps": 900, "total_steps": 1584, "loss": 1.4956, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.9685867761175584e-05, "epoch": 0.57, "percentage": 56.82, "elapsed_time": "0:44:00", "remaining_time": "0:33:26"}
+ {"current_steps": 910, "total_steps": 1584, "loss": 1.4918, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.9202443816963425e-05, "epoch": 0.57, "percentage": 57.45, "elapsed_time": "0:44:29", "remaining_time": "0:32:57"}
+ {"current_steps": 920, "total_steps": 1584, "loss": 1.4577, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.872130032047302e-05, "epoch": 0.58, "percentage": 58.08, "elapsed_time": "0:44:59", "remaining_time": "0:32:28"}
+ {"current_steps": 930, "total_steps": 1584, "loss": 1.4888, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.824262652775568e-05, "epoch": 0.59, "percentage": 58.71, "elapsed_time": "0:45:28", "remaining_time": "0:31:58"}
+ {"current_steps": 940, "total_steps": 1584, "loss": 1.4673, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.7766610723413684e-05, "epoch": 0.59, "percentage": 59.34, "elapsed_time": "0:45:57", "remaining_time": "0:31:29"}
+ {"current_steps": 950, "total_steps": 1584, "loss": 1.4779, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.7293440146539196e-05, "epoch": 0.6, "percentage": 59.97, "elapsed_time": "0:46:26", "remaining_time": "0:30:59"}
+ {"current_steps": 960, "total_steps": 1584, "loss": 1.4583, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.682330091706446e-05, "epoch": 0.61, "percentage": 60.61, "elapsed_time": "0:46:55", "remaining_time": "0:30:30"}
+ {"current_steps": 970, "total_steps": 1584, "loss": 1.4471, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.6356377962552238e-05, "epoch": 0.61, "percentage": 61.24, "elapsed_time": "0:47:25", "remaining_time": "0:30:01"}
+ {"current_steps": 980, "total_steps": 1584, "loss": 1.4632, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.589285494545514e-05, "epoch": 0.62, "percentage": 61.87, "elapsed_time": "0:47:54", "remaining_time": "0:29:31"}
+ {"current_steps": 990, "total_steps": 1584, "loss": 1.4732, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.5432914190872757e-05, "epoch": 0.62, "percentage": 62.5, "elapsed_time": "0:48:23", "remaining_time": "0:29:02"}
+ {"current_steps": 1000, "total_steps": 1584, "loss": 1.452, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.4976736614834664e-05, "epoch": 0.63, "percentage": 63.13, "elapsed_time": "0:48:52", "remaining_time": "0:28:32"}
+ {"current_steps": 1010, "total_steps": 1584, "loss": 1.461, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.4524501653137787e-05, "epoch": 0.64, "percentage": 63.76, "elapsed_time": "0:49:49", "remaining_time": "0:28:19"}
+ {"current_steps": 1020, "total_steps": 1584, "loss": 1.4538, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.4076387190766017e-05, "epoch": 0.64, "percentage": 64.39, "elapsed_time": "0:50:19", "remaining_time": "0:27:49"}
+ {"current_steps": 1030, "total_steps": 1584, "loss": 1.4681, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.363256949191972e-05, "epoch": 0.65, "percentage": 65.03, "elapsed_time": "0:50:49", "remaining_time": "0:27:20"}
+ {"current_steps": 1040, "total_steps": 1584, "loss": 1.4548, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.3193223130682936e-05, "epoch": 0.66, "percentage": 65.66, "elapsed_time": "0:51:19", "remaining_time": "0:26:50"}
+ {"current_steps": 1050, "total_steps": 1584, "loss": 1.4535, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.2758520922355226e-05, "epoch": 0.66, "percentage": 66.29, "elapsed_time": "0:51:49", "remaining_time": "0:26:21"}
+ {"current_steps": 1060, "total_steps": 1584, "loss": 1.4621, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.2328633855475429e-05, "epoch": 0.67, "percentage": 66.92, "elapsed_time": "0:52:18", "remaining_time": "0:25:51"}
+ {"current_steps": 1070, "total_steps": 1584, "loss": 1.4621, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.1903731024563966e-05, "epoch": 0.68, "percentage": 67.55, "elapsed_time": "0:52:47", "remaining_time": "0:25:21"}
+ {"current_steps": 1080, "total_steps": 1584, "loss": 1.4636, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.148397956361007e-05, "epoch": 0.68, "percentage": 68.18, "elapsed_time": "0:53:18", "remaining_time": "0:24:52"}
+ {"current_steps": 1090, "total_steps": 1584, "loss": 1.4495, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.106954458033026e-05, "epoch": 0.69, "percentage": 68.81, "elapsed_time": "0:53:47", "remaining_time": "0:24:22"}
+ {"current_steps": 1100, "total_steps": 1584, "loss": 1.4395, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.0660589091223855e-05, "epoch": 0.69, "percentage": 69.44, "elapsed_time": "0:54:17", "remaining_time": "0:23:53"}
+ {"current_steps": 1110, "total_steps": 1584, "loss": 1.4583, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.025727395745095e-05, "epoch": 0.7, "percentage": 70.08, "elapsed_time": "0:54:46", "remaining_time": "0:23:23"}
+ {"current_steps": 1120, "total_steps": 1584, "loss": 1.4606, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.859757821558337e-06, "epoch": 0.71, "percentage": 70.71, "elapsed_time": "0:55:16", "remaining_time": "0:22:53"}
+ {"current_steps": 1130, "total_steps": 1584, "loss": 1.454, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.468197045077976e-06, "epoch": 0.71, "percentage": 71.34, "elapsed_time": "0:55:45", "remaining_time": "0:22:24"}
+ {"current_steps": 1140, "total_steps": 1584, "loss": 1.4654, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.082745647022797e-06, "epoch": 0.72, "percentage": 71.97, "elapsed_time": "0:56:14", "remaining_time": "0:21:54"}
+ {"current_steps": 1150, "total_steps": 1584, "loss": 1.4526, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.703555243303835e-06, "epoch": 0.73, "percentage": 72.6, "elapsed_time": "0:56:44", "remaining_time": "0:21:24"}
+ {"current_steps": 1160, "total_steps": 1584, "loss": 1.448, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.330774987092712e-06, "epoch": 0.73, "percentage": 73.23, "elapsed_time": "0:57:13", "remaining_time": "0:20:54"}
+ {"current_steps": 1170, "total_steps": 1584, "loss": 1.4606, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.96455151015272e-06, "epoch": 0.74, "percentage": 73.86, "elapsed_time": "0:57:42", "remaining_time": "0:20:25"}
+ {"current_steps": 1180, "total_steps": 1584, "loss": 1.4661, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.605028865161809e-06, "epoch": 0.74, "percentage": 74.49, "elapsed_time": "0:58:11", "remaining_time": "0:19:55"}
+ {"current_steps": 1190, "total_steps": 1584, "loss": 1.4567, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.25234846904993e-06, "epoch": 0.75, "percentage": 75.13, "elapsed_time": "0:58:41", "remaining_time": "0:19:25"}
+ {"current_steps": 1200, "total_steps": 1584, "loss": 1.4372, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.906649047373246e-06, "epoch": 0.76, "percentage": 75.76, "elapsed_time": "0:59:10", "remaining_time": "0:18:56"}
+ {"current_steps": 1210, "total_steps": 1584, "loss": 1.4542, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.568066579746901e-06, "epoch": 0.76, "percentage": 76.39, "elapsed_time": "0:59:40", "remaining_time": "0:18:26"}
+ {"current_steps": 1220, "total_steps": 1584, "loss": 1.4426, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.2367342463579475e-06, "epoch": 0.77, "percentage": 77.02, "elapsed_time": "1:00:09", "remaining_time": "0:17:56"}
+ {"current_steps": 1230, "total_steps": 1584, "loss": 1.4292, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.912782375579412e-06, "epoch": 0.78, "percentage": 77.65, "elapsed_time": "1:00:38", "remaining_time": "0:17:27"}
+ {"current_steps": 1240, "total_steps": 1584, "loss": 1.432, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.596338392706077e-06, "epoch": 0.78, "percentage": 78.28, "elapsed_time": "1:01:08", "remaining_time": "0:16:57"}
+ {"current_steps": 1250, "total_steps": 1584, "loss": 1.4427, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.2875267698322325e-06, "epoch": 0.79, "percentage": 78.91, "elapsed_time": "1:01:37", "remaining_time": "0:16:27"}
+ {"current_steps": 1260, "total_steps": 1584, "loss": 1.4199, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.986468976890993e-06, "epoch": 0.8, "percentage": 79.55, "elapsed_time": "1:02:06", "remaining_time": "0:15:58"}
+ {"current_steps": 1270, "total_steps": 1584, "loss": 1.4253, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.693283433874565e-06, "epoch": 0.8, "percentage": 80.18, "elapsed_time": "1:02:35", "remaining_time": "0:15:28"}
+ {"current_steps": 1280, "total_steps": 1584, "loss": 1.4382, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.408085464254183e-06, "epoch": 0.81, "percentage": 80.81, "elapsed_time": "1:03:04", "remaining_time": "0:14:58"}
+ {"current_steps": 1290, "total_steps": 1584, "loss": 1.439, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.130987249617993e-06, "epoch": 0.81, "percentage": 81.44, "elapsed_time": "1:03:33", "remaining_time": "0:14:29"}
+ {"current_steps": 1300, "total_steps": 1584, "loss": 1.4322, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.8620977855448935e-06, "epoch": 0.82, "percentage": 82.07, "elapsed_time": "1:04:03", "remaining_time": "0:13:59"}
+ {"current_steps": 1310, "total_steps": 1584, "loss": 1.4305, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.601522838731461e-06, "epoch": 0.83, "percentage": 82.7, "elapsed_time": "1:04:32", "remaining_time": "0:13:30"}
+ {"current_steps": 1320, "total_steps": 1584, "loss": 1.4188, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.3493649053890326e-06, "epoch": 0.83, "percentage": 83.33, "elapsed_time": "1:05:02", "remaining_time": "0:13:00"}
+ {"current_steps": 1330, "total_steps": 1584, "loss": 1.4426, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.1057231709272077e-06, "epoch": 0.84, "percentage": 83.96, "elapsed_time": "1:05:31", "remaining_time": "0:12:30"}
+ {"current_steps": 1340, "total_steps": 1584, "loss": 1.4185, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.8706934709395892e-06, "epoch": 0.85, "percentage": 84.6, "elapsed_time": "1:06:00", "remaining_time": "0:12:01"}
+ {"current_steps": 1350, "total_steps": 1584, "loss": 1.4276, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.6443682535072177e-06, "epoch": 0.85, "percentage": 85.23, "elapsed_time": "1:06:29", "remaining_time": "0:11:31"}
+ {"current_steps": 1360, "total_steps": 1584, "loss": 1.4174, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.4268365428344736e-06, "epoch": 0.86, "percentage": 85.86, "elapsed_time": "1:06:59", "remaining_time": "0:11:02"}
+ {"current_steps": 1370, "total_steps": 1584, "loss": 1.441, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.21818390423168e-06, "epoch": 0.86, "percentage": 86.49, "elapsed_time": "1:07:28", "remaining_time": "0:10:32"}
+ {"current_steps": 1380, "total_steps": 1584, "loss": 1.4322, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.0184924104583613e-06, "epoch": 0.87, "percentage": 87.12, "elapsed_time": "1:07:58", "remaining_time": "0:10:02"}
+ {"current_steps": 1390, "total_steps": 1584, "loss": 1.411, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.8278406094401623e-06, "epoch": 0.88, "percentage": 87.75, "elapsed_time": "1:08:28", "remaining_time": "0:09:33"}
+ {"current_steps": 1400, "total_steps": 1584, "loss": 1.4368, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.6463034933723337e-06, "epoch": 0.88, "percentage": 88.38, "elapsed_time": "1:08:57", "remaining_time": "0:09:03"}
+ {"current_steps": 1410, "total_steps": 1584, "loss": 1.396, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.4739524692218314e-06, "epoch": 0.89, "percentage": 89.02, "elapsed_time": "1:09:26", "remaining_time": "0:08:34"}
+ {"current_steps": 1420, "total_steps": 1584, "loss": 1.439, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.3108553306396265e-06, "epoch": 0.9, "percentage": 89.65, "elapsed_time": "1:09:55", "remaining_time": "0:08:04"}
+ {"current_steps": 1430, "total_steps": 1584, "loss": 1.4113, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.1570762312943295e-06, "epoch": 0.9, "percentage": 90.28, "elapsed_time": "1:10:25", "remaining_time": "0:07:35"}
+ {"current_steps": 1440, "total_steps": 1584, "loss": 1.4438, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.0126756596375686e-06, "epoch": 0.91, "percentage": 90.91, "elapsed_time": "1:10:54", "remaining_time": "0:07:05"}
+ {"current_steps": 1450, "total_steps": 1584, "loss": 1.4365, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.777104151110826e-07, "epoch": 0.91, "percentage": 91.54, "elapsed_time": "1:11:24", "remaining_time": "0:06:35"}
+ {"current_steps": 1460, "total_steps": 1584, "loss": 1.4355, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.522335858048707e-07, "epoch": 0.92, "percentage": 92.17, "elapsed_time": "1:11:53", "remaining_time": "0:06:06"}
+ {"current_steps": 1470, "total_steps": 1584, "loss": 1.431, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.362945275751736e-07, "epoch": 0.93, "percentage": 92.8, "elapsed_time": "1:12:23", "remaining_time": "0:05:36"}
+ {"current_steps": 1480, "total_steps": 1584, "loss": 1.4057, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.299388446305343e-07, "epoch": 0.93, "percentage": 93.43, "elapsed_time": "1:12:52", "remaining_time": "0:05:07"}
+ {"current_steps": 1490, "total_steps": 1584, "loss": 1.421, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.3320837159353813e-07, "epoch": 0.94, "percentage": 94.07, "elapsed_time": "1:13:21", "remaining_time": "0:04:37"}
+ {"current_steps": 1500, "total_steps": 1584, "loss": 1.4319, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.4614115704533767e-07, "epoch": 0.95, "percentage": 94.7, "elapsed_time": "1:13:51", "remaining_time": "0:04:08"}
+ {"current_steps": 1510, "total_steps": 1584, "loss": 1.4295, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.687714485593462e-07, "epoch": 0.95, "percentage": 95.33, "elapsed_time": "1:14:20", "remaining_time": "0:03:38"}
+ {"current_steps": 1520, "total_steps": 1584, "loss": 1.4294, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.011296792301165e-07, "epoch": 0.96, "percentage": 95.96, "elapsed_time": "1:14:49", "remaining_time": "0:03:09"}
+ {"current_steps": 1530, "total_steps": 1584, "loss": 1.4562, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.4324245570256633e-07, "epoch": 0.97, "percentage": 96.59, "elapsed_time": "1:15:19", "remaining_time": "0:02:39"}
+ {"current_steps": 1540, "total_steps": 1584, "loss": 1.4447, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.513254770636137e-08, "epoch": 0.97, "percentage": 97.22, "elapsed_time": "1:15:48", "remaining_time": "0:02:09"}
+ {"current_steps": 1550, "total_steps": 1584, "loss": 1.4229, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.681887909952388e-08, "epoch": 0.98, "percentage": 97.85, "elapsed_time": "1:16:18", "remaining_time": "0:01:40"}
+ {"current_steps": 1560, "total_steps": 1584, "loss": 1.4458, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.831652042480093e-08, "epoch": 0.98, "percentage": 98.48, "elapsed_time": "1:16:47", "remaining_time": "0:01:10"}
+ {"current_steps": 1570, "total_steps": 1584, "loss": 1.4267, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.636682981720158e-09, "epoch": 0.99, "percentage": 99.12, "elapsed_time": "1:17:16", "remaining_time": "0:00:41"}
+ {"current_steps": 1580, "total_steps": 1584, "loss": 1.4373, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.867144166728846e-10, "epoch": 1.0, "percentage": 99.75, "elapsed_time": "1:17:45", "remaining_time": "0:00:11"}
+ {"current_steps": 1584, "total_steps": 1584, "loss": null, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 1.0, "percentage": 100.0, "elapsed_time": "1:17:57", "remaining_time": "0:00:00"}
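Two sanity checks fall out of this log. The training loss drops from 2.9245 at step 10 to about 1.43 by step 1580, and the learning_rate column traces the cosine schedule lr(s) = (5e-05 / 2) × (1 + cos(π · s / 1584)) with no warmup: at s = 10 this gives ≈ 4.99951e-05, matching the first entry. A minimal sketch for inspecting the log, assuming it is read from the file added in this commit (standard library only):

import json

# Each line is one logging event, emitted every 10 optimizer steps.
with open("trainer_log.jsonl") as f:
    entries = [json.loads(line) for line in f]

# Print a coarse loss/LR trace; entries with null loss (the final
# summary line) are skipped.
for e in entries[::16]:
    if e["loss"] is not None:
        print(e["current_steps"], e["loss"], e["learning_rate"])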
trainer_state.json ADDED
@@ -0,0 +1,1136 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.9995267392333176,
+ "eval_steps": 500,
+ "global_step": 1584,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.01,
+ "grad_norm": 3.7190074920654297,
+ "learning_rate": 4.9995083170283816e-05,
+ "loss": 2.9245,
+ "step": 10
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 3.431870222091675,
+ "learning_rate": 4.998033461515242e-05,
+ "loss": 2.0053,
+ "step": 20
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 2.3315682411193848,
+ "learning_rate": 4.9955760135896534e-05,
+ "loss": 1.888,
+ "step": 30
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 3.2937276363372803,
+ "learning_rate": 4.992136939879856e-05,
+ "loss": 1.8447,
+ "step": 40
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 2.7375714778900146,
+ "learning_rate": 4.9877175931330346e-05,
+ "loss": 1.8212,
+ "step": 50
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 2.15061092376709,
+ "learning_rate": 4.982319711683221e-05,
+ "loss": 1.793,
+ "step": 60
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 2.0427424907684326,
+ "learning_rate": 4.975945418767529e-05,
+ "loss": 1.756,
+ "step": 70
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 2.107785224914551,
+ "learning_rate": 4.968597221690986e-05,
+ "loss": 1.7285,
+ "step": 80
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 2.100552558898926,
+ "learning_rate": 4.96027801084029e-05,
+ "loss": 1.7297,
+ "step": 90
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 2.2227377891540527,
+ "learning_rate": 4.950991058546893e-05,
+ "loss": 1.7602,
+ "step": 100
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 1.535144567489624,
+ "learning_rate": 4.940740017799833e-05,
+ "loss": 1.7433,
+ "step": 110
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 1.6522979736328125,
+ "learning_rate": 4.929528920808854e-05,
+ "loss": 1.7363,
+ "step": 120
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 2.8091869354248047,
+ "learning_rate": 4.917362177418342e-05,
+ "loss": 1.6872,
+ "step": 130
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 2.1017510890960693,
+ "learning_rate": 4.904244573372733e-05,
+ "loss": 1.7084,
+ "step": 140
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 1.6424258947372437,
+ "learning_rate": 4.8901812684340564e-05,
+ "loss": 1.6997,
+ "step": 150
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 1.4547488689422607,
+ "learning_rate": 4.8751777943523634e-05,
+ "loss": 1.6747,
+ "step": 160
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 1.6251146793365479,
+ "learning_rate": 4.8592400526898314e-05,
+ "loss": 1.6836,
+ "step": 170
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 2.098386526107788,
+ "learning_rate": 4.842374312499405e-05,
+ "loss": 1.6552,
+ "step": 180
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 2.2387640476226807,
+ "learning_rate": 4.824587207858888e-05,
+ "loss": 1.6489,
+ "step": 190
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 1.7299611568450928,
+ "learning_rate": 4.805885735261454e-05,
+ "loss": 1.6576,
+ "step": 200
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 1.5701665878295898,
+ "learning_rate": 4.786277250863599e-05,
+ "loss": 1.6533,
+ "step": 210
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 2.417296886444092,
+ "learning_rate": 4.765769467591625e-05,
+ "loss": 1.6356,
+ "step": 220
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 1.2636029720306396,
+ "learning_rate": 4.744370452107789e-05,
+ "loss": 1.6389,
+ "step": 230
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 1.576324224472046,
+ "learning_rate": 4.722088621637309e-05,
+ "loss": 1.6546,
+ "step": 240
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 1.9720542430877686,
+ "learning_rate": 4.698932740657479e-05,
+ "loss": 1.6354,
+ "step": 250
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 1.5250279903411865,
+ "learning_rate": 4.6749119174501975e-05,
+ "loss": 1.6342,
+ "step": 260
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 2.4737966060638428,
+ "learning_rate": 4.6500356005192514e-05,
+ "loss": 1.6407,
+ "step": 270
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 1.2792372703552246,
+ "learning_rate": 4.6243135748737864e-05,
+ "loss": 1.6339,
+ "step": 280
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 1.5593037605285645,
+ "learning_rate": 4.597755958179406e-05,
+ "loss": 1.6095,
+ "step": 290
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 1.3141404390335083,
+ "learning_rate": 4.570373196778427e-05,
+ "loss": 1.6036,
+ "step": 300
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 1.2617065906524658,
+ "learning_rate": 4.5421760615808474e-05,
+ "loss": 1.6244,
+ "step": 310
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 1.64117431640625,
+ "learning_rate": 4.513175643827647e-05,
+ "loss": 1.6449,
+ "step": 320
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 1.7132749557495117,
+ "learning_rate": 4.4833833507280884e-05,
+ "loss": 1.5948,
+ "step": 330
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 2.1323654651641846,
+ "learning_rate": 4.4528109009727336e-05,
+ "loss": 1.627,
+ "step": 340
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 2.253115653991699,
+ "learning_rate": 4.42147032012394e-05,
+ "loss": 1.6151,
+ "step": 350
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 1.6143097877502441,
+ "learning_rate": 4.389373935885646e-05,
+ "loss": 1.5838,
+ "step": 360
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 1.3353707790374756,
+ "learning_rate": 4.356534373254316e-05,
+ "loss": 1.5935,
+ "step": 370
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 1.283742904663086,
+ "learning_rate": 4.322964549552943e-05,
+ "loss": 1.6015,
+ "step": 380
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 1.437249779701233,
+ "learning_rate": 4.288677669350066e-05,
+ "loss": 1.577,
+ "step": 390
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 1.5190638303756714,
+ "learning_rate": 4.2536872192658036e-05,
+ "loss": 1.5843,
+ "step": 400
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 2.1320886611938477,
+ "learning_rate": 4.218006962666934e-05,
+ "loss": 1.6145,
+ "step": 410
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 1.0696591138839722,
+ "learning_rate": 4.181650934253132e-05,
+ "loss": 1.5601,
+ "step": 420
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 1.3149545192718506,
+ "learning_rate": 4.144633434536467e-05,
+ "loss": 1.5664,
+ "step": 430
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 1.3661577701568604,
+ "learning_rate": 4.1069690242163484e-05,
+ "loss": 1.6002,
+ "step": 440
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 1.6984481811523438,
+ "learning_rate": 4.06867251845213e-05,
+ "loss": 1.576,
+ "step": 450
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 1.2728784084320068,
+ "learning_rate": 4.0297589810356165e-05,
+ "loss": 1.5448,
+ "step": 460
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 1.4147616624832153,
+ "learning_rate": 3.9902437184657784e-05,
+ "loss": 1.5595,
+ "step": 470
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 1.2289011478424072,
+ "learning_rate": 3.9501422739279956e-05,
+ "loss": 1.5628,
+ "step": 480
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 1.5690233707427979,
+ "learning_rate": 3.909470421180201e-05,
+ "loss": 1.5731,
+ "step": 490
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 1.4935098886489868,
+ "learning_rate": 3.8682441583483314e-05,
+ "loss": 1.545,
+ "step": 500
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 1.2939772605895996,
+ "learning_rate": 3.8264797016335205e-05,
+ "loss": 1.5793,
+ "step": 510
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 1.2150651216506958,
+ "learning_rate": 3.7841934789335164e-05,
+ "loss": 1.5378,
+ "step": 520
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 1.2153139114379883,
+ "learning_rate": 3.741402123380828e-05,
+ "loss": 1.5345,
+ "step": 530
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 1.290591835975647,
+ "learning_rate": 3.6981224668001424e-05,
+ "loss": 1.5517,
+ "step": 540
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 1.1924967765808105,
+ "learning_rate": 3.654371533087586e-05,
+ "loss": 1.5472,
+ "step": 550
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 1.6345056295394897,
+ "learning_rate": 3.610166531514436e-05,
+ "loss": 1.5564,
+ "step": 560
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 2.185119867324829,
+ "learning_rate": 3.565524849957921e-05,
+ "loss": 1.5574,
+ "step": 570
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 1.3646321296691895,
+ "learning_rate": 3.520464048061758e-05,
+ "loss": 1.5584,
+ "step": 580
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 1.2333228588104248,
+ "learning_rate": 3.47500185032913e-05,
+ "loss": 1.518,
+ "step": 590
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 1.3945318460464478,
+ "learning_rate": 3.4291561391508185e-05,
+ "loss": 1.5339,
+ "step": 600
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 1.304306149482727,
+ "learning_rate": 3.3829449477712324e-05,
+ "loss": 1.5339,
+ "step": 610
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 1.6393932104110718,
+ "learning_rate": 3.336386453195088e-05,
+ "loss": 1.5399,
+ "step": 620
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 1.2000635862350464,
+ "learning_rate": 3.2894989690375626e-05,
+ "loss": 1.5233,
+ "step": 630
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 1.1479601860046387,
+ "learning_rate": 3.2423009383206876e-05,
+ "loss": 1.538,
+ "step": 640
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 1.1483389139175415,
+ "learning_rate": 3.194810926218861e-05,
+ "loss": 1.528,
+ "step": 650
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 1.2403253316879272,
+ "learning_rate": 3.147047612756302e-05,
+ "loss": 1.5307,
+ "step": 660
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 1.3997712135314941,
+ "learning_rate": 3.099029785459328e-05,
+ "loss": 1.4915,
+ "step": 670
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 1.2010352611541748,
+ "learning_rate": 3.0507763319663517e-05,
+ "loss": 1.5268,
+ "step": 680
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 1.0670932531356812,
+ "learning_rate": 3.002306232598497e-05,
+ "loss": 1.5273,
+ "step": 690
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 1.2283655405044556,
+ "learning_rate": 2.9536385528937567e-05,
+ "loss": 1.5273,
+ "step": 700
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 1.1306476593017578,
+ "learning_rate": 2.9047924361076345e-05,
+ "loss": 1.5072,
+ "step": 710
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 1.1699943542480469,
+ "learning_rate": 2.8557870956832132e-05,
+ "loss": 1.4856,
+ "step": 720
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 1.2550854682922363,
+ "learning_rate": 2.8066418076936167e-05,
+ "loss": 1.4983,
+ "step": 730
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 1.0610970258712769,
+ "learning_rate": 2.7573759032598366e-05,
+ "loss": 1.5518,
+ "step": 740
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 1.1754754781723022,
+ "learning_rate": 2.7080087609469062e-05,
+ "loss": 1.4998,
+ "step": 750
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 1.1955766677856445,
+ "learning_rate": 2.6585597991414114e-05,
+ "loss": 1.5109,
+ "step": 760
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 1.0891656875610352,
+ "learning_rate": 2.6090484684133404e-05,
+ "loss": 1.5007,
+ "step": 770
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 1.0880335569381714,
+ "learning_rate": 2.5594942438652688e-05,
+ "loss": 1.5049,
+ "step": 780
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 1.345954418182373,
+ "learning_rate": 2.509916617471903e-05,
+ "loss": 1.5154,
+ "step": 790
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 1.1668224334716797,
+ "learning_rate": 2.46033509041298e-05,
+ "loss": 1.4883,
+ "step": 800
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 1.055127501487732,
+ "learning_rate": 2.410769165402549e-05,
+ "loss": 1.5053,
+ "step": 810
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 1.0528500080108643,
+ "learning_rate": 2.3612383390176503e-05,
+ "loss": 1.4871,
+ "step": 820
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 1.328258991241455,
+ "learning_rate": 2.3117620940294048e-05,
+ "loss": 1.5037,
+ "step": 830
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 1.0326772928237915,
+ "learning_rate": 2.2623598917395438e-05,
+ "loss": 1.4525,
+ "step": 840
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 3.057058811187744,
+ "learning_rate": 2.213051164325366e-05,
+ "loss": 1.4898,
+ "step": 850
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 1.1190940141677856,
+ "learning_rate": 2.1638553071961708e-05,
+ "loss": 1.488,
+ "step": 860
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 1.1501041650772095,
+ "learning_rate": 2.1147916713641367e-05,
+ "loss": 1.4711,
+ "step": 870
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 1.090022325515747,
+ "learning_rate": 2.0658795558326743e-05,
+ "loss": 1.488,
+ "step": 880
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 1.0642565488815308,
+ "learning_rate": 2.017138200005236e-05,
+ "loss": 1.4791,
+ "step": 890
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 1.3562296628952026,
+ "learning_rate": 1.9685867761175584e-05,
+ "loss": 1.4956,
+ "step": 900
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 1.2069261074066162,
+ "learning_rate": 1.9202443816963425e-05,
+ "loss": 1.4918,
+ "step": 910
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 1.3227437734603882,
+ "learning_rate": 1.872130032047302e-05,
+ "loss": 1.4577,
+ "step": 920
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 1.0784181356430054,
+ "learning_rate": 1.824262652775568e-05,
+ "loss": 1.4888,
+ "step": 930
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 1.000135898590088,
+ "learning_rate": 1.7766610723413684e-05,
+ "loss": 1.4673,
+ "step": 940
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 1.136026382446289,
+ "learning_rate": 1.7293440146539196e-05,
+ "loss": 1.4779,
+ "step": 950
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 1.123252272605896,
+ "learning_rate": 1.682330091706446e-05,
+ "loss": 1.4583,
+ "step": 960
+ },
683
+ {
684
+ "epoch": 0.61,
685
+ "grad_norm": 1.0559343099594116,
686
+ "learning_rate": 1.6356377962552238e-05,
687
+ "loss": 1.4471,
688
+ "step": 970
689
+ },
690
+ {
691
+ "epoch": 0.62,
692
+ "grad_norm": 1.0266658067703247,
693
+ "learning_rate": 1.589285494545514e-05,
694
+ "loss": 1.4632,
695
+ "step": 980
696
+ },
697
+ {
698
+ "epoch": 0.62,
699
+ "grad_norm": 1.1371444463729858,
700
+ "learning_rate": 1.5432914190872757e-05,
701
+ "loss": 1.4732,
702
+ "step": 990
703
+ },
704
+ {
705
+ "epoch": 0.63,
706
+ "grad_norm": 1.1203784942626953,
707
+ "learning_rate": 1.4976736614834664e-05,
708
+ "loss": 1.452,
709
+ "step": 1000
710
+ },
711
+ {
712
+ "epoch": 0.64,
713
+ "grad_norm": 1.0037944316864014,
714
+ "learning_rate": 1.4524501653137787e-05,
715
+ "loss": 1.461,
716
+ "step": 1010
717
+ },
718
+ {
719
+ "epoch": 0.64,
720
+ "grad_norm": 1.1353282928466797,
721
+ "learning_rate": 1.4076387190766017e-05,
722
+ "loss": 1.4538,
723
+ "step": 1020
724
+ },
725
+ {
726
+ "epoch": 0.65,
727
+ "grad_norm": 1.1203887462615967,
728
+ "learning_rate": 1.363256949191972e-05,
729
+ "loss": 1.4681,
730
+ "step": 1030
731
+ },
732
+ {
733
+ "epoch": 0.66,
734
+ "grad_norm": 1.0686651468276978,
735
+ "learning_rate": 1.3193223130682936e-05,
736
+ "loss": 1.4548,
737
+ "step": 1040
738
+ },
739
+ {
740
+ "epoch": 0.66,
741
+ "grad_norm": 1.0339988470077515,
742
+ "learning_rate": 1.2758520922355226e-05,
743
+ "loss": 1.4535,
744
+ "step": 1050
745
+ },
746
+ {
747
+ "epoch": 0.67,
748
+ "grad_norm": 1.4555269479751587,
749
+ "learning_rate": 1.2328633855475429e-05,
750
+ "loss": 1.4621,
751
+ "step": 1060
752
+ },
753
+ {
754
+ "epoch": 0.68,
755
+ "grad_norm": 1.0318940877914429,
756
+ "learning_rate": 1.1903731024563966e-05,
757
+ "loss": 1.4621,
758
+ "step": 1070
759
+ },
760
+ {
761
+ "epoch": 0.68,
762
+ "grad_norm": 1.084612488746643,
763
+ "learning_rate": 1.148397956361007e-05,
764
+ "loss": 1.4636,
765
+ "step": 1080
766
+ },
767
+ {
768
+ "epoch": 0.69,
769
+ "grad_norm": 1.0705621242523193,
770
+ "learning_rate": 1.106954458033026e-05,
771
+ "loss": 1.4495,
772
+ "step": 1090
773
+ },
774
+ {
775
+ "epoch": 0.69,
776
+ "grad_norm": 1.050857424736023,
777
+ "learning_rate": 1.0660589091223855e-05,
778
+ "loss": 1.4395,
779
+ "step": 1100
780
+ },
781
+ {
782
+ "epoch": 0.7,
783
+ "grad_norm": 1.0744839906692505,
784
+ "learning_rate": 1.025727395745095e-05,
785
+ "loss": 1.4583,
786
+ "step": 1110
787
+ },
788
+ {
789
+ "epoch": 0.71,
790
+ "grad_norm": 1.0446105003356934,
791
+ "learning_rate": 9.859757821558337e-06,
792
+ "loss": 1.4606,
793
+ "step": 1120
794
+ },
795
+ {
796
+ "epoch": 0.71,
797
+ "grad_norm": 1.1479051113128662,
798
+ "learning_rate": 9.468197045077976e-06,
799
+ "loss": 1.454,
800
+ "step": 1130
801
+ },
802
+ {
803
+ "epoch": 0.72,
804
+ "grad_norm": 0.985953152179718,
805
+ "learning_rate": 9.082745647022797e-06,
806
+ "loss": 1.4654,
807
+ "step": 1140
808
+ },
809
+ {
810
+ "epoch": 0.73,
811
+ "grad_norm": 1.1085201501846313,
812
+ "learning_rate": 8.703555243303835e-06,
813
+ "loss": 1.4526,
814
+ "step": 1150
815
+ },
816
+ {
817
+ "epoch": 0.73,
818
+ "grad_norm": 1.2304482460021973,
819
+ "learning_rate": 8.330774987092712e-06,
820
+ "loss": 1.448,
821
+ "step": 1160
822
+ },
823
+ {
824
+ "epoch": 0.74,
825
+ "grad_norm": 1.0740071535110474,
826
+ "learning_rate": 7.96455151015272e-06,
827
+ "loss": 1.4606,
828
+ "step": 1170
829
+ },
830
+ {
831
+ "epoch": 0.74,
832
+ "grad_norm": 1.0380760431289673,
833
+ "learning_rate": 7.605028865161809e-06,
834
+ "loss": 1.4661,
835
+ "step": 1180
836
+ },
837
+ {
838
+ "epoch": 0.75,
839
+ "grad_norm": 1.1115810871124268,
840
+ "learning_rate": 7.25234846904993e-06,
841
+ "loss": 1.4567,
842
+ "step": 1190
843
+ },
844
+ {
845
+ "epoch": 0.76,
846
+ "grad_norm": 0.9248858094215393,
847
+ "learning_rate": 6.906649047373246e-06,
848
+ "loss": 1.4372,
849
+ "step": 1200
850
+ },
851
+ {
852
+ "epoch": 0.76,
853
+ "grad_norm": 1.0288389921188354,
854
+ "learning_rate": 6.568066579746901e-06,
855
+ "loss": 1.4542,
856
+ "step": 1210
857
+ },
858
+ {
859
+ "epoch": 0.77,
860
+ "grad_norm": 1.0125763416290283,
861
+ "learning_rate": 6.2367342463579475e-06,
862
+ "loss": 1.4426,
863
+ "step": 1220
864
+ },
865
+ {
866
+ "epoch": 0.78,
867
+ "grad_norm": 0.9536031484603882,
868
+ "learning_rate": 5.912782375579412e-06,
869
+ "loss": 1.4292,
870
+ "step": 1230
871
+ },
872
+ {
873
+ "epoch": 0.78,
874
+ "grad_norm": 0.993061363697052,
875
+ "learning_rate": 5.596338392706077e-06,
876
+ "loss": 1.432,
877
+ "step": 1240
878
+ },
879
+ {
880
+ "epoch": 0.79,
881
+ "grad_norm": 0.9642956852912903,
882
+ "learning_rate": 5.2875267698322325e-06,
883
+ "loss": 1.4427,
884
+ "step": 1250
885
+ },
886
+ {
887
+ "epoch": 0.8,
888
+ "grad_norm": 0.9925894737243652,
889
+ "learning_rate": 4.986468976890993e-06,
890
+ "loss": 1.4199,
891
+ "step": 1260
892
+ },
893
+ {
894
+ "epoch": 0.8,
895
+ "grad_norm": 1.0030889511108398,
896
+ "learning_rate": 4.693283433874565e-06,
897
+ "loss": 1.4253,
898
+ "step": 1270
899
+ },
900
+ {
901
+ "epoch": 0.81,
902
+ "grad_norm": 0.986602783203125,
903
+ "learning_rate": 4.408085464254183e-06,
904
+ "loss": 1.4382,
905
+ "step": 1280
906
+ },
907
+ {
908
+ "epoch": 0.81,
909
+ "grad_norm": 0.9463419318199158,
910
+ "learning_rate": 4.130987249617993e-06,
911
+ "loss": 1.439,
912
+ "step": 1290
913
+ },
914
+ {
915
+ "epoch": 0.82,
916
+ "grad_norm": 0.9418216347694397,
917
+ "learning_rate": 3.8620977855448935e-06,
918
+ "loss": 1.4322,
919
+ "step": 1300
920
+ },
921
+ {
922
+ "epoch": 0.83,
923
+ "grad_norm": 1.067226529121399,
924
+ "learning_rate": 3.601522838731461e-06,
925
+ "loss": 1.4305,
926
+ "step": 1310
927
+ },
928
+ {
929
+ "epoch": 0.83,
930
+ "grad_norm": 0.9662885665893555,
931
+ "learning_rate": 3.3493649053890326e-06,
932
+ "loss": 1.4188,
933
+ "step": 1320
934
+ },
935
+ {
936
+ "epoch": 0.84,
937
+ "grad_norm": 1.1397868394851685,
938
+ "learning_rate": 3.1057231709272077e-06,
939
+ "loss": 1.4426,
940
+ "step": 1330
941
+ },
942
+ {
943
+ "epoch": 0.85,
944
+ "grad_norm": 1.0030759572982788,
945
+ "learning_rate": 2.8706934709395892e-06,
946
+ "loss": 1.4185,
947
+ "step": 1340
948
+ },
949
+ {
950
+ "epoch": 0.85,
951
+ "grad_norm": 0.9549908638000488,
952
+ "learning_rate": 2.6443682535072177e-06,
953
+ "loss": 1.4276,
954
+ "step": 1350
955
+ },
956
+ {
957
+ "epoch": 0.86,
958
+ "grad_norm": 0.9839365482330322,
959
+ "learning_rate": 2.4268365428344736e-06,
960
+ "loss": 1.4174,
961
+ "step": 1360
962
+ },
963
+ {
964
+ "epoch": 0.86,
965
+ "grad_norm": 0.954189121723175,
966
+ "learning_rate": 2.21818390423168e-06,
967
+ "loss": 1.441,
968
+ "step": 1370
969
+ },
970
+ {
971
+ "epoch": 0.87,
972
+ "grad_norm": 0.9914742708206177,
973
+ "learning_rate": 2.0184924104583613e-06,
974
+ "loss": 1.4322,
975
+ "step": 1380
976
+ },
977
+ {
978
+ "epoch": 0.88,
979
+ "grad_norm": 0.9965653419494629,
980
+ "learning_rate": 1.8278406094401623e-06,
981
+ "loss": 1.411,
982
+ "step": 1390
983
+ },
984
+ {
985
+ "epoch": 0.88,
986
+ "grad_norm": 1.0744175910949707,
987
+ "learning_rate": 1.6463034933723337e-06,
988
+ "loss": 1.4368,
989
+ "step": 1400
990
+ },
991
+ {
992
+ "epoch": 0.89,
993
+ "grad_norm": 0.9871243238449097,
994
+ "learning_rate": 1.4739524692218314e-06,
995
+ "loss": 1.396,
996
+ "step": 1410
997
+ },
998
+ {
999
+ "epoch": 0.9,
1000
+ "grad_norm": 0.9976981282234192,
1001
+ "learning_rate": 1.3108553306396265e-06,
1002
+ "loss": 1.439,
1003
+ "step": 1420
1004
+ },
1005
+ {
1006
+ "epoch": 0.9,
1007
+ "grad_norm": 0.9817109704017639,
1008
+ "learning_rate": 1.1570762312943295e-06,
1009
+ "loss": 1.4113,
1010
+ "step": 1430
1011
+ },
1012
+ {
1013
+ "epoch": 0.91,
1014
+ "grad_norm": 0.9741029143333435,
1015
+ "learning_rate": 1.0126756596375686e-06,
1016
+ "loss": 1.4438,
1017
+ "step": 1440
1018
+ },
1019
+ {
1020
+ "epoch": 0.91,
1021
+ "grad_norm": 1.0171328783035278,
1022
+ "learning_rate": 8.777104151110826e-07,
1023
+ "loss": 1.4365,
1024
+ "step": 1450
1025
+ },
1026
+ {
1027
+ "epoch": 0.92,
1028
+ "grad_norm": 0.980021595954895,
1029
+ "learning_rate": 7.522335858048707e-07,
1030
+ "loss": 1.4355,
1031
+ "step": 1460
1032
+ },
1033
+ {
1034
+ "epoch": 0.93,
1035
+ "grad_norm": 0.9966154098510742,
1036
+ "learning_rate": 6.362945275751736e-07,
1037
+ "loss": 1.431,
1038
+ "step": 1470
1039
+ },
1040
+ {
1041
+ "epoch": 0.93,
1042
+ "grad_norm": 0.9687898755073547,
1043
+ "learning_rate": 5.299388446305343e-07,
1044
+ "loss": 1.4057,
1045
+ "step": 1480
1046
+ },
1047
+ {
1048
+ "epoch": 0.94,
1049
+ "grad_norm": 0.9906119704246521,
1050
+ "learning_rate": 4.3320837159353813e-07,
1051
+ "loss": 1.421,
1052
+ "step": 1490
1053
+ },
1054
+ {
1055
+ "epoch": 0.95,
1056
+ "grad_norm": 1.0227527618408203,
1057
+ "learning_rate": 3.4614115704533767e-07,
1058
+ "loss": 1.4319,
1059
+ "step": 1500
1060
+ },
1061
+ {
1062
+ "epoch": 0.95,
1063
+ "grad_norm": 1.0115277767181396,
1064
+ "learning_rate": 2.687714485593462e-07,
1065
+ "loss": 1.4295,
1066
+ "step": 1510
1067
+ },
1068
+ {
1069
+ "epoch": 0.96,
1070
+ "grad_norm": 0.993654727935791,
1071
+ "learning_rate": 2.011296792301165e-07,
1072
+ "loss": 1.4294,
1073
+ "step": 1520
1074
+ },
1075
+ {
1076
+ "epoch": 0.97,
1077
+ "grad_norm": 0.8775748014450073,
1078
+ "learning_rate": 1.4324245570256633e-07,
1079
+ "loss": 1.4562,
1080
+ "step": 1530
1081
+ },
1082
+ {
1083
+ "epoch": 0.97,
1084
+ "grad_norm": 0.9754842519760132,
1085
+ "learning_rate": 9.513254770636137e-08,
1086
+ "loss": 1.4447,
1087
+ "step": 1540
1088
+ },
1089
+ {
1090
+ "epoch": 0.98,
1091
+ "grad_norm": 0.9996697902679443,
1092
+ "learning_rate": 5.681887909952388e-08,
1093
+ "loss": 1.4229,
1094
+ "step": 1550
1095
+ },
1096
+ {
1097
+ "epoch": 0.98,
1098
+ "grad_norm": 0.9914098381996155,
1099
+ "learning_rate": 2.831652042480093e-08,
1100
+ "loss": 1.4458,
1101
+ "step": 1560
1102
+ },
1103
+ {
1104
+ "epoch": 0.99,
1105
+ "grad_norm": 0.9639108777046204,
1106
+ "learning_rate": 9.636682981720158e-09,
1107
+ "loss": 1.4267,
1108
+ "step": 1570
1109
+ },
1110
+ {
1111
+ "epoch": 1.0,
1112
+ "grad_norm": 0.9515108466148376,
1113
+ "learning_rate": 7.867144166728846e-10,
1114
+ "loss": 1.4373,
1115
+ "step": 1580
1116
+ },
1117
+ {
1118
+ "epoch": 1.0,
1119
+ "step": 1584,
1120
+ "total_flos": 1.1098698583858217e+18,
1121
+ "train_loss": 1.5383612829627413,
1122
+ "train_runtime": 4681.1872,
1123
+ "train_samples_per_second": 21.666,
1124
+ "train_steps_per_second": 0.338
1125
+ }
1126
+ ],
1127
+ "logging_steps": 10,
1128
+ "max_steps": 1584,
1129
+ "num_input_tokens_seen": 0,
1130
+ "num_train_epochs": 1,
1131
+ "save_steps": 1000,
1132
+ "total_flos": 1.1098698583858217e+18,
1133
+ "train_batch_size": 4,
1134
+ "trial_name": null,
1135
+ "trial_params": null
1136
+ }
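Since `log_history` above is plain JSON, a loss curve like the `training_loss.png` added at the end of this commit can be reproduced in a few lines. This is a minimal sketch, not part of the commit itself: it assumes a local copy of `trainer_state.json` (e.g. from `checkpoint-1000/`) and matplotlib installed; the file paths are illustrative.

```python
import json

import matplotlib.pyplot as plt

# Load the trainer state written by the HF Trainer; adjust the path to
# wherever trainer_state.json lives locally (e.g. checkpoint-1000/).
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the per-step logging entries; the final record is a run
# summary with "train_loss" instead of "loss", so it is filtered out.
logs = [e for e in state["log_history"] if "loss" in e]

steps = [e["step"] for e in logs]
losses = [e["loss"] for e in logs]

plt.plot(steps, losses)
plt.xlabel("step")
plt.ylabel("training loss")
plt.savefig("training_loss.png")
```

With `logging_steps: 10` and `max_steps: 1584`, this yields roughly 158 points tracing the loss from about 1.55 down to about 1.43 over the single epoch.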
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f5f270ab83bb6b8b6198abab8938484e33528253ff0e05545853f6357e16e105
+ size 4603
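The three lines above are a git-lfs pointer, not the file itself. `training_args.bin` is a pickled `transformers.TrainingArguments`, so it can be fetched and inspected roughly as follows. This is a sketch under stated assumptions: the repo id is a placeholder (the actual repository name is not shown here), and on PyTorch releases newer than the 2.0.1 used for this run, `torch.load` may need `weights_only=False`.

```python
import torch
from huggingface_hub import hf_hub_download

# Resolve the LFS pointer into the real 4.6 kB file. The repo id below is
# a placeholder -- substitute the actual model repository on the Hub.
path = hf_hub_download(repo_id="your-username/final_trained",
                       filename="training_args.bin")

# Unpickling requires transformers to be importable, since the blob is a
# pickled transformers.TrainingArguments object.
args = torch.load(path)
print(args.learning_rate, args.lr_scheduler_type, args.num_train_epochs)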
training_loss.png ADDED