JW17 committed on
Commit d324ba5
1 parent: e6b7eee

Model save

README.md ADDED
@@ -0,0 +1,58 @@
+ ---
+ base_model: orpo-explorers/kaist-mistral-orpo-capybara-beta-0.05-1epoch
+ tags:
+ - trl
+ - orpo
+ - generated_from_trainer
+ model-index:
+ - name: kaist-mistral-orpo-capybara-beta0.05-1epoch-ohp-15k-strat-1-beta0.2-2epoch
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # kaist-mistral-orpo-capybara-beta0.05-1epoch-ohp-15k-strat-1-beta0.2-2epoch
+
+ This model is a fine-tuned version of [orpo-explorers/kaist-mistral-orpo-capybara-beta-0.05-1epoch](https://huggingface.co/orpo-explorers/kaist-mistral-orpo-capybara-beta-0.05-1epoch) on an unknown dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-06
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 4
+ - gradient_accumulation_steps: 2
+ - total_train_batch_size: 64
+ - total_eval_batch_size: 32
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - num_epochs: 2
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.39.3
+ - Pytorch 2.1.2.post303
+ - Datasets 2.18.0
+ - Tokenizers 0.15.2
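
The auto-generated card omits a training snippet, so here is a minimal sketch of how the hyperparameter table above maps onto TRL's ORPO trainer. It assumes the `ORPOConfig`/`ORPOTrainer` API from trl of this era (trl with Transformers 4.39); the dataset identifier is a placeholder, since the card lists the training data as unknown:

```python
# Sketch only: reconstructs the hyperparameter table above as a TRL ORPO run.
# The preference dataset below is a placeholder, not the actual training data.
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import ORPOConfig, ORPOTrainer

base = "orpo-explorers/kaist-mistral-orpo-capybara-beta-0.05-1epoch"
model = AutoModelForCausalLM.from_pretrained(base)
tokenizer = AutoTokenizer.from_pretrained(base)

args = ORPOConfig(
    output_dir="kaist-mistral-orpo-capybara-beta0.05-1epoch-ohp-15k-strat-1-beta0.2-2epoch",
    beta=0.2,                        # the "beta0.2" in the model name
    learning_rate=5e-6,
    per_device_train_batch_size=8,   # 8 per device x 4 GPUs x 2 accumulation = 64 effective
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=2,
    lr_scheduler_type="cosine",
    num_train_epochs=2,
    seed=42,
)

trainer = ORPOTrainer(
    model=model,
    args=args,
    train_dataset=load_dataset("owner/preference-dataset", split="train"),  # placeholder
    tokenizer=tokenizer,
)
trainer.train()
```

With 14,928 training samples and an effective batch of 64, this gives roughly 233 optimizer steps per epoch, consistent with the 466 total steps recorded in trainer_state.json below.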
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 2.0,
+     "train_loss": 0.508239566000746,
+     "train_runtime": 6462.3532,
+     "train_samples": 14928,
+     "train_samples_per_second": 4.62,
+     "train_steps_per_second": 0.072
+ }
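
The reported throughput is internally consistent; a quick arithmetic check, using only the values above plus the 466-step count from trainer_state.json below:

```python
# Sanity-check of the throughput figures; no external inputs.
train_samples, num_epochs = 14928, 2.0
train_runtime_s, global_steps = 6462.3532, 466  # global_steps from trainer_state.json

print(round(train_samples * num_epochs / train_runtime_s, 2))  # 4.62  == train_samples_per_second
print(round(global_steps / train_runtime_s, 3))                # 0.072 == train_steps_per_second
```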
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "transformers_version": "4.39.3"
+ }
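
This file is what `generate()` picks up by default. A minimal sketch, assuming the repo id matches the model name in the card (the commit itself does not state the repo id):

```python
# Sketch: how transformers consumes generation_config.json at inference time.
from transformers import GenerationConfig

repo = "orpo-explorers/kaist-mistral-orpo-capybara-beta0.05-1epoch-ohp-15k-strat-1-beta0.2-2epoch"  # assumed repo id
gen_cfg = GenerationConfig.from_pretrained(repo)
print(gen_cfg.bos_token_id, gen_cfg.eos_token_id)  # 1 2, matching the file above
```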
model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7246330cf661db39f574199d63c6860f781fccfb99470a90a681cf62b62e306b
+ size 4943162336
model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:99a3bb30f88c03f88e3416a9039ea765707fae80ae8ceebea132acd3710bbf6b
+ size 4999819336
model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6194f5309e609c095907864add97826e9965d27d7bcd5647e0b77e756b161cd5
+ size 4540516344
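
The three `.safetensors` entries above are Git LFS pointer files: the repository itself stores only a sha256 oid and a byte size, while the 4.5–5 GB shards live in LFS storage. A small sketch for verifying a downloaded shard against its pointer (the path below assumes a local copy of the first shard):

```python
# Sketch: recompute a shard's LFS oid locally and compare it to the pointer above.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as fh:
        while block := fh.read(chunk_size):  # stream in 1 MiB chunks
            digest.update(block)
    return digest.hexdigest()

expected = "7246330cf661db39f574199d63c6860f781fccfb99470a90a681cf62b62e306b"
assert sha256_of("model-00001-of-00003.safetensors") == expected
```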
model.safetensors.index.json ADDED
@@ -0,0 +1,298 @@
+ {
+   "metadata": {
+     "total_size": 14483464192
+   },
+   "weight_map": {
+     "lm_head.weight": "model-00003-of-00003.safetensors",
+     "model.embed_tokens.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.30.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.norm.weight": "model-00003-of-00003.safetensors"
+   }
+ }
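
The index above is how the sharded checkpoint is stitched back together: `metadata.total_size` records the combined tensor bytes, and `weight_map` routes every tensor name to one of the three shards. A sketch of resolving a single tensor without touching the other shards (uses the `safetensors` library; paths assume the shard files sit next to the index):

```python
# Sketch: resolve one tensor through model.safetensors.index.json.
import json
from safetensors import safe_open

with open("model.safetensors.index.json") as fh:
    index = json.load(fh)

name = "model.norm.weight"
shard = index["weight_map"][name]  # "model-00003-of-00003.safetensors" per the map above
with safe_open(shard, framework="pt") as f:
    tensor = f.get_tensor(name)
print(shard, tuple(tensor.shape))
```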
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 2.0,
+     "train_loss": 0.508239566000746,
+     "train_runtime": 6462.3532,
+     "train_samples": 14928,
+     "train_samples_per_second": 4.62,
+     "train_steps_per_second": 0.072
+ }
trainer_state.json ADDED
@@ -0,0 +1,858 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.9957173447537473,
+   "eval_steps": 500,
+   "global_step": 466,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.04,
+       "grad_norm": 4.9375,
+       "learning_rate": 4.994320979557256e-06,
+       "log_odds_chosen": 0.0681995302438736,
+       "log_odds_ratio": -0.7082546353340149,
+       "logits/chosen": -3.0977654457092285,
+       "logits/rejected": -3.0908126831054688,
+       "logps/chosen": -0.7222177386283875,
+       "logps/rejected": -0.7692006230354309,
+       "loss": 0.6137,
+       "nll_loss": 0.4647584855556488,
+       "rewards/accuracies": 0.512499988079071,
+       "rewards/chosen": -0.144443541765213,
+       "rewards/margins": 0.009396565146744251,
+       "rewards/rejected": -0.153840109705925,
+       "step": 10
+     },
+     {
+       "epoch": 0.09,
+       "grad_norm": 4.15625,
+       "learning_rate": 4.977309719247571e-06,
+       "log_odds_chosen": 0.04795869067311287,
+       "log_odds_ratio": -0.7321950793266296,
+       "logits/chosen": -3.070446729660034,
+       "logits/rejected": -3.076430559158325,
+       "logps/chosen": -0.7525266408920288,
+       "logps/rejected": -0.7740827202796936,
+       "loss": 0.5618,
+       "nll_loss": 0.42791947722435,
+       "rewards/accuracies": 0.550000011920929,
+       "rewards/chosen": -0.15050533413887024,
+       "rewards/margins": 0.004311202093958855,
+       "rewards/rejected": -0.15481653809547424,
+       "step": 20
+     },
+     {
+       "epoch": 0.13,
+       "grad_norm": 4.03125,
+       "learning_rate": 4.9490435049069925e-06,
+       "log_odds_chosen": 0.0981246829032898,
+       "log_odds_ratio": -0.6918349266052246,
+       "logits/chosen": -3.0190162658691406,
+       "logits/rejected": -3.0307419300079346,
+       "logps/chosen": -0.70826256275177,
+       "logps/rejected": -0.7406646013259888,
+       "loss": 0.5694,
+       "nll_loss": 0.43190431594848633,
+       "rewards/accuracies": 0.5687500238418579,
+       "rewards/chosen": -0.14165252447128296,
+       "rewards/margins": 0.006480403244495392,
+       "rewards/rejected": -0.14813292026519775,
+       "step": 30
+     },
+     {
+       "epoch": 0.17,
+       "grad_norm": 4.0625,
+       "learning_rate": 4.909650756062782e-06,
+       "log_odds_chosen": -0.031760524958372116,
+       "log_odds_ratio": -0.7639201879501343,
+       "logits/chosen": -3.0301899909973145,
+       "logits/rejected": -3.013631582260132,
+       "logps/chosen": -0.7143345475196838,
+       "logps/rejected": -0.7065819501876831,
+       "loss": 0.5928,
+       "nll_loss": 0.4166012704372406,
+       "rewards/accuracies": 0.4749999940395355,
+       "rewards/chosen": -0.14286690950393677,
+       "rewards/margins": -0.001550512621179223,
+       "rewards/rejected": -0.14131638407707214,
+       "step": 40
+     },
+     {
+       "epoch": 0.21,
+       "grad_norm": 4.0,
+       "learning_rate": 4.8593104424957275e-06,
+       "log_odds_chosen": 0.1677912026643753,
+       "log_odds_ratio": -0.6516054272651672,
+       "logits/chosen": -2.9732978343963623,
+       "logits/rejected": -2.9736242294311523,
+       "logps/chosen": -0.6973217725753784,
+       "logps/rejected": -0.7871943712234497,
+       "loss": 0.5525,
+       "nll_loss": 0.3650514483451843,
+       "rewards/accuracies": 0.6187499761581421,
+       "rewards/chosen": -0.1394643485546112,
+       "rewards/margins": 0.017974523827433586,
+       "rewards/rejected": -0.15743887424468994,
+       "step": 50
+     },
+     {
+       "epoch": 0.26,
+       "grad_norm": 4.59375,
+       "learning_rate": 4.7982512711416995e-06,
+       "log_odds_chosen": 0.13981187343597412,
+       "log_odds_ratio": -0.6755875945091248,
+       "logits/chosen": -2.983802556991577,
+       "logits/rejected": -2.9678592681884766,
+       "logps/chosen": -0.6857394576072693,
+       "logps/rejected": -0.7719460725784302,
+       "loss": 0.5749,
+       "nll_loss": 0.43894845247268677,
+       "rewards/accuracies": 0.6000000238418579,
+       "rewards/chosen": -0.13714787364006042,
+       "rewards/margins": 0.017241323366761208,
+       "rewards/rejected": -0.15438921749591827,
+       "step": 60
+     },
+     {
+       "epoch": 0.3,
+       "grad_norm": 4.6875,
+       "learning_rate": 4.726750647026569e-06,
+       "log_odds_chosen": 0.060419272631406784,
+       "log_odds_ratio": -0.7113076448440552,
+       "logits/chosen": -3.0847103595733643,
+       "logits/rejected": -3.0847182273864746,
+       "logps/chosen": -0.6998961567878723,
+       "logps/rejected": -0.7425668835639954,
+       "loss": 0.6077,
+       "nll_loss": 0.45065993070602417,
+       "rewards/accuracies": 0.5625,
+       "rewards/chosen": -0.13997924327850342,
+       "rewards/margins": 0.008534139953553677,
+       "rewards/rejected": -0.14851337671279907,
+       "step": 70
+     },
+     {
+       "epoch": 0.34,
+       "grad_norm": 4.5,
+       "learning_rate": 4.64513341295515e-06,
+       "log_odds_chosen": 0.14743533730506897,
+       "log_odds_ratio": -0.664400577545166,
+       "logits/chosen": -3.072849750518799,
+       "logits/rejected": -3.064882755279541,
+       "logps/chosen": -0.6606765985488892,
+       "logps/rejected": -0.7464475035667419,
+       "loss": 0.5869,
+       "nll_loss": 0.40598875284194946,
+       "rewards/accuracies": 0.625,
+       "rewards/chosen": -0.1321353018283844,
+       "rewards/margins": 0.017154179513454437,
+       "rewards/rejected": -0.14928947389125824,
+       "step": 80
+     },
+     {
+       "epoch": 0.39,
+       "grad_norm": 3.703125,
+       "learning_rate": 4.553770373680062e-06,
+       "log_odds_chosen": 0.1585932970046997,
+       "log_odds_ratio": -0.6709593534469604,
+       "logits/chosen": -3.08207631111145,
+       "logits/rejected": -3.066760540008545,
+       "logps/chosen": -0.699985146522522,
+       "logps/rejected": -0.7977792620658875,
+       "loss": 0.5663,
+       "nll_loss": 0.4425368905067444,
+       "rewards/accuracies": 0.5874999761581421,
+       "rewards/chosen": -0.13999703526496887,
+       "rewards/margins": 0.019558843225240707,
+       "rewards/rejected": -0.15955588221549988,
+       "step": 90
+     },
+     {
+       "epoch": 0.43,
+       "grad_norm": 3.546875,
+       "learning_rate": 4.453076611255507e-06,
+       "log_odds_chosen": 0.14516516029834747,
+       "log_odds_ratio": -0.6720137596130371,
+       "logits/chosen": -3.114285469055176,
+       "logits/rejected": -3.1085124015808105,
+       "logps/chosen": -0.6726102828979492,
+       "logps/rejected": -0.7475894689559937,
+       "loss": 0.5877,
+       "nll_loss": 0.4581482410430908,
+       "rewards/accuracies": 0.581250011920929,
+       "rewards/chosen": -0.13452205061912537,
+       "rewards/margins": 0.014995847828686237,
+       "rewards/rejected": -0.14951792359352112,
+       "step": 100
+     },
+     {
+       "epoch": 0.47,
+       "grad_norm": 3.9375,
+       "learning_rate": 4.343509599229697e-06,
+       "log_odds_chosen": 0.07305195182561874,
+       "log_odds_ratio": -0.7057459950447083,
+       "logits/chosen": -3.0203895568847656,
+       "logits/rejected": -3.048060417175293,
+       "logps/chosen": -0.7229627966880798,
+       "logps/rejected": -0.759824812412262,
+       "loss": 0.5618,
+       "nll_loss": 0.4208614230155945,
+       "rewards/accuracies": 0.5375000238418579,
+       "rewards/chosen": -0.1445925384759903,
+       "rewards/margins": 0.007372408173978329,
+       "rewards/rejected": -0.1519649475812912,
+       "step": 110
+     },
+     {
+       "epoch": 0.51,
+       "grad_norm": 3.640625,
+       "learning_rate": 4.22556712424355e-06,
+       "log_odds_chosen": 0.20443418622016907,
+       "log_odds_ratio": -0.6452816724777222,
+       "logits/chosen": -3.0728299617767334,
+       "logits/rejected": -3.073429822921753,
+       "logps/chosen": -0.6431705355644226,
+       "logps/rejected": -0.7486152648925781,
+       "loss": 0.5715,
+       "nll_loss": 0.44801267981529236,
+       "rewards/accuracies": 0.612500011920929,
+       "rewards/chosen": -0.12863411009311676,
+       "rewards/margins": 0.02108895219862461,
+       "rewards/rejected": -0.14972305297851562,
+       "step": 120
+     },
+     {
+       "epoch": 0.56,
+       "grad_norm": 4.15625,
+       "learning_rate": 4.099785024478276e-06,
+       "log_odds_chosen": 0.10702254623174667,
+       "log_odds_ratio": -0.6948094367980957,
+       "logits/chosen": -3.104290246963501,
+       "logits/rejected": -3.114654779434204,
+       "logps/chosen": -0.708793580532074,
+       "logps/rejected": -0.7508866786956787,
+       "loss": 0.5651,
+       "nll_loss": 0.408848375082016,
+       "rewards/accuracies": 0.581250011920929,
+       "rewards/chosen": -0.1417587250471115,
+       "rewards/margins": 0.008418610319495201,
+       "rewards/rejected": -0.15017732977867126,
+       "step": 130
+     },
+     {
+       "epoch": 0.6,
+       "grad_norm": 4.25,
+       "learning_rate": 3.9667347552265945e-06,
+       "log_odds_chosen": 0.0996425449848175,
+       "log_odds_ratio": -0.7059202194213867,
+       "logits/chosen": -3.094669818878174,
+       "logits/rejected": -3.0972161293029785,
+       "logps/chosen": -0.6547704935073853,
+       "logps/rejected": -0.7121762037277222,
+       "loss": 0.5688,
+       "nll_loss": 0.3880278468132019,
+       "rewards/accuracies": 0.581250011920929,
+       "rewards/chosen": -0.1309541016817093,
+       "rewards/margins": 0.011481141671538353,
+       "rewards/rejected": -0.1424352377653122,
+       "step": 140
+     },
+     {
+       "epoch": 0.64,
+       "grad_norm": 4.03125,
+       "learning_rate": 3.8270207926477e-06,
+       "log_odds_chosen": 0.1352790892124176,
+       "log_odds_ratio": -0.6838266253471375,
+       "logits/chosen": -3.096400737762451,
+       "logits/rejected": -3.08994460105896,
+       "logps/chosen": -0.7112253904342651,
+       "logps/rejected": -0.7838465571403503,
+       "loss": 0.5347,
+       "nll_loss": 0.3965539336204529,
+       "rewards/accuracies": 0.6187499761581421,
+       "rewards/chosen": -0.1422450691461563,
+       "rewards/margins": 0.014524241909384727,
+       "rewards/rejected": -0.1567693054676056,
+       "step": 150
+     },
+     {
+       "epoch": 0.69,
+       "grad_norm": 3.953125,
+       "learning_rate": 3.68127788750129e-06,
+       "log_odds_chosen": 0.05867941305041313,
+       "log_odds_ratio": -0.7102705836296082,
+       "logits/chosen": -3.093903064727783,
+       "logits/rejected": -3.092332363128662,
+       "logps/chosen": -0.7055046558380127,
+       "logps/rejected": -0.7451164722442627,
+       "loss": 0.5676,
+       "nll_loss": 0.40267667174339294,
+       "rewards/accuracies": 0.5062500238418579,
+       "rewards/chosen": -0.1411009281873703,
+       "rewards/margins": 0.007922361604869366,
+       "rewards/rejected": -0.14902329444885254,
+       "step": 160
+     },
+     {
+       "epoch": 0.73,
+       "grad_norm": 3.8125,
+       "learning_rate": 3.5301681813375343e-06,
+       "log_odds_chosen": 0.12995854020118713,
+       "log_odds_ratio": -0.6886736154556274,
+       "logits/chosen": -3.115527629852295,
+       "logits/rejected": -3.131301164627075,
+       "logps/chosen": -0.6788499355316162,
+       "logps/rejected": -0.7528234720230103,
+       "loss": 0.5571,
+       "nll_loss": 0.4142919182777405,
+       "rewards/accuracies": 0.543749988079071,
+       "rewards/chosen": -0.13576999306678772,
+       "rewards/margins": 0.014794701710343361,
+       "rewards/rejected": -0.15056470036506653,
+       "step": 170
+     },
+     {
+       "epoch": 0.77,
+       "grad_norm": 4.5,
+       "learning_rate": 3.3743781982447533e-06,
+       "log_odds_chosen": 0.19499924778938293,
+       "log_odds_ratio": -0.6581609845161438,
+       "logits/chosen": -3.1499171257019043,
+       "logits/rejected": -3.1490375995635986,
+       "logps/chosen": -0.6707606911659241,
+       "logps/rejected": -0.7726969718933105,
+       "loss": 0.5664,
+       "nll_loss": 0.40519580245018005,
+       "rewards/accuracies": 0.6312500238418579,
+       "rewards/chosen": -0.1341521441936493,
+       "rewards/margins": 0.020387252792716026,
+       "rewards/rejected": -0.15453937649726868,
+       "step": 180
+     },
+     {
+       "epoch": 0.81,
+       "grad_norm": 3.640625,
+       "learning_rate": 3.2146157258219534e-06,
+       "log_odds_chosen": 0.14189398288726807,
+       "log_odds_ratio": -0.674296498298645,
+       "logits/chosen": -3.1837196350097656,
+       "logits/rejected": -3.186469793319702,
+       "logps/chosen": -0.6766322255134583,
+       "logps/rejected": -0.7536298036575317,
+       "loss": 0.5414,
+       "nll_loss": 0.44522786140441895,
+       "rewards/accuracies": 0.59375,
+       "rewards/chosen": -0.13532646000385284,
+       "rewards/margins": 0.015399503521621227,
+       "rewards/rejected": -0.15072596073150635,
+       "step": 190
+     },
+     {
+       "epoch": 0.86,
+       "grad_norm": 3.84375,
+       "learning_rate": 3.0516065995466336e-06,
+       "log_odds_chosen": 0.1995691955089569,
+       "log_odds_ratio": -0.6483355760574341,
+       "logits/chosen": -3.134974956512451,
+       "logits/rejected": -3.1205341815948486,
+       "logps/chosen": -0.6498687863349915,
+       "logps/rejected": -0.7565893530845642,
+       "loss": 0.5465,
+       "nll_loss": 0.38540342450141907,
+       "rewards/accuracies": 0.612500011920929,
+       "rewards/chosen": -0.12997373938560486,
+       "rewards/margins": 0.021344134584069252,
+       "rewards/rejected": -0.15131787955760956,
+       "step": 200
+     },
+     {
+       "epoch": 0.9,
+       "grad_norm": 3.890625,
+       "learning_rate": 2.8860914051471722e-06,
+       "log_odds_chosen": 0.06047767400741577,
+       "log_odds_ratio": -0.7144695520401001,
+       "logits/chosen": -3.132312774658203,
+       "logits/rejected": -3.1476542949676514,
+       "logps/chosen": -0.6728143692016602,
+       "logps/rejected": -0.6911222338676453,
+       "loss": 0.5295,
+       "nll_loss": 0.3443630337715149,
+       "rewards/accuracies": 0.5375000238418579,
+       "rewards/chosen": -0.1345628798007965,
+       "rewards/margins": 0.003661577822640538,
+       "rewards/rejected": -0.13822445273399353,
+       "step": 210
+     },
+     {
+       "epoch": 0.94,
+       "grad_norm": 3.734375,
+       "learning_rate": 2.7188221139616303e-06,
+       "log_odds_chosen": 0.11169638484716415,
+       "log_odds_ratio": -0.6887539625167847,
+       "logits/chosen": -3.1210544109344482,
+       "logits/rejected": -3.1303868293762207,
+       "logps/chosen": -0.6995309591293335,
+       "logps/rejected": -0.7801494598388672,
+       "loss": 0.5703,
+       "nll_loss": 0.4030347764492035,
+       "rewards/accuracies": 0.550000011920929,
+       "rewards/chosen": -0.13990618288516998,
+       "rewards/margins": 0.016123712062835693,
+       "rewards/rejected": -0.15602989494800568,
+       "step": 220
+     },
+     {
+       "epoch": 0.99,
+       "grad_norm": 4.15625,
+       "learning_rate": 2.550558666569279e-06,
+       "log_odds_chosen": 0.09025579690933228,
+       "log_odds_ratio": -0.7007181644439697,
+       "logits/chosen": -3.1683359146118164,
+       "logits/rejected": -3.1638879776000977,
+       "logps/chosen": -0.6835473775863647,
+       "logps/rejected": -0.742916464805603,
+       "loss": 0.5612,
+       "nll_loss": 0.4019767642021179,
+       "rewards/accuracies": 0.5874999761581421,
+       "rewards/chosen": -0.13670948147773743,
+       "rewards/margins": 0.011873816139996052,
+       "rewards/rejected": -0.1485833078622818,
+       "step": 230
+     },
+     {
+       "epoch": 1.03,
+       "grad_norm": 4.15625,
+       "learning_rate": 2.3820655202161237e-06,
+       "log_odds_chosen": 0.4793614447116852,
+       "log_odds_ratio": -0.548845648765564,
+       "logits/chosen": -3.1354401111602783,
+       "logits/rejected": -3.124065637588501,
+       "logps/chosen": -0.5931288003921509,
+       "logps/rejected": -0.8161810636520386,
+       "loss": 0.5134,
+       "nll_loss": 0.40906891226768494,
+       "rewards/accuracies": 0.7437499761581421,
+       "rewards/chosen": -0.11862574517726898,
+       "rewards/margins": 0.04461048170924187,
+       "rewards/rejected": -0.16323623061180115,
+       "step": 240
+     },
+     {
+       "epoch": 1.07,
+       "grad_norm": 3.96875,
+       "learning_rate": 2.214108175720246e-06,
+       "log_odds_chosen": 0.7576763033866882,
+       "log_odds_ratio": -0.4282462000846863,
+       "logits/chosen": -3.118053674697876,
+       "logits/rejected": -3.1379125118255615,
+       "logps/chosen": -0.49083226919174194,
+       "logps/rejected": -0.853580117225647,
+       "loss": 0.459,
+       "nll_loss": 0.3588988184928894,
+       "rewards/accuracies": 0.875,
+       "rewards/chosen": -0.09816645830869675,
+       "rewards/margins": 0.07254956662654877,
+       "rewards/rejected": -0.17071601748466492,
+       "step": 250
+     },
+     {
+       "epoch": 1.11,
+       "grad_norm": 4.28125,
+       "learning_rate": 2.0474496996359676e-06,
+       "log_odds_chosen": 0.733576774597168,
+       "log_odds_ratio": -0.4507887363433838,
+       "logits/chosen": -3.137216567993164,
+       "logits/rejected": -3.1352245807647705,
+       "logps/chosen": -0.5455455183982849,
+       "logps/rejected": -0.9026147723197937,
+       "loss": 0.4736,
+       "nll_loss": 0.38031843304634094,
+       "rewards/accuracies": 0.824999988079071,
+       "rewards/chosen": -0.10910910367965698,
+       "rewards/margins": 0.07141385227441788,
+       "rewards/rejected": -0.18052296340465546,
+       "step": 260
+     },
+     {
+       "epoch": 1.16,
+       "grad_norm": 4.46875,
+       "learning_rate": 1.882847257477398e-06,
+       "log_odds_chosen": 0.8710149526596069,
+       "log_odds_ratio": -0.4077950417995453,
+       "logits/chosen": -3.1231517791748047,
+       "logits/rejected": -3.1361801624298096,
+       "logps/chosen": -0.5210133194923401,
+       "logps/rejected": -0.9504634737968445,
+       "loss": 0.4573,
+       "nll_loss": 0.3601577877998352,
+       "rewards/accuracies": 0.893750011920929,
+       "rewards/chosen": -0.10420265048742294,
+       "rewards/margins": 0.0858900398015976,
+       "rewards/rejected": -0.19009268283843994,
+       "step": 270
+     },
+     {
+       "epoch": 1.2,
+       "grad_norm": 4.625,
+       "learning_rate": 1.7210486737516947e-06,
+       "log_odds_chosen": 0.8500790596008301,
+       "log_odds_ratio": -0.409542977809906,
+       "logits/chosen": -3.1291608810424805,
+       "logits/rejected": -3.1443474292755127,
+       "logps/chosen": -0.5176515579223633,
+       "logps/rejected": -0.9122639894485474,
+       "loss": 0.441,
+       "nll_loss": 0.38534316420555115,
+       "rewards/accuracies": 0.856249988079071,
+       "rewards/chosen": -0.10353031009435654,
+       "rewards/margins": 0.07892249524593353,
+       "rewards/rejected": -0.18245279788970947,
+       "step": 280
+     },
+     {
+       "epoch": 1.24,
+       "grad_norm": 4.21875,
+       "learning_rate": 1.5627890344305256e-06,
+       "log_odds_chosen": 0.7590165734291077,
+       "log_odds_ratio": -0.4255223274230957,
+       "logits/chosen": -3.1419529914855957,
+       "logits/rejected": -3.1484503746032715,
+       "logps/chosen": -0.5282884836196899,
+       "logps/rejected": -0.8793913125991821,
+       "loss": 0.4385,
+       "nll_loss": 0.3436318039894104,
+       "rewards/accuracies": 0.8999999761581421,
+       "rewards/chosen": -0.10565771162509918,
+       "rewards/margins": 0.07022054493427277,
+       "rewards/rejected": -0.17587824165821075,
+       "step": 290
+     },
+     {
+       "epoch": 1.28,
+       "grad_norm": 4.40625,
+       "learning_rate": 1.4087873472954638e-06,
+       "log_odds_chosen": 0.8060294985771179,
+       "log_odds_ratio": -0.429814875125885,
+       "logits/chosen": -3.1374385356903076,
+       "logits/rejected": -3.137779712677002,
+       "logps/chosen": -0.5177971124649048,
+       "logps/rejected": -0.9097731709480286,
+       "loss": 0.4492,
+       "nll_loss": 0.35047072172164917,
+       "rewards/accuracies": 0.8500000238418579,
+       "rewards/chosen": -0.1035594493150711,
+       "rewards/margins": 0.07839521020650864,
+       "rewards/rejected": -0.18195465207099915,
+       "step": 300
+     },
+     {
+       "epoch": 1.33,
+       "grad_norm": 4.15625,
+       "learning_rate": 1.2597432753300753e-06,
+       "log_odds_chosen": 0.9330030679702759,
+       "log_odds_ratio": -0.3743920624256134,
+       "logits/chosen": -3.1239657402038574,
+       "logits/rejected": -3.1310207843780518,
+       "logps/chosen": -0.4897725582122803,
+       "logps/rejected": -0.9372097253799438,
+       "loss": 0.4426,
+       "nll_loss": 0.35372328758239746,
+       "rewards/accuracies": 0.9375,
+       "rewards/chosen": -0.09795451909303665,
+       "rewards/margins": 0.08948741853237152,
+       "rewards/rejected": -0.18744193017482758,
+       "step": 310
+     },
+     {
+       "epoch": 1.37,
+       "grad_norm": 4.34375,
+       "learning_rate": 1.116333957999608e-06,
+       "log_odds_chosen": 0.7943806648254395,
+       "log_odds_ratio": -0.43320780992507935,
+       "logits/chosen": -3.1169140338897705,
+       "logits/rejected": -3.1135616302490234,
+       "logps/chosen": -0.5348241925239563,
+       "logps/rejected": -0.9205295443534851,
+       "loss": 0.4582,
+       "nll_loss": 0.38884326815605164,
+       "rewards/accuracies": 0.8687499761581421,
+       "rewards/chosen": -0.1069648265838623,
+       "rewards/margins": 0.07714107632637024,
+       "rewards/rejected": -0.18410590291023254,
+       "step": 320
+     },
+     {
+       "epoch": 1.41,
+       "grad_norm": 4.28125,
+       "learning_rate": 9.792109348599036e-07,
+       "log_odds_chosen": 0.8558658361434937,
+       "log_odds_ratio": -0.40837377309799194,
+       "logits/chosen": -3.1396241188049316,
+       "logits/rejected": -3.1491103172302246,
+       "logps/chosen": -0.5247679948806763,
+       "logps/rejected": -0.9536547660827637,
+       "loss": 0.4342,
+       "nll_loss": 0.34443485736846924,
+       "rewards/accuracies": 0.893750011920929,
+       "rewards/chosen": -0.1049535870552063,
+       "rewards/margins": 0.08577735722064972,
+       "rewards/rejected": -0.1907309591770172,
+       "step": 330
+     },
+     {
+       "epoch": 1.46,
+       "grad_norm": 4.21875,
+       "learning_rate": 8.48997185472226e-07,
+       "log_odds_chosen": 0.8510887026786804,
+       "log_odds_ratio": -0.4203471541404724,
+       "logits/chosen": -3.1297011375427246,
+       "logits/rejected": -3.139014720916748,
+       "logps/chosen": -0.5276013612747192,
+       "logps/rejected": -0.9365232586860657,
+       "loss": 0.4576,
+       "nll_loss": 0.35662391781806946,
+       "rewards/accuracies": 0.84375,
+       "rewards/chosen": -0.10552027076482773,
+       "rewards/margins": 0.08178436756134033,
+       "rewards/rejected": -0.18730461597442627,
+       "step": 340
+     },
+     {
+       "epoch": 1.5,
+       "grad_norm": 4.75,
+       "learning_rate": 7.26284299072334e-07,
+       "log_odds_chosen": 0.8458667993545532,
+       "log_odds_ratio": -0.4165124297142029,
+       "logits/chosen": -3.1148293018341064,
+       "logits/rejected": -3.1330175399780273,
+       "logps/chosen": -0.523215651512146,
+       "logps/rejected": -0.9373798370361328,
+       "loss": 0.4358,
+       "nll_loss": 0.36000293493270874,
+       "rewards/accuracies": 0.862500011920929,
+       "rewards/chosen": -0.10464314371347427,
+       "rewards/margins": 0.08283283561468124,
+       "rewards/rejected": -0.1874759942293167,
+       "step": 350
+     },
+     {
+       "epoch": 1.54,
+       "grad_norm": 4.1875,
+       "learning_rate": 6.11629786852592e-07,
+       "log_odds_chosen": 0.7515332102775574,
+       "log_odds_ratio": -0.4506092965602875,
+       "logits/chosen": -3.124948501586914,
+       "logits/rejected": -3.1396474838256836,
+       "logps/chosen": -0.5801728963851929,
+       "logps/rejected": -0.9574558138847351,
+       "loss": 0.4377,
+       "nll_loss": 0.3803955018520355,
+       "rewards/accuracies": 0.8062499761581421,
+       "rewards/chosen": -0.11603458225727081,
+       "rewards/margins": 0.07545658200979233,
+       "rewards/rejected": -0.19149115681648254,
+       "step": 360
+     },
+     {
+       "epoch": 1.58,
+       "grad_norm": 4.3125,
+       "learning_rate": 5.055545490679981e-07,
+       "log_odds_chosen": 0.8198745846748352,
+       "log_odds_ratio": -0.4333067834377289,
+       "logits/chosen": -3.0795700550079346,
+       "logits/rejected": -3.0752415657043457,
+       "logps/chosen": -0.5191539525985718,
+       "logps/rejected": -0.9393302798271179,
+       "loss": 0.4312,
+       "nll_loss": 0.3498368263244629,
+       "rewards/accuracies": 0.862500011920929,
+       "rewards/chosen": -0.10383079200983047,
+       "rewards/margins": 0.08403525501489639,
+       "rewards/rejected": -0.18786606192588806,
+       "step": 370
+     },
+     {
+       "epoch": 1.63,
+       "grad_norm": 4.46875,
+       "learning_rate": 4.0854050847362966e-07,
+       "log_odds_chosen": 0.752915620803833,
+       "log_odds_ratio": -0.4345216751098633,
+       "logits/chosen": -3.094586133956909,
+       "logits/rejected": -3.128570318222046,
+       "logps/chosen": -0.4940599501132965,
+       "logps/rejected": -0.8385321497917175,
+       "loss": 0.4401,
+       "nll_loss": 0.3326614499092102,
+       "rewards/accuracies": 0.875,
+       "rewards/chosen": -0.09881198406219482,
+       "rewards/margins": 0.06889443844556808,
+       "rewards/rejected": -0.1677064150571823,
+       "step": 380
+     },
+     {
+       "epoch": 1.67,
+       "grad_norm": 4.59375,
+       "learning_rate": 3.2102842084530293e-07,
+       "log_odds_chosen": 0.8905293345451355,
+       "log_odds_ratio": -0.40456423163414,
+       "logits/chosen": -3.128862142562866,
+       "logits/rejected": -3.1354432106018066,
+       "logps/chosen": -0.48209333419799805,
+       "logps/rejected": -0.8838127851486206,
+       "loss": 0.4485,
+       "nll_loss": 0.34660106897354126,
+       "rewards/accuracies": 0.893750011920929,
+       "rewards/chosen": -0.09641867876052856,
+       "rewards/margins": 0.08034388720989227,
+       "rewards/rejected": -0.17676255106925964,
+       "step": 390
+     },
+     {
+       "epoch": 1.71,
+       "grad_norm": 4.125,
+       "learning_rate": 2.4341587253072035e-07,
+       "log_odds_chosen": 0.7810487747192383,
+       "log_odds_ratio": -0.4349869191646576,
+       "logits/chosen": -3.108931064605713,
+       "logits/rejected": -3.120337963104248,
+       "logps/chosen": -0.5262182354927063,
+       "logps/rejected": -0.8968960046768188,
+       "loss": 0.439,
+       "nll_loss": 0.3392297923564911,
+       "rewards/accuracies": 0.875,
+       "rewards/chosen": -0.10524364560842514,
+       "rewards/margins": 0.07413554936647415,
+       "rewards/rejected": -0.1793791949748993,
+       "step": 400
+     },
+     {
+       "epoch": 1.76,
+       "grad_norm": 3.796875,
+       "learning_rate": 1.7605547412867574e-07,
+       "log_odds_chosen": 0.8104718923568726,
+       "log_odds_ratio": -0.43349266052246094,
+       "logits/chosen": -3.1305861473083496,
+       "logits/rejected": -3.1289594173431396,
+       "logps/chosen": -0.532878577709198,
+       "logps/rejected": -0.9315230250358582,
+       "loss": 0.4151,
+       "nll_loss": 0.31806549429893494,
+       "rewards/accuracies": 0.831250011920929,
+       "rewards/chosen": -0.10657572746276855,
+       "rewards/margins": 0.07972888648509979,
+       "rewards/rejected": -0.18630459904670715,
+       "step": 410
+     },
+     {
+       "epoch": 1.8,
+       "grad_norm": 4.3125,
+       "learning_rate": 1.1925325850281416e-07,
+       "log_odds_chosen": 0.8517113924026489,
+       "log_odds_ratio": -0.41829895973205566,
+       "logits/chosen": -3.1101014614105225,
+       "logits/rejected": -3.1129114627838135,
+       "logps/chosen": -0.5812093019485474,
+       "logps/rejected": -1.0427311658859253,
+       "loss": 0.4593,
+       "nll_loss": 0.40889596939086914,
+       "rewards/accuracies": 0.856249988079071,
+       "rewards/chosen": -0.11624185740947723,
+       "rewards/margins": 0.09230439364910126,
+       "rewards/rejected": -0.2085462361574173,
+       "step": 420
+     },
+     {
+       "epoch": 1.84,
+       "grad_norm": 4.40625,
+       "learning_rate": 7.326729040812136e-08,
+       "log_odds_chosen": 0.8634054064750671,
+       "log_odds_ratio": -0.41465049982070923,
+       "logits/chosen": -3.142824172973633,
+       "logits/rejected": -3.1401774883270264,
+       "logps/chosen": -0.5450412631034851,
+       "logps/rejected": -0.9720172882080078,
+       "loss": 0.4621,
+       "nll_loss": 0.39653775095939636,
+       "rewards/accuracies": 0.856249988079071,
+       "rewards/chosen": -0.10900826752185822,
+       "rewards/margins": 0.0853951945900917,
+       "rewards/rejected": -0.19440343976020813,
+       "step": 430
+     },
+     {
+       "epoch": 1.88,
+       "grad_norm": 4.15625,
+       "learning_rate": 3.830649404690939e-08,
+       "log_odds_chosen": 0.900564968585968,
+       "log_odds_ratio": -0.4068581461906433,
+       "logits/chosen": -3.128530979156494,
+       "logits/rejected": -3.1158556938171387,
+       "logps/chosen": -0.503007709980011,
+       "logps/rejected": -0.9345412254333496,
+       "loss": 0.4293,
+       "nll_loss": 0.33139926195144653,
+       "rewards/accuracies": 0.8812500238418579,
+       "rewards/chosen": -0.10060155391693115,
+       "rewards/margins": 0.08630671352148056,
+       "rewards/rejected": -0.1869082748889923,
+       "step": 440
+     },
+     {
+       "epoch": 1.93,
+       "grad_norm": 4.46875,
+       "learning_rate": 1.452970388096192e-08,
+       "log_odds_chosen": 0.8053537607192993,
+       "log_odds_ratio": -0.4206915497779846,
+       "logits/chosen": -3.1261043548583984,
+       "logits/rejected": -3.133706569671631,
+       "logps/chosen": -0.5194380283355713,
+       "logps/rejected": -0.9027393460273743,
+       "loss": 0.4336,
+       "nll_loss": 0.37930217385292053,
+       "rewards/accuracies": 0.887499988079071,
+       "rewards/chosen": -0.10388760268688202,
+       "rewards/margins": 0.07666026055812836,
+       "rewards/rejected": -0.18054786324501038,
+       "step": 450
+     },
+     {
+       "epoch": 1.97,
+       "grad_norm": 4.40625,
+       "learning_rate": 2.044943012210754e-09,
+       "log_odds_chosen": 0.8168908357620239,
+       "log_odds_ratio": -0.4145738482475281,
+       "logits/chosen": -3.13958740234375,
+       "logits/rejected": -3.1476328372955322,
+       "logps/chosen": -0.5368055105209351,
+       "logps/rejected": -0.9489006996154785,
+       "loss": 0.4614,
+       "nll_loss": 0.3538859188556671,
+       "rewards/accuracies": 0.875,
+       "rewards/chosen": -0.10736110061407089,
+       "rewards/margins": 0.0824190229177475,
+       "rewards/rejected": -0.18978014588356018,
+       "step": 460
+     },
+     {
+       "epoch": 2.0,
+       "step": 466,
+       "total_flos": 0.0,
+       "train_loss": 0.508239566000746,
+       "train_runtime": 6462.3532,
+       "train_samples_per_second": 4.62,
+       "train_steps_per_second": 0.072
+     }
+   ],
+   "logging_steps": 10,
+   "max_steps": 466,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 2,
+   "save_steps": 500,
+   "total_flos": 0.0,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
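
The curve logged every 10 steps above can be recovered programmatically; a short sketch using only the standard library and the file in this commit:

```python
# Sketch: pull the training-loss curve out of trainer_state.json.
import json

with open("trainer_state.json") as fh:
    state = json.load(fh)

# The final summary entry carries "train_loss" rather than "loss", so it is skipped here.
curve = [(entry["step"], entry["loss"]) for entry in state["log_history"] if "loss" in entry]
print(curve[0], curve[-1])  # (10, 0.6137) ... (460, 0.4614)
```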