RikkiXu committed
Commit eae4afd
1 Parent(s): 1c9e834

Model save

README.md CHANGED
@@ -13,7 +13,7 @@ should probably proofread and complete it, then remove this comment. -->
 
  # zephyr-7b-dpo-full
 
- This model was trained from scratch on an unknown dataset.
+ This model was trained from scratch on the None dataset.
 
  ## Model description
 
@@ -52,7 +52,7 @@ The following hyperparameters were used during training:
 
  ### Framework versions
 
- - Transformers 4.40.2
+ - Transformers 4.41.1
  - Pytorch 2.1.2+cu118
- - Datasets 2.19.1
+ - Datasets 2.16.1
  - Tokenizers 0.19.1
all_results.json CHANGED
@@ -1,9 +1,9 @@
  {
  "epoch": 0.9984,
  "total_flos": 0.0,
- "train_loss": 0.3884877807054764,
- "train_runtime": 4677.6403,
+ "train_loss": 0.4014626894241724,
+ "train_runtime": 5052.0322,
  "train_samples": 39942,
- "train_samples_per_second": 8.539,
- "train_steps_per_second": 0.033
+ "train_samples_per_second": 7.906,
+ "train_steps_per_second": 0.031
  }
config.json CHANGED
@@ -20,7 +20,7 @@
  "sliding_window": 4096,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
- "transformers_version": "4.40.2",
+ "transformers_version": "4.41.1",
  "use_cache": false,
  "vocab_size": 32002
  }
generation_config.json CHANGED
@@ -2,5 +2,5 @@
  "_from_model_config": true,
  "bos_token_id": 1,
  "eos_token_id": 32000,
- "transformers_version": "4.40.2"
+ "transformers_version": "4.41.1"
  }
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9dba7919b04fd1d70e11a29220f5eaf634bb315409f9a94f7a263955001973c6
+ oid sha256:7f512a730a26a61a1c16526c02b35b83222d091cd9296f1da7e71affd7995410
  size 4943178720
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a84b4be1c4d4aad2ce051a91acda1c33720055694a806a6fb36a24f1b682e2c3
+ oid sha256:ac0006e40eab93ba96e9c4ac961c6bd73d92585b3f54f427b278b0fb7b9767b2
  size 4999819336
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:509b7c42a2681737189e49583a27d65db54dd70bda3ef6f8890b6b3e93dca2ff
+ oid sha256:a220e930529efb9b6318941d96516bbbb5fa2c9d14cf80d2860389fe098f2e86
  size 4540532728
runs/Jun05_23-17-05_n136-082-130/events.out.tfevents.1717600754.n136-082-130.1889466.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:eb020d37b9be1057d9fcbd66defea51b3268e8b466994f77a5e0067c756f0b28
- size 12591
+ oid sha256:38495588af7a3d409c5c24789e209e8c63e45c66ecbc5399988e055f803c60c7
+ size 16361
train_results.json CHANGED
@@ -1,9 +1,9 @@
  {
  "epoch": 0.9984,
  "total_flos": 0.0,
- "train_loss": 0.3884877807054764,
- "train_runtime": 4677.6403,
+ "train_loss": 0.4014626894241724,
+ "train_runtime": 5052.0322,
  "train_samples": 39942,
- "train_samples_per_second": 8.539,
- "train_steps_per_second": 0.033
+ "train_samples_per_second": 7.906,
+ "train_steps_per_second": 0.031
  }
trainer_state.json CHANGED
@@ -10,7 +10,7 @@
  "log_history": [
  {
  "epoch": 0.0064,
- "grad_norm": 1341.8773394764246,
+ "grad_norm": 1341.948853106591,
  "learning_rate": 3.125e-09,
  "logits/chosen": -3.9499800205230713,
  "logits/rejected": -4.237819194793701,
@@ -25,237 +25,237 @@
  },
  {
  "epoch": 0.064,
- "grad_norm": 1342.2810836893796,
+ "grad_norm": 1338.549586928997,
  "learning_rate": 3.125e-08,
- "logits/chosen": -4.129705905914307,
- "logits/rejected": -4.352028846740723,
- "logps/chosen": -351.5079650878906,
- "logps/rejected": -308.8138427734375,
- "loss": 0.7326,
- "rewards/accuracies": 0.3680555522441864,
- "rewards/chosen": -0.04078766331076622,
- "rewards/margins": -0.11378024518489838,
- "rewards/rejected": 0.07299260050058365,
+ "logits/chosen": -4.129465579986572,
+ "logits/rejected": -4.351754188537598,
+ "logps/chosen": -351.4585266113281,
+ "logps/rejected": -308.89202880859375,
+ "loss": 0.7278,
+ "rewards/accuracies": 0.375,
+ "rewards/chosen": -0.016052477061748505,
+ "rewards/margins": -0.04994054138660431,
+ "rewards/rejected": 0.033888060599565506,
  "step": 10
  },
  {
  "epoch": 0.128,
- "grad_norm": 1252.3965895279962,
+ "grad_norm": 1285.3502810697903,
  "learning_rate": 4.9899357349880975e-08,
- "logits/chosen": -4.194980144500732,
- "logits/rejected": -4.382790565490723,
- "logps/chosen": -334.9039001464844,
- "logps/rejected": -293.8416748046875,
- "loss": 0.683,
- "rewards/accuracies": 0.581250011920929,
- "rewards/chosen": 0.22410114109516144,
- "rewards/margins": 0.11712154000997543,
- "rewards/rejected": 0.10697959363460541,
+ "logits/chosen": -4.195886611938477,
+ "logits/rejected": -4.383217811584473,
+ "logps/chosen": -334.9483337402344,
+ "logps/rejected": -293.82867431640625,
+ "loss": 0.6803,
+ "rewards/accuracies": 0.550000011920929,
+ "rewards/chosen": 0.20189008116722107,
+ "rewards/margins": 0.08841639757156372,
+ "rewards/rejected": 0.11347369104623795,
  "step": 20
  },
  {
  "epoch": 0.192,
- "grad_norm": 904.3776918610464,
+ "grad_norm": 938.2936106934807,
  "learning_rate": 4.877641290737884e-08,
- "logits/chosen": -4.230466365814209,
- "logits/rejected": -4.363996505737305,
- "logps/chosen": -327.71453857421875,
- "logps/rejected": -295.3287658691406,
- "loss": 0.5498,
- "rewards/accuracies": 0.7437499761581421,
- "rewards/chosen": 0.9708820581436157,
- "rewards/margins": 0.5084127187728882,
- "rewards/rejected": 0.46246927976608276,
+ "logits/chosen": -4.2334418296813965,
+ "logits/rejected": -4.3670477867126465,
+ "logps/chosen": -327.77716064453125,
+ "logps/rejected": -295.27203369140625,
+ "loss": 0.5536,
+ "rewards/accuracies": 0.699999988079071,
+ "rewards/chosen": 0.9395688772201538,
+ "rewards/margins": 0.4487342834472656,
+ "rewards/rejected": 0.49083465337753296,
  "step": 30
  },
  {
  "epoch": 0.256,
- "grad_norm": 894.6327423356746,
+ "grad_norm": 899.5533271284629,
  "learning_rate": 4.646121984004665e-08,
- "logits/chosen": -4.1493096351623535,
- "logits/rejected": -4.351648807525635,
- "logps/chosen": -330.09368896484375,
- "logps/rejected": -288.2974853515625,
- "loss": 0.4125,
+ "logits/chosen": -4.14696741104126,
+ "logits/rejected": -4.348996162414551,
+ "logps/chosen": -330.3467712402344,
+ "logps/rejected": -288.37066650390625,
+ "loss": 0.4332,
  "rewards/accuracies": 0.8218749761581421,
- "rewards/chosen": 1.9414455890655518,
- "rewards/margins": 1.1434320211410522,
- "rewards/rejected": 0.7980135083198547,
+ "rewards/chosen": 1.8148998022079468,
+ "rewards/margins": 1.0534685850143433,
+ "rewards/rejected": 0.7614310383796692,
  "step": 40
  },
  {
  "epoch": 0.32,
- "grad_norm": 706.4309708182283,
+ "grad_norm": 737.601892593307,
  "learning_rate": 4.3069871595684784e-08,
- "logits/chosen": -4.244365215301514,
- "logits/rejected": -4.423664093017578,
- "logps/chosen": -329.6412353515625,
- "logps/rejected": -291.22528076171875,
- "loss": 0.3694,
- "rewards/accuracies": 0.840624988079071,
- "rewards/chosen": 2.6057987213134766,
- "rewards/margins": 1.537340521812439,
- "rewards/rejected": 1.068458080291748,
+ "logits/chosen": -4.238839149475098,
+ "logits/rejected": -4.417731285095215,
+ "logps/chosen": -329.99560546875,
+ "logps/rejected": -291.286865234375,
+ "loss": 0.3783,
+ "rewards/accuracies": 0.8531249761581421,
+ "rewards/chosen": 2.4285898208618164,
+ "rewards/margins": 1.3909423351287842,
+ "rewards/rejected": 1.0376476049423218,
  "step": 50
  },
  {
  "epoch": 0.384,
- "grad_norm": 679.6447682422123,
+ "grad_norm": 725.6613657693058,
  "learning_rate": 3.8772424536302564e-08,
- "logits/chosen": -4.262530326843262,
- "logits/rejected": -4.4340620040893555,
- "logps/chosen": -320.7197570800781,
- "logps/rejected": -291.15264892578125,
- "loss": 0.3459,
- "rewards/accuracies": 0.8343750238418579,
- "rewards/chosen": 3.022132158279419,
- "rewards/margins": 1.8344866037368774,
- "rewards/rejected": 1.187645673751831,
+ "logits/chosen": -4.256905555725098,
+ "logits/rejected": -4.42824125289917,
+ "logps/chosen": -321.1641845703125,
+ "logps/rejected": -291.2255859375,
+ "loss": 0.3554,
+ "rewards/accuracies": 0.846875011920929,
+ "rewards/chosen": 2.799910306930542,
+ "rewards/margins": 1.64876389503479,
+ "rewards/rejected": 1.151146411895752,
  "step": 60
  },
  {
  "epoch": 0.448,
- "grad_norm": 600.9568341116722,
+ "grad_norm": 650.256008502672,
  "learning_rate": 3.378437060203357e-08,
- "logits/chosen": -4.188047885894775,
- "logits/rejected": -4.377224445343018,
- "logps/chosen": -320.23345947265625,
- "logps/rejected": -288.5027770996094,
- "loss": 0.3189,
- "rewards/accuracies": 0.8187500238418579,
- "rewards/chosen": 3.3037331104278564,
- "rewards/margins": 2.1254096031188965,
- "rewards/rejected": 1.1783230304718018,
+ "logits/chosen": -4.1877875328063965,
+ "logits/rejected": -4.377414703369141,
+ "logps/chosen": -320.77117919921875,
+ "logps/rejected": -288.512451171875,
+ "loss": 0.3377,
+ "rewards/accuracies": 0.7906249761581421,
+ "rewards/chosen": 3.0348763465881348,
+ "rewards/margins": 1.8613855838775635,
+ "rewards/rejected": 1.1734905242919922,
  "step": 70
  },
  {
  "epoch": 0.512,
- "grad_norm": 654.7049863576665,
+ "grad_norm": 643.4652639779088,
  "learning_rate": 2.8355831645441387e-08,
- "logits/chosen": -4.0522565841674805,
- "logits/rejected": -4.341280937194824,
- "logps/chosen": -345.8344421386719,
- "logps/rejected": -307.4328918457031,
- "loss": 0.3105,
- "rewards/accuracies": 0.8999999761581421,
- "rewards/chosen": 3.7246456146240234,
- "rewards/margins": 2.5337729454040527,
- "rewards/rejected": 1.1908724308013916,
+ "logits/chosen": -4.054490089416504,
+ "logits/rejected": -4.3433709144592285,
+ "logps/chosen": -346.53253173828125,
+ "logps/rejected": -307.34930419921875,
+ "loss": 0.3369,
+ "rewards/accuracies": 0.8656250238418579,
+ "rewards/chosen": 3.375593662261963,
+ "rewards/margins": 2.1429190635681152,
+ "rewards/rejected": 1.2326747179031372,
  "step": 80
  },
  {
  "epoch": 0.576,
- "grad_norm": 638.1282144295093,
+ "grad_norm": 658.4601656800384,
  "learning_rate": 2.2759017277414164e-08,
- "logits/chosen": -4.180428504943848,
- "logits/rejected": -4.390549659729004,
- "logps/chosen": -332.82275390625,
- "logps/rejected": -295.1810607910156,
- "loss": 0.3099,
- "rewards/accuracies": 0.875,
- "rewards/chosen": 3.2552542686462402,
- "rewards/margins": 2.3172354698181152,
- "rewards/rejected": 0.9380186796188354,
+ "logits/chosen": -4.182233810424805,
+ "logits/rejected": -4.391860485076904,
+ "logps/chosen": -333.4163513183594,
+ "logps/rejected": -295.03515625,
+ "loss": 0.3301,
+ "rewards/accuracies": 0.859375,
+ "rewards/chosen": 2.9584574699401855,
+ "rewards/margins": 1.9474881887435913,
+ "rewards/rejected": 1.0109691619873047,
  "step": 90
  },
  {
  "epoch": 0.64,
- "grad_norm": 680.3285346474286,
+ "grad_norm": 716.6222925151064,
  "learning_rate": 1.7274575140626317e-08,
- "logits/chosen": -4.167009353637695,
- "logits/rejected": -4.386021614074707,
- "logps/chosen": -330.049560546875,
- "logps/rejected": -285.8011169433594,
- "loss": 0.3123,
- "rewards/accuracies": 0.8843749761581421,
- "rewards/chosen": 3.6218514442443848,
- "rewards/margins": 2.723836898803711,
- "rewards/rejected": 0.8980148434638977,
+ "logits/chosen": -4.168593406677246,
+ "logits/rejected": -4.387479305267334,
+ "logps/chosen": -330.75775146484375,
+ "logps/rejected": -285.5531311035156,
+ "loss": 0.3315,
+ "rewards/accuracies": 0.862500011920929,
+ "rewards/chosen": 3.267747163772583,
+ "rewards/margins": 2.245748281478882,
+ "rewards/rejected": 1.0219987630844116,
  "step": 100
  },
  {
  "epoch": 0.704,
- "grad_norm": 616.2712616857408,
+ "grad_norm": 660.4496184068971,
  "learning_rate": 1.217751806485235e-08,
- "logits/chosen": -4.145500183105469,
- "logits/rejected": -4.386542320251465,
- "logps/chosen": -311.7583923339844,
- "logps/rejected": -276.3233947753906,
- "loss": 0.3022,
- "rewards/accuracies": 0.8843749761581421,
- "rewards/chosen": 3.584909439086914,
- "rewards/margins": 2.6118006706237793,
- "rewards/rejected": 0.9731090664863586,
+ "logits/chosen": -4.1479597091674805,
+ "logits/rejected": -4.388964653015137,
+ "logps/chosen": -312.3727722167969,
+ "logps/rejected": -276.13372802734375,
+ "loss": 0.3225,
+ "rewards/accuracies": 0.8656250238418579,
+ "rewards/chosen": 3.277716875076294,
+ "rewards/margins": 2.2097737789154053,
+ "rewards/rejected": 1.0679429769515991,
  "step": 110
  },
  {
  "epoch": 0.768,
- "grad_norm": 649.1888991009114,
+ "grad_norm": 637.7261383555538,
  "learning_rate": 7.723433775328384e-09,
- "logits/chosen": -4.141805171966553,
- "logits/rejected": -4.35054874420166,
- "logps/chosen": -325.5559997558594,
- "logps/rejected": -280.5980529785156,
- "loss": 0.3033,
- "rewards/accuracies": 0.8656250238418579,
- "rewards/chosen": 3.6838138103485107,
- "rewards/margins": 2.6417319774627686,
- "rewards/rejected": 1.0420820713043213,
+ "logits/chosen": -4.145724296569824,
+ "logits/rejected": -4.354761123657227,
+ "logps/chosen": -326.25091552734375,
+ "logps/rejected": -280.4151916503906,
+ "loss": 0.3134,
+ "rewards/accuracies": 0.8531249761581421,
+ "rewards/chosen": 3.3363654613494873,
+ "rewards/margins": 2.202853202819824,
+ "rewards/rejected": 1.1335121393203735,
  "step": 120
  },
  {
  "epoch": 0.832,
- "grad_norm": 747.4298760038148,
+ "grad_norm": 768.5710075733069,
  "learning_rate": 4.135668656967433e-09,
- "logits/chosen": -4.228358268737793,
- "logits/rejected": -4.38976526260376,
- "logps/chosen": -331.02642822265625,
- "logps/rejected": -286.7439880371094,
- "loss": 0.3064,
+ "logits/chosen": -4.2324018478393555,
+ "logits/rejected": -4.393607139587402,
+ "logps/chosen": -331.71051025390625,
+ "logps/rejected": -286.67938232421875,
+ "loss": 0.3203,
  "rewards/accuracies": 0.875,
- "rewards/chosen": 3.7264277935028076,
- "rewards/margins": 2.6530587673187256,
- "rewards/rejected": 1.073369026184082,
+ "rewards/chosen": 3.384364604949951,
+ "rewards/margins": 2.2786831855773926,
+ "rewards/rejected": 1.1056816577911377,
  "step": 130
  },
  {
  "epoch": 0.896,
- "grad_norm": 697.5841535989922,
+ "grad_norm": 742.2155895768275,
  "learning_rate": 1.5941282340065698e-09,
- "logits/chosen": -4.18213415145874,
- "logits/rejected": -4.3970947265625,
- "logps/chosen": -332.56500244140625,
- "logps/rejected": -303.63543701171875,
- "loss": 0.3069,
- "rewards/accuracies": 0.856249988079071,
- "rewards/chosen": 3.5617058277130127,
- "rewards/margins": 2.6050186157226562,
- "rewards/rejected": 0.9566874504089355,
+ "logits/chosen": -4.18549919128418,
+ "logits/rejected": -4.4001359939575195,
+ "logps/chosen": -333.18035888671875,
+ "logps/rejected": -303.45220947265625,
+ "loss": 0.3282,
+ "rewards/accuracies": 0.84375,
+ "rewards/chosen": 3.2540221214294434,
+ "rewards/margins": 2.2057249546051025,
+ "rewards/rejected": 1.048297643661499,
  "step": 140
  },
  {
  "epoch": 0.96,
- "grad_norm": 567.1610784183449,
+ "grad_norm": 668.7483012920786,
  "learning_rate": 2.262559558016325e-10,
- "logits/chosen": -4.118973731994629,
- "logits/rejected": -4.348026752471924,
- "logps/chosen": -339.0107116699219,
- "logps/rejected": -295.09564208984375,
- "loss": 0.3078,
- "rewards/accuracies": 0.8656250238418579,
- "rewards/chosen": 3.7477049827575684,
- "rewards/margins": 2.61022686958313,
- "rewards/rejected": 1.1374781131744385,
+ "logits/chosen": -4.1221604347229,
+ "logits/rejected": -4.351316452026367,
+ "logps/chosen": -339.6897888183594,
+ "logps/rejected": -295.03692626953125,
+ "loss": 0.3208,
+ "rewards/accuracies": 0.859375,
+ "rewards/chosen": 3.4081547260284424,
+ "rewards/margins": 2.2413132190704346,
+ "rewards/rejected": 1.1668416261672974,
  "step": 150
  },
  {
  "epoch": 0.9984,
  "step": 156,
  "total_flos": 0.0,
- "train_loss": 0.3884877807054764,
- "train_runtime": 4677.6403,
- "train_samples_per_second": 8.539,
- "train_steps_per_second": 0.033
+ "train_loss": 0.4014626894241724,
+ "train_runtime": 5052.0322,
+ "train_samples_per_second": 7.906,
+ "train_steps_per_second": 0.031
  }
  ],
  "logging_steps": 10,
@@ -263,6 +263,18 @@
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:44368936e8a5f160f38c764acd78a1ed87cb99f1b31bc5a44994e052788c660a
- size 6392
+ oid sha256:2d92a249952e57df29e57ec30d0088226bf91a9212b8928552a333e4b9069864
+ size 6520