BraylonDash committed
Commit 1c4a63e
1 Parent(s): deab481

Model save

README.md ADDED
@@ -0,0 +1,61 @@
+ ---
+ library_name: peft
+ tags:
+ - trl
+ - dpo
+ - generated_from_trainer
+ base_model: DUAL-GPO/7b-kto-i0-merged
+ model-index:
+ - name: 7b-kto-10-40-i1
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # 7b-kto-10-40-i1
+
+ This model is a fine-tuned version of [DUAL-GPO/7b-kto-i0-merged](https://huggingface.co/DUAL-GPO/7b-kto-i0-merged) on an unspecified dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training (a hedged code mapping follows the list):
+ - learning_rate: 5e-06
+ - train_batch_size: 4
+ - eval_batch_size: 4
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 4
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 64
+ - total_eval_batch_size: 16
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 1
+
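+ Taken together, these settings give an effective batch size of 4 per device x 4 GPUs x 4 gradient-accumulation steps = 64, which matches `total_train_batch_size`. The sketch below shows one plausible mapping of the list onto `transformers.TrainingArguments` (training presumably ran through TRL's `DPOTrainer`, per the `trl`/`dpo` tags); it is an illustration only, and `output_dir` is a placeholder rather than a value taken from this repository.
+
+ ```python
+ from transformers import TrainingArguments
+
+ # Hypothetical reconstruction of the hyperparameters listed above; not the repo's actual script.
+ training_args = TrainingArguments(
+     output_dir="7b-kto-10-40-i1",      # placeholder
+     learning_rate=5e-6,
+     per_device_train_batch_size=4,     # 4 per device x 4 GPUs x 4 accumulation steps = 64 effective
+     per_device_eval_batch_size=4,
+     gradient_accumulation_steps=4,
+     num_train_epochs=1,
+     lr_scheduler_type="cosine",
+     warmup_ratio=0.1,
+     seed=42,
+     # Adam betas=(0.9, 0.999) and epsilon=1e-08 are already the TrainingArguments defaults.
+ )
+ ```
+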
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - PEFT 0.7.1
+ - Transformers 4.36.2
+ - Pytorch 2.1.2
+ - Datasets 2.14.6
+ - Tokenizers 0.15.2
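+
+ ## How to use
+
+ A minimal usage sketch for loading this PEFT adapter on top of the base model with `peft` and `transformers`. The `adapter_id` below is a placeholder for wherever this adapter is hosted, and the `bfloat16` dtype is an assumption, not something stated in this repository.
+
+ ```python
+ import torch
+ from peft import PeftModel
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ base_id = "DUAL-GPO/7b-kto-i0-merged"
+ adapter_id = "<org-or-user>/7b-kto-10-40-i1"  # placeholder; point this at the actual adapter repo or a local path
+
+ tokenizer = AutoTokenizer.from_pretrained(base_id)
+ base_model = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.bfloat16)  # dtype is an assumption
+ model = PeftModel.from_pretrained(base_model, adapter_id)
+
+ prompt = "Explain what DPO fine-tuning does in one sentence."
+ inputs = tokenizer(prompt, return_tensors="pt")
+ outputs = model.generate(**inputs, max_new_tokens=64)
+ print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+ ```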
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:f93c48015e0200f863c6208754447fe0c61af5f172e73c2145625d1a7d352706
+ oid sha256:30c6bbca93edeb16cf5b9ed9e87763c2eb71affdda8712dd87e6fcb05a6ca874
 size 151021328
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 1.0,
+ "train_loss": 0.47607828460187995,
+ "train_runtime": 5505.1583,
+ "train_samples": 15000,
+ "train_samples_per_second": 2.725,
+ "train_steps_per_second": 0.043
+ }
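These aggregates are internally consistent with the hyperparameters in the README above; a quick sanity check (the step count of 234 comes from trainer_state.json below, and the effective batch size of 64 from the model card):

```python
# Rough consistency check on the reported throughput numbers.
train_samples, train_runtime_s, steps, effective_batch = 15000, 5505.1583, 234, 64

print(train_samples / train_runtime_s)   # ~2.725, matches train_samples_per_second
print(steps / train_runtime_s)           # ~0.0425, matches train_steps_per_second (0.043)
print(train_samples / effective_batch)   # ~234.4 -> 234 optimizer steps in one epoch
```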
emissions.csv ADDED
@@ -0,0 +1,2 @@
+ timestamp,project_name,run_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,on_cloud,pue
+ 2024-09-30T15:42:09,codecarbon,99792cb5-b5b8-44a2-beca-61332a266183,5505.167066574097,0.005048954220945405,9.171300634273764e-07,42.5,921.118,188.74309015274048,0.06499084281093548,1.7712632639109436,0.28763728764522034,2.1238913943671,Canada,CAN,quebec,,,Linux-5.15.0-84-generic-x86_64-with-glibc2.35,3.10.14,2.2.3,32,Intel(R) Xeon(R) W-3335 CPU @ 3.40GHz,4,4 x NVIDIA GeForce RTX 4090,-71.2,46.8,503.3149070739746,machine,N,1.0
runs/Sep30_14-08-49_gpu4-119-5/events.out.tfevents.1727669424.gpu4-119-5.1813925.0 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:17bd629cfb55390255f5b85e057e023d59f8e3c0b12c79d631f196f63913eb27
- size 18998
+ oid sha256:611827b1e836217f85f9b011684c76ba5c3f4084672c643e3071b6b48548c976
+ size 19986
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 1.0,
+ "train_loss": 0.47607828460187995,
+ "train_runtime": 5505.1583,
+ "train_samples": 15000,
+ "train_samples_per_second": 2.725,
+ "train_steps_per_second": 0.043
+ }
trainer_state.json ADDED
@@ -0,0 +1,366 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.997867803837953,
+ "eval_steps": 500,
+ "global_step": 234,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0,
+ "learning_rate": 2.0833333333333333e-07,
+ "logits/chosen": -2.024216890335083,
+ "logits/rejected": -1.8819010257720947,
+ "logps/chosen": -1305.559326171875,
+ "logps/rejected": -3790.57275390625,
+ "loss": 0.5,
+ "rewards/accuracies": 0.0,
+ "rewards/chosen": 0.0,
+ "rewards/margins": 0.0,
+ "rewards/rejected": 0.0,
+ "step": 1
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 2.0833333333333334e-06,
+ "logits/chosen": -2.2936673164367676,
+ "logits/rejected": -2.1570396423339844,
+ "logps/chosen": -1376.771728515625,
+ "logps/rejected": -3041.7578125,
+ "loss": 0.4966,
+ "rewards/accuracies": 0.4305555522441864,
+ "rewards/chosen": -0.037909653037786484,
+ "rewards/margins": 0.009223480708897114,
+ "rewards/rejected": -0.04713314026594162,
+ "step": 10
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 4.166666666666667e-06,
+ "logits/chosen": -2.1768686771392822,
+ "logits/rejected": -2.084563732147217,
+ "logps/chosen": -2079.7646484375,
+ "logps/rejected": -3319.23583984375,
+ "loss": 0.4897,
+ "rewards/accuracies": 0.5062500238418579,
+ "rewards/chosen": -0.7143774628639221,
+ "rewards/margins": 0.04834098741412163,
+ "rewards/rejected": -0.7627183794975281,
+ "step": 20
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 4.989935734988098e-06,
+ "logits/chosen": -2.145012617111206,
+ "logits/rejected": -2.027907609939575,
+ "logps/chosen": -2782.030517578125,
+ "logps/rejected": -4380.7275390625,
+ "loss": 0.4871,
+ "rewards/accuracies": 0.4437499940395355,
+ "rewards/chosen": -1.3868815898895264,
+ "rewards/margins": 0.28924745321273804,
+ "rewards/rejected": -1.6761291027069092,
+ "step": 30
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 4.9287250957321685e-06,
+ "logits/chosen": -2.2485415935516357,
+ "logits/rejected": -2.1334125995635986,
+ "logps/chosen": -2663.207763671875,
+ "logps/rejected": -3975.64892578125,
+ "loss": 0.4821,
+ "rewards/accuracies": 0.5562499761581421,
+ "rewards/chosen": -1.1883825063705444,
+ "rewards/margins": 0.22184336185455322,
+ "rewards/rejected": -1.4102258682250977,
+ "step": 40
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 4.813260751184992e-06,
+ "logits/chosen": -2.2985146045684814,
+ "logits/rejected": -2.210111141204834,
+ "logps/chosen": -1755.809326171875,
+ "logps/rejected": -3258.091796875,
+ "loss": 0.4806,
+ "rewards/accuracies": 0.5625,
+ "rewards/chosen": -0.6678739190101624,
+ "rewards/margins": 0.2041586935520172,
+ "rewards/rejected": -0.872032642364502,
+ "step": 50
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 4.646121984004666e-06,
+ "logits/chosen": -2.2362232208251953,
+ "logits/rejected": -2.1845810413360596,
+ "logps/chosen": -2074.88134765625,
+ "logps/rejected": -3796.244873046875,
+ "loss": 0.473,
+ "rewards/accuracies": 0.4937500059604645,
+ "rewards/chosen": -0.5578988790512085,
+ "rewards/margins": 0.24681946635246277,
+ "rewards/rejected": -0.8047183752059937,
+ "step": 60
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 4.431042398061499e-06,
+ "logits/chosen": -2.382171392440796,
+ "logits/rejected": -2.307375431060791,
+ "logps/chosen": -1571.745361328125,
+ "logps/rejected": -3834.587158203125,
+ "loss": 0.4815,
+ "rewards/accuracies": 0.606249988079071,
+ "rewards/chosen": -0.559153139591217,
+ "rewards/margins": 0.5565303564071655,
+ "rewards/rejected": -1.1156834363937378,
+ "step": 70
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 4.172826515897146e-06,
+ "logits/chosen": -2.408031940460205,
+ "logits/rejected": -2.3543269634246826,
+ "logps/chosen": -2339.724609375,
+ "logps/rejected": -3922.291748046875,
+ "loss": 0.4755,
+ "rewards/accuracies": 0.53125,
+ "rewards/chosen": -1.0054388046264648,
+ "rewards/margins": 0.39337557554244995,
+ "rewards/rejected": -1.39881432056427,
+ "step": 80
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 3.8772424536302565e-06,
+ "logits/chosen": -2.4260120391845703,
+ "logits/rejected": -2.373760461807251,
+ "logps/chosen": -2360.41552734375,
+ "logps/rejected": -3782.758544921875,
+ "loss": 0.4727,
+ "rewards/accuracies": 0.518750011920929,
+ "rewards/chosen": -0.8136354684829712,
+ "rewards/margins": 0.4064968228340149,
+ "rewards/rejected": -1.2201323509216309,
+ "step": 90
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 3.5508930707739143e-06,
+ "logits/chosen": -2.4029459953308105,
+ "logits/rejected": -2.3526365756988525,
+ "logps/chosen": -2014.8980712890625,
+ "logps/rejected": -3683.920654296875,
+ "loss": 0.4696,
+ "rewards/accuracies": 0.5375000238418579,
+ "rewards/chosen": -0.5975068211555481,
+ "rewards/margins": 0.31760281324386597,
+ "rewards/rejected": -0.9151096343994141,
+ "step": 100
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 3.201068473265007e-06,
+ "logits/chosen": -2.452263355255127,
+ "logits/rejected": -2.4388153553009033,
+ "logps/chosen": -2102.702392578125,
+ "logps/rejected": -4107.12255859375,
+ "loss": 0.4725,
+ "rewards/accuracies": 0.53125,
+ "rewards/chosen": -0.7340173125267029,
+ "rewards/margins": 0.48010140657424927,
+ "rewards/rejected": -1.2141185998916626,
+ "step": 110
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 2.835583164544139e-06,
+ "logits/chosen": -2.4622802734375,
+ "logits/rejected": -2.453519344329834,
+ "logps/chosen": -2598.88037109375,
+ "logps/rejected": -4125.0986328125,
+ "loss": 0.4738,
+ "rewards/accuracies": 0.6000000238418579,
+ "rewards/chosen": -0.9378460049629211,
+ "rewards/margins": 0.3462037444114685,
+ "rewards/rejected": -1.2840497493743896,
+ "step": 120
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 2.4626014824618418e-06,
+ "logits/chosen": -2.5731091499328613,
+ "logits/rejected": -2.559770107269287,
+ "logps/chosen": -2807.160400390625,
+ "logps/rejected": -4325.0205078125,
+ "loss": 0.4802,
+ "rewards/accuracies": 0.550000011920929,
+ "rewards/chosen": -1.1455607414245605,
+ "rewards/margins": 0.6021825075149536,
+ "rewards/rejected": -1.7477432489395142,
+ "step": 130
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 2.090455221462156e-06,
+ "logits/chosen": -2.468670606613159,
+ "logits/rejected": -2.4780094623565674,
+ "logps/chosen": -2137.4091796875,
+ "logps/rejected": -4046.2734375,
+ "loss": 0.4714,
+ "rewards/accuracies": 0.574999988079071,
+ "rewards/chosen": -0.8530462980270386,
+ "rewards/margins": 0.3694917857646942,
+ "rewards/rejected": -1.2225382328033447,
+ "step": 140
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 1.7274575140626318e-06,
+ "logits/chosen": -2.4550023078918457,
+ "logits/rejected": -2.4524893760681152,
+ "logps/chosen": -1734.0328369140625,
+ "logps/rejected": -3453.727783203125,
+ "loss": 0.4732,
+ "rewards/accuracies": 0.550000011920929,
+ "rewards/chosen": -0.5440791845321655,
+ "rewards/margins": 0.4087960124015808,
+ "rewards/rejected": -0.9528751373291016,
+ "step": 150
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.3817171292109182e-06,
+ "logits/chosen": -2.520023822784424,
+ "logits/rejected": -2.549712657928467,
+ "logps/chosen": -2198.69189453125,
+ "logps/rejected": -3628.389892578125,
+ "loss": 0.4758,
+ "rewards/accuracies": 0.5375000238418579,
+ "rewards/chosen": -0.6990865468978882,
+ "rewards/margins": 0.3725363612174988,
+ "rewards/rejected": -1.0716229677200317,
+ "step": 160
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.0609573357858166e-06,
+ "logits/chosen": -2.5392794609069824,
+ "logits/rejected": -2.5447866916656494,
+ "logps/chosen": -1648.3802490234375,
+ "logps/rejected": -4229.2060546875,
+ "loss": 0.4682,
+ "rewards/accuracies": 0.574999988079071,
+ "rewards/chosen": -0.7170668840408325,
+ "rewards/margins": 0.6884183883666992,
+ "rewards/rejected": -1.4054853916168213,
+ "step": 170
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 7.723433775328385e-07,
+ "logits/chosen": -2.524719476699829,
+ "logits/rejected": -2.5857484340667725,
+ "logps/chosen": -2204.722900390625,
+ "logps/rejected": -4786.68408203125,
+ "loss": 0.47,
+ "rewards/accuracies": 0.6000000238418579,
+ "rewards/chosen": -0.9566167593002319,
+ "rewards/margins": 0.7360025644302368,
+ "rewards/rejected": -1.6926193237304688,
+ "step": 180
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 5.223224133591475e-07,
+ "logits/chosen": -2.5650665760040283,
+ "logits/rejected": -2.5723259449005127,
+ "logps/chosen": -2591.955322265625,
+ "logps/rejected": -4333.18701171875,
+ "loss": 0.4722,
+ "rewards/accuracies": 0.5375000238418579,
+ "rewards/chosen": -1.0086050033569336,
+ "rewards/margins": 0.689775824546814,
+ "rewards/rejected": -1.6983808279037476,
+ "step": 190
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 3.164794984571759e-07,
+ "logits/chosen": -2.4675166606903076,
+ "logits/rejected": -2.536062717437744,
+ "logps/chosen": -2089.778564453125,
+ "logps/rejected": -4025.212158203125,
+ "loss": 0.474,
+ "rewards/accuracies": 0.48750001192092896,
+ "rewards/chosen": -0.8992789387702942,
+ "rewards/margins": 0.5448096990585327,
+ "rewards/rejected": -1.4440886974334717,
+ "step": 200
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 1.59412823400657e-07,
+ "logits/chosen": -2.560384750366211,
+ "logits/rejected": -2.6164653301239014,
+ "logps/chosen": -2061.99658203125,
+ "logps/rejected": -3734.251953125,
+ "loss": 0.4744,
+ "rewards/accuracies": 0.48750001192092896,
+ "rewards/chosen": -0.9695846438407898,
+ "rewards/margins": 0.20665684342384338,
+ "rewards/rejected": -1.176241397857666,
+ "step": 210
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 5.463099816548578e-08,
+ "logits/chosen": -2.4755778312683105,
+ "logits/rejected": -2.5548205375671387,
+ "logps/chosen": -1978.981689453125,
+ "logps/rejected": -4176.359375,
+ "loss": 0.4721,
+ "rewards/accuracies": 0.59375,
+ "rewards/chosen": -0.8206374049186707,
+ "rewards/margins": 0.5447834730148315,
+ "rewards/rejected": -1.365420937538147,
+ "step": 220
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 4.474675580662113e-09,
+ "logits/chosen": -2.485661745071411,
+ "logits/rejected": -2.55751371383667,
+ "logps/chosen": -2132.883056640625,
+ "logps/rejected": -4433.28271484375,
+ "loss": 0.4707,
+ "rewards/accuracies": 0.606249988079071,
+ "rewards/chosen": -0.7833830714225769,
+ "rewards/margins": 0.7396507263183594,
+ "rewards/rejected": -1.523033857345581,
+ "step": 230
+ },
+ {
+ "epoch": 1.0,
+ "step": 234,
+ "total_flos": 0.0,
+ "train_loss": 0.47607828460187995,
+ "train_runtime": 5505.1583,
+ "train_samples_per_second": 2.725,
+ "train_steps_per_second": 0.043
+ }
+ ],
+ "logging_steps": 10,
+ "max_steps": 234,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 1,
+ "save_steps": 20,
+ "total_flos": 0.0,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+ }
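The `log_history` above records the loss, learning rate, and DPO reward statistics every 10 steps (`logging_steps: 10`), with a final summary entry for the whole run. A minimal sketch for pulling those curves out of a local copy of `trainer_state.json` (the file path is an assumption):

```python
import json

# Load a local copy of trainer_state.json; adjust the path as needed.
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep the periodic logging entries; the final summary entry has no "loss" key.
logs = [entry for entry in state["log_history"] if "loss" in entry]

for entry in logs:
    print(f'step {entry["step"]:>3}: loss={entry["loss"]:.4f}  '
          f'reward margin={entry["rewards/margins"]:.3f}  '
          f'accuracy={entry["rewards/accuracies"]:.3f}')
```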