chansung committed on
Commit e773d8c
1 Parent(s): 284ebb4

Model save

README.md ADDED
@@ -0,0 +1,68 @@
+ ---
+ library_name: transformers
+ license: gemma
+ base_model: google/gemma-7b
+ tags:
+ - trl
+ - sft
+ - generated_from_trainer
+ datasets:
+ - generator
+ model-index:
+ - name: gemma7b-gpt4o_1k_classification-fft
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # gemma7b-gpt4o_1k_classification-fft
+
+ This model is a fine-tuned version of [google/gemma-7b](https://huggingface.co/google/gemma-7b) on the generator dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 5.7345
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training; a hedged reproduction sketch follows the list:
+ - learning_rate: 0.0003
+ - train_batch_size: 2
+ - eval_batch_size: 2
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 8
+ - gradient_accumulation_steps: 2
+ - total_train_batch_size: 32
+ - total_eval_batch_size: 16
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 1
+
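A minimal reproduction sketch for the settings above, assuming TRL's `SFTTrainer` (per the `trl` and `sft` tags) and an 8-GPU `accelerate launch`; the dataset source, text column, precision flag, and packing choice are assumptions, not details recorded in this commit:

```python
# Hedged sketch, not the author's actual script. Launch across 8 GPUs, e.g.
#   accelerate launch --num_processes 8 train.py
# so that 2 (per-device) x 8 (GPUs) x 2 (grad accum) = 32 total batch size.
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import SFTConfig, SFTTrainer

model_id = "google/gemma-7b"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Placeholder: the card only names a "generator" dataset, which is what TRL
# calls its on-the-fly packed dataset, so the real source file is unknown.
dataset = load_dataset("json", data_files="train.jsonl", split="train")

args = SFTConfig(
    output_dir="gemma7b-gpt4o_1k_classification-fft",
    learning_rate=3e-4,              # learning_rate: 0.0003
    per_device_train_batch_size=2,   # train_batch_size: 2
    per_device_eval_batch_size=2,    # eval_batch_size: 2
    gradient_accumulation_steps=2,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,                # lr_scheduler_warmup_ratio: 0.1
    num_train_epochs=1,
    seed=42,
    # AdamW with betas=(0.9, 0.999) and eps=1e-8 is the Trainer default,
    # matching the optimizer line in the card.
    bf16=True,                       # assumption; precision is not recorded here
    packing=True,                    # assumption, implied by the "generator" dataset
    dataset_text_field="text",       # placeholder column name
)

trainer = SFTTrainer(model=model, args=args, train_dataset=dataset, tokenizer=tokenizer)
trainer.train()
```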
+ ### Training results
+
+ | Training Loss | Epoch  | Step | Validation Loss |
+ |:-------------:|:------:|:----:|:---------------:|
+ | 4.3115        | 0.9979 | 239  | 5.7345          |
+
+
+ ### Framework versions
+
+ - Transformers 4.45.1
+ - PyTorch 2.4.1+cu121
+ - Datasets 3.0.1
+ - Tokenizers 0.20.0
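Once uploaded, loading follows the standard transformers pattern. A hedged usage sketch; the repo id is inferred from the committer and model name, not stated anywhere in this commit:

```python
# Hypothetical usage; "chansung/..." is an inferred repo id, not confirmed here.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "chansung/gemma7b-gpt4o_1k_classification-fft"
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id, torch_dtype=torch.bfloat16, device_map="auto"
)

prompt = "Classify the following text: ..."  # placeholder prompt
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```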
all_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "epoch": 0.9979123173277662,
+     "total_flos": 8193589641216.0,
+     "train_loss": 2.8353221366595025,
+     "train_runtime": 2240.0339,
+     "train_samples": 92634,
+     "train_samples_per_second": 3.416,
+     "train_steps_per_second": 0.107
+ }
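The throughput figures above are internally consistent with the README's batch math; a quick check using only values from this commit's files:

```python
# Sanity check: every input below is copied from the files in this commit.
train_runtime = 2240.0339        # seconds
samples_per_s = 3.416
steps_per_s = 0.107

print(steps_per_s * train_runtime)    # ~239.7, matching global_step 239
print(samples_per_s / steps_per_s)    # ~31.9, matching total_train_batch_size 32
print(samples_per_s * train_runtime)  # ~7652 sequences consumed over the epoch
```

The gap between the ~7.7k sequences consumed and `train_samples: 92634` presumably reflects TRL packing many raw examples into each fixed-length sequence, though the commit itself does not say so.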
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+     "_from_model_config": true,
+     "bos_token_id": 2,
+     "eos_token_id": 1,
+     "pad_token_id": 0,
+     "transformers_version": "4.45.1"
+ }
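These ids match Gemma's special tokens (`<pad>` = 0, `<eos>` = 1, `<bos>` = 2), carried over from the base model config. A small sketch of reading this file through `transformers`; the repo id is the same inferred one as above:

```python
# Hypothetical sketch: this file is what GenerationConfig.from_pretrained loads.
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("chansung/gemma7b-gpt4o_1k_classification-fft")
print(gen_config.bos_token_id, gen_config.eos_token_id, gen_config.pad_token_id)  # 2 1 0

# Passed explicitly, it makes decoding stop at eos id 1 and pad with id 0:
# model.generate(**inputs, generation_config=gen_config)
```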
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "epoch": 0.9979123173277662,
+     "total_flos": 8193589641216.0,
+     "train_loss": 2.8353221366595025,
+     "train_runtime": 2240.0339,
+     "train_samples": 92634,
+     "train_samples_per_second": 3.416,
+     "train_steps_per_second": 0.107
+ }
trainer_state.json ADDED
@@ -0,0 +1,386 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.9979123173277662,
+   "eval_steps": 500,
+   "global_step": 239,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.0041753653444676405,
+       "grad_norm": 14820.435529068707,
+       "learning_rate": 1.2499999999999999e-05,
+       "loss": 60.135,
+       "step": 1
+     },
+     {
+       "epoch": 0.020876826722338204,
+       "grad_norm": 1628.1311380537702,
+       "learning_rate": 6.25e-05,
+       "loss": 50.6887,
+       "step": 5
+     },
+     {
+       "epoch": 0.04175365344467641,
+       "grad_norm": 3033.800304830903,
+       "learning_rate": 0.000125,
+       "loss": 42.1741,
+       "step": 10
+     },
+     {
+       "epoch": 0.06263048016701461,
+       "grad_norm": 6954.121232870651,
+       "learning_rate": 0.00018749999999999998,
+       "loss": 161.1634,
+       "step": 15
+     },
+     {
+       "epoch": 0.08350730688935282,
+       "grad_norm": 850.7915596439383,
+       "learning_rate": 0.00025,
+       "loss": 187.8921,
+       "step": 20
+     },
+     {
+       "epoch": 0.10438413361169102,
+       "grad_norm": 51.008981364388355,
+       "learning_rate": 0.0002999839868651235,
+       "loss": 27.0222,
+       "step": 25
+     },
+     {
+       "epoch": 0.12526096033402923,
+       "grad_norm": 41.28593832910529,
+       "learning_rate": 0.000299423886051382,
+       "loss": 40.1282,
+       "step": 30
+     },
+     {
+       "epoch": 0.14613778705636743,
+       "grad_norm": 94.54405893422229,
+       "learning_rate": 0.0002980665441538907,
+       "loss": 22.8182,
+       "step": 35
+     },
+     {
+       "epoch": 0.16701461377870563,
+       "grad_norm": 117.0436511760194,
+       "learning_rate": 0.0002959192031789579,
+       "loss": 19.1922,
+       "step": 40
+     },
+     {
+       "epoch": 0.18789144050104384,
+       "grad_norm": 23.24704464882293,
+       "learning_rate": 0.00029299332011978107,
+       "loss": 17.9784,
+       "step": 45
+     },
+     {
+       "epoch": 0.20876826722338204,
+       "grad_norm": 67.09115479950934,
+       "learning_rate": 0.0002893045058284311,
+       "loss": 13.938,
+       "step": 50
+     },
+     {
+       "epoch": 0.22964509394572025,
+       "grad_norm": 74.81050241825953,
+       "learning_rate": 0.00028487244172520246,
+       "loss": 12.678,
+       "step": 55
+     },
+     {
+       "epoch": 0.25052192066805845,
+       "grad_norm": 36.783670859376656,
+       "learning_rate": 0.0002797207747897198,
+       "loss": 11.5948,
+       "step": 60
+     },
+     {
+       "epoch": 0.27139874739039666,
+       "grad_norm": 42.67794856216472,
+       "learning_rate": 0.0002738769913940706,
+       "loss": 9.6311,
+       "step": 65
+     },
+     {
+       "epoch": 0.29227557411273486,
+       "grad_norm": 20.131225618062707,
+       "learning_rate": 0.0002673722706511174,
+       "loss": 8.6553,
+       "step": 70
+     },
+     {
+       "epoch": 0.31315240083507306,
+       "grad_norm": 16.501356621775457,
+       "learning_rate": 0.0002602413180604401,
+       "loss": 7.8996,
+       "step": 75
+     },
+     {
+       "epoch": 0.33402922755741127,
+       "grad_norm": 17.24824317039284,
+       "learning_rate": 0.00025252218033947993,
+       "loss": 7.1388,
+       "step": 80
+     },
+     {
+       "epoch": 0.35490605427974947,
+       "grad_norm": 23.28427284101539,
+       "learning_rate": 0.0002442560424278399,
+       "loss": 6.7571,
+       "step": 85
+     },
+     {
+       "epoch": 0.3757828810020877,
+       "grad_norm": 19.752003570940577,
+       "learning_rate": 0.00023548700774781242,
+       "loss": 6.4415,
+       "step": 90
+     },
+     {
+       "epoch": 0.3966597077244259,
+       "grad_norm": 16.62982088461208,
+       "learning_rate": 0.00022626186289353913,
+       "loss": 6.4479,
+       "step": 95
+     },
+     {
+       "epoch": 0.4175365344467641,
+       "grad_norm": 23.009578526601246,
+       "learning_rate": 0.0002166298280042877,
+       "loss": 6.1347,
+       "step": 100
+     },
+     {
+       "epoch": 0.4384133611691023,
+       "grad_norm": 9.902525054887667,
+       "learning_rate": 0.00020664229415371266,
+       "loss": 5.868,
+       "step": 105
+     },
+     {
+       "epoch": 0.4592901878914405,
+       "grad_norm": 18.872092670717567,
+       "learning_rate": 0.0001963525491562421,
+       "loss": 5.8684,
+       "step": 110
+     },
+     {
+       "epoch": 0.4801670146137787,
+       "grad_norm": 10.396862801643927,
+       "learning_rate": 0.00018581549325353126,
+       "loss": 5.6768,
+       "step": 115
+     },
+     {
+       "epoch": 0.5010438413361169,
+       "grad_norm": 6.643173772718449,
+       "learning_rate": 0.00017508734619791966,
+       "loss": 5.5378,
+       "step": 120
+     },
+     {
+       "epoch": 0.5219206680584552,
+       "grad_norm": 13.620597347405528,
+       "learning_rate": 0.00016422534729572738,
+       "loss": 5.4243,
+       "step": 125
+     },
+     {
+       "epoch": 0.5427974947807933,
+       "grad_norm": 12.675575738004211,
+       "learning_rate": 0.0001532874500107902,
+       "loss": 5.4237,
+       "step": 130
+     },
+     {
+       "epoch": 0.5636743215031316,
+       "grad_norm": 10.600479885243871,
+       "learning_rate": 0.00014233201275765494,
+       "loss": 5.3795,
+       "step": 135
+     },
+     {
+       "epoch": 0.5845511482254697,
+       "grad_norm": 14.207759763111122,
+       "learning_rate": 0.0001314174875341878,
+       "loss": 5.2682,
+       "step": 140
+     },
+     {
+       "epoch": 0.605427974947808,
+       "grad_norm": 10.28585644469174,
+       "learning_rate": 0.00012060210805487529,
+       "loss": 5.1446,
+       "step": 145
+     },
+     {
+       "epoch": 0.6263048016701461,
+       "grad_norm": 9.411071111902643,
+       "learning_rate": 0.00010994357904876106,
+       "loss": 5.0323,
+       "step": 150
+     },
+     {
+       "epoch": 0.6471816283924844,
+       "grad_norm": 4.20316220773891,
+       "learning_rate": 9.949876837974944e-05,
+       "loss": 4.9314,
+       "step": 155
+     },
+     {
+       "epoch": 0.6680584551148225,
+       "grad_norm": 6.642144688442386,
+       "learning_rate": 8.932340363194595e-05,
+       "loss": 4.9206,
+       "step": 160
+     },
+     {
+       "epoch": 0.6889352818371608,
+       "grad_norm": 7.2411357694832255,
+       "learning_rate": 7.947177477888472e-05,
+       "loss": 4.8218,
+       "step": 165
+     },
+     {
+       "epoch": 0.7098121085594989,
+       "grad_norm": 6.883273869392802,
+       "learning_rate": 6.999644452302975e-05,
+       "loss": 4.6982,
+       "step": 170
+     },
+     {
+       "epoch": 0.7306889352818372,
+       "grad_norm": 5.045481934015123,
+       "learning_rate": 6.0947967851014405e-05,
+       "loss": 4.6601,
+       "step": 175
+     },
+     {
+       "epoch": 0.7515657620041754,
+       "grad_norm": 4.832077424408084,
+       "learning_rate": 5.237462230091467e-05,
+       "loss": 4.5969,
+       "step": 180
+     },
+     {
+       "epoch": 0.7724425887265136,
+       "grad_norm": 4.500645176472541,
+       "learning_rate": 4.432215038069449e-05,
+       "loss": 4.548,
+       "step": 185
+     },
+     {
+       "epoch": 0.7933194154488518,
+       "grad_norm": 3.503698684667856,
+       "learning_rate": 3.6833515512134606e-05,
+       "loss": 4.5618,
+       "step": 190
+     },
+     {
+       "epoch": 0.81419624217119,
+       "grad_norm": 3.680197433632794,
+       "learning_rate": 2.9948672802388135e-05,
+       "loss": 4.5023,
+       "step": 195
+     },
+     {
+       "epoch": 0.8350730688935282,
+       "grad_norm": 2.8773090613106302,
+       "learning_rate": 2.3704355866196373e-05,
+       "loss": 4.5226,
+       "step": 200
+     },
+     {
+       "epoch": 0.8559498956158664,
+       "grad_norm": 2.5614101608626467,
+       "learning_rate": 1.813388083616068e-05,
+       "loss": 4.4463,
+       "step": 205
+     },
+     {
+       "epoch": 0.8768267223382046,
+       "grad_norm": 2.2102224877308676,
+       "learning_rate": 1.326696860675981e-05,
+       "loss": 4.3724,
+       "step": 210
+     },
+     {
+       "epoch": 0.8977035490605428,
+       "grad_norm": 2.056590025798714,
+       "learning_rate": 9.129586260518634e-06,
+       "loss": 4.3747,
+       "step": 215
+     },
+     {
+       "epoch": 0.918580375782881,
+       "grad_norm": 1.7949115931981623,
+       "learning_rate": 5.743808522387544e-06,
+       "loss": 4.3585,
+       "step": 220
+     },
+     {
+       "epoch": 0.9394572025052192,
+       "grad_norm": 1.5441796176488383,
+       "learning_rate": 3.1276999815337544e-06,
+       "loss": 4.3784,
+       "step": 225
+     },
+     {
+       "epoch": 0.9603340292275574,
+       "grad_norm": 1.549016859105234,
+       "learning_rate": 1.2952187089419642e-06,
+       "loss": 4.3766,
+       "step": 230
+     },
+     {
+       "epoch": 0.9812108559498957,
+       "grad_norm": 1.5003100935642444,
+       "learning_rate": 2.5614178506644934e-07,
+       "loss": 4.3115,
+       "step": 235
+     },
+     {
+       "epoch": 0.9979123173277662,
+       "eval_loss": 5.734466552734375,
+       "eval_runtime": 1.4349,
+       "eval_samples_per_second": 1.394,
+       "eval_steps_per_second": 0.697,
+       "step": 239
+     },
+     {
+       "epoch": 0.9979123173277662,
+       "step": 239,
+       "total_flos": 8193589641216.0,
+       "train_loss": 2.8353221366595025,
+       "train_runtime": 2240.0339,
+       "train_samples_per_second": 3.416,
+       "train_steps_per_second": 0.107
+     }
+   ],
+   "logging_steps": 5,
+   "max_steps": 239,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 100,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 8193589641216.0,
+   "train_batch_size": 2,
+   "trial_name": null,
+   "trial_params": null
+ }
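`log_history` is a flat list of dicts: one entry every `logging_steps` (5) optimizer steps carrying `loss`, `grad_norm`, and `learning_rate`, plus a final eval record and a train summary. A minimal sketch for pulling the loss curve out of this file; the file path and the use of matplotlib are assumptions:

```python
# Minimal sketch: plot the training-loss curve recorded in trainer_state.json.
import json
import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step training entries carry "loss"; the eval and summary records do not.
train_logs = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in train_logs]
losses = [e["loss"] for e in train_logs]

plt.plot(steps, losses)
plt.xlabel("step")
plt.ylabel("training loss")
plt.yscale("log")  # early spikes (60 -> 188) would otherwise flatten the tail
plt.savefig("loss_curve.png")
```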