|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.019561097866617763,
  "eval_steps": 34,
  "global_step": 40,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0004890274466654441,
      "grad_norm": 1.4216102361679077,
      "learning_rate": 6.666666666666667e-06,
      "loss": 3.258,
      "step": 1
    },
    {
      "epoch": 0.0004890274466654441,
      "eval_loss": 3.5600969791412354,
      "eval_runtime": 1313.2977,
      "eval_samples_per_second": 1.967,
      "eval_steps_per_second": 0.656,
      "step": 1
    },
    {
      "epoch": 0.0009780548933308881,
      "grad_norm": 1.504632830619812,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 3.8029,
      "step": 2
    },
    {
      "epoch": 0.0014670823399963323,
      "grad_norm": 1.6489232778549194,
      "learning_rate": 2e-05,
      "loss": 3.6594,
      "step": 3
    },
    {
      "epoch": 0.0019561097866617762,
      "grad_norm": 1.274643063545227,
      "learning_rate": 2.6666666666666667e-05,
      "loss": 3.1495,
      "step": 4
    },
    {
      "epoch": 0.0024451372333272204,
      "grad_norm": 1.4067387580871582,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 3.6677,
      "step": 5
    },
    {
      "epoch": 0.0029341646799926646,
      "grad_norm": 1.7147626876831055,
      "learning_rate": 4e-05,
      "loss": 3.8075,
      "step": 6
    },
    {
      "epoch": 0.0034231921266581087,
      "grad_norm": 1.672584891319275,
      "learning_rate": 4.666666666666667e-05,
      "loss": 3.3499,
      "step": 7
    },
    {
      "epoch": 0.0039122195733235525,
      "grad_norm": 2.0120997428894043,
      "learning_rate": 5.333333333333333e-05,
      "loss": 4.2653,
      "step": 8
    },
    {
      "epoch": 0.004401247019988997,
      "grad_norm": 2.0601062774658203,
      "learning_rate": 6e-05,
      "loss": 3.3055,
      "step": 9
    },
    {
      "epoch": 0.004890274466654441,
      "grad_norm": 2.0153276920318604,
      "learning_rate": 6.666666666666667e-05,
      "loss": 3.1547,
      "step": 10
    },
    {
      "epoch": 0.005379301913319885,
      "grad_norm": 2.443328857421875,
      "learning_rate": 7.333333333333333e-05,
      "loss": 3.4654,
      "step": 11
    },
    {
      "epoch": 0.005868329359985329,
      "grad_norm": 2.801769971847534,
      "learning_rate": 8e-05,
      "loss": 2.8224,
      "step": 12
    },
    {
      "epoch": 0.006357356806650773,
      "grad_norm": 2.6738572120666504,
      "learning_rate": 8.666666666666667e-05,
      "loss": 2.8465,
      "step": 13
    },
    {
      "epoch": 0.0068463842533162175,
      "grad_norm": 2.8812367916107178,
      "learning_rate": 9.333333333333334e-05,
      "loss": 2.622,
      "step": 14
    },
    {
      "epoch": 0.007335411699981661,
      "grad_norm": 2.9697248935699463,
      "learning_rate": 0.0001,
      "loss": 2.9623,
      "step": 15
    },
    {
      "epoch": 0.007824439146647105,
      "grad_norm": 2.5231637954711914,
      "learning_rate": 0.00010666666666666667,
      "loss": 2.2665,
      "step": 16
    },
    {
      "epoch": 0.00831346659331255,
      "grad_norm": 2.341275691986084,
      "learning_rate": 0.00011333333333333334,
      "loss": 2.2247,
      "step": 17
    },
    {
      "epoch": 0.008802494039977994,
      "grad_norm": 2.683793306350708,
      "learning_rate": 0.00012,
      "loss": 1.9371,
      "step": 18
    },
    {
      "epoch": 0.009291521486643438,
      "grad_norm": 2.972334384918213,
      "learning_rate": 0.00012666666666666666,
      "loss": 1.8682,
      "step": 19
    },
    {
      "epoch": 0.009780548933308882,
      "grad_norm": 3.541074275970459,
      "learning_rate": 0.00013333333333333334,
      "loss": 1.912,
      "step": 20
    },
    {
      "epoch": 0.010269576379974325,
      "grad_norm": 2.9546866416931152,
      "learning_rate": 0.00014,
      "loss": 1.6904,
      "step": 21
    },
    {
      "epoch": 0.01075860382663977,
      "grad_norm": 2.5912656784057617,
      "learning_rate": 0.00014666666666666666,
      "loss": 1.5096,
      "step": 22
    },
    {
      "epoch": 0.011247631273305215,
      "grad_norm": 2.258963108062744,
      "learning_rate": 0.00015333333333333334,
      "loss": 1.3978,
      "step": 23
    },
    {
      "epoch": 0.011736658719970658,
      "grad_norm": 2.275815725326538,
      "learning_rate": 0.00016,
      "loss": 1.1538,
      "step": 24
    },
    {
      "epoch": 0.012225686166636102,
      "grad_norm": 2.224825382232666,
      "learning_rate": 0.0001666666666666667,
      "loss": 1.471,
      "step": 25
    },
    {
      "epoch": 0.012714713613301546,
      "grad_norm": 1.876035451889038,
      "learning_rate": 0.00017333333333333334,
      "loss": 1.4368,
      "step": 26
    },
    {
      "epoch": 0.013203741059966991,
      "grad_norm": 1.6051979064941406,
      "learning_rate": 0.00018,
      "loss": 1.3951,
      "step": 27
    },
    {
      "epoch": 0.013692768506632435,
      "grad_norm": 1.7268619537353516,
      "learning_rate": 0.0001866666666666667,
      "loss": 1.1915,
      "step": 28
    },
    {
      "epoch": 0.014181795953297879,
      "grad_norm": 1.888218879699707,
      "learning_rate": 0.00019333333333333333,
      "loss": 1.1358,
      "step": 29
    },
    {
      "epoch": 0.014670823399963322,
      "grad_norm": 1.7404896020889282,
      "learning_rate": 0.0002,
      "loss": 1.2051,
      "step": 30
    },
    {
      "epoch": 0.015159850846628768,
      "grad_norm": 1.560935139656067,
      "learning_rate": 0.00019998292504580528,
      "loss": 1.1611,
      "step": 31
    },
    {
      "epoch": 0.01564887829329421,
      "grad_norm": 2.000502824783325,
      "learning_rate": 0.0001999317060143023,
      "loss": 1.4315,
      "step": 32
    },
    {
      "epoch": 0.016137905739959654,
      "grad_norm": 1.7972925901412964,
      "learning_rate": 0.0001998463603967434,
      "loss": 1.1507,
      "step": 33
    },
    {
      "epoch": 0.0166269331866251,
      "grad_norm": 1.4039409160614014,
      "learning_rate": 0.00019972691733857883,
      "loss": 1.1626,
      "step": 34
    },
    {
      "epoch": 0.0166269331866251,
      "eval_loss": 1.1705251932144165,
      "eval_runtime": 1316.0492,
      "eval_samples_per_second": 1.963,
      "eval_steps_per_second": 0.654,
      "step": 34
    },
    {
      "epoch": 0.017115960633290545,
      "grad_norm": 1.5077097415924072,
      "learning_rate": 0.00019957341762950344,
      "loss": 1.0223,
      "step": 35
    },
    {
      "epoch": 0.017604988079955988,
      "grad_norm": 1.4470211267471313,
      "learning_rate": 0.0001993859136895274,
      "loss": 1.0228,
      "step": 36
    },
    {
      "epoch": 0.018094015526621432,
      "grad_norm": 1.7781013250350952,
      "learning_rate": 0.00019916446955107428,
      "loss": 1.0388,
      "step": 37
    },
    {
      "epoch": 0.018583042973286876,
      "grad_norm": 1.8719812631607056,
      "learning_rate": 0.0001989091608371146,
      "loss": 1.0568,
      "step": 38
    },
    {
      "epoch": 0.01907207041995232,
      "grad_norm": 1.389418363571167,
      "learning_rate": 0.00019862007473534025,
      "loss": 1.2011,
      "step": 39
    },
    {
      "epoch": 0.019561097866617763,
      "grad_norm": 1.561482310295105,
      "learning_rate": 0.0001982973099683902,
      "loss": 1.2364,
      "step": 40
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 5,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.7755816782200832e+17,
  "train_batch_size": 3,
  "trial_name": null,
  "trial_params": null
}
|
|