{
  "best_metric": 3.5237081050872803,
  "best_model_checkpoint": "/Users/frapadovani/Desktop/babyLM_controlled/models_trained/reduce_on_plateau_convergence/age_random/checkpoint-42000",
  "epoch": 0.2222445642683656,
  "eval_steps": 2000,
  "global_step": 42000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.010583074488969791,
      "grad_norm": 1.3282362222671509,
      "learning_rate": 0.0001,
      "loss": 4.8574,
      "step": 2000
    },
    {
      "epoch": 0.010583074488969791,
      "eval_loss": 4.1984477043151855,
      "eval_runtime": 2.3492,
      "eval_samples_per_second": 744.941,
      "eval_steps_per_second": 46.825,
      "step": 2000
    },
    {
      "epoch": 0.021166148977939583,
      "grad_norm": 1.406479835510254,
      "learning_rate": 0.0001,
      "loss": 4.145,
      "step": 4000
    },
    {
      "epoch": 0.021166148977939583,
      "eval_loss": 3.9832916259765625,
      "eval_runtime": 2.1928,
      "eval_samples_per_second": 798.069,
      "eval_steps_per_second": 50.164,
      "step": 4000
    },
    {
      "epoch": 0.03174922346690937,
      "grad_norm": 1.4979146718978882,
      "learning_rate": 0.0001,
      "loss": 3.9948,
      "step": 6000
    },
    {
      "epoch": 0.03174922346690937,
      "eval_loss": 3.872448444366455,
      "eval_runtime": 2.0908,
      "eval_samples_per_second": 837.0,
      "eval_steps_per_second": 52.611,
      "step": 6000
    },
    {
      "epoch": 0.042332297955879165,
      "grad_norm": 1.4652293920516968,
      "learning_rate": 0.0001,
      "loss": 3.9047,
      "step": 8000
    },
    {
      "epoch": 0.042332297955879165,
      "eval_loss": 3.8042104244232178,
      "eval_runtime": 2.0939,
      "eval_samples_per_second": 835.759,
      "eval_steps_per_second": 52.533,
      "step": 8000
    },
    {
      "epoch": 0.05291537244484895,
      "grad_norm": 1.523988127708435,
      "learning_rate": 0.0001,
      "loss": 3.8428,
      "step": 10000
    },
    {
      "epoch": 0.05291537244484895,
      "eval_loss": 3.751901149749756,
      "eval_runtime": 2.1539,
      "eval_samples_per_second": 812.471,
      "eval_steps_per_second": 51.07,
      "step": 10000
    },
    {
      "epoch": 0.06349844693381874,
      "grad_norm": 1.6126410961151123,
      "learning_rate": 0.0001,
      "loss": 3.7955,
      "step": 12000
    },
    {
      "epoch": 0.06349844693381874,
      "eval_loss": 3.71409010887146,
      "eval_runtime": 2.0662,
      "eval_samples_per_second": 846.973,
      "eval_steps_per_second": 53.238,
      "step": 12000
    },
    {
      "epoch": 0.07408152142278854,
      "grad_norm": 1.656988263130188,
      "learning_rate": 0.0001,
      "loss": 3.7557,
      "step": 14000
    },
    {
      "epoch": 0.07408152142278854,
      "eval_loss": 3.6855504512786865,
      "eval_runtime": 2.0523,
      "eval_samples_per_second": 852.711,
      "eval_steps_per_second": 53.599,
      "step": 14000
    },
    {
      "epoch": 0.08466459591175833,
      "grad_norm": 1.6087511777877808,
      "learning_rate": 0.0001,
      "loss": 3.725,
      "step": 16000
    },
    {
      "epoch": 0.08466459591175833,
      "eval_loss": 3.658583164215088,
      "eval_runtime": 2.1352,
      "eval_samples_per_second": 819.607,
      "eval_steps_per_second": 51.518,
      "step": 16000
    },
    {
      "epoch": 0.09524767040072811,
      "grad_norm": 1.6472262144088745,
      "learning_rate": 0.0001,
      "loss": 3.6947,
      "step": 18000
    },
    {
      "epoch": 0.09524767040072811,
      "eval_loss": 3.638516664505005,
      "eval_runtime": 95.757,
      "eval_samples_per_second": 18.275,
      "eval_steps_per_second": 1.149,
      "step": 18000
    },
    {
      "epoch": 0.1058307448896979,
      "grad_norm": 1.6628919839859009,
      "learning_rate": 0.0001,
      "loss": 3.6726,
      "step": 20000
    },
    {
      "epoch": 0.1058307448896979,
      "eval_loss": 3.622284412384033,
      "eval_runtime": 2.1518,
      "eval_samples_per_second": 813.27,
      "eval_steps_per_second": 51.12,
      "step": 20000
    },
    {
      "epoch": 0.1164138193786677,
      "grad_norm": 1.5287901163101196,
      "learning_rate": 0.0001,
      "loss": 3.6556,
      "step": 22000
    },
    {
      "epoch": 0.1164138193786677,
      "eval_loss": 3.6024463176727295,
      "eval_runtime": 2.0124,
      "eval_samples_per_second": 869.594,
      "eval_steps_per_second": 54.66,
      "step": 22000
    },
    {
      "epoch": 0.1269968938676375,
      "grad_norm": 1.6352204084396362,
      "learning_rate": 0.0001,
      "loss": 3.6309,
      "step": 24000
    },
    {
      "epoch": 0.1269968938676375,
      "eval_loss": 3.5923054218292236,
      "eval_runtime": 2.0331,
      "eval_samples_per_second": 860.735,
      "eval_steps_per_second": 54.103,
      "step": 24000
    },
    {
      "epoch": 0.13757996835660727,
      "grad_norm": 1.5545786619186401,
      "learning_rate": 0.0001,
      "loss": 3.619,
      "step": 26000
    },
    {
      "epoch": 0.13757996835660727,
      "eval_loss": 3.5808773040771484,
      "eval_runtime": 2.1131,
      "eval_samples_per_second": 828.175,
      "eval_steps_per_second": 52.057,
      "step": 26000
    },
    {
      "epoch": 0.14816304284557708,
      "grad_norm": 1.6340019702911377,
      "learning_rate": 0.0001,
      "loss": 3.6045,
      "step": 28000
    },
    {
      "epoch": 0.14816304284557708,
      "eval_loss": 3.5688397884368896,
      "eval_runtime": 2.0238,
      "eval_samples_per_second": 864.697,
      "eval_steps_per_second": 54.352,
      "step": 28000
    },
    {
      "epoch": 0.15874611733454685,
      "grad_norm": 1.6332552433013916,
      "learning_rate": 0.0001,
      "loss": 3.5854,
      "step": 30000
    },
    {
      "epoch": 0.15874611733454685,
      "eval_loss": 3.5667507648468018,
      "eval_runtime": 1.9535,
      "eval_samples_per_second": 895.836,
      "eval_steps_per_second": 56.31,
      "step": 30000
    },
    {
      "epoch": 0.16932919182351666,
      "grad_norm": 1.6340084075927734,
      "learning_rate": 0.0001,
      "loss": 3.5793,
      "step": 32000
    },
    {
      "epoch": 0.16932919182351666,
      "eval_loss": 3.557473659515381,
      "eval_runtime": 2.0124,
      "eval_samples_per_second": 869.611,
      "eval_steps_per_second": 54.661,
      "step": 32000
    },
    {
      "epoch": 0.17991226631248644,
      "grad_norm": 1.5192989110946655,
      "learning_rate": 0.0001,
      "loss": 3.5685,
      "step": 34000
    },
    {
      "epoch": 0.17991226631248644,
      "eval_loss": 3.545267343521118,
      "eval_runtime": 1.995,
      "eval_samples_per_second": 877.2,
      "eval_steps_per_second": 55.138,
      "step": 34000
    },
    {
      "epoch": 0.19049534080145622,
      "grad_norm": 1.5996226072311401,
      "learning_rate": 0.0001,
      "loss": 3.556,
      "step": 36000
    },
    {
      "epoch": 0.19049534080145622,
      "eval_loss": 3.540522336959839,
      "eval_runtime": 1.9783,
      "eval_samples_per_second": 884.601,
      "eval_steps_per_second": 55.603,
      "step": 36000
    },
    {
      "epoch": 0.20107841529042603,
      "grad_norm": 1.5821503400802612,
      "learning_rate": 0.0001,
      "loss": 3.5475,
      "step": 38000
    },
    {
      "epoch": 0.20107841529042603,
      "eval_loss": 3.533203125,
      "eval_runtime": 1.9335,
      "eval_samples_per_second": 905.095,
      "eval_steps_per_second": 56.892,
      "step": 38000
    },
    {
      "epoch": 0.2116614897793958,
      "grad_norm": 1.63215970993042,
      "learning_rate": 0.0001,
      "loss": 3.5358,
      "step": 40000
    },
    {
      "epoch": 0.2116614897793958,
      "eval_loss": 3.5292651653289795,
      "eval_runtime": 2.0497,
      "eval_samples_per_second": 853.794,
      "eval_steps_per_second": 53.667,
      "step": 40000
    },
    {
      "epoch": 0.2222445642683656,
      "grad_norm": 1.6347813606262207,
      "learning_rate": 0.0001,
      "loss": 3.5305,
      "step": 42000
    },
    {
      "epoch": 0.2222445642683656,
      "eval_loss": 3.5237081050872803,
      "eval_runtime": 2.0264,
      "eval_samples_per_second": 863.619,
      "eval_steps_per_second": 54.285,
      "step": 42000
    }
  ],
  "logging_steps": 2000,
  "max_steps": 188981,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 2000,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 3,
        "early_stopping_threshold": 0.001
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 409441588862976.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}