{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.039278815196394,
"eval_steps": 25,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0206052801030264,
"grad_norm": 3.2468912601470947,
"learning_rate": 5e-05,
"loss": 2.9909,
"step": 1
},
{
"epoch": 0.0206052801030264,
"eval_loss": 3.228774309158325,
"eval_runtime": 3.4328,
"eval_samples_per_second": 14.566,
"eval_steps_per_second": 3.787,
"step": 1
},
{
"epoch": 0.0412105602060528,
"grad_norm": 3.9502065181732178,
"learning_rate": 0.0001,
"loss": 3.2572,
"step": 2
},
{
"epoch": 0.061815840309079204,
"grad_norm": 3.576547384262085,
"learning_rate": 9.990365154573717e-05,
"loss": 3.0055,
"step": 3
},
{
"epoch": 0.0824211204121056,
"grad_norm": 2.5420279502868652,
"learning_rate": 9.961501876182148e-05,
"loss": 2.3547,
"step": 4
},
{
"epoch": 0.103026400515132,
"grad_norm": 2.1390628814697266,
"learning_rate": 9.913533761814537e-05,
"loss": 2.0257,
"step": 5
},
{
"epoch": 0.12363168061815841,
"grad_norm": 1.7916548252105713,
"learning_rate": 9.846666218300807e-05,
"loss": 1.7607,
"step": 6
},
{
"epoch": 0.1442369607211848,
"grad_norm": 1.8303478956222534,
"learning_rate": 9.761185582727977e-05,
"loss": 1.6577,
"step": 7
},
{
"epoch": 0.1648422408242112,
"grad_norm": 1.7895830869674683,
"learning_rate": 9.657457896300791e-05,
"loss": 1.6774,
"step": 8
},
{
"epoch": 0.1854475209272376,
"grad_norm": 1.265881061553955,
"learning_rate": 9.535927336897098e-05,
"loss": 1.5762,
"step": 9
},
{
"epoch": 0.206052801030264,
"grad_norm": 1.290668249130249,
"learning_rate": 9.397114317029975e-05,
"loss": 1.4451,
"step": 10
},
{
"epoch": 0.22665808113329042,
"grad_norm": 1.081251859664917,
"learning_rate": 9.241613255361455e-05,
"loss": 1.4128,
"step": 11
},
{
"epoch": 0.24726336123631681,
"grad_norm": 1.317069411277771,
"learning_rate": 9.070090031310558e-05,
"loss": 1.326,
"step": 12
},
{
"epoch": 0.2678686413393432,
"grad_norm": 1.264490008354187,
"learning_rate": 8.883279133655399e-05,
"loss": 1.6552,
"step": 13
},
{
"epoch": 0.2884739214423696,
"grad_norm": 1.1361223459243774,
"learning_rate": 8.681980515339464e-05,
"loss": 1.6278,
"step": 14
},
{
"epoch": 0.30907920154539603,
"grad_norm": 1.0125176906585693,
"learning_rate": 8.467056167950311e-05,
"loss": 1.5921,
"step": 15
},
{
"epoch": 0.3296844816484224,
"grad_norm": 1.0024099349975586,
"learning_rate": 8.239426430539243e-05,
"loss": 1.485,
"step": 16
},
{
"epoch": 0.3502897617514488,
"grad_norm": 1.19777512550354,
"learning_rate": 8.000066048588211e-05,
"loss": 1.4315,
"step": 17
},
{
"epoch": 0.3708950418544752,
"grad_norm": 1.0741177797317505,
"learning_rate": 7.75e-05,
"loss": 1.3615,
"step": 18
},
{
"epoch": 0.3915003219575016,
"grad_norm": 1.0112042427062988,
"learning_rate": 7.490299105985507e-05,
"loss": 1.4198,
"step": 19
},
{
"epoch": 0.412105602060528,
"grad_norm": 1.0728622674942017,
"learning_rate": 7.222075445642904e-05,
"loss": 1.4319,
"step": 20
},
{
"epoch": 0.4327108821635544,
"grad_norm": 1.076214075088501,
"learning_rate": 6.946477593864228e-05,
"loss": 1.3549,
"step": 21
},
{
"epoch": 0.45331616226658084,
"grad_norm": 1.0060209035873413,
"learning_rate": 6.664685702961344e-05,
"loss": 1.2328,
"step": 22
},
{
"epoch": 0.4739214423696072,
"grad_norm": 1.0517760515213013,
"learning_rate": 6.377906449072578e-05,
"loss": 1.2736,
"step": 23
},
{
"epoch": 0.49452672247263363,
"grad_norm": 1.1817563772201538,
"learning_rate": 6.087367864990233e-05,
"loss": 1.2071,
"step": 24
},
{
"epoch": 0.51513200257566,
"grad_norm": 1.1937774419784546,
"learning_rate": 5.794314081535644e-05,
"loss": 1.5126,
"step": 25
},
{
"epoch": 0.51513200257566,
"eval_loss": 1.314895749092102,
"eval_runtime": 3.4937,
"eval_samples_per_second": 14.312,
"eval_steps_per_second": 3.721,
"step": 25
},
{
"epoch": 0.5357372826786864,
"grad_norm": 1.0864949226379395,
"learning_rate": 5.500000000000001e-05,
"loss": 1.3438,
"step": 26
},
{
"epoch": 0.5563425627817128,
"grad_norm": 1.1756632328033447,
"learning_rate": 5.205685918464356e-05,
"loss": 1.475,
"step": 27
},
{
"epoch": 0.5769478428847392,
"grad_norm": 1.0386406183242798,
"learning_rate": 4.912632135009769e-05,
"loss": 1.3067,
"step": 28
},
{
"epoch": 0.5975531229877656,
"grad_norm": 1.084520697593689,
"learning_rate": 4.6220935509274235e-05,
"loss": 1.3079,
"step": 29
},
{
"epoch": 0.6181584030907921,
"grad_norm": 1.199838638305664,
"learning_rate": 4.3353142970386564e-05,
"loss": 1.2165,
"step": 30
},
{
"epoch": 0.6387636831938184,
"grad_norm": 1.2528841495513916,
"learning_rate": 4.053522406135775e-05,
"loss": 1.2907,
"step": 31
},
{
"epoch": 0.6593689632968448,
"grad_norm": 1.1232812404632568,
"learning_rate": 3.777924554357096e-05,
"loss": 1.2557,
"step": 32
},
{
"epoch": 0.6799742433998712,
"grad_norm": 1.1742340326309204,
"learning_rate": 3.509700894014496e-05,
"loss": 1.2703,
"step": 33
},
{
"epoch": 0.7005795235028976,
"grad_norm": 1.1688557863235474,
"learning_rate": 3.250000000000001e-05,
"loss": 1.2046,
"step": 34
},
{
"epoch": 0.721184803605924,
"grad_norm": 1.1665050983428955,
"learning_rate": 2.9999339514117912e-05,
"loss": 1.1234,
"step": 35
},
{
"epoch": 0.7417900837089504,
"grad_norm": 1.281623125076294,
"learning_rate": 2.760573569460757e-05,
"loss": 1.1562,
"step": 36
},
{
"epoch": 0.7623953638119768,
"grad_norm": 1.1950199604034424,
"learning_rate": 2.53294383204969e-05,
"loss": 1.4527,
"step": 37
},
{
"epoch": 0.7830006439150032,
"grad_norm": 1.2145071029663086,
"learning_rate": 2.3180194846605367e-05,
"loss": 1.4938,
"step": 38
},
{
"epoch": 0.8036059240180297,
"grad_norm": 1.1486097574234009,
"learning_rate": 2.1167208663446025e-05,
"loss": 1.3334,
"step": 39
},
{
"epoch": 0.824211204121056,
"grad_norm": 1.0132158994674683,
"learning_rate": 1.9299099686894423e-05,
"loss": 1.3069,
"step": 40
},
{
"epoch": 0.8448164842240824,
"grad_norm": 1.069280743598938,
"learning_rate": 1.758386744638546e-05,
"loss": 1.2995,
"step": 41
},
{
"epoch": 0.8654217643271088,
"grad_norm": 1.0366171598434448,
"learning_rate": 1.602885682970026e-05,
"loss": 1.1784,
"step": 42
},
{
"epoch": 0.8860270444301352,
"grad_norm": 0.9968025088310242,
"learning_rate": 1.464072663102903e-05,
"loss": 1.1579,
"step": 43
},
{
"epoch": 0.9066323245331617,
"grad_norm": 1.0707521438598633,
"learning_rate": 1.3425421036992098e-05,
"loss": 1.2042,
"step": 44
},
{
"epoch": 0.927237604636188,
"grad_norm": 1.0155386924743652,
"learning_rate": 1.2388144172720251e-05,
"loss": 1.164,
"step": 45
},
{
"epoch": 0.9478428847392144,
"grad_norm": 1.0132240056991577,
"learning_rate": 1.1533337816991932e-05,
"loss": 1.1677,
"step": 46
},
{
"epoch": 0.9684481648422408,
"grad_norm": 1.047545075416565,
"learning_rate": 1.0864662381854632e-05,
"loss": 1.0935,
"step": 47
},
{
"epoch": 0.9890534449452673,
"grad_norm": 1.1519137620925903,
"learning_rate": 1.0384981238178534e-05,
"loss": 1.0777,
"step": 48
},
{
"epoch": 1.0186735350933678,
"grad_norm": 3.185882806777954,
"learning_rate": 1.0096348454262845e-05,
"loss": 2.4695,
"step": 49
},
{
"epoch": 1.039278815196394,
"grad_norm": 1.053970456123352,
"learning_rate": 1e-05,
"loss": 1.3533,
"step": 50
},
{
"epoch": 1.039278815196394,
"eval_loss": 1.2204114198684692,
"eval_runtime": 3.4847,
"eval_samples_per_second": 14.348,
"eval_steps_per_second": 3.731,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.968083617316864e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}