{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 9.523809523809524,
"eval_steps": 500,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.19047619047619047,
"grad_norm": 1.0693126916885376,
"learning_rate": 0.000196,
"loss": 9.5582,
"step": 2
},
{
"epoch": 0.38095238095238093,
"grad_norm": 1.2914847135543823,
"learning_rate": 0.000192,
"loss": 9.2227,
"step": 4
},
{
"epoch": 0.5714285714285714,
"grad_norm": 1.9945707321166992,
"learning_rate": 0.000188,
"loss": 8.7759,
"step": 6
},
{
"epoch": 0.7619047619047619,
"grad_norm": 2.011664628982544,
"learning_rate": 0.00018400000000000003,
"loss": 8.2142,
"step": 8
},
{
"epoch": 0.9523809523809523,
"grad_norm": 1.3491569757461548,
"learning_rate": 0.00018,
"loss": 7.9009,
"step": 10
},
{
"epoch": 1.1428571428571428,
"grad_norm": 0.9671052098274231,
"learning_rate": 0.00017600000000000002,
"loss": 7.6909,
"step": 12
},
{
"epoch": 1.3333333333333333,
"grad_norm": 1.116225004196167,
"learning_rate": 0.000172,
"loss": 7.5216,
"step": 14
},
{
"epoch": 1.5238095238095237,
"grad_norm": 1.0181453227996826,
"learning_rate": 0.000168,
"loss": 7.4308,
"step": 16
},
{
"epoch": 1.7142857142857144,
"grad_norm": 0.9790288209915161,
"learning_rate": 0.000164,
"loss": 7.4444,
"step": 18
},
{
"epoch": 1.9047619047619047,
"grad_norm": 0.9529135823249817,
"learning_rate": 0.00016,
"loss": 7.3866,
"step": 20
},
{
"epoch": 2.0952380952380953,
"grad_norm": 0.7934174537658691,
"learning_rate": 0.00015600000000000002,
"loss": 7.3822,
"step": 22
},
{
"epoch": 2.2857142857142856,
"grad_norm": 0.7163369059562683,
"learning_rate": 0.000152,
"loss": 7.3191,
"step": 24
},
{
"epoch": 2.4761904761904763,
"grad_norm": 1.347898244857788,
"learning_rate": 0.000148,
"loss": 7.2142,
"step": 26
},
{
"epoch": 2.6666666666666665,
"grad_norm": 0.7439594864845276,
"learning_rate": 0.000144,
"loss": 7.2718,
"step": 28
},
{
"epoch": 2.857142857142857,
"grad_norm": 1.4523087739944458,
"learning_rate": 0.00014,
"loss": 7.2901,
"step": 30
},
{
"epoch": 3.0476190476190474,
"grad_norm": 0.7053799033164978,
"learning_rate": 0.00013600000000000003,
"loss": 7.2125,
"step": 32
},
{
"epoch": 3.238095238095238,
"grad_norm": 0.6156577467918396,
"learning_rate": 0.000132,
"loss": 7.229,
"step": 34
},
{
"epoch": 3.4285714285714284,
"grad_norm": 0.40743571519851685,
"learning_rate": 0.00012800000000000002,
"loss": 7.2999,
"step": 36
},
{
"epoch": 3.619047619047619,
"grad_norm": 1.0756566524505615,
"learning_rate": 0.000124,
"loss": 7.212,
"step": 38
},
{
"epoch": 3.8095238095238093,
"grad_norm": 0.7201813459396362,
"learning_rate": 0.00012,
"loss": 7.1826,
"step": 40
},
{
"epoch": 4.0,
"grad_norm": 0.5773327946662903,
"learning_rate": 0.000116,
"loss": 7.1456,
"step": 42
},
{
"epoch": 4.190476190476191,
"grad_norm": 0.6004664301872253,
"learning_rate": 0.00011200000000000001,
"loss": 7.0349,
"step": 44
},
{
"epoch": 4.380952380952381,
"grad_norm": 1.4157112836837769,
"learning_rate": 0.00010800000000000001,
"loss": 7.2767,
"step": 46
},
{
"epoch": 4.571428571428571,
"grad_norm": 0.852541446685791,
"learning_rate": 0.00010400000000000001,
"loss": 7.1745,
"step": 48
},
{
"epoch": 4.761904761904762,
"grad_norm": 0.7835370898246765,
"learning_rate": 0.0001,
"loss": 7.2477,
"step": 50
},
{
"epoch": 4.9523809523809526,
"grad_norm": 0.414756178855896,
"learning_rate": 9.6e-05,
"loss": 7.1781,
"step": 52
},
{
"epoch": 5.142857142857143,
"grad_norm": 0.7038145661354065,
"learning_rate": 9.200000000000001e-05,
"loss": 7.1103,
"step": 54
},
{
"epoch": 5.333333333333333,
"grad_norm": 0.8368222713470459,
"learning_rate": 8.800000000000001e-05,
"loss": 7.1595,
"step": 56
},
{
"epoch": 5.523809523809524,
"grad_norm": 0.6943209171295166,
"learning_rate": 8.4e-05,
"loss": 7.1062,
"step": 58
},
{
"epoch": 5.714285714285714,
"grad_norm": 0.4186341464519501,
"learning_rate": 8e-05,
"loss": 7.2348,
"step": 60
},
{
"epoch": 5.904761904761905,
"grad_norm": 1.0224595069885254,
"learning_rate": 7.6e-05,
"loss": 7.1154,
"step": 62
},
{
"epoch": 6.095238095238095,
"grad_norm": 0.428688645362854,
"learning_rate": 7.2e-05,
"loss": 7.1194,
"step": 64
},
{
"epoch": 6.285714285714286,
"grad_norm": 0.913233757019043,
"learning_rate": 6.800000000000001e-05,
"loss": 7.1919,
"step": 66
},
{
"epoch": 6.476190476190476,
"grad_norm": 0.5481642484664917,
"learning_rate": 6.400000000000001e-05,
"loss": 7.126,
"step": 68
},
{
"epoch": 6.666666666666667,
"grad_norm": 0.49522772431373596,
"learning_rate": 6e-05,
"loss": 7.1564,
"step": 70
},
{
"epoch": 6.857142857142857,
"grad_norm": 0.40602990984916687,
"learning_rate": 5.6000000000000006e-05,
"loss": 7.081,
"step": 72
},
{
"epoch": 7.0476190476190474,
"grad_norm": 0.4593268036842346,
"learning_rate": 5.2000000000000004e-05,
"loss": 7.0596,
"step": 74
},
{
"epoch": 7.238095238095238,
"grad_norm": 0.44626158475875854,
"learning_rate": 4.8e-05,
"loss": 7.0674,
"step": 76
},
{
"epoch": 7.428571428571429,
"grad_norm": 0.6573432087898254,
"learning_rate": 4.4000000000000006e-05,
"loss": 7.0745,
"step": 78
},
{
"epoch": 7.619047619047619,
"grad_norm": 0.3820817172527313,
"learning_rate": 4e-05,
"loss": 7.0785,
"step": 80
},
{
"epoch": 7.809523809523809,
"grad_norm": 0.8610634803771973,
"learning_rate": 3.6e-05,
"loss": 7.1974,
"step": 82
},
{
"epoch": 8.0,
"grad_norm": 0.44188380241394043,
"learning_rate": 3.2000000000000005e-05,
"loss": 7.0847,
"step": 84
},
{
"epoch": 8.19047619047619,
"grad_norm": 0.6792606711387634,
"learning_rate": 2.8000000000000003e-05,
"loss": 7.1191,
"step": 86
},
{
"epoch": 8.380952380952381,
"grad_norm": 0.4903930723667145,
"learning_rate": 2.4e-05,
"loss": 7.1288,
"step": 88
},
{
"epoch": 8.571428571428571,
"grad_norm": 0.5853165984153748,
"learning_rate": 2e-05,
"loss": 7.0479,
"step": 90
},
{
"epoch": 8.761904761904763,
"grad_norm": 0.6836739182472229,
"learning_rate": 1.6000000000000003e-05,
"loss": 7.0448,
"step": 92
},
{
"epoch": 8.952380952380953,
"grad_norm": 0.5737291574478149,
"learning_rate": 1.2e-05,
"loss": 7.0717,
"step": 94
},
{
"epoch": 9.142857142857142,
"grad_norm": 1.709892988204956,
"learning_rate": 8.000000000000001e-06,
"loss": 6.9482,
"step": 96
},
{
"epoch": 9.333333333333334,
"grad_norm": 0.61203932762146,
"learning_rate": 4.000000000000001e-06,
"loss": 7.1598,
"step": 98
},
{
"epoch": 9.523809523809524,
"grad_norm": 0.3827505111694336,
"learning_rate": 0.0,
"loss": 7.1112,
"step": 100
},
{
"epoch": 9.523809523809524,
"step": 100,
"total_flos": 500328301455504.0,
"train_loss": 7.34823148727417,
"train_runtime": 426.3789,
"train_samples_per_second": 3.94,
"train_steps_per_second": 0.235
}
],
"logging_steps": 2,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 500328301455504.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}