{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.898550724637681,
"eval_steps": 500,
"global_step": 136,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.057971014492753624,
"grad_norm": 55.32845687866211,
"learning_rate": 0.0001985294117647059,
"loss": 76.4214,
"step": 2
},
{
"epoch": 0.11594202898550725,
"grad_norm": 59.647117614746094,
"learning_rate": 0.00019558823529411764,
"loss": 66.9992,
"step": 4
},
{
"epoch": 0.17391304347826086,
"grad_norm": 37.24838638305664,
"learning_rate": 0.00019264705882352944,
"loss": 53.3101,
"step": 6
},
{
"epoch": 0.2318840579710145,
"grad_norm": 31.579923629760742,
"learning_rate": 0.00018970588235294117,
"loss": 43.1133,
"step": 8
},
{
"epoch": 0.2898550724637681,
"grad_norm": 33.05708312988281,
"learning_rate": 0.00018676470588235297,
"loss": 41.4356,
"step": 10
},
{
"epoch": 0.34782608695652173,
"grad_norm": 19.261913299560547,
"learning_rate": 0.0001838235294117647,
"loss": 40.5694,
"step": 12
},
{
"epoch": 0.4057971014492754,
"grad_norm": 10.231746673583984,
"learning_rate": 0.00018088235294117647,
"loss": 35.4952,
"step": 14
},
{
"epoch": 0.463768115942029,
"grad_norm": 12.686185836791992,
"learning_rate": 0.00017794117647058823,
"loss": 35.9178,
"step": 16
},
{
"epoch": 0.5217391304347826,
"grad_norm": 5.934300899505615,
"learning_rate": 0.000175,
"loss": 36.0789,
"step": 18
},
{
"epoch": 0.5797101449275363,
"grad_norm": 6.5412092208862305,
"learning_rate": 0.0001720588235294118,
"loss": 36.4717,
"step": 20
},
{
"epoch": 0.6376811594202898,
"grad_norm": 7.47074031829834,
"learning_rate": 0.00016911764705882353,
"loss": 36.1205,
"step": 22
},
{
"epoch": 0.6956521739130435,
"grad_norm": 7.197345733642578,
"learning_rate": 0.00016617647058823532,
"loss": 35.8026,
"step": 24
},
{
"epoch": 0.7536231884057971,
"grad_norm": 8.729438781738281,
"learning_rate": 0.00016323529411764706,
"loss": 36.109,
"step": 26
},
{
"epoch": 0.8115942028985508,
"grad_norm": 10.730180740356445,
"learning_rate": 0.00016029411764705885,
"loss": 34.3386,
"step": 28
},
{
"epoch": 0.8695652173913043,
"grad_norm": 8.981538772583008,
"learning_rate": 0.0001573529411764706,
"loss": 33.3324,
"step": 30
},
{
"epoch": 0.927536231884058,
"grad_norm": 13.125741958618164,
"learning_rate": 0.00015441176470588238,
"loss": 34.8168,
"step": 32
},
{
"epoch": 0.9855072463768116,
"grad_norm": 7.202057361602783,
"learning_rate": 0.00015147058823529412,
"loss": 33.9379,
"step": 34
},
{
"epoch": 1.0289855072463767,
"grad_norm": 7.689969539642334,
"learning_rate": 0.00014852941176470588,
"loss": 26.4506,
"step": 36
},
{
"epoch": 1.0869565217391304,
"grad_norm": 12.119438171386719,
"learning_rate": 0.00014558823529411765,
"loss": 34.1455,
"step": 38
},
{
"epoch": 1.144927536231884,
"grad_norm": 10.431544303894043,
"learning_rate": 0.0001426470588235294,
"loss": 33.6817,
"step": 40
},
{
"epoch": 1.2028985507246377,
"grad_norm": 6.7350311279296875,
"learning_rate": 0.00013970588235294118,
"loss": 34.3042,
"step": 42
},
{
"epoch": 1.2608695652173914,
"grad_norm": 7.63714599609375,
"learning_rate": 0.00013676470588235294,
"loss": 33.6539,
"step": 44
},
{
"epoch": 1.318840579710145,
"grad_norm": 14.94995403289795,
"learning_rate": 0.0001338235294117647,
"loss": 34.8423,
"step": 46
},
{
"epoch": 1.3768115942028984,
"grad_norm": 5.653194904327393,
"learning_rate": 0.00013088235294117647,
"loss": 33.6543,
"step": 48
},
{
"epoch": 1.434782608695652,
"grad_norm": 7.0490946769714355,
"learning_rate": 0.00012794117647058824,
"loss": 34.4564,
"step": 50
},
{
"epoch": 1.4927536231884058,
"grad_norm": 9.5311279296875,
"learning_rate": 0.000125,
"loss": 34.0708,
"step": 52
},
{
"epoch": 1.5507246376811594,
"grad_norm": 9.313169479370117,
"learning_rate": 0.00012205882352941178,
"loss": 35.0762,
"step": 54
},
{
"epoch": 1.608695652173913,
"grad_norm": 6.120841026306152,
"learning_rate": 0.00011911764705882353,
"loss": 33.5708,
"step": 56
},
{
"epoch": 1.6666666666666665,
"grad_norm": 19.58426856994629,
"learning_rate": 0.00011617647058823531,
"loss": 32.583,
"step": 58
},
{
"epoch": 1.7246376811594204,
"grad_norm": 10.036286354064941,
"learning_rate": 0.00011323529411764706,
"loss": 35.4897,
"step": 60
},
{
"epoch": 1.7826086956521738,
"grad_norm": 9.211186408996582,
"learning_rate": 0.00011029411764705884,
"loss": 33.5828,
"step": 62
},
{
"epoch": 1.8405797101449275,
"grad_norm": 7.165535926818848,
"learning_rate": 0.00010735294117647059,
"loss": 34.0128,
"step": 64
},
{
"epoch": 1.8985507246376812,
"grad_norm": 8.84890079498291,
"learning_rate": 0.00010441176470588237,
"loss": 34.0061,
"step": 66
},
{
"epoch": 1.9565217391304348,
"grad_norm": 6.122589111328125,
"learning_rate": 0.00010147058823529412,
"loss": 34.1371,
"step": 68
},
{
"epoch": 2.0,
"grad_norm": 5.6373186111450195,
"learning_rate": 9.852941176470589e-05,
"loss": 25.4753,
"step": 70
},
{
"epoch": 2.0579710144927534,
"grad_norm": 10.314593315124512,
"learning_rate": 9.558823529411765e-05,
"loss": 33.9195,
"step": 72
},
{
"epoch": 2.1159420289855073,
"grad_norm": 8.193323135375977,
"learning_rate": 9.264705882352942e-05,
"loss": 33.1874,
"step": 74
},
{
"epoch": 2.1739130434782608,
"grad_norm": 9.070418357849121,
"learning_rate": 8.970588235294118e-05,
"loss": 33.3055,
"step": 76
},
{
"epoch": 2.2318840579710146,
"grad_norm": 8.063404083251953,
"learning_rate": 8.676470588235295e-05,
"loss": 34.1831,
"step": 78
},
{
"epoch": 2.289855072463768,
"grad_norm": 7.3460516929626465,
"learning_rate": 8.382352941176471e-05,
"loss": 32.5002,
"step": 80
},
{
"epoch": 2.3478260869565215,
"grad_norm": 7.9106974601745605,
"learning_rate": 8.088235294117648e-05,
"loss": 33.9427,
"step": 82
},
{
"epoch": 2.4057971014492754,
"grad_norm": 6.9293532371521,
"learning_rate": 7.794117647058824e-05,
"loss": 32.7437,
"step": 84
},
{
"epoch": 2.463768115942029,
"grad_norm": 5.583312511444092,
"learning_rate": 7.500000000000001e-05,
"loss": 32.4636,
"step": 86
},
{
"epoch": 2.5217391304347827,
"grad_norm": 9.602357864379883,
"learning_rate": 7.205882352941177e-05,
"loss": 34.4266,
"step": 88
},
{
"epoch": 2.579710144927536,
"grad_norm": 7.29642391204834,
"learning_rate": 6.911764705882354e-05,
"loss": 33.2233,
"step": 90
},
{
"epoch": 2.63768115942029,
"grad_norm": 7.916087627410889,
"learning_rate": 6.61764705882353e-05,
"loss": 32.328,
"step": 92
},
{
"epoch": 2.6956521739130435,
"grad_norm": 9.700789451599121,
"learning_rate": 6.323529411764705e-05,
"loss": 33.3864,
"step": 94
},
{
"epoch": 2.753623188405797,
"grad_norm": 7.925725936889648,
"learning_rate": 6.0294117647058825e-05,
"loss": 33.0956,
"step": 96
},
{
"epoch": 2.8115942028985508,
"grad_norm": 7.136451721191406,
"learning_rate": 5.735294117647059e-05,
"loss": 34.077,
"step": 98
},
{
"epoch": 2.869565217391304,
"grad_norm": 7.057907581329346,
"learning_rate": 5.441176470588235e-05,
"loss": 32.854,
"step": 100
},
{
"epoch": 2.927536231884058,
"grad_norm": 8.482731819152832,
"learning_rate": 5.147058823529411e-05,
"loss": 31.9702,
"step": 102
},
{
"epoch": 2.9855072463768115,
"grad_norm": 8.195663452148438,
"learning_rate": 4.8529411764705885e-05,
"loss": 32.0516,
"step": 104
},
{
"epoch": 3.028985507246377,
"grad_norm": 13.711084365844727,
"learning_rate": 4.558823529411765e-05,
"loss": 23.3778,
"step": 106
},
{
"epoch": 3.0869565217391304,
"grad_norm": 13.546501159667969,
"learning_rate": 4.2647058823529415e-05,
"loss": 32.14,
"step": 108
},
{
"epoch": 3.1449275362318843,
"grad_norm": 11.532342910766602,
"learning_rate": 3.970588235294117e-05,
"loss": 32.6737,
"step": 110
},
{
"epoch": 3.2028985507246377,
"grad_norm": 9.859041213989258,
"learning_rate": 3.6764705882352945e-05,
"loss": 32.2716,
"step": 112
},
{
"epoch": 3.260869565217391,
"grad_norm": 7.993894577026367,
"learning_rate": 3.382352941176471e-05,
"loss": 32.8686,
"step": 114
},
{
"epoch": 3.318840579710145,
"grad_norm": 5.935787677764893,
"learning_rate": 3.0882352941176475e-05,
"loss": 33.3099,
"step": 116
},
{
"epoch": 3.3768115942028984,
"grad_norm": 8.535140037536621,
"learning_rate": 2.7941176470588236e-05,
"loss": 33.1781,
"step": 118
},
{
"epoch": 3.4347826086956523,
"grad_norm": 8.691988945007324,
"learning_rate": 2.5e-05,
"loss": 31.1977,
"step": 120
},
{
"epoch": 3.4927536231884058,
"grad_norm": 7.092562675476074,
"learning_rate": 2.2058823529411766e-05,
"loss": 32.9427,
"step": 122
},
{
"epoch": 3.550724637681159,
"grad_norm": 10.397586822509766,
"learning_rate": 1.9117647058823528e-05,
"loss": 32.8582,
"step": 124
},
{
"epoch": 3.608695652173913,
"grad_norm": 7.606701374053955,
"learning_rate": 1.6176470588235296e-05,
"loss": 33.5666,
"step": 126
},
{
"epoch": 3.6666666666666665,
"grad_norm": 9.118799209594727,
"learning_rate": 1.323529411764706e-05,
"loss": 32.2489,
"step": 128
},
{
"epoch": 3.7246376811594204,
"grad_norm": 8.418676376342773,
"learning_rate": 1.0294117647058824e-05,
"loss": 32.8995,
"step": 130
},
{
"epoch": 3.782608695652174,
"grad_norm": 7.379878044128418,
"learning_rate": 7.3529411764705884e-06,
"loss": 33.5503,
"step": 132
},
{
"epoch": 3.8405797101449277,
"grad_norm": 7.183637619018555,
"learning_rate": 4.411764705882353e-06,
"loss": 31.911,
"step": 134
},
{
"epoch": 3.898550724637681,
"grad_norm": 12.431236267089844,
"learning_rate": 1.4705882352941177e-06,
"loss": 31.6491,
"step": 136
},
{
"epoch": 3.898550724637681,
"step": 136,
"total_flos": 29261690983296.0,
"train_loss": 35.055379671208996,
"train_runtime": 354.4991,
"train_samples_per_second": 6.229,
"train_steps_per_second": 0.384
}
],
"logging_steps": 2,
"max_steps": 136,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 29261690983296.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}