whisper-small / trainer_state.json
{
"best_metric": 3.3522434244455903,
"best_model_checkpoint": "./whisper-small/checkpoint-2000",
"epoch": 6.472491909385114,
"eval_steps": 10,
"global_step": 2000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08,
"grad_norm": 81.5032730102539,
"learning_rate": 4.2000000000000006e-07,
"loss": 2.9603,
"step": 25
},
{
"epoch": 0.16,
"grad_norm": 17.11170768737793,
"learning_rate": 9.200000000000001e-07,
"loss": 2.144,
"step": 50
},
{
"epoch": 0.24,
"grad_norm": 11.967795372009277,
"learning_rate": 1.42e-06,
"loss": 1.3446,
"step": 75
},
{
"epoch": 0.32,
"grad_norm": 8.235530853271484,
"learning_rate": 1.9200000000000003e-06,
"loss": 0.7532,
"step": 100
},
{
"epoch": 0.4,
"grad_norm": 6.933337688446045,
"learning_rate": 2.42e-06,
"loss": 0.5681,
"step": 125
},
{
"epoch": 0.49,
"grad_norm": 8.942669868469238,
"learning_rate": 2.92e-06,
"loss": 0.496,
"step": 150
},
{
"epoch": 0.57,
"grad_norm": 11.463906288146973,
"learning_rate": 3.4200000000000007e-06,
"loss": 0.4111,
"step": 175
},
{
"epoch": 0.65,
"grad_norm": 7.532162666320801,
"learning_rate": 3.920000000000001e-06,
"loss": 0.3332,
"step": 200
},
{
"epoch": 0.73,
"grad_norm": 7.5595197677612305,
"learning_rate": 4.42e-06,
"loss": 0.2728,
"step": 225
},
{
"epoch": 0.81,
"grad_norm": 4.813773155212402,
"learning_rate": 4.92e-06,
"loss": 0.1087,
"step": 250
},
{
"epoch": 0.89,
"grad_norm": 3.7373056411743164,
"learning_rate": 5.420000000000001e-06,
"loss": 0.0653,
"step": 275
},
{
"epoch": 0.97,
"grad_norm": 4.375819683074951,
"learning_rate": 5.92e-06,
"loss": 0.059,
"step": 300
},
{
"epoch": 1.05,
"grad_norm": 1.661054015159607,
"learning_rate": 6.42e-06,
"loss": 0.0381,
"step": 325
},
{
"epoch": 1.13,
"grad_norm": 2.9067795276641846,
"learning_rate": 6.92e-06,
"loss": 0.0327,
"step": 350
},
{
"epoch": 1.21,
"grad_norm": 4.853701114654541,
"learning_rate": 7.420000000000001e-06,
"loss": 0.0295,
"step": 375
},
{
"epoch": 1.29,
"grad_norm": 1.736449122428894,
"learning_rate": 7.92e-06,
"loss": 0.0311,
"step": 400
},
{
"epoch": 1.38,
"grad_norm": 3.658984899520874,
"learning_rate": 8.42e-06,
"loss": 0.0296,
"step": 425
},
{
"epoch": 1.46,
"grad_norm": 2.040172576904297,
"learning_rate": 8.920000000000001e-06,
"loss": 0.0221,
"step": 450
},
{
"epoch": 1.54,
"grad_norm": 2.4981894493103027,
"learning_rate": 9.42e-06,
"loss": 0.0251,
"step": 475
},
{
"epoch": 1.62,
"grad_norm": 2.639390230178833,
"learning_rate": 9.920000000000002e-06,
"loss": 0.0261,
"step": 500
},
{
"epoch": 1.7,
"grad_norm": 2.526102066040039,
"learning_rate": 9.940000000000001e-06,
"loss": 0.0253,
"step": 525
},
{
"epoch": 1.78,
"grad_norm": 1.3722740411758423,
"learning_rate": 9.86857142857143e-06,
"loss": 0.0203,
"step": 550
},
{
"epoch": 1.86,
"grad_norm": 2.3701043128967285,
"learning_rate": 9.797142857142858e-06,
"loss": 0.0225,
"step": 575
},
{
"epoch": 1.94,
"grad_norm": 2.8724324703216553,
"learning_rate": 9.725714285714287e-06,
"loss": 0.0201,
"step": 600
},
{
"epoch": 2.02,
"grad_norm": 1.0449714660644531,
"learning_rate": 9.654285714285716e-06,
"loss": 0.0149,
"step": 625
},
{
"epoch": 2.1,
"grad_norm": 5.038928031921387,
"learning_rate": 9.582857142857143e-06,
"loss": 0.0104,
"step": 650
},
{
"epoch": 2.18,
"grad_norm": 1.6107451915740967,
"learning_rate": 9.511428571428572e-06,
"loss": 0.0086,
"step": 675
},
{
"epoch": 2.27,
"grad_norm": 1.3224350214004517,
"learning_rate": 9.440000000000001e-06,
"loss": 0.006,
"step": 700
},
{
"epoch": 2.35,
"grad_norm": 0.14248408377170563,
"learning_rate": 9.368571428571428e-06,
"loss": 0.0059,
"step": 725
},
{
"epoch": 2.43,
"grad_norm": 2.3801496028900146,
"learning_rate": 9.297142857142857e-06,
"loss": 0.0056,
"step": 750
},
{
"epoch": 2.51,
"grad_norm": 1.156813621520996,
"learning_rate": 9.225714285714286e-06,
"loss": 0.0067,
"step": 775
},
{
"epoch": 2.59,
"grad_norm": 0.9941128492355347,
"learning_rate": 9.154285714285715e-06,
"loss": 0.0076,
"step": 800
},
{
"epoch": 2.67,
"grad_norm": 1.4023518562316895,
"learning_rate": 9.082857142857143e-06,
"loss": 0.006,
"step": 825
},
{
"epoch": 2.75,
"grad_norm": 1.1814390420913696,
"learning_rate": 9.011428571428572e-06,
"loss": 0.0053,
"step": 850
},
{
"epoch": 2.83,
"grad_norm": 1.8819189071655273,
"learning_rate": 8.94e-06,
"loss": 0.008,
"step": 875
},
{
"epoch": 2.91,
"grad_norm": 0.9754526019096375,
"learning_rate": 8.86857142857143e-06,
"loss": 0.0047,
"step": 900
},
{
"epoch": 2.99,
"grad_norm": 0.40376096963882446,
"learning_rate": 8.797142857142857e-06,
"loss": 0.0062,
"step": 925
},
{
"epoch": 3.07,
"grad_norm": 0.07103849202394485,
"learning_rate": 8.725714285714286e-06,
"loss": 0.0028,
"step": 950
},
{
"epoch": 3.16,
"grad_norm": 0.5621446967124939,
"learning_rate": 8.654285714285715e-06,
"loss": 0.0025,
"step": 975
},
{
"epoch": 3.24,
"grad_norm": 0.506003201007843,
"learning_rate": 8.582857142857144e-06,
"loss": 0.0026,
"step": 1000
},
{
"epoch": 3.24,
"eval_loss": 0.020510252565145493,
"eval_runtime": 791.6891,
"eval_samples_per_second": 1.557,
"eval_steps_per_second": 0.196,
"eval_wer": 4.486848891181021,
"step": 1000
},
{
"epoch": 3.32,
"grad_norm": 0.723780632019043,
"learning_rate": 8.511428571428571e-06,
"loss": 0.003,
"step": 1025
},
{
"epoch": 3.4,
"grad_norm": 0.02721921168267727,
"learning_rate": 8.44e-06,
"loss": 0.0028,
"step": 1050
},
{
"epoch": 3.48,
"grad_norm": 1.0563775300979614,
"learning_rate": 8.36857142857143e-06,
"loss": 0.002,
"step": 1075
},
{
"epoch": 3.56,
"grad_norm": 1.54115629196167,
"learning_rate": 8.297142857142859e-06,
"loss": 0.0027,
"step": 1100
},
{
"epoch": 3.64,
"grad_norm": 0.017975807189941406,
"learning_rate": 8.225714285714288e-06,
"loss": 0.0016,
"step": 1125
},
{
"epoch": 3.72,
"grad_norm": 0.15264229476451874,
"learning_rate": 8.154285714285715e-06,
"loss": 0.0034,
"step": 1150
},
{
"epoch": 3.8,
"grad_norm": 0.655139148235321,
"learning_rate": 8.082857142857144e-06,
"loss": 0.0025,
"step": 1175
},
{
"epoch": 3.88,
"grad_norm": 0.9618725776672363,
"learning_rate": 8.011428571428573e-06,
"loss": 0.0017,
"step": 1200
},
{
"epoch": 3.96,
"grad_norm": 3.206550121307373,
"learning_rate": 7.94e-06,
"loss": 0.0031,
"step": 1225
},
{
"epoch": 4.05,
"grad_norm": 0.41130420565605164,
"learning_rate": 7.86857142857143e-06,
"loss": 0.0012,
"step": 1250
},
{
"epoch": 4.13,
"grad_norm": 0.0929947942495346,
"learning_rate": 7.797142857142858e-06,
"loss": 0.0012,
"step": 1275
},
{
"epoch": 4.21,
"grad_norm": 0.050773363560438156,
"learning_rate": 7.725714285714286e-06,
"loss": 0.0005,
"step": 1300
},
{
"epoch": 4.29,
"grad_norm": 0.0827890932559967,
"learning_rate": 7.654285714285715e-06,
"loss": 0.0005,
"step": 1325
},
{
"epoch": 4.37,
"grad_norm": 0.02210867777466774,
"learning_rate": 7.5828571428571444e-06,
"loss": 0.0007,
"step": 1350
},
{
"epoch": 4.45,
"grad_norm": 0.014925210736691952,
"learning_rate": 7.511428571428572e-06,
"loss": 0.0012,
"step": 1375
},
{
"epoch": 4.53,
"grad_norm": 0.027211977168917656,
"learning_rate": 7.440000000000001e-06,
"loss": 0.0009,
"step": 1400
},
{
"epoch": 4.61,
"grad_norm": 1.4061143398284912,
"learning_rate": 7.36857142857143e-06,
"loss": 0.0008,
"step": 1425
},
{
"epoch": 4.69,
"grad_norm": 0.1997688263654709,
"learning_rate": 7.297142857142858e-06,
"loss": 0.0013,
"step": 1450
},
{
"epoch": 4.77,
"grad_norm": 0.011363201774656773,
"learning_rate": 7.225714285714286e-06,
"loss": 0.0005,
"step": 1475
},
{
"epoch": 4.85,
"grad_norm": 0.020546328276395798,
"learning_rate": 7.154285714285715e-06,
"loss": 0.0004,
"step": 1500
},
{
"epoch": 4.94,
"grad_norm": 0.026502510532736778,
"learning_rate": 7.082857142857143e-06,
"loss": 0.0007,
"step": 1525
},
{
"epoch": 5.02,
"grad_norm": 0.1575097292661667,
"learning_rate": 7.011428571428572e-06,
"loss": 0.0009,
"step": 1550
},
{
"epoch": 5.1,
"grad_norm": 0.89250248670578,
"learning_rate": 6.9400000000000005e-06,
"loss": 0.001,
"step": 1575
},
{
"epoch": 5.18,
"grad_norm": 0.5113543272018433,
"learning_rate": 6.868571428571429e-06,
"loss": 0.0003,
"step": 1600
},
{
"epoch": 5.26,
"grad_norm": 0.05453397333621979,
"learning_rate": 6.797142857142858e-06,
"loss": 0.0003,
"step": 1625
},
{
"epoch": 5.34,
"grad_norm": 0.01590396836400032,
"learning_rate": 6.725714285714287e-06,
"loss": 0.0002,
"step": 1650
},
{
"epoch": 5.42,
"grad_norm": 0.00851709395647049,
"learning_rate": 6.654285714285716e-06,
"loss": 0.0005,
"step": 1675
},
{
"epoch": 5.5,
"grad_norm": 0.014185091480612755,
"learning_rate": 6.582857142857143e-06,
"loss": 0.0004,
"step": 1700
},
{
"epoch": 5.58,
"grad_norm": 0.017834780737757683,
"learning_rate": 6.511428571428572e-06,
"loss": 0.0003,
"step": 1725
},
{
"epoch": 5.66,
"grad_norm": 0.017684083431959152,
"learning_rate": 6.440000000000001e-06,
"loss": 0.0003,
"step": 1750
},
{
"epoch": 5.74,
"grad_norm": 0.009825997054576874,
"learning_rate": 6.368571428571429e-06,
"loss": 0.0001,
"step": 1775
},
{
"epoch": 5.83,
"grad_norm": 0.08621440827846527,
"learning_rate": 6.297142857142857e-06,
"loss": 0.0004,
"step": 1800
},
{
"epoch": 5.91,
"grad_norm": 0.019367387518286705,
"learning_rate": 6.225714285714286e-06,
"loss": 0.0007,
"step": 1825
},
{
"epoch": 5.99,
"grad_norm": 0.04589274898171425,
"learning_rate": 6.1542857142857145e-06,
"loss": 0.0003,
"step": 1850
},
{
"epoch": 6.07,
"grad_norm": 0.010185680352151394,
"learning_rate": 6.0828571428571435e-06,
"loss": 0.0002,
"step": 1875
},
{
"epoch": 6.15,
"grad_norm": 0.004478767979890108,
"learning_rate": 6.011428571428572e-06,
"loss": 0.0002,
"step": 1900
},
{
"epoch": 6.23,
"grad_norm": 0.03858327120542526,
"learning_rate": 5.94e-06,
"loss": 0.0001,
"step": 1925
},
{
"epoch": 6.31,
"grad_norm": 0.008098349906504154,
"learning_rate": 5.868571428571429e-06,
"loss": 0.0002,
"step": 1950
},
{
"epoch": 6.39,
"grad_norm": 0.0035556757356971502,
"learning_rate": 5.797142857142858e-06,
"loss": 0.0001,
"step": 1975
},
{
"epoch": 6.47,
"grad_norm": 0.00917895045131445,
"learning_rate": 5.725714285714287e-06,
"loss": 0.0003,
"step": 2000
},
{
"epoch": 6.47,
"eval_loss": 0.01798020303249359,
"eval_runtime": 807.9427,
"eval_samples_per_second": 1.526,
"eval_steps_per_second": 0.192,
"eval_wer": 3.3522434244455903,
"step": 2000
}
],
"logging_steps": 25,
"max_steps": 4000,
"num_input_tokens_seen": 0,
"num_train_epochs": 13,
"save_steps": 10,
"total_flos": 9.21395466141696e+18,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
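
For reference, below is a minimal sketch of how one might inspect this trainer state offline. It assumes the file above is saved locally as trainer_state.json and uses only the Python standard library; the path and printed fields are illustrative choices, not part of the uploaded file.

import json

# Load the state file written by the Hugging Face Trainer (assumed local path).
with open("trainer_state.json", "r", encoding="utf-8") as f:
    state = json.load(f)

# best_metric here equals the eval_wer logged at step 2000 (3.3522...),
# so the tracked metric is presumably WER.
print("best_metric:", state["best_metric"])
print("best checkpoint:", state["best_model_checkpoint"])

# Split log_history into training-loss entries and evaluation entries.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_wer" in e]

for e in eval_logs:
    print(f"step {e['step']}: eval_loss={e['eval_loss']:.4f}, eval_wer={e['eval_wer']:.2f}")

# Last logged training loss (step 2000 in this file).
print("last train loss:", train_logs[-1]["loss"])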