{
"best_metric": 0.7440476190476191,
"best_model_checkpoint": "vivit-b-16x2-kinetics400-finetuned-kinectic/checkpoint-200",
"epoch": 5.166666666666667,
"eval_steps": 500,
"global_step": 300,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"grad_norm": 44.361122131347656,
"learning_rate": 1.6666666666666667e-05,
"loss": 2.0408,
"step": 10
},
{
"epoch": 0.07,
"grad_norm": 41.430381774902344,
"learning_rate": 3.3333333333333335e-05,
"loss": 1.9331,
"step": 20
},
{
"epoch": 0.1,
"grad_norm": 31.959531784057617,
"learning_rate": 5e-05,
"loss": 1.5466,
"step": 30
},
{
"epoch": 0.13,
"grad_norm": 36.29765701293945,
"learning_rate": 4.814814814814815e-05,
"loss": 1.1729,
"step": 40
},
{
"epoch": 0.17,
"grad_norm": 43.942626953125,
"learning_rate": 4.62962962962963e-05,
"loss": 1.0732,
"step": 50
},
{
"epoch": 0.17,
"eval_accuracy": 0.47023809523809523,
"eval_loss": 1.296799659729004,
"eval_runtime": 89.7317,
"eval_samples_per_second": 1.872,
"eval_steps_per_second": 1.872,
"step": 50
},
{
"epoch": 1.03,
"grad_norm": 41.87215042114258,
"learning_rate": 4.4444444444444447e-05,
"loss": 0.9702,
"step": 60
},
{
"epoch": 1.07,
"grad_norm": 51.68751525878906,
"learning_rate": 4.259259259259259e-05,
"loss": 0.6609,
"step": 70
},
{
"epoch": 1.1,
"grad_norm": 58.199771881103516,
"learning_rate": 4.074074074074074e-05,
"loss": 0.8115,
"step": 80
},
{
"epoch": 1.13,
"grad_norm": 24.880760192871094,
"learning_rate": 3.888888888888889e-05,
"loss": 0.7691,
"step": 90
},
{
"epoch": 1.17,
"grad_norm": 18.063873291015625,
"learning_rate": 3.7037037037037037e-05,
"loss": 1.5128,
"step": 100
},
{
"epoch": 1.17,
"eval_accuracy": 0.6607142857142857,
"eval_loss": 1.040852665901184,
"eval_runtime": 89.5087,
"eval_samples_per_second": 1.877,
"eval_steps_per_second": 1.877,
"step": 100
},
{
"epoch": 2.03,
"grad_norm": 5.521175861358643,
"learning_rate": 3.518518518518519e-05,
"loss": 0.2528,
"step": 110
},
{
"epoch": 2.07,
"grad_norm": 2.120676279067993,
"learning_rate": 3.3333333333333335e-05,
"loss": 0.1885,
"step": 120
},
{
"epoch": 2.1,
"grad_norm": 2.3402693271636963,
"learning_rate": 3.148148148148148e-05,
"loss": 0.1405,
"step": 130
},
{
"epoch": 2.13,
"grad_norm": 1.86380934715271,
"learning_rate": 2.962962962962963e-05,
"loss": 0.3616,
"step": 140
},
{
"epoch": 2.17,
"grad_norm": 25.8436336517334,
"learning_rate": 2.777777777777778e-05,
"loss": 0.1189,
"step": 150
},
{
"epoch": 2.17,
"eval_accuracy": 0.6607142857142857,
"eval_loss": 0.9205451011657715,
"eval_runtime": 89.0696,
"eval_samples_per_second": 1.886,
"eval_steps_per_second": 1.886,
"step": 150
},
{
"epoch": 3.03,
"grad_norm": 0.7615412473678589,
"learning_rate": 2.5925925925925925e-05,
"loss": 0.2627,
"step": 160
},
{
"epoch": 3.07,
"grad_norm": 0.7165939211845398,
"learning_rate": 2.4074074074074074e-05,
"loss": 0.5066,
"step": 170
},
{
"epoch": 3.1,
"grad_norm": 0.48741012811660767,
"learning_rate": 2.2222222222222223e-05,
"loss": 0.3714,
"step": 180
},
{
"epoch": 3.13,
"grad_norm": 0.7969440221786499,
"learning_rate": 2.037037037037037e-05,
"loss": 0.0071,
"step": 190
},
{
"epoch": 3.17,
"grad_norm": 0.039452001452445984,
"learning_rate": 1.8518518518518518e-05,
"loss": 0.3398,
"step": 200
},
{
"epoch": 3.17,
"eval_accuracy": 0.7440476190476191,
"eval_loss": 0.7473598718643188,
"eval_runtime": 92.3069,
"eval_samples_per_second": 1.82,
"eval_steps_per_second": 1.82,
"step": 200
},
{
"epoch": 4.03,
"grad_norm": 0.09469372779130936,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.0076,
"step": 210
},
{
"epoch": 4.07,
"grad_norm": 3.8569278717041016,
"learning_rate": 1.4814814814814815e-05,
"loss": 0.0231,
"step": 220
},
{
"epoch": 4.1,
"grad_norm": 34.09035873413086,
"learning_rate": 1.2962962962962962e-05,
"loss": 0.0509,
"step": 230
},
{
"epoch": 4.13,
"grad_norm": 0.07331710308790207,
"learning_rate": 1.1111111111111112e-05,
"loss": 0.6114,
"step": 240
},
{
"epoch": 4.17,
"grad_norm": 0.16509070992469788,
"learning_rate": 9.259259259259259e-06,
"loss": 0.0102,
"step": 250
},
{
"epoch": 4.17,
"eval_accuracy": 0.6785714285714286,
"eval_loss": 1.097224473953247,
"eval_runtime": 89.5157,
"eval_samples_per_second": 1.877,
"eval_steps_per_second": 1.877,
"step": 250
},
{
"epoch": 5.03,
"grad_norm": 37.233829498291016,
"learning_rate": 7.4074074074074075e-06,
"loss": 0.052,
"step": 260
},
{
"epoch": 5.07,
"grad_norm": 0.7301992177963257,
"learning_rate": 5.555555555555556e-06,
"loss": 0.0158,
"step": 270
},
{
"epoch": 5.1,
"grad_norm": 0.07097156345844269,
"learning_rate": 3.7037037037037037e-06,
"loss": 0.068,
"step": 280
},
{
"epoch": 5.13,
"grad_norm": 41.699851989746094,
"learning_rate": 1.8518518518518519e-06,
"loss": 0.1216,
"step": 290
},
{
"epoch": 5.17,
"grad_norm": 0.10490654408931732,
"learning_rate": 0.0,
"loss": 0.0035,
"step": 300
},
{
"epoch": 5.17,
"eval_accuracy": 0.7142857142857143,
"eval_loss": 0.9135054349899292,
"eval_runtime": 90.4025,
"eval_samples_per_second": 1.858,
"eval_steps_per_second": 1.858,
"step": 300
},
{
"epoch": 5.17,
"step": 300,
"total_flos": 2.35711775029248e+17,
"train_loss": 0.5335011211285988,
"train_runtime": 666.5368,
"train_samples_per_second": 0.45,
"train_steps_per_second": 0.45
},
{
"epoch": 5.17,
"eval_accuracy": 0.7341772151898734,
"eval_loss": 0.9263604879379272,
"eval_runtime": 53.6181,
"eval_samples_per_second": 2.947,
"eval_steps_per_second": 2.947,
"step": 300
},
{
"epoch": 5.17,
"eval_accuracy": 0.7341772151898734,
"eval_loss": 0.9263607263565063,
"eval_runtime": 53.7885,
"eval_samples_per_second": 2.937,
"eval_steps_per_second": 2.937,
"step": 300
}
],
"logging_steps": 10,
"max_steps": 300,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 500,
"total_flos": 2.35711775029248e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}