{
"best_metric": 0.7772539776075428,
"best_model_checkpoint": "ibert-roberta-base-Abusive_Or_Threatening_Speech/checkpoint-1828",
"epoch": 1.0,
"global_step": 1828,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 1.9989059080962803e-05,
"loss": 0.7234,
"step": 1
},
{
"epoch": 0.03,
"learning_rate": 1.9452954048140046e-05,
"loss": 0.3222,
"step": 50
},
{
"epoch": 0.05,
"learning_rate": 1.890590809628009e-05,
"loss": 0.132,
"step": 100
},
{
"epoch": 0.08,
"learning_rate": 1.8358862144420135e-05,
"loss": 0.1327,
"step": 150
},
{
"epoch": 0.11,
"learning_rate": 1.7811816192560176e-05,
"loss": 0.1067,
"step": 200
},
{
"epoch": 0.14,
"learning_rate": 1.726477024070022e-05,
"loss": 0.1085,
"step": 250
},
{
"epoch": 0.16,
"learning_rate": 1.6717724288840264e-05,
"loss": 0.0917,
"step": 300
},
{
"epoch": 0.19,
"learning_rate": 1.617067833698031e-05,
"loss": 0.1098,
"step": 350
},
{
"epoch": 0.22,
"learning_rate": 1.562363238512035e-05,
"loss": 0.1011,
"step": 400
},
{
"epoch": 0.25,
"learning_rate": 1.5076586433260396e-05,
"loss": 0.0979,
"step": 450
},
{
"epoch": 0.27,
"learning_rate": 1.452954048140044e-05,
"loss": 0.1076,
"step": 500
},
{
"epoch": 0.3,
"learning_rate": 1.3982494529540483e-05,
"loss": 0.1102,
"step": 550
},
{
"epoch": 0.33,
"learning_rate": 1.3435448577680525e-05,
"loss": 0.0987,
"step": 600
},
{
"epoch": 0.36,
"learning_rate": 1.2888402625820571e-05,
"loss": 0.0958,
"step": 650
},
{
"epoch": 0.38,
"learning_rate": 1.2341356673960614e-05,
"loss": 0.0906,
"step": 700
},
{
"epoch": 0.41,
"learning_rate": 1.1794310722100657e-05,
"loss": 0.1108,
"step": 750
},
{
"epoch": 0.44,
"learning_rate": 1.12472647702407e-05,
"loss": 0.1282,
"step": 800
},
{
"epoch": 0.46,
"learning_rate": 1.0700218818380745e-05,
"loss": 0.0975,
"step": 850
},
{
"epoch": 0.49,
"learning_rate": 1.0153172866520788e-05,
"loss": 0.0944,
"step": 900
},
{
"epoch": 0.52,
"learning_rate": 9.606126914660832e-06,
"loss": 0.088,
"step": 950
},
{
"epoch": 0.55,
"learning_rate": 9.059080962800875e-06,
"loss": 0.0919,
"step": 1000
},
{
"epoch": 0.57,
"learning_rate": 8.51203501094092e-06,
"loss": 0.0934,
"step": 1050
},
{
"epoch": 0.6,
"learning_rate": 7.964989059080964e-06,
"loss": 0.0924,
"step": 1100
},
{
"epoch": 0.63,
"learning_rate": 7.417943107221007e-06,
"loss": 0.0975,
"step": 1150
},
{
"epoch": 0.66,
"learning_rate": 6.87089715536105e-06,
"loss": 0.0857,
"step": 1200
},
{
"epoch": 0.68,
"learning_rate": 6.323851203501094e-06,
"loss": 0.0917,
"step": 1250
},
{
"epoch": 0.71,
"learning_rate": 5.776805251641139e-06,
"loss": 0.0924,
"step": 1300
},
{
"epoch": 0.74,
"learning_rate": 5.229759299781182e-06,
"loss": 0.0873,
"step": 1350
},
{
"epoch": 0.77,
"learning_rate": 4.682713347921226e-06,
"loss": 0.0732,
"step": 1400
},
{
"epoch": 0.79,
"learning_rate": 4.135667396061269e-06,
"loss": 0.1094,
"step": 1450
},
{
"epoch": 0.82,
"learning_rate": 3.5886214442013136e-06,
"loss": 0.0918,
"step": 1500
},
{
"epoch": 0.85,
"learning_rate": 3.041575492341357e-06,
"loss": 0.0919,
"step": 1550
},
{
"epoch": 0.88,
"learning_rate": 2.4945295404814006e-06,
"loss": 0.0875,
"step": 1600
},
{
"epoch": 0.9,
"learning_rate": 1.9474835886214445e-06,
"loss": 0.0868,
"step": 1650
},
{
"epoch": 0.93,
"learning_rate": 1.400437636761488e-06,
"loss": 0.0969,
"step": 1700
},
{
"epoch": 0.96,
"learning_rate": 8.533916849015319e-07,
"loss": 0.0973,
"step": 1750
},
{
"epoch": 0.98,
"learning_rate": 3.063457330415755e-07,
"loss": 0.0771,
"step": 1800
},
{
"epoch": 1.0,
"eval_F1": 0.7772539776075428,
"eval_Precision": 0.7083780880773362,
"eval_Recall": 0.8609660574412533,
"eval_accuracy": 0.9741476592688849,
"eval_loss": 0.08016789704561234,
"eval_runtime": 13441.5223,
"eval_samples_per_second": 2.176,
"eval_steps_per_second": 0.034,
"step": 1828
},
{
"epoch": 1.0,
"step": 1828,
"total_flos": 1.92457374411324e+16,
"train_loss": 0.10460054101646599,
"train_runtime": 331532.8168,
"train_samples_per_second": 0.353,
"train_steps_per_second": 0.006
}
],
"max_steps": 1828,
"num_train_epochs": 1,
"total_flos": 1.92457374411324e+16,
"trial_name": null,
"trial_params": null
}