{
  "best_metric": 0.49125435948371887,
  "best_model_checkpoint": "../../experiments_checkpoints/MAdAiLab/google_t5/t5_base_twitter/checkpoint-250",
  "epoch": 0.9191176470588235,
  "eval_steps": 50,
  "global_step": 250,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04,
      "grad_norm": 1.5012608766555786,
      "learning_rate": 0.0004938725490196079,
      "loss": 0.6085,
      "step": 10
    },
    {
      "epoch": 0.07,
      "grad_norm": 1.0885683298110962,
      "learning_rate": 0.0004877450980392157,
      "loss": 0.5402,
      "step": 20
    },
    {
      "epoch": 0.11,
      "grad_norm": 3.7890007495880127,
      "learning_rate": 0.00048161764705882356,
      "loss": 0.5591,
      "step": 30
    },
    {
      "epoch": 0.15,
      "grad_norm": 1.2080425024032593,
      "learning_rate": 0.00047549019607843134,
      "loss": 0.5687,
      "step": 40
    },
    {
      "epoch": 0.18,
      "grad_norm": 0.9302119612693787,
      "learning_rate": 0.0004693627450980392,
      "loss": 0.4808,
      "step": 50
    },
    {
      "epoch": 0.18,
      "eval_accuracy": 0.7444852941176471,
      "eval_f1_macro": 0.6739663773514942,
      "eval_f1_micro": 0.7444852941176471,
      "eval_loss": 0.5169704556465149,
      "eval_runtime": 2.6255,
      "eval_samples_per_second": 414.395,
      "eval_steps_per_second": 12.95,
      "step": 50
    },
    {
      "epoch": 0.22,
      "grad_norm": 2.9698123931884766,
      "learning_rate": 0.0004632352941176471,
      "loss": 0.4972,
      "step": 60
    },
    {
      "epoch": 0.26,
      "grad_norm": 1.0578800439834595,
      "learning_rate": 0.0004571078431372549,
      "loss": 0.4967,
      "step": 70
    },
    {
      "epoch": 0.29,
      "grad_norm": 2.512136697769165,
      "learning_rate": 0.0004509803921568628,
      "loss": 0.4987,
      "step": 80
    },
    {
      "epoch": 0.33,
      "grad_norm": 1.629062533378601,
      "learning_rate": 0.00044485294117647056,
      "loss": 0.4347,
      "step": 90
    },
    {
      "epoch": 0.37,
      "grad_norm": 1.581597089767456,
      "learning_rate": 0.00043872549019607844,
      "loss": 0.5169,
      "step": 100
    },
    {
      "epoch": 0.37,
      "eval_accuracy": 0.7555147058823529,
      "eval_f1_macro": 0.7269323671497585,
      "eval_f1_micro": 0.7555147058823529,
      "eval_loss": 0.5100224018096924,
      "eval_runtime": 2.6367,
      "eval_samples_per_second": 412.631,
      "eval_steps_per_second": 12.895,
      "step": 100
    },
    {
      "epoch": 0.4,
      "grad_norm": 1.149016261100769,
      "learning_rate": 0.0004325980392156863,
      "loss": 0.4898,
      "step": 110
    },
    {
      "epoch": 0.44,
      "grad_norm": 1.1472299098968506,
      "learning_rate": 0.0004264705882352941,
      "loss": 0.497,
      "step": 120
    },
    {
      "epoch": 0.48,
      "grad_norm": 1.7374740839004517,
      "learning_rate": 0.000420343137254902,
      "loss": 0.4811,
      "step": 130
    },
    {
      "epoch": 0.51,
      "grad_norm": 1.1296411752700806,
      "learning_rate": 0.0004142156862745098,
      "loss": 0.5066,
      "step": 140
    },
    {
      "epoch": 0.55,
      "grad_norm": 0.9138337969779968,
      "learning_rate": 0.00040808823529411766,
      "loss": 0.4548,
      "step": 150
    },
    {
      "epoch": 0.55,
      "eval_accuracy": 0.7647058823529411,
      "eval_f1_macro": 0.7017083911650301,
      "eval_f1_micro": 0.7647058823529411,
      "eval_loss": 0.49222531914711,
      "eval_runtime": 2.6971,
      "eval_samples_per_second": 403.396,
      "eval_steps_per_second": 12.606,
      "step": 150
    },
    {
      "epoch": 0.59,
      "grad_norm": 1.1084121465682983,
      "learning_rate": 0.0004019607843137255,
      "loss": 0.4683,
      "step": 160
    },
    {
      "epoch": 0.62,
      "grad_norm": 2.0122885704040527,
      "learning_rate": 0.0003958333333333333,
      "loss": 0.5217,
      "step": 170
    },
    {
      "epoch": 0.66,
      "grad_norm": 2.2070980072021484,
      "learning_rate": 0.0003897058823529412,
      "loss": 0.524,
      "step": 180
    },
    {
      "epoch": 0.7,
      "grad_norm": 1.27259361743927,
      "learning_rate": 0.00038357843137254904,
      "loss": 0.4955,
      "step": 190
    },
    {
      "epoch": 0.74,
      "grad_norm": 0.8282164931297302,
      "learning_rate": 0.0003774509803921569,
      "loss": 0.498,
      "step": 200
    },
    {
      "epoch": 0.74,
      "eval_accuracy": 0.7518382352941176,
      "eval_f1_macro": 0.6776331672629108,
      "eval_f1_micro": 0.7518382352941176,
      "eval_loss": 0.5056816935539246,
      "eval_runtime": 2.6992,
      "eval_samples_per_second": 403.083,
      "eval_steps_per_second": 12.596,
      "step": 200
    },
    {
      "epoch": 0.77,
      "grad_norm": 1.8028568029403687,
      "learning_rate": 0.0003713235294117647,
      "loss": 0.4926,
      "step": 210
    },
    {
      "epoch": 0.81,
      "grad_norm": 2.6962406635284424,
      "learning_rate": 0.00036519607843137254,
      "loss": 0.5134,
      "step": 220
    },
    {
      "epoch": 0.85,
      "grad_norm": 0.7592838406562805,
      "learning_rate": 0.0003590686274509804,
      "loss": 0.469,
      "step": 230
    },
    {
      "epoch": 0.88,
      "grad_norm": 0.637353777885437,
      "learning_rate": 0.00035294117647058826,
      "loss": 0.4381,
      "step": 240
    },
    {
      "epoch": 0.92,
      "grad_norm": 0.9144633412361145,
      "learning_rate": 0.0003468137254901961,
      "loss": 0.4844,
      "step": 250
    },
    {
      "epoch": 0.92,
      "eval_accuracy": 0.765625,
      "eval_f1_macro": 0.7266124240384777,
      "eval_f1_micro": 0.765625,
      "eval_loss": 0.49125435948371887,
      "eval_runtime": 2.6614,
      "eval_samples_per_second": 408.809,
      "eval_steps_per_second": 12.775,
      "step": 250
    }
  ],
  "logging_steps": 10,
  "max_steps": 816,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 50,
  "total_flos": 1221553815552000.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}