{
"best_metric": 5.530978679656982,
"best_model_checkpoint": "autotrain-r8zdq-nww6o/checkpoint-64",
"epoch": 1.0,
"eval_steps": 500,
"global_step": 64,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.046875,
"grad_norm": 15.872576713562012,
"learning_rate": 2.5e-06,
"loss": 4.0459,
"step": 3
},
{
"epoch": 0.09375,
"grad_norm": 14.468564987182617,
"learning_rate": 1e-05,
"loss": 3.8906,
"step": 6
},
{
"epoch": 0.140625,
"grad_norm": 12.806556701660156,
"learning_rate": 1.75e-05,
"loss": 3.1036,
"step": 9
},
{
"epoch": 0.1875,
"grad_norm": 7.326994895935059,
"learning_rate": 2.5e-05,
"loss": 2.1466,
"step": 12
},
{
"epoch": 0.234375,
"grad_norm": 2.5583295822143555,
"learning_rate": 3.2500000000000004e-05,
"loss": 1.7102,
"step": 15
},
{
"epoch": 0.28125,
"grad_norm": 3.5812530517578125,
"learning_rate": 4e-05,
"loss": 1.5867,
"step": 18
},
{
"epoch": 0.328125,
"grad_norm": 1.4613455533981323,
"learning_rate": 4.75e-05,
"loss": 1.3785,
"step": 21
},
{
"epoch": 0.375,
"grad_norm": 3.1722567081451416,
"learning_rate": 4.941860465116279e-05,
"loss": 1.4526,
"step": 24
},
{
"epoch": 0.421875,
"grad_norm": 3.3172366619110107,
"learning_rate": 4.854651162790698e-05,
"loss": 1.3307,
"step": 27
},
{
"epoch": 0.46875,
"grad_norm": 2.2563154697418213,
"learning_rate": 4.7674418604651164e-05,
"loss": 1.4428,
"step": 30
},
{
"epoch": 0.515625,
"grad_norm": 2.1187117099761963,
"learning_rate": 4.680232558139535e-05,
"loss": 1.4933,
"step": 33
},
{
"epoch": 0.5625,
"grad_norm": 3.107919931411743,
"learning_rate": 4.593023255813954e-05,
"loss": 1.3909,
"step": 36
},
{
"epoch": 0.609375,
"grad_norm": 2.8481452465057373,
"learning_rate": 4.505813953488372e-05,
"loss": 1.4789,
"step": 39
},
{
"epoch": 0.65625,
"grad_norm": 1.2975138425827026,
"learning_rate": 4.418604651162791e-05,
"loss": 1.3402,
"step": 42
},
{
"epoch": 0.703125,
"grad_norm": 2.2250919342041016,
"learning_rate": 4.3313953488372096e-05,
"loss": 1.5143,
"step": 45
},
{
"epoch": 0.75,
"grad_norm": 1.7821475267410278,
"learning_rate": 4.2441860465116276e-05,
"loss": 1.2815,
"step": 48
},
{
"epoch": 0.796875,
"grad_norm": 1.3603293895721436,
"learning_rate": 4.156976744186047e-05,
"loss": 1.2912,
"step": 51
},
{
"epoch": 0.84375,
"grad_norm": 1.1273295879364014,
"learning_rate": 4.0697674418604655e-05,
"loss": 1.3891,
"step": 54
},
{
"epoch": 0.890625,
"grad_norm": 1.2710983753204346,
"learning_rate": 3.9825581395348835e-05,
"loss": 1.4103,
"step": 57
},
{
"epoch": 0.9375,
"grad_norm": 1.6584129333496094,
"learning_rate": 3.895348837209303e-05,
"loss": 1.4676,
"step": 60
},
{
"epoch": 0.984375,
"grad_norm": 1.8036679029464722,
"learning_rate": 3.8081395348837215e-05,
"loss": 1.3896,
"step": 63
},
{
"epoch": 1.0,
"eval_accuracy": 0.0026870229007633587,
"eval_f1": 0.0,
"eval_loss": 5.530978679656982,
"eval_precision": 0.0,
"eval_recall": 0.0,
"eval_runtime": 0.3581,
"eval_samples_per_second": 388.142,
"eval_steps_per_second": 25.132,
"step": 64
}
],
"logging_steps": 3,
"max_steps": 192,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.01
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 33260533691136.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}