lora-out/checkpoint-20/trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9024390243902438,
"eval_steps": 3,
"global_step": 20,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0975609756097561,
"grad_norm": 0.13416488468647003,
"learning_rate": 2e-05,
"loss": 1.1198,
"step": 1
},
{
"epoch": 0.0975609756097561,
"eval_loss": 1.0946272611618042,
"eval_runtime": 105.1442,
"eval_samples_per_second": 0.951,
"eval_steps_per_second": 0.476,
"step": 1
},
{
"epoch": 0.1951219512195122,
"grad_norm": 0.14614562690258026,
"learning_rate": 4e-05,
"loss": 1.1433,
"step": 2
},
{
"epoch": 0.2926829268292683,
"grad_norm": 0.12903447449207306,
"learning_rate": 6e-05,
"loss": 1.0338,
"step": 3
},
{
"epoch": 0.2926829268292683,
"eval_loss": 1.0899513959884644,
"eval_runtime": 105.534,
"eval_samples_per_second": 0.948,
"eval_steps_per_second": 0.474,
"step": 3
},
{
"epoch": 0.3902439024390244,
"grad_norm": 0.12645691633224487,
"learning_rate": 8e-05,
"loss": 1.0196,
"step": 4
},
{
"epoch": 0.4878048780487805,
"grad_norm": 0.15484200417995453,
"learning_rate": 0.0001,
"loss": 1.0868,
"step": 5
},
{
"epoch": 0.5853658536585366,
"grad_norm": 0.13987194001674652,
"learning_rate": 0.00012,
"loss": 1.0658,
"step": 6
},
{
"epoch": 0.5853658536585366,
"eval_loss": 1.0339258909225464,
"eval_runtime": 105.5831,
"eval_samples_per_second": 0.947,
"eval_steps_per_second": 0.474,
"step": 6
},
{
"epoch": 0.6829268292682927,
"grad_norm": 0.10199978202581406,
"learning_rate": 0.00014,
"loss": 1.0156,
"step": 7
},
{
"epoch": 0.7804878048780488,
"grad_norm": 0.12288854271173477,
"learning_rate": 0.00016,
"loss": 1.0503,
"step": 8
},
{
"epoch": 0.8780487804878049,
"grad_norm": 0.12781274318695068,
"learning_rate": 0.00018,
"loss": 1.0521,
"step": 9
},
{
"epoch": 0.8780487804878049,
"eval_loss": 0.9954264163970947,
"eval_runtime": 105.5993,
"eval_samples_per_second": 0.947,
"eval_steps_per_second": 0.473,
"step": 9
},
{
"epoch": 0.975609756097561,
"grad_norm": 0.11168772727251053,
"learning_rate": 0.0002,
"loss": 1.0098,
"step": 10
},
{
"epoch": 1.024390243902439,
"grad_norm": 1.0994621515274048,
"learning_rate": 0.00019945218953682734,
"loss": 1.0201,
"step": 11
},
{
"epoch": 1.1219512195121952,
"grad_norm": 0.13263079524040222,
"learning_rate": 0.00019781476007338058,
"loss": 1.0211,
"step": 12
},
{
"epoch": 1.1219512195121952,
"eval_loss": 0.980489194393158,
"eval_runtime": 105.5984,
"eval_samples_per_second": 0.947,
"eval_steps_per_second": 0.473,
"step": 12
},
{
"epoch": 1.2195121951219512,
"grad_norm": 0.10054657608270645,
"learning_rate": 0.00019510565162951537,
"loss": 0.9632,
"step": 13
},
{
"epoch": 1.3170731707317074,
"grad_norm": 0.09211089462041855,
"learning_rate": 0.0001913545457642601,
"loss": 0.9381,
"step": 14
},
{
"epoch": 1.4146341463414633,
"grad_norm": 0.0957920104265213,
"learning_rate": 0.00018660254037844388,
"loss": 0.9945,
"step": 15
},
{
"epoch": 1.4146341463414633,
"eval_loss": 0.9690712094306946,
"eval_runtime": 105.5491,
"eval_samples_per_second": 0.947,
"eval_steps_per_second": 0.474,
"step": 15
},
{
"epoch": 1.5121951219512195,
"grad_norm": 0.11032579094171524,
"learning_rate": 0.00018090169943749476,
"loss": 0.9766,
"step": 16
},
{
"epoch": 1.6097560975609757,
"grad_norm": 0.08608294278383255,
"learning_rate": 0.00017431448254773944,
"loss": 0.9178,
"step": 17
},
{
"epoch": 1.7073170731707317,
"grad_norm": 0.08259813487529755,
"learning_rate": 0.00016691306063588583,
"loss": 0.967,
"step": 18
},
{
"epoch": 1.7073170731707317,
"eval_loss": 0.9576988220214844,
"eval_runtime": 105.558,
"eval_samples_per_second": 0.947,
"eval_steps_per_second": 0.474,
"step": 18
},
{
"epoch": 1.8048780487804879,
"grad_norm": 0.08044092357158661,
"learning_rate": 0.00015877852522924732,
"loss": 1.0114,
"step": 19
},
{
"epoch": 1.9024390243902438,
"grad_norm": 0.08861377835273743,
"learning_rate": 0.00015000000000000001,
"loss": 1.0564,
"step": 20
}
],
"logging_steps": 1,
"max_steps": 40,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 10,
"total_flos": 2.965391547354317e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
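
The "log_history" array above interleaves two kinds of records: per-step training entries (with "loss", "learning_rate", and "grad_norm", logged every step) and periodic evaluation entries (with "eval_loss" and runtime stats, logged every 3 steps per "eval_steps"). The logged learning rates are consistent with a 10-step linear warmup to 2e-4 followed by cosine decay over the remaining 30 of the 40 max steps, though the schedule itself is not recorded in this file. Below is a minimal Python sketch for separating and printing the two record types; it assumes the file sits at the path shown above, so adjust the path for your own checkout.

```python
import json

# Load the checkpoint's trainer state (path is an assumption; change as needed).
with open("lora-out/checkpoint-20/trainer_state.json") as f:
    state = json.load(f)

# Training records carry a "loss" key; evaluation records carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print("training steps:")
for e in train_logs:
    print(f"  step {e['step']:>2}  lr={e['learning_rate']:.6f}  loss={e['loss']:.4f}")

print("evaluations:")
for e in eval_logs:
    print(f"  step {e['step']:>2}  eval_loss={e['eval_loss']:.4f}")
```

The same split works for feeding the two series into a plotting library to compare the training-loss and eval-loss curves across checkpoints.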