gemma-lima / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 9.090909090909092,
"eval_steps": 500,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.18,
"grad_norm": 3220.643835599575,
"learning_rate": 4.000000000000001e-06,
"loss": 14.9516,
"step": 1
},
{
"epoch": 0.91,
"grad_norm": 411.2907564885413,
"learning_rate": 2e-05,
"loss": 10.4256,
"step": 5
},
{
"epoch": 0.91,
"eval_loss": 47.00014877319336,
"eval_runtime": 2.5065,
"eval_samples_per_second": 4.389,
"eval_steps_per_second": 0.399,
"step": 5
},
{
"epoch": 1.82,
"grad_norm": 87.6083201371645,
"learning_rate": 1.9396926207859085e-05,
"loss": 6.0419,
"step": 10
},
{
"epoch": 2.0,
"eval_loss": 43.96914291381836,
"eval_runtime": 2.6958,
"eval_samples_per_second": 4.08,
"eval_steps_per_second": 0.371,
"step": 11
},
{
"epoch": 2.73,
"grad_norm": 328.84229322130653,
"learning_rate": 1.766044443118978e-05,
"loss": 5.2838,
"step": 15
},
{
"epoch": 2.91,
"eval_loss": 40.78567123413086,
"eval_runtime": 2.4992,
"eval_samples_per_second": 4.401,
"eval_steps_per_second": 0.4,
"step": 16
},
{
"epoch": 3.64,
"grad_norm": 138.82218027827275,
"learning_rate": 1.5000000000000002e-05,
"loss": 4.8705,
"step": 20
},
{
"epoch": 4.0,
"eval_loss": 33.928184509277344,
"eval_runtime": 2.6726,
"eval_samples_per_second": 4.116,
"eval_steps_per_second": 0.374,
"step": 22
},
{
"epoch": 4.55,
"grad_norm": 77.94936474819474,
"learning_rate": 1.1736481776669307e-05,
"loss": 4.196,
"step": 25
},
{
"epoch": 4.91,
"eval_loss": 17.533571243286133,
"eval_runtime": 2.5096,
"eval_samples_per_second": 4.383,
"eval_steps_per_second": 0.398,
"step": 27
},
{
"epoch": 5.45,
"grad_norm": 64.81716616301149,
"learning_rate": 8.263518223330698e-06,
"loss": 3.0724,
"step": 30
},
{
"epoch": 6.0,
"eval_loss": 2.7088449001312256,
"eval_runtime": 2.6734,
"eval_samples_per_second": 4.115,
"eval_steps_per_second": 0.374,
"step": 33
},
{
"epoch": 6.36,
"grad_norm": 14.389103750009635,
"learning_rate": 5.000000000000003e-06,
"loss": 2.1966,
"step": 35
},
{
"epoch": 6.91,
"eval_loss": 2.7433879375457764,
"eval_runtime": 2.4975,
"eval_samples_per_second": 4.404,
"eval_steps_per_second": 0.4,
"step": 38
},
{
"epoch": 7.27,
"grad_norm": 18.34112469972439,
"learning_rate": 2.339555568810221e-06,
"loss": 2.1116,
"step": 40
},
{
"epoch": 8.0,
"eval_loss": 2.7265045642852783,
"eval_runtime": 2.6806,
"eval_samples_per_second": 4.104,
"eval_steps_per_second": 0.373,
"step": 44
},
{
"epoch": 8.18,
"grad_norm": 8.674981290289173,
"learning_rate": 6.030737921409169e-07,
"loss": 2.0641,
"step": 45
},
{
"epoch": 8.91,
"eval_loss": 2.716779947280884,
"eval_runtime": 2.4827,
"eval_samples_per_second": 4.431,
"eval_steps_per_second": 0.403,
"step": 49
},
{
"epoch": 9.09,
"grad_norm": 9.524348161964365,
"learning_rate": 0.0,
"loss": 2.0467,
"step": 50
},
{
"epoch": 9.09,
"eval_loss": 2.725921869277954,
"eval_runtime": 2.669,
"eval_samples_per_second": 4.121,
"eval_steps_per_second": 0.375,
"step": 50
},
{
"epoch": 9.09,
"step": 50,
"total_flos": 6816515751936.0,
"train_loss": 4.321450042724609,
"train_runtime": 737.8878,
"train_samples_per_second": 8.863,
"train_steps_per_second": 0.068
}
],
"logging_steps": 5,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 1000000000,
"total_flos": 6816515751936.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
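
A minimal sketch, not part of the saved file, of how the `log_history` above could be read back and plotted. It assumes standard Python with `matplotlib` installed; the input path and output filename are illustrative.

```python
import json
import matplotlib.pyplot as plt

# Load the state dictionary written by the Hugging Face Trainer.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
# The final summary entry (with "train_loss") is skipped by both filters.
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

plt.plot(*zip(*train), marker="o", label="train loss")
plt.plot(*zip(*evals), marker="s", label="eval loss")
plt.xlabel("step")
plt.ylabel("loss")
plt.legend()
plt.savefig("loss_curves.png")  # illustrative output name
```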