trainer_state.json — Training in progress, step 1100, checkpoint
{
  "best_metric": 1.0934962034225464,
  "best_model_checkpoint": "./outputs/checkpoint-1100",
  "epoch": 1.4666666666666668,
  "eval_steps": 100,
  "global_step": 1100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.13,
      "learning_rate": 0.0002,
      "loss": 1.2967,
      "step": 100
    },
    {
      "epoch": 0.13,
      "eval_loss": 1.1817998886108398,
      "eval_runtime": 187.6804,
      "eval_samples_per_second": 10.283,
      "eval_steps_per_second": 1.289,
      "step": 100
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.0002,
      "loss": 1.0115,
      "step": 200
    },
    {
      "epoch": 0.27,
      "eval_loss": 1.1461560726165771,
      "eval_runtime": 186.2762,
      "eval_samples_per_second": 10.361,
      "eval_steps_per_second": 1.299,
      "step": 200
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.0002,
      "loss": 0.9918,
      "step": 300
    },
    {
      "epoch": 0.4,
      "eval_loss": 1.1315025091171265,
      "eval_runtime": 186.4761,
      "eval_samples_per_second": 10.35,
      "eval_steps_per_second": 1.298,
      "step": 300
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.0002,
      "loss": 0.9798,
      "step": 400
    },
    {
      "epoch": 0.53,
      "eval_loss": 1.1221688985824585,
      "eval_runtime": 186.7425,
      "eval_samples_per_second": 10.335,
      "eval_steps_per_second": 1.296,
      "step": 400
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.0002,
      "loss": 0.9657,
      "step": 500
    },
    {
      "epoch": 0.67,
      "eval_loss": 1.1161261796951294,
      "eval_runtime": 186.7488,
      "eval_samples_per_second": 10.335,
      "eval_steps_per_second": 1.296,
      "step": 500
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.0002,
      "loss": 0.9675,
      "step": 600
    },
    {
      "epoch": 0.8,
      "eval_loss": 1.111202597618103,
      "eval_runtime": 186.3885,
      "eval_samples_per_second": 10.355,
      "eval_steps_per_second": 1.298,
      "step": 600
    },
    {
      "epoch": 0.93,
      "learning_rate": 0.0002,
      "loss": 0.9638,
      "step": 700
    },
    {
      "epoch": 0.93,
      "eval_loss": 1.1068408489227295,
      "eval_runtime": 186.3328,
      "eval_samples_per_second": 10.358,
      "eval_steps_per_second": 1.299,
      "step": 700
    },
    {
      "epoch": 1.07,
      "learning_rate": 0.0002,
      "loss": 0.9531,
      "step": 800
    },
    {
      "epoch": 1.07,
      "eval_loss": 1.102936029434204,
      "eval_runtime": 186.6863,
      "eval_samples_per_second": 10.338,
      "eval_steps_per_second": 1.296,
      "step": 800
    },
    {
      "epoch": 1.2,
      "learning_rate": 0.0002,
      "loss": 0.9517,
      "step": 900
    },
    {
      "epoch": 1.2,
      "eval_loss": 1.0996404886245728,
      "eval_runtime": 186.5382,
      "eval_samples_per_second": 10.346,
      "eval_steps_per_second": 1.297,
      "step": 900
    },
    {
      "epoch": 1.33,
      "learning_rate": 0.0002,
      "loss": 0.9489,
      "step": 1000
    },
    {
      "epoch": 1.33,
      "eval_loss": 1.098088026046753,
      "eval_runtime": 186.2211,
      "eval_samples_per_second": 10.364,
      "eval_steps_per_second": 1.3,
      "step": 1000
    },
    {
      "epoch": 1.47,
      "learning_rate": 0.0002,
      "loss": 0.9445,
      "step": 1100
    },
    {
      "epoch": 1.47,
      "eval_loss": 1.0934962034225464,
      "eval_runtime": 186.2589,
      "eval_samples_per_second": 10.362,
      "eval_steps_per_second": 1.299,
      "step": 1100
    }
  ],
  "logging_steps": 100,
  "max_steps": 2250,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "total_flos": 1.555051176866857e+17,
  "trial_name": null,
  "trial_params": null
}