{
"best_metric": 1.2972379922866821,
"best_model_checkpoint": "./mistral-7B-Tinybook-epochs-5-lr-0002/checkpoint-6",
"epoch": 6.0,
"eval_steps": 1,
"global_step": 6,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.0,
"grad_norm": 6.5535804804898685,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.4261,
"step": 1
},
{
"epoch": 1.0,
"eval_loss": 1.5357962846755981,
"eval_runtime": 5.1379,
"eval_samples_per_second": 0.195,
"eval_steps_per_second": 0.195,
"step": 1
},
{
"epoch": 2.0,
"grad_norm": 6.556183599930382,
"learning_rate": 4.000000000000001e-06,
"loss": 1.4254,
"step": 2
},
{
"epoch": 2.0,
"eval_loss": 1.5169316530227661,
"eval_runtime": 5.1377,
"eval_samples_per_second": 0.195,
"eval_steps_per_second": 0.195,
"step": 2
},
{
"epoch": 3.0,
"grad_norm": 5.439154362306879,
"learning_rate": 6e-06,
"loss": 1.3801,
"step": 3
},
{
"epoch": 3.0,
"eval_loss": 1.4326227903366089,
"eval_runtime": 5.1426,
"eval_samples_per_second": 0.194,
"eval_steps_per_second": 0.194,
"step": 3
},
{
"epoch": 4.0,
"grad_norm": 10.287730670257169,
"learning_rate": 8.000000000000001e-06,
"loss": 1.1051,
"step": 4
},
{
"epoch": 4.0,
"eval_loss": 1.5058702230453491,
"eval_runtime": 5.1475,
"eval_samples_per_second": 0.194,
"eval_steps_per_second": 0.194,
"step": 4
},
{
"epoch": 5.0,
"grad_norm": 102.47095148595771,
"learning_rate": 1e-05,
"loss": 0.9669,
"step": 5
},
{
"epoch": 5.0,
"eval_loss": 1.3066877126693726,
"eval_runtime": 5.1369,
"eval_samples_per_second": 0.195,
"eval_steps_per_second": 0.195,
"step": 5
},
{
"epoch": 6.0,
"grad_norm": 16.035718150173285,
"learning_rate": 1.2e-05,
"loss": 0.7256,
"step": 6
},
{
"epoch": 6.0,
"eval_loss": 1.2972379922866821,
"eval_runtime": 5.143,
"eval_samples_per_second": 0.194,
"eval_steps_per_second": 0.194,
"step": 6
}
],
"logging_steps": 1,
"max_steps": 6,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 1,
"total_flos": 1.3420921537481933e+17,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}