llama_code_expert_v01 / trainer_state.json
{
"best_metric": 0.8162932395935059,
"best_model_checkpoint": "out/checkpoint-10",
"epoch": 0.015841584158415842,
"eval_steps": 1,
"global_step": 10,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 0.9330146908760071,
"learning_rate": 0.0002,
"loss": 1.7298,
"step": 1
},
{
"epoch": 0.0,
"eval_loss": 1.4902005195617676,
"eval_runtime": 735.1257,
"eval_samples_per_second": 0.382,
"eval_steps_per_second": 0.382,
"step": 1
},
{
"epoch": 0.0,
"grad_norm": 1.1639673709869385,
"learning_rate": 0.0002,
"loss": 1.4658,
"step": 2
},
{
"epoch": 0.0,
"eval_loss": 1.2683874368667603,
"eval_runtime": 735.139,
"eval_samples_per_second": 0.382,
"eval_steps_per_second": 0.382,
"step": 2
},
{
"epoch": 0.0,
"grad_norm": 1.126691222190857,
"learning_rate": 0.0002,
"loss": 1.1932,
"step": 3
},
{
"epoch": 0.0,
"eval_loss": 1.0989333391189575,
"eval_runtime": 735.2696,
"eval_samples_per_second": 0.382,
"eval_steps_per_second": 0.382,
"step": 3
},
{
"epoch": 0.01,
"grad_norm": 0.9368861317634583,
"learning_rate": 0.0002,
"loss": 1.1229,
"step": 4
},
{
"epoch": 0.01,
"eval_loss": 0.9994365572929382,
"eval_runtime": 735.2205,
"eval_samples_per_second": 0.382,
"eval_steps_per_second": 0.382,
"step": 4
},
{
"epoch": 0.01,
"grad_norm": 0.5979511141777039,
"learning_rate": 0.0002,
"loss": 1.0212,
"step": 5
},
{
"epoch": 0.01,
"eval_loss": 0.947055995464325,
"eval_runtime": 735.2173,
"eval_samples_per_second": 0.382,
"eval_steps_per_second": 0.382,
"step": 5
},
{
"epoch": 0.01,
"grad_norm": 0.3878212571144104,
"learning_rate": 0.0002,
"loss": 0.9752,
"step": 6
},
{
"epoch": 0.01,
"eval_loss": 0.9192918539047241,
"eval_runtime": 735.2312,
"eval_samples_per_second": 0.382,
"eval_steps_per_second": 0.382,
"step": 6
},
{
"epoch": 0.01,
"grad_norm": 0.3154258728027344,
"learning_rate": 0.0002,
"loss": 0.8726,
"step": 7
},
{
"epoch": 0.01,
"eval_loss": 0.8976112604141235,
"eval_runtime": 735.2259,
"eval_samples_per_second": 0.382,
"eval_steps_per_second": 0.382,
"step": 7
},
{
"epoch": 0.01,
"grad_norm": 0.34492114186286926,
"learning_rate": 0.0002,
"loss": 0.8995,
"step": 8
},
{
"epoch": 0.01,
"eval_loss": 0.8734176754951477,
"eval_runtime": 735.259,
"eval_samples_per_second": 0.382,
"eval_steps_per_second": 0.382,
"step": 8
},
{
"epoch": 0.01,
"grad_norm": 0.4008582830429077,
"learning_rate": 0.0002,
"loss": 0.9564,
"step": 9
},
{
"epoch": 0.01,
"eval_loss": 0.8452931046485901,
"eval_runtime": 735.3081,
"eval_samples_per_second": 0.382,
"eval_steps_per_second": 0.382,
"step": 9
},
{
"epoch": 0.02,
"grad_norm": 0.4416767954826355,
"learning_rate": 0.0002,
"loss": 0.817,
"step": 10
},
{
"epoch": 0.02,
"eval_loss": 0.8162932395935059,
"eval_runtime": 735.2796,
"eval_samples_per_second": 0.382,
"eval_steps_per_second": 0.382,
"step": 10
}
],
"logging_steps": 1,
"max_steps": 10,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 1,
"total_flos": 1628733135912960.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
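
For reference, a minimal sketch of how a file like this can be read back, assuming it is saved locally as trainer_state.json and follows the standard Hugging Face Trainer layout shown above (entries in "log_history" carry "loss" for training steps and "eval_loss" for evaluation steps):

```python
# Minimal sketch: parse trainer_state.json and print the loss curves.
# Assumptions: the JSON above is saved as "trainer_state.json" in the
# current directory and uses the field names shown there.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Split log_history into training-loss and eval-loss entries.
train_losses = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_losses = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(f"best eval_loss: {state['best_metric']} at {state['best_model_checkpoint']}")
for step, loss in train_losses:
    print(f"step {step:>3}  train loss {loss:.4f}")
for step, loss in eval_losses:
    print(f"step {step:>3}  eval  loss {loss:.4f}")
```

With the values above, this would show the training loss falling from about 1.73 at step 1 to about 0.82 at step 10, and the evaluation loss from about 1.49 to the best metric of about 0.8163 recorded at checkpoint-10.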