{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.5252100840336135,
"eval_steps": 500,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.026260504201680673,
"grad_norm": 2.606417417526245,
"learning_rate": 0.0038949579831932773,
"loss": 1.2269,
"step": 50
},
{
"epoch": 0.052521008403361345,
"grad_norm": 2.463599920272827,
"learning_rate": 0.0037899159663865546,
"loss": 1.1055,
"step": 100
},
{
"epoch": 0.07878151260504201,
"grad_norm": 3.1962060928344727,
"learning_rate": 0.003684873949579832,
"loss": 1.0706,
"step": 150
},
{
"epoch": 0.10504201680672269,
"grad_norm": 3.148374557495117,
"learning_rate": 0.0035798319327731095,
"loss": 1.1056,
"step": 200
},
{
"epoch": 0.13130252100840337,
"grad_norm": 2.5041284561157227,
"learning_rate": 0.0034747899159663868,
"loss": 1.0834,
"step": 250
},
{
"epoch": 0.15756302521008403,
"grad_norm": 2.5208585262298584,
"learning_rate": 0.003369747899159664,
"loss": 1.1076,
"step": 300
},
{
"epoch": 0.18382352941176472,
"grad_norm": 2.758544445037842,
"learning_rate": 0.0032647058823529413,
"loss": 1.0285,
"step": 350
},
{
"epoch": 0.21008403361344538,
"grad_norm": 3.854949951171875,
"learning_rate": 0.0031596638655462185,
"loss": 1.0204,
"step": 400
},
{
"epoch": 0.23634453781512604,
"grad_norm": 3.600606918334961,
"learning_rate": 0.0030546218487394958,
"loss": 1.0311,
"step": 450
},
{
"epoch": 0.26260504201680673,
"grad_norm": 3.2583634853363037,
"learning_rate": 0.0029495798319327735,
"loss": 0.9868,
"step": 500
},
{
"epoch": 0.28886554621848737,
"grad_norm": 3.2210769653320312,
"learning_rate": 0.0028445378151260507,
"loss": 1.0129,
"step": 550
},
{
"epoch": 0.31512605042016806,
"grad_norm": 3.139937162399292,
"learning_rate": 0.002739495798319328,
"loss": 1.0096,
"step": 600
},
{
"epoch": 0.34138655462184875,
"grad_norm": 2.95237398147583,
"learning_rate": 0.002634453781512605,
"loss": 0.9322,
"step": 650
},
{
"epoch": 0.36764705882352944,
"grad_norm": 3.39207124710083,
"learning_rate": 0.0025294117647058825,
"loss": 0.939,
"step": 700
},
{
"epoch": 0.3939075630252101,
"grad_norm": 3.5582635402679443,
"learning_rate": 0.0024243697478991597,
"loss": 1.0108,
"step": 750
},
{
"epoch": 0.42016806722689076,
"grad_norm": 3.2852275371551514,
"learning_rate": 0.0023193277310924374,
"loss": 0.884,
"step": 800
},
{
"epoch": 0.44642857142857145,
"grad_norm": 3.8662281036376953,
"learning_rate": 0.0022142857142857146,
"loss": 0.8889,
"step": 850
},
{
"epoch": 0.4726890756302521,
"grad_norm": 3.1788485050201416,
"learning_rate": 0.002109243697478992,
"loss": 0.8649,
"step": 900
},
{
"epoch": 0.4989495798319328,
"grad_norm": 3.658193588256836,
"learning_rate": 0.002004201680672269,
"loss": 0.821,
"step": 950
},
{
"epoch": 0.5252100840336135,
"grad_norm": 3.557441234588623,
"learning_rate": 0.0018991596638655462,
"loss": 0.8908,
"step": 1000
}
],
"logging_steps": 50,
"max_steps": 1904,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 200,
"total_flos": 0.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}