{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.7019704433497537,
"eval_steps": 500,
"global_step": 285,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 1.9999700625010444e-05,
"loss": 1.5474,
"step": 1
},
{
"epoch": 0.01,
"learning_rate": 1.999251652147735e-05,
"loss": 1.182,
"step": 5
},
{
"epoch": 0.02,
"learning_rate": 1.997007728639956e-05,
"loss": 1.0465,
"step": 10
},
{
"epoch": 0.04,
"learning_rate": 1.9932715879473385e-05,
"loss": 1.0136,
"step": 15
},
{
"epoch": 0.05,
"learning_rate": 1.9880488219356086e-05,
"loss": 1.0103,
"step": 20
},
{
"epoch": 0.06,
"learning_rate": 1.981347247496222e-05,
"loss": 0.9887,
"step": 25
},
{
"epoch": 0.07,
"learning_rate": 1.973176894846855e-05,
"loss": 0.9842,
"step": 30
},
{
"epoch": 0.09,
"learning_rate": 1.963549992519223e-05,
"loss": 0.9791,
"step": 35
},
{
"epoch": 0.1,
"learning_rate": 1.9524809490566878e-05,
"loss": 0.978,
"step": 40
},
{
"epoch": 0.11,
"learning_rate": 1.939986331449053e-05,
"loss": 0.9758,
"step": 45
},
{
"epoch": 0.12,
"learning_rate": 1.926084840336821e-05,
"loss": 0.9684,
"step": 50
},
{
"epoch": 0.14,
"learning_rate": 1.910797282022027e-05,
"loss": 0.9671,
"step": 55
},
{
"epoch": 0.15,
"learning_rate": 1.894146537327533e-05,
"loss": 0.969,
"step": 60
},
{
"epoch": 0.16,
"learning_rate": 1.8761575273514005e-05,
"loss": 0.9561,
"step": 65
},
{
"epoch": 0.17,
"learning_rate": 1.8568571761675893e-05,
"loss": 0.9526,
"step": 70
},
{
"epoch": 0.18,
"learning_rate": 1.8362743705288127e-05,
"loss": 0.9519,
"step": 75
},
{
"epoch": 0.2,
"learning_rate": 1.814439916631857e-05,
"loss": 0.9571,
"step": 80
},
{
"epoch": 0.21,
"learning_rate": 1.791386494010081e-05,
"loss": 0.9549,
"step": 85
},
{
"epoch": 0.22,
"learning_rate": 1.7671486066220965e-05,
"loss": 0.9615,
"step": 90
},
{
"epoch": 0.23,
"learning_rate": 1.7417625312098453e-05,
"loss": 0.9499,
"step": 95
},
{
"epoch": 0.25,
"learning_rate": 1.7152662630033506e-05,
"loss": 0.9409,
"step": 100
},
{
"epoch": 0.26,
"learning_rate": 1.6876994588534234e-05,
"loss": 0.9519,
"step": 105
},
{
"epoch": 0.27,
"learning_rate": 1.659103377877423e-05,
"loss": 0.9671,
"step": 110
},
{
"epoch": 0.28,
"learning_rate": 1.629520819706912e-05,
"loss": 0.9508,
"step": 115
},
{
"epoch": 0.3,
"learning_rate": 1.598996060429634e-05,
"loss": 0.9413,
"step": 120
},
{
"epoch": 0.31,
"learning_rate": 1.56757478632168e-05,
"loss": 0.9463,
"step": 125
},
{
"epoch": 0.32,
"learning_rate": 1.5353040254690396e-05,
"loss": 0.9459,
"step": 130
},
{
"epoch": 0.33,
"learning_rate": 1.5022320773808612e-05,
"loss": 0.9494,
"step": 135
},
{
"epoch": 0.34,
"learning_rate": 1.4684084406997903e-05,
"loss": 0.9587,
"step": 140
},
{
"epoch": 0.36,
"learning_rate": 1.4338837391175582e-05,
"loss": 0.9391,
"step": 145
},
{
"epoch": 0.37,
"learning_rate": 1.3987096456067236e-05,
"loss": 0.9497,
"step": 150
},
{
"epoch": 0.38,
"learning_rate": 1.3629388050819547e-05,
"loss": 0.9409,
"step": 155
},
{
"epoch": 0.39,
"learning_rate": 1.3266247556066122e-05,
"loss": 0.9424,
"step": 160
},
{
"epoch": 0.41,
"learning_rate": 1.2898218482625606e-05,
"loss": 0.9341,
"step": 165
},
{
"epoch": 0.42,
"learning_rate": 1.252585165803135e-05,
"loss": 0.9365,
"step": 170
},
{
"epoch": 0.43,
"learning_rate": 1.2149704402110243e-05,
"loss": 0.9427,
"step": 175
},
{
"epoch": 0.44,
"learning_rate": 1.1770339692844484e-05,
"loss": 0.9374,
"step": 180
},
{
"epoch": 0.46,
"learning_rate": 1.1388325323764889e-05,
"loss": 0.934,
"step": 185
},
{
"epoch": 0.47,
"learning_rate": 1.1004233054136726e-05,
"loss": 0.9445,
"step": 190
},
{
"epoch": 0.48,
"learning_rate": 1.0618637753210086e-05,
"loss": 0.946,
"step": 195
},
{
"epoch": 0.49,
"learning_rate": 1.0232116539815558e-05,
"loss": 0.942,
"step": 200
},
{
"epoch": 0.5,
"learning_rate": 9.845247918592937e-06,
"loss": 0.937,
"step": 205
},
{
"epoch": 0.52,
"learning_rate": 9.458610914145826e-06,
"loss": 0.9355,
"step": 210
},
{
"epoch": 0.53,
"learning_rate": 9.072784204417995e-06,
"loss": 0.9354,
"step": 215
},
{
"epoch": 0.54,
"learning_rate": 8.688345254588579e-06,
"loss": 0.9381,
"step": 220
},
{
"epoch": 0.55,
"learning_rate": 8.305869452782446e-06,
"loss": 0.9495,
"step": 225
},
{
"epoch": 0.57,
"learning_rate": 7.92592924888925e-06,
"loss": 0.9363,
"step": 230
},
{
"epoch": 0.58,
"learning_rate": 7.549093297780133e-06,
"loss": 0.9397,
"step": 235
},
{
"epoch": 0.59,
"learning_rate": 7.175925608204428e-06,
"loss": 0.936,
"step": 240
},
{
"epoch": 0.6,
"learning_rate": 6.806984698640202e-06,
"loss": 0.9385,
"step": 245
},
{
"epoch": 0.62,
"learning_rate": 6.442822761362015e-06,
"loss": 0.941,
"step": 250
},
{
"epoch": 0.63,
"learning_rate": 6.083984835977154e-06,
"loss": 0.9424,
"step": 255
},
{
"epoch": 0.64,
"learning_rate": 5.731007993667155e-06,
"loss": 0.9386,
"step": 260
},
{
"epoch": 0.65,
"learning_rate": 5.38442053335571e-06,
"loss": 0.925,
"step": 265
},
{
"epoch": 0.67,
"learning_rate": 5.044741191005908e-06,
"loss": 0.9361,
"step": 270
},
{
"epoch": 0.68,
"learning_rate": 4.712478363230361e-06,
"loss": 0.9347,
"step": 275
},
{
"epoch": 0.69,
"learning_rate": 4.388129346376177e-06,
"loss": 0.937,
"step": 280
},
{
"epoch": 0.7,
"learning_rate": 4.07217959222365e-06,
"loss": 0.9338,
"step": 285
},
{
"epoch": 0.7,
"eval_loss": 0.9377741813659668,
"eval_runtime": 655.6233,
"eval_samples_per_second": 35.249,
"eval_steps_per_second": 0.552,
"step": 285
},
{
"epoch": 0.7,
"step": 285,
"total_flos": 477725856890880.0,
"train_loss": 0.9589325068289773,
"train_runtime": 23886.1895,
"train_samples_per_second": 8.702,
"train_steps_per_second": 0.017
}
],
"logging_steps": 5,
"max_steps": 406,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"total_flos": 477725856890880.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}