NLP_HW_0 / trainer_state.json
Upload fine-tuned Llama model for order analysis
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 560,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03571428571428571,
      "grad_norm": 1.8447134494781494,
      "learning_rate": 3.571428571428572e-05,
      "loss": 2.5344,
      "step": 20
    },
    {
      "epoch": 0.07142857142857142,
      "grad_norm": 1.3903896808624268,
      "learning_rate": 7.142857142857143e-05,
      "loss": 1.8578,
      "step": 40
    },
    {
      "epoch": 0.10714285714285714,
      "grad_norm": 0.9996151328086853,
      "learning_rate": 9.920634920634922e-05,
      "loss": 1.161,
      "step": 60
    },
    {
      "epoch": 0.14285714285714285,
      "grad_norm": 0.7677631974220276,
      "learning_rate": 9.523809523809524e-05,
      "loss": 0.9116,
      "step": 80
    },
    {
      "epoch": 0.17857142857142858,
      "grad_norm": 1.040696144104004,
      "learning_rate": 9.126984126984128e-05,
      "loss": 0.8243,
      "step": 100
    },
    {
      "epoch": 0.21428571428571427,
      "grad_norm": 0.970762312412262,
      "learning_rate": 8.730158730158731e-05,
      "loss": 0.7555,
      "step": 120
    },
    {
      "epoch": 0.25,
      "grad_norm": 1.0803948640823364,
      "learning_rate": 8.333333333333334e-05,
      "loss": 0.6679,
      "step": 140
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 1.0338200330734253,
      "learning_rate": 7.936507936507937e-05,
      "loss": 0.6522,
      "step": 160
    },
    {
      "epoch": 0.32142857142857145,
      "grad_norm": 0.8714532256126404,
      "learning_rate": 7.53968253968254e-05,
      "loss": 0.6209,
      "step": 180
    },
    {
      "epoch": 0.35714285714285715,
      "grad_norm": 0.9677976965904236,
      "learning_rate": 7.142857142857143e-05,
      "loss": 0.6242,
      "step": 200
    },
    {
      "epoch": 0.39285714285714285,
      "grad_norm": 0.962899386882782,
      "learning_rate": 6.746031746031747e-05,
      "loss": 0.5844,
      "step": 220
    },
    {
      "epoch": 0.42857142857142855,
      "grad_norm": 1.0176018476486206,
      "learning_rate": 6.349206349206349e-05,
      "loss": 0.5915,
      "step": 240
    },
    {
      "epoch": 0.4642857142857143,
      "grad_norm": 0.9592286348342896,
      "learning_rate": 5.9523809523809524e-05,
      "loss": 0.5948,
      "step": 260
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.9406882524490356,
      "learning_rate": 5.555555555555556e-05,
      "loss": 0.5557,
      "step": 280
    },
    {
      "epoch": 0.5357142857142857,
      "grad_norm": 1.0566672086715698,
      "learning_rate": 5.158730158730159e-05,
      "loss": 0.5596,
      "step": 300
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 1.0166610479354858,
      "learning_rate": 4.761904761904762e-05,
      "loss": 0.5514,
      "step": 320
    },
    {
      "epoch": 0.6071428571428571,
      "grad_norm": 1.110986590385437,
      "learning_rate": 4.3650793650793655e-05,
      "loss": 0.5523,
      "step": 340
    },
    {
      "epoch": 0.6428571428571429,
      "grad_norm": 1.1951007843017578,
      "learning_rate": 3.968253968253968e-05,
      "loss": 0.5395,
      "step": 360
    },
    {
      "epoch": 0.6785714285714286,
      "grad_norm": 1.0911144018173218,
      "learning_rate": 3.571428571428572e-05,
      "loss": 0.548,
      "step": 380
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 1.1718319654464722,
      "learning_rate": 3.1746031746031745e-05,
      "loss": 0.5188,
      "step": 400
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.9721041917800903,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.5081,
      "step": 420
    },
    {
      "epoch": 0.7857142857142857,
      "grad_norm": 1.103784203529358,
      "learning_rate": 2.380952380952381e-05,
      "loss": 0.5183,
      "step": 440
    },
    {
      "epoch": 0.8214285714285714,
      "grad_norm": 1.1778147220611572,
      "learning_rate": 1.984126984126984e-05,
      "loss": 0.5064,
      "step": 460
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 1.0783812999725342,
      "learning_rate": 1.5873015873015872e-05,
      "loss": 0.527,
      "step": 480
    },
    {
      "epoch": 0.8928571428571429,
      "grad_norm": 1.0787134170532227,
      "learning_rate": 1.1904761904761905e-05,
      "loss": 0.5132,
      "step": 500
    },
    {
      "epoch": 0.9285714285714286,
      "grad_norm": 1.0964124202728271,
      "learning_rate": 7.936507936507936e-06,
      "loss": 0.4971,
      "step": 520
    },
    {
      "epoch": 0.9642857142857143,
      "grad_norm": 1.1419587135314941,
      "learning_rate": 3.968253968253968e-06,
      "loss": 0.5368,
      "step": 540
    },
    {
      "epoch": 1.0,
      "grad_norm": 1.0203648805618286,
      "learning_rate": 0.0,
      "loss": 0.501,
      "step": 560
    }
  ],
  "logging_steps": 20,
  "max_steps": 560,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8419093040332800.0,
  "train_batch_size": 5,
  "trial_name": null,
  "trial_params": null
}
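
The log above shows the training loss falling from 2.53 at step 20 to about 0.50 by the end of the single epoch, with the learning rate warming up to roughly 1e-4 and then decaying linearly to 0. The snippet below is a minimal sketch, not part of the uploaded model, for reading this history back programmatically; it assumes the file has been downloaded locally as trainer_state.json and uses only the Python standard library.

import json

# Load the state file written by the Hugging Face Trainer during training.
with open("trainer_state.json") as f:
    state = json.load(f)

print(f"epochs: {state['epoch']}, total steps: {state['global_step']}")

# log_history holds one entry per logging event (every logging_steps = 20 steps here).
for entry in state["log_history"]:
    print(f"step {entry['step']:>3}  "
          f"lr {entry['learning_rate']:.2e}  "
          f"loss {entry['loss']:.4f}")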