{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.9994069974303224,
  "global_step": 632,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 1.6398077091987075e-05,
      "loss": 2.639,
      "step": 5
    },
    {
      "epoch": 0.06,
      "learning_rate": 2.3460344492986226e-05,
      "loss": 2.6696,
      "step": 10
    },
    {
      "epoch": 0.09,
      "learning_rate": 2.7591506092636183e-05,
      "loss": 2.6473,
      "step": 15
    },
    {
      "epoch": 0.13,
      "learning_rate": 3e-05,
      "loss": 2.6481,
      "step": 20
    },
    {
      "epoch": 0.16,
      "learning_rate": 3e-05,
      "loss": 2.641,
      "step": 25
    },
    {
      "epoch": 0.19,
      "learning_rate": 3e-05,
      "loss": 2.663,
      "step": 30
    },
    {
      "epoch": 0.22,
      "learning_rate": 3e-05,
      "loss": 2.6205,
      "step": 35
    },
    {
      "epoch": 0.25,
      "learning_rate": 3e-05,
      "loss": 2.6176,
      "step": 40
    },
    {
      "epoch": 0.28,
      "learning_rate": 3e-05,
      "loss": 2.6764,
      "step": 45
    },
    {
      "epoch": 0.32,
      "learning_rate": 3e-05,
      "loss": 2.6393,
      "step": 50
    },
    {
      "epoch": 0.35,
      "learning_rate": 3e-05,
      "loss": 2.6457,
      "step": 55
    },
    {
      "epoch": 0.38,
      "learning_rate": 3e-05,
      "loss": 2.6594,
      "step": 60
    },
    {
      "epoch": 0.41,
      "learning_rate": 3e-05,
      "loss": 2.6146,
      "step": 65
    },
    {
      "epoch": 0.44,
      "learning_rate": 3e-05,
      "loss": 2.6564,
      "step": 70
    },
    {
      "epoch": 0.47,
      "learning_rate": 3e-05,
      "loss": 2.6465,
      "step": 75
    },
    {
      "epoch": 0.51,
      "learning_rate": 3e-05,
      "loss": 2.6293,
      "step": 80
    },
    {
      "epoch": 0.54,
      "learning_rate": 3e-05,
      "loss": 2.6427,
      "step": 85
    },
    {
      "epoch": 0.57,
      "learning_rate": 3e-05,
      "loss": 2.6203,
      "step": 90
    },
    {
      "epoch": 0.6,
      "learning_rate": 3e-05,
      "loss": 2.6286,
      "step": 95
    },
    {
      "epoch": 0.63,
      "learning_rate": 3e-05,
      "loss": 2.6523,
      "step": 100
    },
    {
      "epoch": 0.66,
      "learning_rate": 3e-05,
      "loss": 2.6239,
      "step": 105
    },
    {
      "epoch": 0.7,
      "learning_rate": 3e-05,
      "loss": 2.6053,
      "step": 110
    },
    {
      "epoch": 0.73,
      "learning_rate": 3e-05,
      "loss": 2.6772,
      "step": 115
    },
    {
      "epoch": 0.76,
      "learning_rate": 3e-05,
      "loss": 2.6096,
      "step": 120
    },
    {
      "epoch": 0.79,
      "learning_rate": 3e-05,
      "loss": 2.6703,
      "step": 125
    },
    {
      "epoch": 0.82,
      "learning_rate": 3e-05,
      "loss": 2.6278,
      "step": 130
    },
    {
      "epoch": 0.85,
      "learning_rate": 3e-05,
      "loss": 2.6324,
      "step": 135
    },
    {
      "epoch": 0.89,
      "learning_rate": 3e-05,
      "loss": 2.6098,
      "step": 140
    },
    {
      "epoch": 0.92,
      "learning_rate": 3e-05,
      "loss": 2.6498,
      "step": 145
    },
    {
      "epoch": 0.95,
      "learning_rate": 3e-05,
      "loss": 2.6319,
      "step": 150
    },
    {
      "epoch": 0.98,
      "learning_rate": 3e-05,
      "loss": 2.6507,
      "step": 155
    },
    {
      "epoch": 1.01,
      "learning_rate": 3e-05,
      "loss": 2.6601,
      "step": 160
    },
    {
      "epoch": 1.04,
      "learning_rate": 3e-05,
      "loss": 2.5753,
      "step": 165
    },
    {
      "epoch": 1.08,
      "learning_rate": 3e-05,
      "loss": 2.6221,
      "step": 170
    },
    {
      "epoch": 1.11,
      "learning_rate": 3e-05,
      "loss": 2.5597,
      "step": 175
    },
    {
      "epoch": 1.14,
      "learning_rate": 3e-05,
      "loss": 2.5934,
      "step": 180
    },
    {
      "epoch": 1.17,
      "learning_rate": 3e-05,
      "loss": 2.5633,
      "step": 185
    },
    {
      "epoch": 1.2,
      "learning_rate": 3e-05,
      "loss": 2.5715,
      "step": 190
    },
    {
      "epoch": 1.23,
      "learning_rate": 3e-05,
      "loss": 2.5809,
      "step": 195
    },
    {
      "epoch": 1.27,
      "learning_rate": 3e-05,
      "loss": 2.5561,
      "step": 200
    },
    {
      "epoch": 1.3,
      "learning_rate": 3e-05,
      "loss": 2.5633,
      "step": 205
    },
    {
      "epoch": 1.33,
      "learning_rate": 3e-05,
      "loss": 2.5526,
      "step": 210
    },
    {
      "epoch": 1.36,
      "learning_rate": 3e-05,
      "loss": 2.6323,
      "step": 215
    },
    {
      "epoch": 1.39,
      "learning_rate": 3e-05,
      "loss": 2.5679,
      "step": 220
    },
    {
      "epoch": 1.42,
      "learning_rate": 3e-05,
      "loss": 2.5805,
      "step": 225
    },
    {
      "epoch": 1.46,
      "learning_rate": 3e-05,
      "loss": 2.5833,
      "step": 230
    },
    {
      "epoch": 1.49,
      "learning_rate": 3e-05,
      "loss": 2.5859,
      "step": 235
    },
    {
      "epoch": 1.52,
      "learning_rate": 3e-05,
      "loss": 2.5947,
      "step": 240
    },
    {
      "epoch": 1.55,
      "learning_rate": 3e-05,
      "loss": 2.6159,
      "step": 245
    },
    {
      "epoch": 1.58,
      "learning_rate": 3e-05,
      "loss": 2.5969,
      "step": 250
    },
    {
      "epoch": 1.61,
      "learning_rate": 3e-05,
      "loss": 2.5649,
      "step": 255
    },
    {
      "epoch": 1.65,
      "learning_rate": 3e-05,
      "loss": 2.5304,
      "step": 260
    },
    {
      "epoch": 1.68,
      "learning_rate": 3e-05,
      "loss": 2.5628,
      "step": 265
    },
    {
      "epoch": 1.71,
      "learning_rate": 3e-05,
      "loss": 2.5857,
      "step": 270
    },
    {
      "epoch": 1.74,
      "learning_rate": 3e-05,
      "loss": 2.5914,
      "step": 275
    },
    {
      "epoch": 1.77,
      "learning_rate": 3e-05,
      "loss": 2.5572,
      "step": 280
    },
    {
      "epoch": 1.8,
      "learning_rate": 3e-05,
      "loss": 2.6037,
      "step": 285
    },
    {
      "epoch": 1.83,
      "learning_rate": 3e-05,
      "loss": 2.6095,
      "step": 290
    },
    {
      "epoch": 1.87,
      "learning_rate": 3e-05,
      "loss": 2.5787,
      "step": 295
    },
    {
      "epoch": 1.9,
      "learning_rate": 3e-05,
      "loss": 2.5881,
      "step": 300
    },
    {
      "epoch": 1.93,
      "learning_rate": 3e-05,
      "loss": 2.5706,
      "step": 305
    },
    {
      "epoch": 1.96,
      "learning_rate": 3e-05,
      "loss": 2.5636,
      "step": 310
    },
    {
      "epoch": 1.99,
      "learning_rate": 3e-05,
      "loss": 2.576,
      "step": 315
    },
    {
      "epoch": 2.03,
      "learning_rate": 3e-05,
      "loss": 2.5955,
      "step": 320
    },
    {
      "epoch": 2.06,
      "learning_rate": 3e-05,
      "loss": 2.5293,
      "step": 325
    },
    {
      "epoch": 2.09,
      "learning_rate": 3e-05,
      "loss": 2.5107,
      "step": 330
    },
    {
      "epoch": 2.12,
      "learning_rate": 3e-05,
      "loss": 2.5378,
      "step": 335
    },
    {
      "epoch": 2.15,
      "learning_rate": 3e-05,
      "loss": 2.5262,
      "step": 340
    },
    {
      "epoch": 2.18,
      "learning_rate": 3e-05,
      "loss": 2.5485,
      "step": 345
    },
    {
      "epoch": 2.22,
      "learning_rate": 3e-05,
      "loss": 2.5123,
      "step": 350
    },
    {
      "epoch": 2.25,
      "learning_rate": 3e-05,
      "loss": 2.509,
      "step": 355
    },
    {
      "epoch": 2.28,
      "learning_rate": 3e-05,
      "loss": 2.4993,
      "step": 360
    },
    {
      "epoch": 2.31,
      "learning_rate": 3e-05,
      "loss": 2.5133,
      "step": 365
    },
    {
      "epoch": 2.34,
      "learning_rate": 3e-05,
      "loss": 2.5346,
      "step": 370
    },
    {
      "epoch": 2.37,
      "learning_rate": 3e-05,
      "loss": 2.5174,
      "step": 375
    },
    {
      "epoch": 2.4,
      "learning_rate": 3e-05,
      "loss": 2.5282,
      "step": 380
    },
    {
      "epoch": 2.44,
      "learning_rate": 3e-05,
      "loss": 2.5489,
      "step": 385
    },
    {
      "epoch": 2.47,
      "learning_rate": 3e-05,
      "loss": 2.4989,
      "step": 390
    },
    {
      "epoch": 2.5,
      "learning_rate": 3e-05,
      "loss": 2.5243,
      "step": 395
    },
    {
      "epoch": 2.53,
      "learning_rate": 3e-05,
      "loss": 2.5228,
      "step": 400
    },
    {
      "epoch": 2.56,
      "learning_rate": 3e-05,
      "loss": 2.5013,
      "step": 405
    },
    {
      "epoch": 2.59,
      "learning_rate": 3e-05,
      "loss": 2.509,
      "step": 410
    },
    {
      "epoch": 2.63,
      "learning_rate": 3e-05,
      "loss": 2.566,
      "step": 415
    },
    {
      "epoch": 2.66,
      "learning_rate": 3e-05,
      "loss": 2.5291,
      "step": 420
    },
    {
      "epoch": 2.69,
      "learning_rate": 3e-05,
      "loss": 2.5421,
      "step": 425
    },
    {
      "epoch": 2.72,
      "learning_rate": 3e-05,
      "loss": 2.5454,
      "step": 430
    },
    {
      "epoch": 2.75,
      "learning_rate": 3e-05,
      "loss": 2.5324,
      "step": 435
    },
    {
      "epoch": 2.78,
      "learning_rate": 3e-05,
      "loss": 2.5457,
      "step": 440
    },
    {
      "epoch": 2.82,
      "learning_rate": 3e-05,
      "loss": 2.5365,
      "step": 445
    },
    {
      "epoch": 2.85,
      "learning_rate": 3e-05,
      "loss": 2.5395,
      "step": 450
    },
    {
      "epoch": 2.88,
      "learning_rate": 3e-05,
      "loss": 2.5458,
      "step": 455
    },
    {
      "epoch": 2.91,
      "learning_rate": 3e-05,
      "loss": 2.4935,
      "step": 460
    },
    {
      "epoch": 2.94,
      "learning_rate": 3e-05,
      "loss": 2.5347,
      "step": 465
    },
    {
      "epoch": 2.97,
      "learning_rate": 3e-05,
      "loss": 2.5287,
      "step": 470
    },
    {
      "epoch": 3.01,
      "learning_rate": 3e-05,
      "loss": 2.5784,
      "step": 475
    },
    {
      "epoch": 3.04,
      "learning_rate": 3e-05,
      "loss": 2.4889,
      "step": 480
    },
    {
      "epoch": 3.07,
      "learning_rate": 3e-05,
      "loss": 2.4622,
      "step": 485
    },
    {
      "epoch": 3.1,
      "learning_rate": 3e-05,
      "loss": 2.4628,
      "step": 490
    },
    {
      "epoch": 3.13,
      "learning_rate": 3e-05,
      "loss": 2.5204,
      "step": 495
    },
    {
      "epoch": 3.16,
      "learning_rate": 3e-05,
      "loss": 2.4543,
      "step": 500
    },
    {
      "epoch": 3.2,
      "learning_rate": 3e-05,
      "loss": 2.4717,
      "step": 505
    },
    {
      "epoch": 3.23,
      "learning_rate": 3e-05,
      "loss": 2.5113,
      "step": 510
    },
    {
      "epoch": 3.26,
      "learning_rate": 3e-05,
      "loss": 2.4838,
      "step": 515
    },
    {
      "epoch": 3.29,
      "learning_rate": 3e-05,
      "loss": 2.5224,
      "step": 520
    },
    {
      "epoch": 3.32,
      "learning_rate": 3e-05,
      "loss": 2.4858,
      "step": 525
    },
    {
      "epoch": 3.35,
      "learning_rate": 3e-05,
      "loss": 2.5227,
      "step": 530
    },
    {
      "epoch": 3.39,
      "learning_rate": 3e-05,
      "loss": 2.4824,
      "step": 535
    },
    {
      "epoch": 3.42,
      "learning_rate": 3e-05,
      "loss": 2.4836,
      "step": 540
    },
    {
      "epoch": 3.45,
      "learning_rate": 3e-05,
      "loss": 2.4968,
      "step": 545
    },
    {
      "epoch": 3.48,
      "learning_rate": 3e-05,
      "loss": 2.5004,
      "step": 550
    },
    {
      "epoch": 3.51,
      "learning_rate": 3e-05,
      "loss": 2.4756,
      "step": 555
    },
    {
      "epoch": 3.54,
      "learning_rate": 3e-05,
      "loss": 2.5068,
      "step": 560
    },
    {
      "epoch": 3.58,
      "learning_rate": 3e-05,
      "loss": 2.468,
      "step": 565
    },
    {
      "epoch": 3.61,
      "learning_rate": 3e-05,
      "loss": 2.4764,
      "step": 570
    },
    {
      "epoch": 3.64,
      "learning_rate": 3e-05,
      "loss": 2.4927,
      "step": 575
    },
    {
      "epoch": 3.67,
      "learning_rate": 3e-05,
      "loss": 2.4993,
      "step": 580
    },
    {
      "epoch": 3.7,
      "learning_rate": 3e-05,
      "loss": 2.4786,
      "step": 585
    },
    {
      "epoch": 3.73,
      "learning_rate": 3e-05,
      "loss": 2.489,
      "step": 590
    },
    {
      "epoch": 3.77,
      "learning_rate": 3e-05,
      "loss": 2.4271,
      "step": 595
    },
    {
      "epoch": 3.8,
      "learning_rate": 3e-05,
      "loss": 2.4502,
      "step": 600
    },
    {
      "epoch": 3.83,
      "learning_rate": 3e-05,
      "loss": 2.4682,
      "step": 605
    },
    {
      "epoch": 3.86,
      "learning_rate": 3e-05,
      "loss": 2.4702,
      "step": 610
    },
    {
      "epoch": 3.89,
      "learning_rate": 3e-05,
      "loss": 2.4722,
      "step": 615
    },
    {
      "epoch": 3.92,
      "learning_rate": 3e-05,
      "loss": 2.5132,
      "step": 620
    },
    {
      "epoch": 3.96,
      "learning_rate": 3e-05,
      "loss": 2.446,
      "step": 625
    },
    {
      "epoch": 3.99,
      "learning_rate": 3e-05,
      "loss": 2.4824,
      "step": 630
    },
    {
      "epoch": 4.0,
      "step": 632,
      "total_flos": 2.1853297764099686e+17,
      "train_loss": 2.5587786006022104,
      "train_runtime": 54071.5154,
      "train_samples_per_second": 0.748,
      "train_steps_per_second": 0.012
    }
  ],
  "max_steps": 632,
  "num_train_epochs": 4,
  "total_flos": 2.1853297764099686e+17,
  "trial_name": null,
  "trial_params": null
}