{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 7.142857142857143,
  "eval_steps": 500,
  "global_step": 5000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07142857142857142,
      "grad_norm": 0.0002574920654296875,
      "learning_rate": 0.00019800000000000002,
      "loss": 0.7765,
      "step": 50
    },
    {
      "epoch": 0.14285714285714285,
      "grad_norm": 0.00133514404296875,
      "learning_rate": 0.000196,
      "loss": 0.3218,
      "step": 100
    },
    {
      "epoch": 0.21428571428571427,
      "grad_norm": 0.01507568359375,
      "learning_rate": 0.000194,
      "loss": 0.3479,
      "step": 150
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 0.000885009765625,
      "learning_rate": 0.000192,
      "loss": 0.5917,
      "step": 200
    },
    {
      "epoch": 0.35714285714285715,
      "grad_norm": 4.65625,
      "learning_rate": 0.00019,
      "loss": 0.5147,
      "step": 250
    },
    {
      "epoch": 0.42857142857142855,
      "grad_norm": 0.002349853515625,
      "learning_rate": 0.000188,
      "loss": 0.2799,
      "step": 300
    },
    {
      "epoch": 0.5,
      "grad_norm": 8.7738037109375e-05,
      "learning_rate": 0.00018600000000000002,
      "loss": 0.3272,
      "step": 350
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 0.000308990478515625,
      "learning_rate": 0.00018400000000000003,
      "loss": 0.4057,
      "step": 400
    },
    {
      "epoch": 0.6428571428571429,
      "grad_norm": 3.734375,
      "learning_rate": 0.000182,
      "loss": 0.665,
      "step": 450
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 0.212890625,
      "learning_rate": 0.00018,
      "loss": 0.3702,
      "step": 500
    },
    {
      "epoch": 0.7857142857142857,
      "grad_norm": 0.0162353515625,
      "learning_rate": 0.00017800000000000002,
      "loss": 0.5988,
      "step": 550
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 6.961822509765625e-05,
      "learning_rate": 0.00017600000000000002,
      "loss": 0.1097,
      "step": 600
    },
    {
      "epoch": 0.9285714285714286,
      "grad_norm": 8.7738037109375e-05,
      "learning_rate": 0.000174,
      "loss": 0.362,
      "step": 650
    },
    {
      "epoch": 1.0,
      "grad_norm": 2.6941299438476562e-05,
      "learning_rate": 0.000172,
      "loss": 0.2517,
      "step": 700
    },
    {
      "epoch": 1.0714285714285714,
      "grad_norm": 0.000732421875,
      "learning_rate": 0.00017,
      "loss": 0.1627,
      "step": 750
    },
    {
      "epoch": 1.1428571428571428,
      "grad_norm": 0.0002727508544921875,
      "learning_rate": 0.000168,
      "loss": 0.1745,
      "step": 800
    },
    {
      "epoch": 1.2142857142857142,
      "grad_norm": 0.0002079010009765625,
      "learning_rate": 0.000166,
      "loss": 0.2209,
      "step": 850
    },
    {
      "epoch": 1.2857142857142856,
      "grad_norm": 0.0003185272216796875,
      "learning_rate": 0.000164,
      "loss": 0.1962,
      "step": 900
    },
    {
      "epoch": 1.3571428571428572,
      "grad_norm": 0.0001583099365234375,
      "learning_rate": 0.000162,
      "loss": 0.221,
      "step": 950
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 1.9375,
      "learning_rate": 0.00016,
      "loss": 0.2535,
      "step": 1000
    },
    {
      "epoch": 1.5,
      "grad_norm": 0.00011110305786132812,
      "learning_rate": 0.00015800000000000002,
      "loss": 0.2031,
      "step": 1050
    },
    {
      "epoch": 1.5714285714285714,
      "grad_norm": 0.00010919570922851562,
      "learning_rate": 0.00015600000000000002,
      "loss": 0.1939,
      "step": 1100
    },
    {
      "epoch": 1.6428571428571428,
      "grad_norm": 6.961822509765625e-05,
      "learning_rate": 0.000154,
      "loss": 0.2314,
      "step": 1150
    },
    {
      "epoch": 1.7142857142857144,
      "grad_norm": 8.296966552734375e-05,
      "learning_rate": 0.000152,
      "loss": 0.1713,
      "step": 1200
    },
    {
      "epoch": 1.7857142857142856,
      "grad_norm": 5.054473876953125e-05,
      "learning_rate": 0.00015000000000000001,
      "loss": 0.1591,
      "step": 1250
    },
    {
      "epoch": 1.8571428571428572,
      "grad_norm": 4.375,
      "learning_rate": 0.000148,
      "loss": 0.162,
      "step": 1300
    },
    {
      "epoch": 1.9285714285714286,
      "grad_norm": 0.0013427734375,
      "learning_rate": 0.000146,
      "loss": 0.1312,
      "step": 1350
    },
    {
      "epoch": 2.0,
      "grad_norm": 4.029273986816406e-05,
      "learning_rate": 0.000144,
      "loss": 0.2479,
      "step": 1400
    },
    {
      "epoch": 2.0714285714285716,
      "grad_norm": 0.00019073486328125,
      "learning_rate": 0.000142,
      "loss": 0.1328,
      "step": 1450
    },
    {
      "epoch": 2.142857142857143,
      "grad_norm": 5.030632019042969e-05,
      "learning_rate": 0.00014,
      "loss": 0.0929,
      "step": 1500
    },
    {
      "epoch": 2.2142857142857144,
      "grad_norm": 1.53125,
      "learning_rate": 0.000138,
      "loss": 0.1952,
      "step": 1550
    },
    {
      "epoch": 2.2857142857142856,
      "grad_norm": 9.918212890625e-05,
      "learning_rate": 0.00013600000000000003,
      "loss": 0.1879,
      "step": 1600
    },
    {
      "epoch": 2.357142857142857,
      "grad_norm": 5.46875,
      "learning_rate": 0.000134,
      "loss": 0.1255,
      "step": 1650
    },
    {
      "epoch": 2.4285714285714284,
      "grad_norm": 0.00020694732666015625,
      "learning_rate": 0.000132,
      "loss": 0.1906,
      "step": 1700
    },
    {
      "epoch": 2.5,
      "grad_norm": 0.0035858154296875,
      "learning_rate": 0.00013000000000000002,
      "loss": 0.0698,
      "step": 1750
    },
    {
      "epoch": 2.571428571428571,
      "grad_norm": 8.0108642578125e-05,
      "learning_rate": 0.00012800000000000002,
      "loss": 0.1395,
      "step": 1800
    },
    {
      "epoch": 2.642857142857143,
      "grad_norm": 0.000118255615234375,
      "learning_rate": 0.000126,
      "loss": 0.1277,
      "step": 1850
    },
    {
      "epoch": 2.7142857142857144,
      "grad_norm": 0.000347137451171875,
      "learning_rate": 0.000124,
      "loss": 0.0916,
      "step": 1900
    },
    {
      "epoch": 2.7857142857142856,
      "grad_norm": 4.458427429199219e-05,
      "learning_rate": 0.000122,
      "loss": 0.1283,
      "step": 1950
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 0.000820159912109375,
      "learning_rate": 0.00012,
      "loss": 0.097,
      "step": 2000
    },
    {
      "epoch": 2.928571428571429,
      "grad_norm": 4.601478576660156e-05,
      "learning_rate": 0.000118,
      "loss": 0.0421,
      "step": 2050
    },
    {
      "epoch": 3.0,
      "grad_norm": 7.09375,
      "learning_rate": 0.000116,
      "loss": 0.122,
      "step": 2100
    },
    {
      "epoch": 3.0714285714285716,
      "grad_norm": 1.90625,
      "learning_rate": 0.00011399999999999999,
      "loss": 0.1375,
      "step": 2150
    },
    {
      "epoch": 3.142857142857143,
      "grad_norm": 9.72747802734375e-05,
      "learning_rate": 0.00011200000000000001,
      "loss": 0.1241,
      "step": 2200
    },
    {
      "epoch": 3.2142857142857144,
      "grad_norm": 0.0001316070556640625,
      "learning_rate": 0.00011000000000000002,
      "loss": 0.125,
      "step": 2250
    },
    {
      "epoch": 3.2857142857142856,
      "grad_norm": 0.00015735626220703125,
      "learning_rate": 0.00010800000000000001,
      "loss": 0.0277,
      "step": 2300
    },
    {
      "epoch": 3.357142857142857,
      "grad_norm": 0.00057220458984375,
      "learning_rate": 0.00010600000000000002,
      "loss": 0.0974,
      "step": 2350
    },
    {
      "epoch": 3.4285714285714284,
      "grad_norm": 0.00099945068359375,
      "learning_rate": 0.00010400000000000001,
      "loss": 0.083,
      "step": 2400
    },
    {
      "epoch": 3.5,
      "grad_norm": 0.000179290771484375,
      "learning_rate": 0.00010200000000000001,
      "loss": 0.0856,
      "step": 2450
    },
    {
      "epoch": 3.571428571428571,
      "grad_norm": 0.00011014938354492188,
      "learning_rate": 0.0001,
      "loss": 0.1169,
      "step": 2500
    },
    {
      "epoch": 3.642857142857143,
      "grad_norm": 1.625,
      "learning_rate": 9.8e-05,
      "loss": 0.061,
      "step": 2550
    },
    {
      "epoch": 3.7142857142857144,
      "grad_norm": 0.000579833984375,
      "learning_rate": 9.6e-05,
      "loss": 0.1022,
      "step": 2600
    },
    {
      "epoch": 3.7857142857142856,
      "grad_norm": 2.1696090698242188e-05,
      "learning_rate": 9.4e-05,
      "loss": 0.0985,
      "step": 2650
    },
    {
      "epoch": 3.857142857142857,
      "grad_norm": 0.00012969970703125,
      "learning_rate": 9.200000000000001e-05,
      "loss": 0.0822,
      "step": 2700
    },
    {
      "epoch": 3.928571428571429,
      "grad_norm": 1.7762184143066406e-05,
      "learning_rate": 9e-05,
      "loss": 0.1027,
      "step": 2750
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.0002956390380859375,
      "learning_rate": 8.800000000000001e-05,
      "loss": 0.0908,
      "step": 2800
    },
    {
      "epoch": 4.071428571428571,
      "grad_norm": 5.125,
      "learning_rate": 8.6e-05,
      "loss": 0.055,
      "step": 2850
    },
    {
      "epoch": 4.142857142857143,
      "grad_norm": 2.234375,
      "learning_rate": 8.4e-05,
      "loss": 0.0411,
      "step": 2900
    },
    {
      "epoch": 4.214285714285714,
      "grad_norm": 1.704692840576172e-05,
      "learning_rate": 8.2e-05,
      "loss": 0.0862,
      "step": 2950
    },
    {
      "epoch": 4.285714285714286,
      "grad_norm": 0.0030517578125,
      "learning_rate": 8e-05,
      "loss": 0.0639,
      "step": 3000
    },
    {
      "epoch": 4.357142857142857,
      "grad_norm": 1.7642974853515625e-05,
      "learning_rate": 7.800000000000001e-05,
      "loss": 0.0855,
      "step": 3050
    },
    {
      "epoch": 4.428571428571429,
      "grad_norm": 6.818771362304688e-05,
      "learning_rate": 7.6e-05,
      "loss": 0.0793,
      "step": 3100
    },
    {
      "epoch": 4.5,
      "grad_norm": 4.0,
      "learning_rate": 7.4e-05,
      "loss": 0.1272,
      "step": 3150
    },
    {
      "epoch": 4.571428571428571,
      "grad_norm": 3.382563591003418e-06,
      "learning_rate": 7.2e-05,
      "loss": 0.0904,
      "step": 3200
    },
    {
      "epoch": 4.642857142857143,
      "grad_norm": 2.140625,
      "learning_rate": 7e-05,
      "loss": 0.0605,
      "step": 3250
    },
    {
      "epoch": 4.714285714285714,
      "grad_norm": 3.0040740966796875e-05,
      "learning_rate": 6.800000000000001e-05,
      "loss": 0.1199,
      "step": 3300
    },
    {
      "epoch": 4.785714285714286,
      "grad_norm": 0.00037384033203125,
      "learning_rate": 6.6e-05,
      "loss": 0.1049,
      "step": 3350
    },
    {
      "epoch": 4.857142857142857,
      "grad_norm": 9.918212890625e-05,
      "learning_rate": 6.400000000000001e-05,
      "loss": 0.0821,
      "step": 3400
    },
    {
      "epoch": 4.928571428571429,
      "grad_norm": 0.00156402587890625,
      "learning_rate": 6.2e-05,
      "loss": 0.0807,
      "step": 3450
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.001068115234375,
      "learning_rate": 6e-05,
      "loss": 0.084,
      "step": 3500
    },
    {
      "epoch": 5.071428571428571,
      "grad_norm": 4.124641418457031e-05,
      "learning_rate": 5.8e-05,
      "loss": 0.0922,
      "step": 3550
    },
    {
      "epoch": 5.142857142857143,
      "grad_norm": 1.5854835510253906e-05,
      "learning_rate": 5.6000000000000006e-05,
      "loss": 0.058,
      "step": 3600
    },
    {
      "epoch": 5.214285714285714,
      "grad_norm": 4.03125,
      "learning_rate": 5.4000000000000005e-05,
      "loss": 0.0838,
      "step": 3650
    },
    {
      "epoch": 5.285714285714286,
      "grad_norm": 4.28125,
      "learning_rate": 5.2000000000000004e-05,
      "loss": 0.0592,
      "step": 3700
    },
    {
      "epoch": 5.357142857142857,
      "grad_norm": 1.3172626495361328e-05,
      "learning_rate": 5e-05,
      "loss": 0.0694,
      "step": 3750
    },
    {
      "epoch": 5.428571428571429,
      "grad_norm": 0.00013637542724609375,
      "learning_rate": 4.8e-05,
      "loss": 0.0848,
      "step": 3800
    },
    {
      "epoch": 5.5,
      "grad_norm": 0.00014495849609375,
      "learning_rate": 4.600000000000001e-05,
      "loss": 0.1067,
      "step": 3850
    },
    {
      "epoch": 5.571428571428571,
      "grad_norm": 0.0006256103515625,
      "learning_rate": 4.4000000000000006e-05,
      "loss": 0.0789,
      "step": 3900
    },
    {
      "epoch": 5.642857142857143,
      "grad_norm": 2.171875,
      "learning_rate": 4.2e-05,
      "loss": 0.0608,
      "step": 3950
    },
    {
      "epoch": 5.714285714285714,
      "grad_norm": 0.000453948974609375,
      "learning_rate": 4e-05,
      "loss": 0.0327,
      "step": 4000
    },
    {
      "epoch": 5.785714285714286,
      "grad_norm": 0.0003566741943359375,
      "learning_rate": 3.8e-05,
      "loss": 0.085,
      "step": 4050
    },
    {
      "epoch": 5.857142857142857,
      "grad_norm": 2.47955322265625e-05,
      "learning_rate": 3.6e-05,
      "loss": 0.1033,
      "step": 4100
    },
    {
      "epoch": 5.928571428571429,
      "grad_norm": 5.221366882324219e-05,
      "learning_rate": 3.4000000000000007e-05,
      "loss": 0.0794,
      "step": 4150
    },
    {
      "epoch": 6.0,
      "grad_norm": 8.392333984375e-05,
      "learning_rate": 3.2000000000000005e-05,
      "loss": 0.0581,
      "step": 4200
    },
    {
      "epoch": 6.071428571428571,
      "grad_norm": 0.0001659393310546875,
      "learning_rate": 3e-05,
      "loss": 0.033,
      "step": 4250
    },
    {
      "epoch": 6.142857142857143,
      "grad_norm": 0.0004634857177734375,
      "learning_rate": 2.8000000000000003e-05,
      "loss": 0.0904,
      "step": 4300
    },
    {
      "epoch": 6.214285714285714,
      "grad_norm": 0.00020503997802734375,
      "learning_rate": 2.6000000000000002e-05,
      "loss": 0.086,
      "step": 4350
    },
    {
      "epoch": 6.285714285714286,
      "grad_norm": 6.079673767089844e-05,
      "learning_rate": 2.4e-05,
      "loss": 0.0653,
      "step": 4400
    },
    {
      "epoch": 6.357142857142857,
      "grad_norm": 8.440017700195312e-05,
      "learning_rate": 2.2000000000000003e-05,
      "loss": 0.0519,
      "step": 4450
    },
    {
      "epoch": 6.428571428571429,
      "grad_norm": 1.2578125,
      "learning_rate": 2e-05,
      "loss": 0.0781,
      "step": 4500
    },
    {
      "epoch": 6.5,
      "grad_norm": 0.00049591064453125,
      "learning_rate": 1.8e-05,
      "loss": 0.0625,
      "step": 4550
    },
    {
      "epoch": 6.571428571428571,
      "grad_norm": 8.106231689453125e-05,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.0884,
      "step": 4600
    },
    {
      "epoch": 6.642857142857143,
      "grad_norm": 2.8967857360839844e-05,
      "learning_rate": 1.4000000000000001e-05,
      "loss": 0.0893,
      "step": 4650
    },
    {
      "epoch": 6.714285714285714,
      "grad_norm": 1.055002212524414e-05,
      "learning_rate": 1.2e-05,
      "loss": 0.0631,
      "step": 4700
    },
    {
      "epoch": 6.785714285714286,
      "grad_norm": 7.009506225585938e-05,
      "learning_rate": 1e-05,
      "loss": 0.0841,
      "step": 4750
    },
    {
      "epoch": 6.857142857142857,
      "grad_norm": 0.0003681182861328125,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.0794,
      "step": 4800
    },
    {
      "epoch": 6.928571428571429,
      "grad_norm": 0.00031280517578125,
      "learning_rate": 6e-06,
      "loss": 0.0826,
      "step": 4850
    },
    {
      "epoch": 7.0,
      "grad_norm": 2.944469451904297e-05,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.0306,
      "step": 4900
    },
    {
      "epoch": 7.071428571428571,
      "grad_norm": 0.000453948974609375,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 0.0654,
      "step": 4950
    },
    {
      "epoch": 7.142857142857143,
      "grad_norm": 0.0001373291015625,
      "learning_rate": 0.0,
      "loss": 0.0638,
      "step": 5000
    }
  ],
  "logging_steps": 50,
  "max_steps": 5000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 8,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.7611786549248e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}