{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.691519635742743,
"eval_steps": 500,
"global_step": 10000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"grad_norm": 0.1484375,
"learning_rate": 0.0002,
"loss": 1.495,
"step": 10
},
{
"epoch": 0.01,
"grad_norm": 0.2119140625,
"learning_rate": 0.0002,
"loss": 1.3828,
"step": 20
},
{
"epoch": 0.02,
"grad_norm": 0.2373046875,
"learning_rate": 0.0002,
"loss": 1.2271,
"step": 30
},
{
"epoch": 0.02,
"grad_norm": 0.28515625,
"learning_rate": 0.0002,
"loss": 1.1604,
"step": 40
},
{
"epoch": 0.03,
"grad_norm": 0.2890625,
"learning_rate": 0.0002,
"loss": 1.099,
"step": 50
},
{
"epoch": 0.03,
"grad_norm": 0.1904296875,
"learning_rate": 0.0002,
"loss": 1.2541,
"step": 60
},
{
"epoch": 0.04,
"grad_norm": 0.2060546875,
"learning_rate": 0.0002,
"loss": 1.2232,
"step": 70
},
{
"epoch": 0.05,
"grad_norm": 0.29296875,
"learning_rate": 0.0002,
"loss": 1.229,
"step": 80
},
{
"epoch": 0.05,
"grad_norm": 0.2890625,
"learning_rate": 0.0002,
"loss": 1.1292,
"step": 90
},
{
"epoch": 0.06,
"grad_norm": 0.287109375,
"learning_rate": 0.0002,
"loss": 1.0562,
"step": 100
},
{
"epoch": 0.06,
"grad_norm": 0.208984375,
"learning_rate": 0.0002,
"loss": 1.1468,
"step": 110
},
{
"epoch": 0.07,
"grad_norm": 0.21875,
"learning_rate": 0.0002,
"loss": 1.1161,
"step": 120
},
{
"epoch": 0.07,
"grad_norm": 0.212890625,
"learning_rate": 0.0002,
"loss": 1.1258,
"step": 130
},
{
"epoch": 0.08,
"grad_norm": 0.279296875,
"learning_rate": 0.0002,
"loss": 1.0508,
"step": 140
},
{
"epoch": 0.09,
"grad_norm": 0.271484375,
"learning_rate": 0.0002,
"loss": 1.0176,
"step": 150
},
{
"epoch": 0.09,
"grad_norm": 0.2138671875,
"learning_rate": 0.0002,
"loss": 1.1983,
"step": 160
},
{
"epoch": 0.1,
"grad_norm": 0.2353515625,
"learning_rate": 0.0002,
"loss": 1.1958,
"step": 170
},
{
"epoch": 0.1,
"grad_norm": 0.236328125,
"learning_rate": 0.0002,
"loss": 1.125,
"step": 180
},
{
"epoch": 0.11,
"grad_norm": 0.251953125,
"learning_rate": 0.0002,
"loss": 1.0656,
"step": 190
},
{
"epoch": 0.11,
"grad_norm": 0.28125,
"learning_rate": 0.0002,
"loss": 0.9628,
"step": 200
},
{
"epoch": 0.12,
"grad_norm": 0.2021484375,
"learning_rate": 0.0002,
"loss": 1.0976,
"step": 210
},
{
"epoch": 0.13,
"grad_norm": 0.2373046875,
"learning_rate": 0.0002,
"loss": 1.1465,
"step": 220
},
{
"epoch": 0.13,
"grad_norm": 0.21875,
"learning_rate": 0.0002,
"loss": 1.0914,
"step": 230
},
{
"epoch": 0.14,
"grad_norm": 0.27734375,
"learning_rate": 0.0002,
"loss": 1.0155,
"step": 240
},
{
"epoch": 0.14,
"grad_norm": 0.291015625,
"learning_rate": 0.0002,
"loss": 0.9932,
"step": 250
},
{
"epoch": 0.15,
"grad_norm": 0.1982421875,
"learning_rate": 0.0002,
"loss": 1.1324,
"step": 260
},
{
"epoch": 0.15,
"grad_norm": 0.212890625,
"learning_rate": 0.0002,
"loss": 1.1367,
"step": 270
},
{
"epoch": 0.16,
"grad_norm": 0.248046875,
"learning_rate": 0.0002,
"loss": 1.0788,
"step": 280
},
{
"epoch": 0.17,
"grad_norm": 0.2470703125,
"learning_rate": 0.0002,
"loss": 1.051,
"step": 290
},
{
"epoch": 0.17,
"grad_norm": 0.26171875,
"learning_rate": 0.0002,
"loss": 1.0043,
"step": 300
},
{
"epoch": 0.18,
"grad_norm": 0.1982421875,
"learning_rate": 0.0002,
"loss": 1.0812,
"step": 310
},
{
"epoch": 0.18,
"grad_norm": 0.2294921875,
"learning_rate": 0.0002,
"loss": 1.0901,
"step": 320
},
{
"epoch": 0.19,
"grad_norm": 0.234375,
"learning_rate": 0.0002,
"loss": 1.0626,
"step": 330
},
{
"epoch": 0.19,
"grad_norm": 0.24609375,
"learning_rate": 0.0002,
"loss": 0.9682,
"step": 340
},
{
"epoch": 0.2,
"grad_norm": 0.326171875,
"learning_rate": 0.0002,
"loss": 0.9774,
"step": 350
},
{
"epoch": 0.2,
"grad_norm": 0.203125,
"learning_rate": 0.0002,
"loss": 1.1245,
"step": 360
},
{
"epoch": 0.21,
"grad_norm": 0.224609375,
"learning_rate": 0.0002,
"loss": 1.1601,
"step": 370
},
{
"epoch": 0.22,
"grad_norm": 0.2470703125,
"learning_rate": 0.0002,
"loss": 1.0697,
"step": 380
},
{
"epoch": 0.22,
"grad_norm": 0.2421875,
"learning_rate": 0.0002,
"loss": 0.993,
"step": 390
},
{
"epoch": 0.23,
"grad_norm": 0.287109375,
"learning_rate": 0.0002,
"loss": 0.9591,
"step": 400
},
{
"epoch": 0.23,
"grad_norm": 0.2216796875,
"learning_rate": 0.0002,
"loss": 1.1005,
"step": 410
},
{
"epoch": 0.24,
"grad_norm": 0.2294921875,
"learning_rate": 0.0002,
"loss": 1.0966,
"step": 420
},
{
"epoch": 0.24,
"grad_norm": 0.236328125,
"learning_rate": 0.0002,
"loss": 1.0549,
"step": 430
},
{
"epoch": 0.25,
"grad_norm": 0.265625,
"learning_rate": 0.0002,
"loss": 1.0008,
"step": 440
},
{
"epoch": 0.26,
"grad_norm": 0.3046875,
"learning_rate": 0.0002,
"loss": 0.9528,
"step": 450
},
{
"epoch": 0.26,
"grad_norm": 0.216796875,
"learning_rate": 0.0002,
"loss": 1.0955,
"step": 460
},
{
"epoch": 0.27,
"grad_norm": 0.236328125,
"learning_rate": 0.0002,
"loss": 1.0835,
"step": 470
},
{
"epoch": 0.27,
"grad_norm": 0.2333984375,
"learning_rate": 0.0002,
"loss": 1.0694,
"step": 480
},
{
"epoch": 0.28,
"grad_norm": 0.2890625,
"learning_rate": 0.0002,
"loss": 1.0206,
"step": 490
},
{
"epoch": 0.28,
"grad_norm": 0.3046875,
"learning_rate": 0.0002,
"loss": 0.9314,
"step": 500
},
{
"epoch": 0.29,
"grad_norm": 0.197265625,
"learning_rate": 0.0002,
"loss": 1.1097,
"step": 510
},
{
"epoch": 0.3,
"grad_norm": 0.2412109375,
"learning_rate": 0.0002,
"loss": 1.0696,
"step": 520
},
{
"epoch": 0.3,
"grad_norm": 0.2353515625,
"learning_rate": 0.0002,
"loss": 1.0366,
"step": 530
},
{
"epoch": 0.31,
"grad_norm": 0.259765625,
"learning_rate": 0.0002,
"loss": 1.0058,
"step": 540
},
{
"epoch": 0.31,
"grad_norm": 0.287109375,
"learning_rate": 0.0002,
"loss": 0.9348,
"step": 550
},
{
"epoch": 0.32,
"grad_norm": 0.22265625,
"learning_rate": 0.0002,
"loss": 1.0774,
"step": 560
},
{
"epoch": 0.32,
"grad_norm": 0.2294921875,
"learning_rate": 0.0002,
"loss": 1.0795,
"step": 570
},
{
"epoch": 0.33,
"grad_norm": 0.2373046875,
"learning_rate": 0.0002,
"loss": 1.0754,
"step": 580
},
{
"epoch": 0.34,
"grad_norm": 0.275390625,
"learning_rate": 0.0002,
"loss": 0.997,
"step": 590
},
{
"epoch": 0.34,
"grad_norm": 0.3125,
"learning_rate": 0.0002,
"loss": 0.9705,
"step": 600
},
{
"epoch": 0.35,
"grad_norm": 0.2265625,
"learning_rate": 0.0002,
"loss": 1.1025,
"step": 610
},
{
"epoch": 0.35,
"grad_norm": 0.2265625,
"learning_rate": 0.0002,
"loss": 1.0675,
"step": 620
},
{
"epoch": 0.36,
"grad_norm": 0.224609375,
"learning_rate": 0.0002,
"loss": 1.0343,
"step": 630
},
{
"epoch": 0.36,
"grad_norm": 0.251953125,
"learning_rate": 0.0002,
"loss": 0.9763,
"step": 640
},
{
"epoch": 0.37,
"grad_norm": 0.28125,
"learning_rate": 0.0002,
"loss": 0.9035,
"step": 650
},
{
"epoch": 0.38,
"grad_norm": 0.21484375,
"learning_rate": 0.0002,
"loss": 1.0758,
"step": 660
},
{
"epoch": 0.38,
"grad_norm": 0.23828125,
"learning_rate": 0.0002,
"loss": 1.0746,
"step": 670
},
{
"epoch": 0.39,
"grad_norm": 0.2333984375,
"learning_rate": 0.0002,
"loss": 1.0232,
"step": 680
},
{
"epoch": 0.39,
"grad_norm": 0.25390625,
"learning_rate": 0.0002,
"loss": 0.9997,
"step": 690
},
{
"epoch": 0.4,
"grad_norm": 0.30078125,
"learning_rate": 0.0002,
"loss": 0.953,
"step": 700
},
{
"epoch": 0.4,
"grad_norm": 0.2109375,
"learning_rate": 0.0002,
"loss": 1.1189,
"step": 710
},
{
"epoch": 0.41,
"grad_norm": 0.2353515625,
"learning_rate": 0.0002,
"loss": 1.0821,
"step": 720
},
{
"epoch": 0.42,
"grad_norm": 0.2333984375,
"learning_rate": 0.0002,
"loss": 0.9964,
"step": 730
},
{
"epoch": 0.42,
"grad_norm": 0.2451171875,
"learning_rate": 0.0002,
"loss": 0.9904,
"step": 740
},
{
"epoch": 0.43,
"grad_norm": 0.294921875,
"learning_rate": 0.0002,
"loss": 0.9861,
"step": 750
},
{
"epoch": 0.43,
"grad_norm": 0.197265625,
"learning_rate": 0.0002,
"loss": 1.0426,
"step": 760
},
{
"epoch": 0.44,
"grad_norm": 0.23046875,
"learning_rate": 0.0002,
"loss": 1.1045,
"step": 770
},
{
"epoch": 0.44,
"grad_norm": 0.232421875,
"learning_rate": 0.0002,
"loss": 1.0563,
"step": 780
},
{
"epoch": 0.45,
"grad_norm": 0.2490234375,
"learning_rate": 0.0002,
"loss": 0.9486,
"step": 790
},
{
"epoch": 0.46,
"grad_norm": 0.337890625,
"learning_rate": 0.0002,
"loss": 0.8978,
"step": 800
},
{
"epoch": 0.46,
"grad_norm": 0.21875,
"learning_rate": 0.0002,
"loss": 1.038,
"step": 810
},
{
"epoch": 0.47,
"grad_norm": 0.228515625,
"learning_rate": 0.0002,
"loss": 1.0695,
"step": 820
},
{
"epoch": 0.47,
"grad_norm": 0.2275390625,
"learning_rate": 0.0002,
"loss": 0.9979,
"step": 830
},
{
"epoch": 0.48,
"grad_norm": 0.25390625,
"learning_rate": 0.0002,
"loss": 0.9699,
"step": 840
},
{
"epoch": 0.48,
"grad_norm": 0.26953125,
"learning_rate": 0.0002,
"loss": 0.928,
"step": 850
},
{
"epoch": 0.49,
"grad_norm": 0.205078125,
"learning_rate": 0.0002,
"loss": 1.0532,
"step": 860
},
{
"epoch": 0.5,
"grad_norm": 0.2158203125,
"learning_rate": 0.0002,
"loss": 1.0447,
"step": 870
},
{
"epoch": 0.5,
"grad_norm": 0.228515625,
"learning_rate": 0.0002,
"loss": 1.0093,
"step": 880
},
{
"epoch": 0.51,
"grad_norm": 0.25390625,
"learning_rate": 0.0002,
"loss": 0.9471,
"step": 890
},
{
"epoch": 0.51,
"grad_norm": 0.296875,
"learning_rate": 0.0002,
"loss": 0.9345,
"step": 900
},
{
"epoch": 0.52,
"grad_norm": 0.22265625,
"learning_rate": 0.0002,
"loss": 1.0507,
"step": 910
},
{
"epoch": 0.52,
"grad_norm": 0.2255859375,
"learning_rate": 0.0002,
"loss": 1.0093,
"step": 920
},
{
"epoch": 0.53,
"grad_norm": 0.23046875,
"learning_rate": 0.0002,
"loss": 1.0098,
"step": 930
},
{
"epoch": 0.54,
"grad_norm": 0.25390625,
"learning_rate": 0.0002,
"loss": 0.9844,
"step": 940
},
{
"epoch": 0.54,
"grad_norm": 0.291015625,
"learning_rate": 0.0002,
"loss": 0.9225,
"step": 950
},
{
"epoch": 0.55,
"grad_norm": 0.2021484375,
"learning_rate": 0.0002,
"loss": 1.0379,
"step": 960
},
{
"epoch": 0.55,
"grad_norm": 0.22265625,
"learning_rate": 0.0002,
"loss": 1.0356,
"step": 970
},
{
"epoch": 0.56,
"grad_norm": 0.2236328125,
"learning_rate": 0.0002,
"loss": 1.0603,
"step": 980
},
{
"epoch": 0.56,
"grad_norm": 0.255859375,
"learning_rate": 0.0002,
"loss": 0.9772,
"step": 990
},
{
"epoch": 0.57,
"grad_norm": 0.28515625,
"learning_rate": 0.0002,
"loss": 0.9391,
"step": 1000
},
{
"epoch": 0.57,
"grad_norm": 0.201171875,
"learning_rate": 0.0002,
"loss": 1.0791,
"step": 1010
},
{
"epoch": 0.58,
"grad_norm": 0.2255859375,
"learning_rate": 0.0002,
"loss": 1.0449,
"step": 1020
},
{
"epoch": 0.59,
"grad_norm": 0.2333984375,
"learning_rate": 0.0002,
"loss": 1.0394,
"step": 1030
},
{
"epoch": 0.59,
"grad_norm": 0.255859375,
"learning_rate": 0.0002,
"loss": 0.9459,
"step": 1040
},
{
"epoch": 0.6,
"grad_norm": 0.310546875,
"learning_rate": 0.0002,
"loss": 0.9259,
"step": 1050
},
{
"epoch": 0.6,
"grad_norm": 0.22265625,
"learning_rate": 0.0002,
"loss": 0.9938,
"step": 1060
},
{
"epoch": 0.61,
"grad_norm": 0.23828125,
"learning_rate": 0.0002,
"loss": 1.0291,
"step": 1070
},
{
"epoch": 0.61,
"grad_norm": 0.2451171875,
"learning_rate": 0.0002,
"loss": 1.0005,
"step": 1080
},
{
"epoch": 0.62,
"grad_norm": 0.23046875,
"learning_rate": 0.0002,
"loss": 0.9573,
"step": 1090
},
{
"epoch": 0.63,
"grad_norm": 0.28125,
"learning_rate": 0.0002,
"loss": 0.9081,
"step": 1100
},
{
"epoch": 0.63,
"grad_norm": 0.208984375,
"learning_rate": 0.0002,
"loss": 1.0571,
"step": 1110
},
{
"epoch": 0.64,
"grad_norm": 0.224609375,
"learning_rate": 0.0002,
"loss": 1.0457,
"step": 1120
},
{
"epoch": 0.64,
"grad_norm": 0.2412109375,
"learning_rate": 0.0002,
"loss": 1.025,
"step": 1130
},
{
"epoch": 0.65,
"grad_norm": 0.255859375,
"learning_rate": 0.0002,
"loss": 0.9688,
"step": 1140
},
{
"epoch": 0.65,
"grad_norm": 0.265625,
"learning_rate": 0.0002,
"loss": 0.9028,
"step": 1150
},
{
"epoch": 0.66,
"grad_norm": 0.21484375,
"learning_rate": 0.0002,
"loss": 1.0761,
"step": 1160
},
{
"epoch": 0.67,
"grad_norm": 0.328125,
"learning_rate": 0.0002,
"loss": 1.042,
"step": 1170
},
{
"epoch": 0.67,
"grad_norm": 0.2470703125,
"learning_rate": 0.0002,
"loss": 0.9584,
"step": 1180
},
{
"epoch": 0.68,
"grad_norm": 0.2470703125,
"learning_rate": 0.0002,
"loss": 0.9269,
"step": 1190
},
{
"epoch": 0.68,
"grad_norm": 0.28515625,
"learning_rate": 0.0002,
"loss": 0.8703,
"step": 1200
},
{
"epoch": 0.69,
"grad_norm": 0.2109375,
"learning_rate": 0.0002,
"loss": 1.0499,
"step": 1210
},
{
"epoch": 0.69,
"grad_norm": 0.25,
"learning_rate": 0.0002,
"loss": 0.999,
"step": 1220
},
{
"epoch": 0.7,
"grad_norm": 0.232421875,
"learning_rate": 0.0002,
"loss": 0.9965,
"step": 1230
},
{
"epoch": 0.71,
"grad_norm": 0.267578125,
"learning_rate": 0.0002,
"loss": 0.937,
"step": 1240
},
{
"epoch": 0.71,
"grad_norm": 0.291015625,
"learning_rate": 0.0002,
"loss": 0.9117,
"step": 1250
},
{
"epoch": 0.72,
"grad_norm": 0.2275390625,
"learning_rate": 0.0002,
"loss": 1.0298,
"step": 1260
},
{
"epoch": 0.72,
"grad_norm": 0.2197265625,
"learning_rate": 0.0002,
"loss": 1.0433,
"step": 1270
},
{
"epoch": 0.73,
"grad_norm": 0.240234375,
"learning_rate": 0.0002,
"loss": 0.9567,
"step": 1280
},
{
"epoch": 0.73,
"grad_norm": 0.265625,
"learning_rate": 0.0002,
"loss": 0.9681,
"step": 1290
},
{
"epoch": 0.74,
"grad_norm": 0.28125,
"learning_rate": 0.0002,
"loss": 0.8989,
"step": 1300
},
{
"epoch": 0.75,
"grad_norm": 0.240234375,
"learning_rate": 0.0002,
"loss": 1.0711,
"step": 1310
},
{
"epoch": 0.75,
"grad_norm": 0.228515625,
"learning_rate": 0.0002,
"loss": 1.0023,
"step": 1320
},
{
"epoch": 0.76,
"grad_norm": 0.2373046875,
"learning_rate": 0.0002,
"loss": 0.9999,
"step": 1330
},
{
"epoch": 0.76,
"grad_norm": 0.255859375,
"learning_rate": 0.0002,
"loss": 0.9522,
"step": 1340
},
{
"epoch": 0.77,
"grad_norm": 0.2734375,
"learning_rate": 0.0002,
"loss": 0.8813,
"step": 1350
},
{
"epoch": 0.77,
"grad_norm": 0.2275390625,
"learning_rate": 0.0002,
"loss": 1.0647,
"step": 1360
},
{
"epoch": 0.78,
"grad_norm": 0.24609375,
"learning_rate": 0.0002,
"loss": 1.0166,
"step": 1370
},
{
"epoch": 0.79,
"grad_norm": 0.224609375,
"learning_rate": 0.0002,
"loss": 0.9722,
"step": 1380
},
{
"epoch": 0.79,
"grad_norm": 0.25390625,
"learning_rate": 0.0002,
"loss": 0.9438,
"step": 1390
},
{
"epoch": 0.8,
"grad_norm": 0.2734375,
"learning_rate": 0.0002,
"loss": 0.9141,
"step": 1400
},
{
"epoch": 0.8,
"grad_norm": 0.220703125,
"learning_rate": 0.0002,
"loss": 1.0024,
"step": 1410
},
{
"epoch": 0.81,
"grad_norm": 0.244140625,
"learning_rate": 0.0002,
"loss": 0.9796,
"step": 1420
},
{
"epoch": 0.81,
"grad_norm": 0.236328125,
"learning_rate": 0.0002,
"loss": 1.025,
"step": 1430
},
{
"epoch": 0.82,
"grad_norm": 0.2451171875,
"learning_rate": 0.0002,
"loss": 0.9433,
"step": 1440
},
{
"epoch": 0.83,
"grad_norm": 0.29296875,
"learning_rate": 0.0002,
"loss": 0.8746,
"step": 1450
},
{
"epoch": 0.83,
"grad_norm": 0.2138671875,
"learning_rate": 0.0002,
"loss": 0.9818,
"step": 1460
},
{
"epoch": 0.84,
"grad_norm": 0.2314453125,
"learning_rate": 0.0002,
"loss": 1.0507,
"step": 1470
},
{
"epoch": 0.84,
"grad_norm": 0.2353515625,
"learning_rate": 0.0002,
"loss": 0.9784,
"step": 1480
},
{
"epoch": 0.85,
"grad_norm": 0.2421875,
"learning_rate": 0.0002,
"loss": 0.9124,
"step": 1490
},
{
"epoch": 0.85,
"grad_norm": 0.296875,
"learning_rate": 0.0002,
"loss": 0.895,
"step": 1500
},
{
"epoch": 0.86,
"grad_norm": 0.2158203125,
"learning_rate": 0.0002,
"loss": 1.0743,
"step": 1510
},
{
"epoch": 0.87,
"grad_norm": 0.2197265625,
"learning_rate": 0.0002,
"loss": 1.0102,
"step": 1520
},
{
"epoch": 0.87,
"grad_norm": 0.2197265625,
"learning_rate": 0.0002,
"loss": 0.9876,
"step": 1530
},
{
"epoch": 0.88,
"grad_norm": 0.25,
"learning_rate": 0.0002,
"loss": 0.9359,
"step": 1540
},
{
"epoch": 0.88,
"grad_norm": 0.275390625,
"learning_rate": 0.0002,
"loss": 0.8637,
"step": 1550
},
{
"epoch": 0.89,
"grad_norm": 0.2451171875,
"learning_rate": 0.0002,
"loss": 1.0772,
"step": 1560
},
{
"epoch": 0.89,
"grad_norm": 0.2373046875,
"learning_rate": 0.0002,
"loss": 1.0055,
"step": 1570
},
{
"epoch": 0.9,
"grad_norm": 0.2412109375,
"learning_rate": 0.0002,
"loss": 0.9339,
"step": 1580
},
{
"epoch": 0.9,
"grad_norm": 0.267578125,
"learning_rate": 0.0002,
"loss": 0.9126,
"step": 1590
},
{
"epoch": 0.91,
"grad_norm": 0.279296875,
"learning_rate": 0.0002,
"loss": 0.8885,
"step": 1600
},
{
"epoch": 0.92,
"grad_norm": 0.2080078125,
"learning_rate": 0.0002,
"loss": 1.0688,
"step": 1610
},
{
"epoch": 0.92,
"grad_norm": 0.21875,
"learning_rate": 0.0002,
"loss": 1.0258,
"step": 1620
},
{
"epoch": 0.93,
"grad_norm": 0.228515625,
"learning_rate": 0.0002,
"loss": 0.986,
"step": 1630
},
{
"epoch": 0.93,
"grad_norm": 0.251953125,
"learning_rate": 0.0002,
"loss": 0.9506,
"step": 1640
},
{
"epoch": 0.94,
"grad_norm": 0.29296875,
"learning_rate": 0.0002,
"loss": 0.9254,
"step": 1650
},
{
"epoch": 0.94,
"grad_norm": 0.21484375,
"learning_rate": 0.0002,
"loss": 1.0348,
"step": 1660
},
{
"epoch": 0.95,
"grad_norm": 0.23046875,
"learning_rate": 0.0002,
"loss": 1.0086,
"step": 1670
},
{
"epoch": 0.96,
"grad_norm": 0.234375,
"learning_rate": 0.0002,
"loss": 0.9324,
"step": 1680
},
{
"epoch": 0.96,
"grad_norm": 0.25,
"learning_rate": 0.0002,
"loss": 0.87,
"step": 1690
},
{
"epoch": 0.97,
"grad_norm": 0.267578125,
"learning_rate": 0.0002,
"loss": 0.8989,
"step": 1700
},
{
"epoch": 0.97,
"grad_norm": 0.2177734375,
"learning_rate": 0.0002,
"loss": 1.0533,
"step": 1710
},
{
"epoch": 0.98,
"grad_norm": 0.2060546875,
"learning_rate": 0.0002,
"loss": 1.0415,
"step": 1720
},
{
"epoch": 0.98,
"grad_norm": 0.2314453125,
"learning_rate": 0.0002,
"loss": 1.024,
"step": 1730
},
{
"epoch": 0.99,
"grad_norm": 0.263671875,
"learning_rate": 0.0002,
"loss": 0.9463,
"step": 1740
},
{
"epoch": 1.0,
"grad_norm": 0.291015625,
"learning_rate": 0.0002,
"loss": 0.8608,
"step": 1750
},
{
"epoch": 1.0,
"grad_norm": 0.2041015625,
"learning_rate": 0.0002,
"loss": 1.0173,
"step": 1760
},
{
"epoch": 1.01,
"grad_norm": 0.2265625,
"learning_rate": 0.0002,
"loss": 1.0341,
"step": 1770
},
{
"epoch": 1.01,
"grad_norm": 0.21875,
"learning_rate": 0.0002,
"loss": 1.012,
"step": 1780
},
{
"epoch": 1.02,
"grad_norm": 0.240234375,
"learning_rate": 0.0002,
"loss": 0.9792,
"step": 1790
},
{
"epoch": 1.02,
"grad_norm": 0.2578125,
"learning_rate": 0.0002,
"loss": 0.8659,
"step": 1800
},
{
"epoch": 1.03,
"grad_norm": 0.2275390625,
"learning_rate": 0.0002,
"loss": 0.9118,
"step": 1810
},
{
"epoch": 1.04,
"grad_norm": 0.2236328125,
"learning_rate": 0.0002,
"loss": 0.9863,
"step": 1820
},
{
"epoch": 1.04,
"grad_norm": 0.2431640625,
"learning_rate": 0.0002,
"loss": 1.0178,
"step": 1830
},
{
"epoch": 1.05,
"grad_norm": 0.234375,
"learning_rate": 0.0002,
"loss": 0.9463,
"step": 1840
},
{
"epoch": 1.05,
"grad_norm": 0.2578125,
"learning_rate": 0.0002,
"loss": 0.8579,
"step": 1850
},
{
"epoch": 1.06,
"grad_norm": 0.2265625,
"learning_rate": 0.0002,
"loss": 0.8987,
"step": 1860
},
{
"epoch": 1.06,
"grad_norm": 0.2138671875,
"learning_rate": 0.0002,
"loss": 1.0184,
"step": 1870
},
{
"epoch": 1.07,
"grad_norm": 0.23828125,
"learning_rate": 0.0002,
"loss": 1.0167,
"step": 1880
},
{
"epoch": 1.08,
"grad_norm": 0.2412109375,
"learning_rate": 0.0002,
"loss": 0.9397,
"step": 1890
},
{
"epoch": 1.08,
"grad_norm": 0.2734375,
"learning_rate": 0.0002,
"loss": 0.8526,
"step": 1900
},
{
"epoch": 1.09,
"grad_norm": 0.21875,
"learning_rate": 0.0002,
"loss": 0.9324,
"step": 1910
},
{
"epoch": 1.09,
"grad_norm": 0.2216796875,
"learning_rate": 0.0002,
"loss": 1.0039,
"step": 1920
},
{
"epoch": 1.1,
"grad_norm": 0.240234375,
"learning_rate": 0.0002,
"loss": 0.9614,
"step": 1930
},
{
"epoch": 1.1,
"grad_norm": 0.2470703125,
"learning_rate": 0.0002,
"loss": 0.9676,
"step": 1940
},
{
"epoch": 1.11,
"grad_norm": 0.267578125,
"learning_rate": 0.0002,
"loss": 0.8858,
"step": 1950
},
{
"epoch": 1.12,
"grad_norm": 0.205078125,
"learning_rate": 0.0002,
"loss": 0.9097,
"step": 1960
},
{
"epoch": 1.12,
"grad_norm": 0.2236328125,
"learning_rate": 0.0002,
"loss": 1.0206,
"step": 1970
},
{
"epoch": 1.13,
"grad_norm": 0.2578125,
"learning_rate": 0.0002,
"loss": 0.9845,
"step": 1980
},
{
"epoch": 1.13,
"grad_norm": 0.2314453125,
"learning_rate": 0.0002,
"loss": 0.9402,
"step": 1990
},
{
"epoch": 1.14,
"grad_norm": 0.283203125,
"learning_rate": 0.0002,
"loss": 0.8963,
"step": 2000
},
{
"epoch": 1.14,
"grad_norm": 0.251953125,
"learning_rate": 0.0002,
"loss": 0.8848,
"step": 2010
},
{
"epoch": 1.15,
"grad_norm": 0.234375,
"learning_rate": 0.0002,
"loss": 0.9937,
"step": 2020
},
{
"epoch": 1.16,
"grad_norm": 0.2490234375,
"learning_rate": 0.0002,
"loss": 0.9398,
"step": 2030
},
{
"epoch": 1.16,
"grad_norm": 0.26953125,
"learning_rate": 0.0002,
"loss": 0.9064,
"step": 2040
},
{
"epoch": 1.17,
"grad_norm": 0.275390625,
"learning_rate": 0.0002,
"loss": 0.857,
"step": 2050
},
{
"epoch": 1.17,
"grad_norm": 0.216796875,
"learning_rate": 0.0002,
"loss": 0.8838,
"step": 2060
},
{
"epoch": 1.18,
"grad_norm": 0.23828125,
"learning_rate": 0.0002,
"loss": 1.0213,
"step": 2070
},
{
"epoch": 1.18,
"grad_norm": 0.2333984375,
"learning_rate": 0.0002,
"loss": 0.9989,
"step": 2080
},
{
"epoch": 1.19,
"grad_norm": 0.25,
"learning_rate": 0.0002,
"loss": 0.9823,
"step": 2090
},
{
"epoch": 1.2,
"grad_norm": 0.2470703125,
"learning_rate": 0.0002,
"loss": 0.8393,
"step": 2100
},
{
"epoch": 1.2,
"grad_norm": 0.232421875,
"learning_rate": 0.0002,
"loss": 0.8814,
"step": 2110
},
{
"epoch": 1.21,
"grad_norm": 0.21875,
"learning_rate": 0.0002,
"loss": 1.0063,
"step": 2120
},
{
"epoch": 1.21,
"grad_norm": 0.25390625,
"learning_rate": 0.0002,
"loss": 1.0122,
"step": 2130
},
{
"epoch": 1.22,
"grad_norm": 0.2412109375,
"learning_rate": 0.0002,
"loss": 0.9377,
"step": 2140
},
{
"epoch": 1.22,
"grad_norm": 0.283203125,
"learning_rate": 0.0002,
"loss": 0.8951,
"step": 2150
},
{
"epoch": 1.23,
"grad_norm": 0.2431640625,
"learning_rate": 0.0002,
"loss": 0.88,
"step": 2160
},
{
"epoch": 1.24,
"grad_norm": 0.2431640625,
"learning_rate": 0.0002,
"loss": 1.0067,
"step": 2170
},
{
"epoch": 1.24,
"grad_norm": 0.248046875,
"learning_rate": 0.0002,
"loss": 0.9457,
"step": 2180
},
{
"epoch": 1.25,
"grad_norm": 0.2353515625,
"learning_rate": 0.0002,
"loss": 0.9101,
"step": 2190
},
{
"epoch": 1.25,
"grad_norm": 0.259765625,
"learning_rate": 0.0002,
"loss": 0.8667,
"step": 2200
},
{
"epoch": 1.26,
"grad_norm": 0.2470703125,
"learning_rate": 0.0002,
"loss": 0.8822,
"step": 2210
},
{
"epoch": 1.26,
"grad_norm": 0.23046875,
"learning_rate": 0.0002,
"loss": 1.0005,
"step": 2220
},
{
"epoch": 1.27,
"grad_norm": 0.2451171875,
"learning_rate": 0.0002,
"loss": 0.965,
"step": 2230
},
{
"epoch": 1.27,
"grad_norm": 0.267578125,
"learning_rate": 0.0002,
"loss": 0.9537,
"step": 2240
},
{
"epoch": 1.28,
"grad_norm": 0.26171875,
"learning_rate": 0.0002,
"loss": 0.8393,
"step": 2250
},
{
"epoch": 1.29,
"grad_norm": 0.236328125,
"learning_rate": 0.0002,
"loss": 0.9028,
"step": 2260
},
{
"epoch": 1.29,
"grad_norm": 0.2099609375,
"learning_rate": 0.0002,
"loss": 0.9456,
"step": 2270
},
{
"epoch": 1.3,
"grad_norm": 0.228515625,
"learning_rate": 0.0002,
"loss": 0.9766,
"step": 2280
},
{
"epoch": 1.3,
"grad_norm": 0.25390625,
"learning_rate": 0.0002,
"loss": 0.9445,
"step": 2290
},
{
"epoch": 1.31,
"grad_norm": 0.275390625,
"learning_rate": 0.0002,
"loss": 0.8368,
"step": 2300
},
{
"epoch": 1.31,
"grad_norm": 0.2421875,
"learning_rate": 0.0002,
"loss": 0.8603,
"step": 2310
},
{
"epoch": 1.32,
"grad_norm": 0.2412109375,
"learning_rate": 0.0002,
"loss": 1.0516,
"step": 2320
},
{
"epoch": 1.33,
"grad_norm": 0.25390625,
"learning_rate": 0.0002,
"loss": 0.9612,
"step": 2330
},
{
"epoch": 1.33,
"grad_norm": 0.244140625,
"learning_rate": 0.0002,
"loss": 0.9333,
"step": 2340
},
{
"epoch": 1.34,
"grad_norm": 0.26171875,
"learning_rate": 0.0002,
"loss": 0.8959,
"step": 2350
},
{
"epoch": 1.34,
"grad_norm": 0.220703125,
"learning_rate": 0.0002,
"loss": 0.8876,
"step": 2360
},
{
"epoch": 1.35,
"grad_norm": 0.2314453125,
"learning_rate": 0.0002,
"loss": 0.9755,
"step": 2370
},
{
"epoch": 1.35,
"grad_norm": 0.232421875,
"learning_rate": 0.0002,
"loss": 0.984,
"step": 2380
},
{
"epoch": 1.36,
"grad_norm": 0.248046875,
"learning_rate": 0.0002,
"loss": 0.9038,
"step": 2390
},
{
"epoch": 1.37,
"grad_norm": 0.267578125,
"learning_rate": 0.0002,
"loss": 0.8772,
"step": 2400
},
{
"epoch": 1.37,
"grad_norm": 0.240234375,
"learning_rate": 0.0002,
"loss": 0.8802,
"step": 2410
},
{
"epoch": 1.38,
"grad_norm": 0.23828125,
"learning_rate": 0.0002,
"loss": 0.9951,
"step": 2420
},
{
"epoch": 1.38,
"grad_norm": 0.2451171875,
"learning_rate": 0.0002,
"loss": 0.9761,
"step": 2430
},
{
"epoch": 1.39,
"grad_norm": 0.259765625,
"learning_rate": 0.0002,
"loss": 0.9057,
"step": 2440
},
{
"epoch": 1.39,
"grad_norm": 0.2734375,
"learning_rate": 0.0002,
"loss": 0.8993,
"step": 2450
},
{
"epoch": 1.4,
"grad_norm": 0.2294921875,
"learning_rate": 0.0002,
"loss": 0.8513,
"step": 2460
},
{
"epoch": 1.41,
"grad_norm": 0.2421875,
"learning_rate": 0.0002,
"loss": 0.9897,
"step": 2470
},
{
"epoch": 1.41,
"grad_norm": 0.2333984375,
"learning_rate": 0.0002,
"loss": 0.985,
"step": 2480
},
{
"epoch": 1.42,
"grad_norm": 0.2431640625,
"learning_rate": 0.0002,
"loss": 0.9435,
"step": 2490
},
{
"epoch": 1.42,
"grad_norm": 0.3046875,
"learning_rate": 0.0002,
"loss": 0.866,
"step": 2500
},
{
"epoch": 1.43,
"grad_norm": 0.25390625,
"learning_rate": 0.0002,
"loss": 0.8878,
"step": 2510
},
{
"epoch": 1.43,
"grad_norm": 0.2109375,
"learning_rate": 0.0002,
"loss": 0.9561,
"step": 2520
},
{
"epoch": 1.44,
"grad_norm": 0.267578125,
"learning_rate": 0.0002,
"loss": 0.9653,
"step": 2530
},
{
"epoch": 1.45,
"grad_norm": 0.25390625,
"learning_rate": 0.0002,
"loss": 0.8876,
"step": 2540
},
{
"epoch": 1.45,
"grad_norm": 0.259765625,
"learning_rate": 0.0002,
"loss": 0.8824,
"step": 2550
},
{
"epoch": 1.46,
"grad_norm": 0.251953125,
"learning_rate": 0.0002,
"loss": 0.8765,
"step": 2560
},
{
"epoch": 1.46,
"grad_norm": 0.2421875,
"learning_rate": 0.0002,
"loss": 0.9614,
"step": 2570
},
{
"epoch": 1.47,
"grad_norm": 0.255859375,
"learning_rate": 0.0002,
"loss": 0.9814,
"step": 2580
},
{
"epoch": 1.47,
"grad_norm": 0.251953125,
"learning_rate": 0.0002,
"loss": 0.8922,
"step": 2590
},
{
"epoch": 1.48,
"grad_norm": 0.3046875,
"learning_rate": 0.0002,
"loss": 0.8736,
"step": 2600
},
{
"epoch": 1.49,
"grad_norm": 0.2451171875,
"learning_rate": 0.0002,
"loss": 0.8832,
"step": 2610
},
{
"epoch": 1.49,
"grad_norm": 0.220703125,
"learning_rate": 0.0002,
"loss": 0.9943,
"step": 2620
},
{
"epoch": 1.5,
"grad_norm": 0.2265625,
"learning_rate": 0.0002,
"loss": 0.9602,
"step": 2630
},
{
"epoch": 1.5,
"grad_norm": 0.25390625,
"learning_rate": 0.0002,
"loss": 0.936,
"step": 2640
},
{
"epoch": 1.51,
"grad_norm": 0.27734375,
"learning_rate": 0.0002,
"loss": 0.8718,
"step": 2650
},
{
"epoch": 1.51,
"grad_norm": 0.234375,
"learning_rate": 0.0002,
"loss": 0.9041,
"step": 2660
},
{
"epoch": 1.52,
"grad_norm": 0.2236328125,
"learning_rate": 0.0002,
"loss": 0.9741,
"step": 2670
},
{
"epoch": 1.53,
"grad_norm": 0.2314453125,
"learning_rate": 0.0002,
"loss": 0.9613,
"step": 2680
},
{
"epoch": 1.53,
"grad_norm": 0.2373046875,
"learning_rate": 0.0002,
"loss": 0.9412,
"step": 2690
},
{
"epoch": 1.54,
"grad_norm": 0.2578125,
"learning_rate": 0.0002,
"loss": 0.907,
"step": 2700
},
{
"epoch": 1.54,
"grad_norm": 0.263671875,
"learning_rate": 0.0002,
"loss": 0.8255,
"step": 2710
},
{
"epoch": 1.55,
"grad_norm": 0.2392578125,
"learning_rate": 0.0002,
"loss": 0.9947,
"step": 2720
},
{
"epoch": 1.55,
"grad_norm": 0.259765625,
"learning_rate": 0.0002,
"loss": 0.9775,
"step": 2730
},
{
"epoch": 1.56,
"grad_norm": 0.2373046875,
"learning_rate": 0.0002,
"loss": 0.9293,
"step": 2740
},
{
"epoch": 1.57,
"grad_norm": 0.271484375,
"learning_rate": 0.0002,
"loss": 0.877,
"step": 2750
},
{
"epoch": 1.57,
"grad_norm": 0.212890625,
"learning_rate": 0.0002,
"loss": 0.9085,
"step": 2760
},
{
"epoch": 1.58,
"grad_norm": 0.21875,
"learning_rate": 0.0002,
"loss": 0.975,
"step": 2770
},
{
"epoch": 1.58,
"grad_norm": 0.2470703125,
"learning_rate": 0.0002,
"loss": 0.9957,
"step": 2780
},
{
"epoch": 1.59,
"grad_norm": 0.25390625,
"learning_rate": 0.0002,
"loss": 0.9567,
"step": 2790
},
{
"epoch": 1.59,
"grad_norm": 0.271484375,
"learning_rate": 0.0002,
"loss": 0.8587,
"step": 2800
},
{
"epoch": 1.6,
"grad_norm": 0.251953125,
"learning_rate": 0.0002,
"loss": 0.8616,
"step": 2810
},
{
"epoch": 1.61,
"grad_norm": 0.25390625,
"learning_rate": 0.0002,
"loss": 1.0032,
"step": 2820
},
{
"epoch": 1.61,
"grad_norm": 0.2470703125,
"learning_rate": 0.0002,
"loss": 0.9001,
"step": 2830
},
{
"epoch": 1.62,
"grad_norm": 0.2373046875,
"learning_rate": 0.0002,
"loss": 0.9542,
"step": 2840
},
{
"epoch": 1.62,
"grad_norm": 0.26953125,
"learning_rate": 0.0002,
"loss": 0.8459,
"step": 2850
},
{
"epoch": 1.63,
"grad_norm": 0.2392578125,
"learning_rate": 0.0002,
"loss": 0.8761,
"step": 2860
},
{
"epoch": 1.63,
"grad_norm": 0.22265625,
"learning_rate": 0.0002,
"loss": 1.0039,
"step": 2870
},
{
"epoch": 1.64,
"grad_norm": 0.2412109375,
"learning_rate": 0.0002,
"loss": 0.9821,
"step": 2880
},
{
"epoch": 1.64,
"grad_norm": 0.25390625,
"learning_rate": 0.0002,
"loss": 0.9451,
"step": 2890
},
{
"epoch": 1.65,
"grad_norm": 0.255859375,
"learning_rate": 0.0002,
"loss": 0.887,
"step": 2900
},
{
"epoch": 1.66,
"grad_norm": 0.2333984375,
"learning_rate": 0.0002,
"loss": 0.9211,
"step": 2910
},
{
"epoch": 1.66,
"grad_norm": 0.232421875,
"learning_rate": 0.0002,
"loss": 0.9841,
"step": 2920
},
{
"epoch": 1.67,
"grad_norm": 0.234375,
"learning_rate": 0.0002,
"loss": 0.9731,
"step": 2930
},
{
"epoch": 1.67,
"grad_norm": 0.240234375,
"learning_rate": 0.0002,
"loss": 0.9368,
"step": 2940
},
{
"epoch": 1.68,
"grad_norm": 0.265625,
"learning_rate": 0.0002,
"loss": 0.8407,
"step": 2950
},
{
"epoch": 1.68,
"grad_norm": 0.234375,
"learning_rate": 0.0002,
"loss": 0.878,
"step": 2960
},
{
"epoch": 1.69,
"grad_norm": 0.2255859375,
"learning_rate": 0.0002,
"loss": 1.0246,
"step": 2970
},
{
"epoch": 1.7,
"grad_norm": 0.2431640625,
"learning_rate": 0.0002,
"loss": 0.9817,
"step": 2980
},
{
"epoch": 1.7,
"grad_norm": 0.2470703125,
"learning_rate": 0.0002,
"loss": 0.9605,
"step": 2990
},
{
"epoch": 1.71,
"grad_norm": 0.255859375,
"learning_rate": 0.0002,
"loss": 0.8364,
"step": 3000
},
{
"epoch": 1.71,
"grad_norm": 0.2255859375,
"learning_rate": 0.0002,
"loss": 0.865,
"step": 3010
},
{
"epoch": 1.72,
"grad_norm": 0.22265625,
"learning_rate": 0.0002,
"loss": 0.9692,
"step": 3020
},
{
"epoch": 1.72,
"grad_norm": 0.240234375,
"learning_rate": 0.0002,
"loss": 0.9697,
"step": 3030
},
{
"epoch": 1.73,
"grad_norm": 0.2578125,
"learning_rate": 0.0002,
"loss": 0.9166,
"step": 3040
},
{
"epoch": 1.74,
"grad_norm": 0.263671875,
"learning_rate": 0.0002,
"loss": 0.8529,
"step": 3050
},
{
"epoch": 1.74,
"grad_norm": 0.2265625,
"learning_rate": 0.0002,
"loss": 0.9194,
"step": 3060
},
{
"epoch": 1.75,
"grad_norm": 0.2314453125,
"learning_rate": 0.0002,
"loss": 0.9443,
"step": 3070
},
{
"epoch": 1.75,
"grad_norm": 0.2421875,
"learning_rate": 0.0002,
"loss": 0.9519,
"step": 3080
},
{
"epoch": 1.76,
"grad_norm": 0.2470703125,
"learning_rate": 0.0002,
"loss": 0.9066,
"step": 3090
},
{
"epoch": 1.76,
"grad_norm": 0.26171875,
"learning_rate": 0.0002,
"loss": 0.8582,
"step": 3100
},
{
"epoch": 1.77,
"grad_norm": 0.2333984375,
"learning_rate": 0.0002,
"loss": 0.9266,
"step": 3110
},
{
"epoch": 1.78,
"grad_norm": 0.251953125,
"learning_rate": 0.0002,
"loss": 0.9802,
"step": 3120
},
{
"epoch": 1.78,
"grad_norm": 0.228515625,
"learning_rate": 0.0002,
"loss": 0.912,
"step": 3130
},
{
"epoch": 1.79,
"grad_norm": 0.251953125,
"learning_rate": 0.0002,
"loss": 0.9689,
"step": 3140
},
{
"epoch": 1.79,
"grad_norm": 0.27734375,
"learning_rate": 0.0002,
"loss": 0.8624,
"step": 3150
},
{
"epoch": 1.8,
"grad_norm": 0.2451171875,
"learning_rate": 0.0002,
"loss": 0.8598,
"step": 3160
},
{
"epoch": 1.8,
"grad_norm": 0.2294921875,
"learning_rate": 0.0002,
"loss": 0.986,
"step": 3170
},
{
"epoch": 1.81,
"grad_norm": 0.236328125,
"learning_rate": 0.0002,
"loss": 0.9528,
"step": 3180
},
{
"epoch": 1.82,
"grad_norm": 0.24609375,
"learning_rate": 0.0002,
"loss": 0.9345,
"step": 3190
},
{
"epoch": 1.82,
"grad_norm": 0.259765625,
"learning_rate": 0.0002,
"loss": 0.8386,
"step": 3200
},
{
"epoch": 1.83,
"grad_norm": 0.2275390625,
"learning_rate": 0.0002,
"loss": 0.8852,
"step": 3210
},
{
"epoch": 1.83,
"grad_norm": 0.212890625,
"learning_rate": 0.0002,
"loss": 0.9578,
"step": 3220
},
{
"epoch": 1.84,
"grad_norm": 0.23828125,
"learning_rate": 0.0002,
"loss": 0.9477,
"step": 3230
},
{
"epoch": 1.84,
"grad_norm": 0.2490234375,
"learning_rate": 0.0002,
"loss": 0.9011,
"step": 3240
},
{
"epoch": 1.85,
"grad_norm": 0.259765625,
"learning_rate": 0.0002,
"loss": 0.8294,
"step": 3250
},
{
"epoch": 1.86,
"grad_norm": 0.2412109375,
"learning_rate": 0.0002,
"loss": 0.8807,
"step": 3260
},
{
"epoch": 1.86,
"grad_norm": 0.248046875,
"learning_rate": 0.0002,
"loss": 0.972,
"step": 3270
},
{
"epoch": 1.87,
"grad_norm": 0.23828125,
"learning_rate": 0.0002,
"loss": 0.9898,
"step": 3280
},
{
"epoch": 1.87,
"grad_norm": 0.25390625,
"learning_rate": 0.0002,
"loss": 0.9307,
"step": 3290
},
{
"epoch": 1.88,
"grad_norm": 0.27734375,
"learning_rate": 0.0002,
"loss": 0.8186,
"step": 3300
},
{
"epoch": 1.88,
"grad_norm": 0.25,
"learning_rate": 0.0002,
"loss": 0.8804,
"step": 3310
},
{
"epoch": 1.89,
"grad_norm": 0.21875,
"learning_rate": 0.0002,
"loss": 0.9899,
"step": 3320
},
{
"epoch": 1.9,
"grad_norm": 0.24609375,
"learning_rate": 0.0002,
"loss": 0.9439,
"step": 3330
},
{
"epoch": 1.9,
"grad_norm": 0.23828125,
"learning_rate": 0.0002,
"loss": 0.8727,
"step": 3340
},
{
"epoch": 1.91,
"grad_norm": 0.283203125,
"learning_rate": 0.0002,
"loss": 0.862,
"step": 3350
},
{
"epoch": 1.91,
"grad_norm": 0.25,
"learning_rate": 0.0002,
"loss": 0.8661,
"step": 3360
},
{
"epoch": 1.92,
"grad_norm": 0.228515625,
"learning_rate": 0.0002,
"loss": 0.9943,
"step": 3370
},
{
"epoch": 1.92,
"grad_norm": 0.2451171875,
"learning_rate": 0.0002,
"loss": 0.9868,
"step": 3380
},
{
"epoch": 1.93,
"grad_norm": 0.244140625,
"learning_rate": 0.0002,
"loss": 0.9108,
"step": 3390
},
{
"epoch": 1.94,
"grad_norm": 0.25390625,
"learning_rate": 0.0002,
"loss": 0.8446,
"step": 3400
},
{
"epoch": 1.94,
"grad_norm": 0.25,
"learning_rate": 0.0002,
"loss": 0.89,
"step": 3410
},
{
"epoch": 1.95,
"grad_norm": 0.232421875,
"learning_rate": 0.0002,
"loss": 0.9935,
"step": 3420
},
{
"epoch": 1.95,
"grad_norm": 0.2421875,
"learning_rate": 0.0002,
"loss": 0.966,
"step": 3430
},
{
"epoch": 1.96,
"grad_norm": 0.244140625,
"learning_rate": 0.0002,
"loss": 0.9391,
"step": 3440
},
{
"epoch": 1.96,
"grad_norm": 0.265625,
"learning_rate": 0.0002,
"loss": 0.8886,
"step": 3450
},
{
"epoch": 1.97,
"grad_norm": 0.265625,
"learning_rate": 0.0002,
"loss": 0.8634,
"step": 3460
},
{
"epoch": 1.97,
"grad_norm": 0.21484375,
"learning_rate": 0.0002,
"loss": 0.9927,
"step": 3470
},
{
"epoch": 1.98,
"grad_norm": 0.2412109375,
"learning_rate": 0.0002,
"loss": 0.9513,
"step": 3480
},
{
"epoch": 1.99,
"grad_norm": 0.2373046875,
"learning_rate": 0.0002,
"loss": 0.9113,
"step": 3490
},
{
"epoch": 1.99,
"grad_norm": 0.26953125,
"learning_rate": 0.0002,
"loss": 0.8498,
"step": 3500
},
{
"epoch": 2.0,
"grad_norm": 0.255859375,
"learning_rate": 0.0002,
"loss": 0.8547,
"step": 3510
},
{
"epoch": 2.0,
"grad_norm": 0.212890625,
"learning_rate": 0.0002,
"loss": 0.9485,
"step": 3520
},
{
"epoch": 2.01,
"grad_norm": 0.24609375,
"learning_rate": 0.0002,
"loss": 0.9402,
"step": 3530
},
{
"epoch": 2.01,
"grad_norm": 0.25390625,
"learning_rate": 0.0002,
"loss": 0.9405,
"step": 3540
},
{
"epoch": 2.02,
"grad_norm": 0.29296875,
"learning_rate": 0.0002,
"loss": 0.863,
"step": 3550
},
{
"epoch": 2.03,
"grad_norm": 0.2734375,
"learning_rate": 0.0002,
"loss": 0.7701,
"step": 3560
},
{
"epoch": 2.03,
"grad_norm": 0.244140625,
"learning_rate": 0.0002,
"loss": 0.8538,
"step": 3570
},
{
"epoch": 2.04,
"grad_norm": 0.2470703125,
"learning_rate": 0.0002,
"loss": 0.9473,
"step": 3580
},
{
"epoch": 2.04,
"grad_norm": 0.25390625,
"learning_rate": 0.0002,
"loss": 0.9183,
"step": 3590
},
{
"epoch": 2.05,
"grad_norm": 0.24609375,
"learning_rate": 0.0002,
"loss": 0.8304,
"step": 3600
},
{
"epoch": 2.05,
"grad_norm": 0.3046875,
"learning_rate": 0.0002,
"loss": 0.791,
"step": 3610
},
{
"epoch": 2.06,
"grad_norm": 0.255859375,
"learning_rate": 0.0002,
"loss": 0.8672,
"step": 3620
},
{
"epoch": 2.07,
"grad_norm": 0.2470703125,
"learning_rate": 0.0002,
"loss": 0.9531,
"step": 3630
},
{
"epoch": 2.07,
"grad_norm": 0.2431640625,
"learning_rate": 0.0002,
"loss": 0.9388,
"step": 3640
},
{
"epoch": 2.08,
"grad_norm": 0.26953125,
"learning_rate": 0.0002,
"loss": 0.8646,
"step": 3650
},
{
"epoch": 2.08,
"grad_norm": 0.265625,
"learning_rate": 0.0002,
"loss": 0.7959,
"step": 3660
},
{
"epoch": 2.09,
"grad_norm": 0.2373046875,
"learning_rate": 0.0002,
"loss": 0.8864,
"step": 3670
},
{
"epoch": 2.09,
"grad_norm": 0.2373046875,
"learning_rate": 0.0002,
"loss": 0.952,
"step": 3680
},
{
"epoch": 2.1,
"grad_norm": 0.25,
"learning_rate": 0.0002,
"loss": 0.9314,
"step": 3690
},
{
"epoch": 2.11,
"grad_norm": 0.2734375,
"learning_rate": 0.0002,
"loss": 0.8895,
"step": 3700
},
{
"epoch": 2.11,
"grad_norm": 0.28515625,
"learning_rate": 0.0002,
"loss": 0.7767,
"step": 3710
},
{
"epoch": 2.12,
"grad_norm": 0.23046875,
"learning_rate": 0.0002,
"loss": 0.9263,
"step": 3720
},
{
"epoch": 2.12,
"grad_norm": 0.234375,
"learning_rate": 0.0002,
"loss": 0.9469,
"step": 3730
},
{
"epoch": 2.13,
"grad_norm": 0.2451171875,
"learning_rate": 0.0002,
"loss": 0.8629,
"step": 3740
},
{
"epoch": 2.13,
"grad_norm": 0.2734375,
"learning_rate": 0.0002,
"loss": 0.8327,
"step": 3750
},
{
"epoch": 2.14,
"grad_norm": 0.28515625,
"learning_rate": 0.0002,
"loss": 0.7878,
"step": 3760
},
{
"epoch": 2.15,
"grad_norm": 0.2265625,
"learning_rate": 0.0002,
"loss": 0.9102,
"step": 3770
},
{
"epoch": 2.15,
"grad_norm": 0.251953125,
"learning_rate": 0.0002,
"loss": 0.8878,
"step": 3780
},
{
"epoch": 2.16,
"grad_norm": 0.24609375,
"learning_rate": 0.0002,
"loss": 0.868,
"step": 3790
},
{
"epoch": 2.16,
"grad_norm": 0.26953125,
"learning_rate": 0.0002,
"loss": 0.8576,
"step": 3800
},
{
"epoch": 2.17,
"grad_norm": 0.275390625,
"learning_rate": 0.0002,
"loss": 0.7992,
"step": 3810
},
{
"epoch": 2.17,
"grad_norm": 0.2421875,
"learning_rate": 0.0002,
"loss": 0.8624,
"step": 3820
},
{
"epoch": 2.18,
"grad_norm": 0.2421875,
"learning_rate": 0.0002,
"loss": 0.9591,
"step": 3830
},
{
"epoch": 2.19,
"grad_norm": 0.248046875,
"learning_rate": 0.0002,
"loss": 0.9186,
"step": 3840
},
{
"epoch": 2.19,
"grad_norm": 0.27734375,
"learning_rate": 0.0002,
"loss": 0.8621,
"step": 3850
},
{
"epoch": 2.2,
"grad_norm": 0.294921875,
"learning_rate": 0.0002,
"loss": 0.8437,
"step": 3860
},
{
"epoch": 2.2,
"grad_norm": 0.259765625,
"learning_rate": 0.0002,
"loss": 0.8686,
"step": 3870
},
{
"epoch": 2.21,
"grad_norm": 0.2333984375,
"learning_rate": 0.0002,
"loss": 0.9382,
"step": 3880
},
{
"epoch": 2.21,
"grad_norm": 0.2490234375,
"learning_rate": 0.0002,
"loss": 0.9485,
"step": 3890
},
{
"epoch": 2.22,
"grad_norm": 0.26171875,
"learning_rate": 0.0002,
"loss": 0.8593,
"step": 3900
},
{
"epoch": 2.23,
"grad_norm": 0.27734375,
"learning_rate": 0.0002,
"loss": 0.7782,
"step": 3910
},
{
"epoch": 2.23,
"grad_norm": 0.234375,
"learning_rate": 0.0002,
"loss": 0.8612,
"step": 3920
},
{
"epoch": 2.24,
"grad_norm": 0.25,
"learning_rate": 0.0002,
"loss": 0.9716,
"step": 3930
},
{
"epoch": 2.24,
"grad_norm": 0.251953125,
"learning_rate": 0.0002,
"loss": 0.9311,
"step": 3940
},
{
"epoch": 2.25,
"grad_norm": 0.271484375,
"learning_rate": 0.0002,
"loss": 0.863,
"step": 3950
},
{
"epoch": 2.25,
"grad_norm": 0.279296875,
"learning_rate": 0.0002,
"loss": 0.7972,
"step": 3960
},
{
"epoch": 2.26,
"grad_norm": 0.248046875,
"learning_rate": 0.0002,
"loss": 0.9061,
"step": 3970
},
{
"epoch": 2.27,
"grad_norm": 0.263671875,
"learning_rate": 0.0002,
"loss": 0.9467,
"step": 3980
},
{
"epoch": 2.27,
"grad_norm": 0.263671875,
"learning_rate": 0.0002,
"loss": 0.9015,
"step": 3990
},
{
"epoch": 2.28,
"grad_norm": 0.263671875,
"learning_rate": 0.0002,
"loss": 0.8434,
"step": 4000
},
{
"epoch": 2.28,
"grad_norm": 0.28515625,
"learning_rate": 0.0002,
"loss": 0.8208,
"step": 4010
},
{
"epoch": 2.29,
"grad_norm": 0.2431640625,
"learning_rate": 0.0002,
"loss": 0.89,
"step": 4020
},
{
"epoch": 2.29,
"grad_norm": 0.25,
"learning_rate": 0.0002,
"loss": 0.9688,
"step": 4030
},
{
"epoch": 2.3,
"grad_norm": 0.2578125,
"learning_rate": 0.0002,
"loss": 0.9848,
"step": 4040
},
{
"epoch": 2.31,
"grad_norm": 0.26953125,
"learning_rate": 0.0002,
"loss": 0.8888,
"step": 4050
},
{
"epoch": 2.31,
"grad_norm": 0.298828125,
"learning_rate": 0.0002,
"loss": 0.8008,
"step": 4060
},
{
"epoch": 2.32,
"grad_norm": 0.232421875,
"learning_rate": 0.0002,
"loss": 0.8682,
"step": 4070
},
{
"epoch": 2.32,
"grad_norm": 0.2470703125,
"learning_rate": 0.0002,
"loss": 0.9475,
"step": 4080
},
{
"epoch": 2.33,
"grad_norm": 0.2490234375,
"learning_rate": 0.0002,
"loss": 0.9707,
"step": 4090
},
{
"epoch": 2.33,
"grad_norm": 0.263671875,
"learning_rate": 0.0002,
"loss": 0.856,
"step": 4100
},
{
"epoch": 2.34,
"grad_norm": 0.279296875,
"learning_rate": 0.0002,
"loss": 0.7628,
"step": 4110
},
{
"epoch": 2.34,
"grad_norm": 0.2314453125,
"learning_rate": 0.0002,
"loss": 0.8685,
"step": 4120
},
{
"epoch": 2.35,
"grad_norm": 0.2431640625,
"learning_rate": 0.0002,
"loss": 0.9101,
"step": 4130
},
{
"epoch": 2.36,
"grad_norm": 0.244140625,
"learning_rate": 0.0002,
"loss": 0.8846,
"step": 4140
},
{
"epoch": 2.36,
"grad_norm": 0.287109375,
"learning_rate": 0.0002,
"loss": 0.8627,
"step": 4150
},
{
"epoch": 2.37,
"grad_norm": 0.28515625,
"learning_rate": 0.0002,
"loss": 0.7946,
"step": 4160
},
{
"epoch": 2.37,
"grad_norm": 0.2353515625,
"learning_rate": 0.0002,
"loss": 0.8894,
"step": 4170
},
{
"epoch": 2.38,
"grad_norm": 0.23046875,
"learning_rate": 0.0002,
"loss": 0.9399,
"step": 4180
},
{
"epoch": 2.38,
"grad_norm": 0.25390625,
"learning_rate": 0.0002,
"loss": 0.8964,
"step": 4190
},
{
"epoch": 2.39,
"grad_norm": 0.259765625,
"learning_rate": 0.0002,
"loss": 0.8725,
"step": 4200
},
{
"epoch": 2.4,
"grad_norm": 0.30078125,
"learning_rate": 0.0002,
"loss": 0.8108,
"step": 4210
},
{
"epoch": 2.4,
"grad_norm": 0.2373046875,
"learning_rate": 0.0002,
"loss": 0.8888,
"step": 4220
},
{
"epoch": 2.41,
"grad_norm": 0.240234375,
"learning_rate": 0.0002,
"loss": 0.9212,
"step": 4230
},
{
"epoch": 2.41,
"grad_norm": 0.263671875,
"learning_rate": 0.0002,
"loss": 0.9241,
"step": 4240
},
{
"epoch": 2.42,
"grad_norm": 0.25390625,
"learning_rate": 0.0002,
"loss": 0.9084,
"step": 4250
},
{
"epoch": 2.42,
"grad_norm": 0.294921875,
"learning_rate": 0.0002,
"loss": 0.7934,
"step": 4260
},
{
"epoch": 2.43,
"grad_norm": 0.255859375,
"learning_rate": 0.0002,
"loss": 0.9015,
"step": 4270
},
{
"epoch": 2.44,
"grad_norm": 0.2578125,
"learning_rate": 0.0002,
"loss": 0.944,
"step": 4280
},
{
"epoch": 2.44,
"grad_norm": 0.267578125,
"learning_rate": 0.0002,
"loss": 0.9105,
"step": 4290
},
{
"epoch": 2.45,
"grad_norm": 0.26171875,
"learning_rate": 0.0002,
"loss": 0.8353,
"step": 4300
},
{
"epoch": 2.45,
"grad_norm": 0.287109375,
"learning_rate": 0.0002,
"loss": 0.7583,
"step": 4310
},
{
"epoch": 2.46,
"grad_norm": 0.2373046875,
"learning_rate": 0.0002,
"loss": 0.9053,
"step": 4320
},
{
"epoch": 2.46,
"grad_norm": 0.244140625,
"learning_rate": 0.0002,
"loss": 0.961,
"step": 4330
},
{
"epoch": 2.47,
"grad_norm": 0.25390625,
"learning_rate": 0.0002,
"loss": 0.9519,
"step": 4340
},
{
"epoch": 2.48,
"grad_norm": 0.255859375,
"learning_rate": 0.0002,
"loss": 0.9192,
"step": 4350
},
{
"epoch": 2.48,
"grad_norm": 0.29296875,
"learning_rate": 0.0002,
"loss": 0.7754,
"step": 4360
},
{
"epoch": 2.49,
"grad_norm": 0.2265625,
"learning_rate": 0.0002,
"loss": 0.8785,
"step": 4370
},
{
"epoch": 2.49,
"grad_norm": 0.251953125,
"learning_rate": 0.0002,
"loss": 0.8908,
"step": 4380
},
{
"epoch": 2.5,
"grad_norm": 0.248046875,
"learning_rate": 0.0002,
"loss": 0.9154,
"step": 4390
},
{
"epoch": 2.5,
"grad_norm": 0.2578125,
"learning_rate": 0.0002,
"loss": 0.9057,
"step": 4400
},
{
"epoch": 2.51,
"grad_norm": 0.287109375,
"learning_rate": 0.0002,
"loss": 0.8377,
"step": 4410
},
{
"epoch": 2.52,
"grad_norm": 0.2314453125,
"learning_rate": 0.0002,
"loss": 0.8707,
"step": 4420
},
{
"epoch": 2.52,
"grad_norm": 0.2431640625,
"learning_rate": 0.0002,
"loss": 0.9584,
"step": 4430
},
{
"epoch": 2.53,
"grad_norm": 0.267578125,
"learning_rate": 0.0002,
"loss": 0.9012,
"step": 4440
},
{
"epoch": 2.53,
"grad_norm": 0.25390625,
"learning_rate": 0.0002,
"loss": 0.8969,
"step": 4450
},
{
"epoch": 2.54,
"grad_norm": 0.326171875,
"learning_rate": 0.0002,
"loss": 0.8156,
"step": 4460
},
{
"epoch": 2.54,
"grad_norm": 0.2412109375,
"learning_rate": 0.0002,
"loss": 0.909,
"step": 4470
},
{
"epoch": 2.55,
"grad_norm": 0.2431640625,
"learning_rate": 0.0002,
"loss": 0.9195,
"step": 4480
},
{
"epoch": 2.56,
"grad_norm": 0.255859375,
"learning_rate": 0.0002,
"loss": 0.9104,
"step": 4490
},
{
"epoch": 2.56,
"grad_norm": 0.271484375,
"learning_rate": 0.0002,
"loss": 0.8961,
"step": 4500
},
{
"epoch": 2.57,
"grad_norm": 0.271484375,
"learning_rate": 0.0002,
"loss": 0.8028,
"step": 4510
},
{
"epoch": 2.57,
"grad_norm": 0.255859375,
"learning_rate": 0.0002,
"loss": 0.8662,
"step": 4520
},
{
"epoch": 2.58,
"grad_norm": 0.251953125,
"learning_rate": 0.0002,
"loss": 0.8866,
"step": 4530
},
{
"epoch": 2.58,
"grad_norm": 0.275390625,
"learning_rate": 0.0002,
"loss": 0.9517,
"step": 4540
},
{
"epoch": 2.59,
"grad_norm": 0.259765625,
"learning_rate": 0.0002,
"loss": 0.8347,
"step": 4550
},
{
"epoch": 2.6,
"grad_norm": 0.30078125,
"learning_rate": 0.0002,
"loss": 0.789,
"step": 4560
},
{
"epoch": 2.6,
"grad_norm": 0.224609375,
"learning_rate": 0.0002,
"loss": 0.8661,
"step": 4570
},
{
"epoch": 2.61,
"grad_norm": 0.251953125,
"learning_rate": 0.0002,
"loss": 0.9314,
"step": 4580
},
{
"epoch": 2.61,
"grad_norm": 0.26171875,
"learning_rate": 0.0002,
"loss": 0.8867,
"step": 4590
},
{
"epoch": 2.62,
"grad_norm": 0.26171875,
"learning_rate": 0.0002,
"loss": 0.9347,
"step": 4600
},
{
"epoch": 2.62,
"grad_norm": 0.2890625,
"learning_rate": 0.0002,
"loss": 0.7914,
"step": 4610
},
{
"epoch": 2.63,
"grad_norm": 0.240234375,
"learning_rate": 0.0002,
"loss": 0.8746,
"step": 4620
},
{
"epoch": 2.64,
"grad_norm": 0.234375,
"learning_rate": 0.0002,
"loss": 0.9408,
"step": 4630
},
{
"epoch": 2.64,
"grad_norm": 0.24609375,
"learning_rate": 0.0002,
"loss": 0.9056,
"step": 4640
},
{
"epoch": 2.65,
"grad_norm": 0.255859375,
"learning_rate": 0.0002,
"loss": 0.8412,
"step": 4650
},
{
"epoch": 2.65,
"grad_norm": 0.28125,
"learning_rate": 0.0002,
"loss": 0.7981,
"step": 4660
},
{
"epoch": 2.66,
"grad_norm": 0.251953125,
"learning_rate": 0.0002,
"loss": 0.9191,
"step": 4670
},
{
"epoch": 2.66,
"grad_norm": 0.251953125,
"learning_rate": 0.0002,
"loss": 0.9577,
"step": 4680
},
{
"epoch": 2.67,
"grad_norm": 0.255859375,
"learning_rate": 0.0002,
"loss": 0.8989,
"step": 4690
},
{
"epoch": 2.68,
"grad_norm": 0.259765625,
"learning_rate": 0.0002,
"loss": 0.8807,
"step": 4700
},
{
"epoch": 2.68,
"grad_norm": 0.28515625,
"learning_rate": 0.0002,
"loss": 0.7851,
"step": 4710
},
{
"epoch": 2.69,
"grad_norm": 0.2451171875,
"learning_rate": 0.0002,
"loss": 0.9028,
"step": 4720
},
{
"epoch": 2.69,
"grad_norm": 0.234375,
"learning_rate": 0.0002,
"loss": 0.9442,
"step": 4730
},
{
"epoch": 2.7,
"grad_norm": 0.267578125,
"learning_rate": 0.0002,
"loss": 0.9038,
"step": 4740
},
{
"epoch": 2.7,
"grad_norm": 0.26953125,
"learning_rate": 0.0002,
"loss": 0.8508,
"step": 4750
},
{
"epoch": 2.71,
"grad_norm": 0.2890625,
"learning_rate": 0.0002,
"loss": 0.8069,
"step": 4760
},
{
"epoch": 2.71,
"grad_norm": 0.244140625,
"learning_rate": 0.0002,
"loss": 0.8872,
"step": 4770
},
{
"epoch": 2.72,
"grad_norm": 0.251953125,
"learning_rate": 0.0002,
"loss": 1.0013,
"step": 4780
},
{
"epoch": 2.73,
"grad_norm": 0.2578125,
"learning_rate": 0.0002,
"loss": 0.8867,
"step": 4790
},
{
"epoch": 2.73,
"grad_norm": 0.265625,
"learning_rate": 0.0002,
"loss": 0.8711,
"step": 4800
},
{
"epoch": 2.74,
"grad_norm": 0.28515625,
"learning_rate": 0.0002,
"loss": 0.7886,
"step": 4810
},
{
"epoch": 2.74,
"grad_norm": 0.24609375,
"learning_rate": 0.0002,
"loss": 0.9002,
"step": 4820
},
{
"epoch": 2.75,
"grad_norm": 0.2490234375,
"learning_rate": 0.0002,
"loss": 0.9994,
"step": 4830
},
{
"epoch": 2.75,
"grad_norm": 0.2470703125,
"learning_rate": 0.0002,
"loss": 0.9225,
"step": 4840
},
{
"epoch": 2.76,
"grad_norm": 0.28515625,
"learning_rate": 0.0002,
"loss": 0.8509,
"step": 4850
},
{
"epoch": 2.77,
"grad_norm": 0.28125,
"learning_rate": 0.0002,
"loss": 0.7973,
"step": 4860
},
{
"epoch": 2.77,
"grad_norm": 0.236328125,
"learning_rate": 0.0002,
"loss": 0.8904,
"step": 4870
},
{
"epoch": 2.78,
"grad_norm": 0.2373046875,
"learning_rate": 0.0002,
"loss": 0.9189,
"step": 4880
},
{
"epoch": 2.78,
"grad_norm": 0.255859375,
"learning_rate": 0.0002,
"loss": 0.8756,
"step": 4890
},
{
"epoch": 2.79,
"grad_norm": 0.2578125,
"learning_rate": 0.0002,
"loss": 0.8989,
"step": 4900
},
{
"epoch": 2.79,
"grad_norm": 0.275390625,
"learning_rate": 0.0002,
"loss": 0.7792,
"step": 4910
},
{
"epoch": 2.8,
"grad_norm": 0.26171875,
"learning_rate": 0.0002,
"loss": 0.8501,
"step": 4920
},
{
"epoch": 2.81,
"grad_norm": 0.2451171875,
"learning_rate": 0.0002,
"loss": 0.9372,
"step": 4930
},
{
"epoch": 2.81,
"grad_norm": 0.267578125,
"learning_rate": 0.0002,
"loss": 0.9166,
"step": 4940
},
{
"epoch": 2.82,
"grad_norm": 0.2734375,
"learning_rate": 0.0002,
"loss": 0.8523,
"step": 4950
},
{
"epoch": 2.82,
"grad_norm": 0.28515625,
"learning_rate": 0.0002,
"loss": 0.777,
"step": 4960
},
{
"epoch": 2.83,
"grad_norm": 0.2333984375,
"learning_rate": 0.0002,
"loss": 0.8998,
"step": 4970
},
{
"epoch": 2.83,
"grad_norm": 0.2451171875,
"learning_rate": 0.0002,
"loss": 0.907,
"step": 4980
},
{
"epoch": 2.84,
"grad_norm": 0.251953125,
"learning_rate": 0.0002,
"loss": 0.9126,
"step": 4990
},
{
"epoch": 2.85,
"grad_norm": 0.271484375,
"learning_rate": 0.0002,
"loss": 0.8323,
"step": 5000
},
{
"epoch": 2.85,
"grad_norm": 0.302734375,
"learning_rate": 0.0002,
"loss": 0.7853,
"step": 5010
},
{
"epoch": 2.86,
"grad_norm": 0.2470703125,
"learning_rate": 0.0002,
"loss": 0.8995,
"step": 5020
},
{
"epoch": 2.86,
"grad_norm": 0.255859375,
"learning_rate": 0.0002,
"loss": 0.943,
"step": 5030
},
{
"epoch": 2.87,
"grad_norm": 0.25390625,
"learning_rate": 0.0002,
"loss": 0.8728,
"step": 5040
},
{
"epoch": 2.87,
"grad_norm": 0.251953125,
"learning_rate": 0.0002,
"loss": 0.8486,
"step": 5050
},
{
"epoch": 2.88,
"grad_norm": 0.265625,
"learning_rate": 0.0002,
"loss": 0.7424,
"step": 5060
},
{
"epoch": 2.89,
"grad_norm": 0.2255859375,
"learning_rate": 0.0002,
"loss": 0.8708,
"step": 5070
},
{
"epoch": 2.89,
"grad_norm": 0.2470703125,
"learning_rate": 0.0002,
"loss": 0.9878,
"step": 5080
},
{
"epoch": 2.9,
"grad_norm": 0.271484375,
"learning_rate": 0.0002,
"loss": 0.9329,
"step": 5090
},
{
"epoch": 2.9,
"grad_norm": 0.287109375,
"learning_rate": 0.0002,
"loss": 0.8871,
"step": 5100
},
{
"epoch": 2.91,
"grad_norm": 0.310546875,
"learning_rate": 0.0002,
"loss": 0.7713,
"step": 5110
},
{
"epoch": 2.91,
"grad_norm": 0.2490234375,
"learning_rate": 0.0002,
"loss": 0.8863,
"step": 5120
},
{
"epoch": 2.92,
"grad_norm": 0.2451171875,
"learning_rate": 0.0002,
"loss": 0.98,
"step": 5130
},
{
"epoch": 2.93,
"grad_norm": 0.26171875,
"learning_rate": 0.0002,
"loss": 0.8952,
"step": 5140
},
{
"epoch": 2.93,
"grad_norm": 0.275390625,
"learning_rate": 0.0002,
"loss": 0.8547,
"step": 5150
},
{
"epoch": 2.94,
"grad_norm": 0.27734375,
"learning_rate": 0.0002,
"loss": 0.7906,
"step": 5160
},
{
"epoch": 2.94,
"grad_norm": 0.2333984375,
"learning_rate": 0.0002,
"loss": 0.8786,
"step": 5170
},
{
"epoch": 2.95,
"grad_norm": 0.25,
"learning_rate": 0.0002,
"loss": 0.9914,
"step": 5180
},
{
"epoch": 2.95,
"grad_norm": 0.26171875,
"learning_rate": 0.0002,
"loss": 0.9002,
"step": 5190
},
{
"epoch": 2.96,
"grad_norm": 0.271484375,
"learning_rate": 0.0002,
"loss": 0.8754,
"step": 5200
},
{
"epoch": 2.97,
"grad_norm": 0.283203125,
"learning_rate": 0.0002,
"loss": 0.7862,
"step": 5210
},
{
"epoch": 2.97,
"grad_norm": 0.2412109375,
"learning_rate": 0.0002,
"loss": 0.853,
"step": 5220
},
{
"epoch": 2.98,
"grad_norm": 0.2412109375,
"learning_rate": 0.0002,
"loss": 0.9762,
"step": 5230
},
{
"epoch": 2.98,
"grad_norm": 0.25390625,
"learning_rate": 0.0002,
"loss": 0.9244,
"step": 5240
},
{
"epoch": 2.99,
"grad_norm": 0.26953125,
"learning_rate": 0.0002,
"loss": 0.871,
"step": 5250
},
{
"epoch": 2.99,
"grad_norm": 0.302734375,
"learning_rate": 0.0002,
"loss": 0.8212,
"step": 5260
},
{
"epoch": 3.0,
"grad_norm": 0.298828125,
"learning_rate": 0.0002,
"loss": 0.8614,
"step": 5270
},
{
"epoch": 3.01,
"grad_norm": 0.2333984375,
"learning_rate": 0.0002,
"loss": 0.9362,
"step": 5280
},
{
"epoch": 3.01,
"grad_norm": 0.25390625,
"learning_rate": 0.0002,
"loss": 0.9234,
"step": 5290
},
{
"epoch": 3.02,
"grad_norm": 0.263671875,
"learning_rate": 0.0002,
"loss": 0.7922,
"step": 5300
},
{
"epoch": 3.02,
"grad_norm": 0.265625,
"learning_rate": 0.0002,
"loss": 0.8053,
"step": 5310
},
{
"epoch": 3.03,
"grad_norm": 0.326171875,
"learning_rate": 0.0002,
"loss": 0.7281,
"step": 5320
},
{
"epoch": 3.03,
"grad_norm": 0.255859375,
"learning_rate": 0.0002,
"loss": 0.9209,
"step": 5330
},
{
"epoch": 3.04,
"grad_norm": 0.27734375,
"learning_rate": 0.0002,
"loss": 0.891,
"step": 5340
},
{
"epoch": 3.04,
"grad_norm": 0.275390625,
"learning_rate": 0.0002,
"loss": 0.8865,
"step": 5350
},
{
"epoch": 3.05,
"grad_norm": 0.287109375,
"learning_rate": 0.0002,
"loss": 0.8166,
"step": 5360
},
{
"epoch": 3.06,
"grad_norm": 0.30859375,
"learning_rate": 0.0002,
"loss": 0.7621,
"step": 5370
},
{
"epoch": 3.06,
"grad_norm": 0.255859375,
"learning_rate": 0.0002,
"loss": 0.8545,
"step": 5380
},
{
"epoch": 3.07,
"grad_norm": 0.26171875,
"learning_rate": 0.0002,
"loss": 0.8467,
"step": 5390
},
{
"epoch": 3.07,
"grad_norm": 0.26953125,
"learning_rate": 0.0002,
"loss": 0.8838,
"step": 5400
},
{
"epoch": 3.08,
"grad_norm": 0.27734375,
"learning_rate": 0.0002,
"loss": 0.7687,
"step": 5410
},
{
"epoch": 3.08,
"grad_norm": 0.3203125,
"learning_rate": 0.0002,
"loss": 0.6956,
"step": 5420
},
{
"epoch": 3.09,
"grad_norm": 0.255859375,
"learning_rate": 0.0002,
"loss": 0.9383,
"step": 5430
},
{
"epoch": 3.1,
"grad_norm": 0.2578125,
"learning_rate": 0.0002,
"loss": 0.9487,
"step": 5440
},
{
"epoch": 3.1,
"grad_norm": 0.287109375,
"learning_rate": 0.0002,
"loss": 0.859,
"step": 5450
},
{
"epoch": 3.11,
"grad_norm": 0.279296875,
"learning_rate": 0.0002,
"loss": 0.8361,
"step": 5460
},
{
"epoch": 3.11,
"grad_norm": 0.31640625,
"learning_rate": 0.0002,
"loss": 0.7345,
"step": 5470
},
{
"epoch": 3.12,
"grad_norm": 0.251953125,
"learning_rate": 0.0002,
"loss": 0.8926,
"step": 5480
},
{
"epoch": 3.12,
"grad_norm": 0.275390625,
"learning_rate": 0.0002,
"loss": 0.8956,
"step": 5490
},
{
"epoch": 3.13,
"grad_norm": 0.265625,
"learning_rate": 0.0002,
"loss": 0.924,
"step": 5500
},
{
"epoch": 3.14,
"grad_norm": 0.296875,
"learning_rate": 0.0002,
"loss": 0.8312,
"step": 5510
},
{
"epoch": 3.14,
"grad_norm": 0.322265625,
"learning_rate": 0.0002,
"loss": 0.7387,
"step": 5520
},
{
"epoch": 3.15,
"grad_norm": 0.279296875,
"learning_rate": 0.0002,
"loss": 0.9535,
"step": 5530
},
{
"epoch": 3.15,
"grad_norm": 0.263671875,
"learning_rate": 0.0002,
"loss": 0.8703,
"step": 5540
},
{
"epoch": 3.16,
"grad_norm": 0.275390625,
"learning_rate": 0.0002,
"loss": 0.8652,
"step": 5550
},
{
"epoch": 3.16,
"grad_norm": 0.28125,
"learning_rate": 0.0002,
"loss": 0.8261,
"step": 5560
},
{
"epoch": 3.17,
"grad_norm": 0.330078125,
"learning_rate": 0.0002,
"loss": 0.7514,
"step": 5570
},
{
"epoch": 3.18,
"grad_norm": 0.25,
"learning_rate": 0.0002,
"loss": 0.8952,
"step": 5580
},
{
"epoch": 3.18,
"grad_norm": 0.251953125,
"learning_rate": 0.0002,
"loss": 0.9366,
"step": 5590
},
{
"epoch": 3.19,
"grad_norm": 0.251953125,
"learning_rate": 0.0002,
"loss": 0.84,
"step": 5600
},
{
"epoch": 3.19,
"grad_norm": 0.2890625,
"learning_rate": 0.0002,
"loss": 0.8177,
"step": 5610
},
{
"epoch": 3.2,
"grad_norm": 0.296875,
"learning_rate": 0.0002,
"loss": 0.7599,
"step": 5620
},
{
"epoch": 3.2,
"grad_norm": 0.267578125,
"learning_rate": 0.0002,
"loss": 0.8695,
"step": 5630
},
{
"epoch": 3.21,
"grad_norm": 0.26953125,
"learning_rate": 0.0002,
"loss": 0.9257,
"step": 5640
},
{
"epoch": 3.22,
"grad_norm": 0.279296875,
"learning_rate": 0.0002,
"loss": 0.8631,
"step": 5650
},
{
"epoch": 3.22,
"grad_norm": 0.306640625,
"learning_rate": 0.0002,
"loss": 0.8063,
"step": 5660
},
{
"epoch": 3.23,
"grad_norm": 0.29296875,
"learning_rate": 0.0002,
"loss": 0.7112,
"step": 5670
},
{
"epoch": 3.23,
"grad_norm": 0.2578125,
"learning_rate": 0.0002,
"loss": 0.8807,
"step": 5680
},
{
"epoch": 3.24,
"grad_norm": 0.26953125,
"learning_rate": 0.0002,
"loss": 0.8928,
"step": 5690
},
{
"epoch": 3.24,
"grad_norm": 0.28125,
"learning_rate": 0.0002,
"loss": 0.8631,
"step": 5700
},
{
"epoch": 3.25,
"grad_norm": 0.29296875,
"learning_rate": 0.0002,
"loss": 0.8192,
"step": 5710
},
{
"epoch": 3.26,
"grad_norm": 0.337890625,
"learning_rate": 0.0002,
"loss": 0.7731,
"step": 5720
},
{
"epoch": 3.26,
"grad_norm": 0.263671875,
"learning_rate": 0.0002,
"loss": 0.8969,
"step": 5730
},
{
"epoch": 3.27,
"grad_norm": 0.275390625,
"learning_rate": 0.0002,
"loss": 0.9397,
"step": 5740
},
{
"epoch": 3.27,
"grad_norm": 0.263671875,
"learning_rate": 0.0002,
"loss": 0.8819,
"step": 5750
},
{
"epoch": 3.28,
"grad_norm": 0.30859375,
"learning_rate": 0.0002,
"loss": 0.8199,
"step": 5760
},
{
"epoch": 3.28,
"grad_norm": 0.2890625,
"learning_rate": 0.0002,
"loss": 0.7295,
"step": 5770
},
{
"epoch": 3.29,
"grad_norm": 0.26171875,
"learning_rate": 0.0002,
"loss": 0.922,
"step": 5780
},
{
"epoch": 3.3,
"grad_norm": 0.283203125,
"learning_rate": 0.0002,
"loss": 0.8984,
"step": 5790
},
{
"epoch": 3.3,
"grad_norm": 0.265625,
"learning_rate": 0.0002,
"loss": 0.8534,
"step": 5800
},
{
"epoch": 3.31,
"grad_norm": 0.291015625,
"learning_rate": 0.0002,
"loss": 0.7722,
"step": 5810
},
{
"epoch": 3.31,
"grad_norm": 0.30859375,
"learning_rate": 0.0002,
"loss": 0.7367,
"step": 5820
},
{
"epoch": 3.32,
"grad_norm": 0.25390625,
"learning_rate": 0.0002,
"loss": 0.8646,
"step": 5830
},
{
"epoch": 3.32,
"grad_norm": 0.2734375,
"learning_rate": 0.0002,
"loss": 0.896,
"step": 5840
},
{
"epoch": 3.33,
"grad_norm": 0.26953125,
"learning_rate": 0.0002,
"loss": 0.8458,
"step": 5850
},
{
"epoch": 3.34,
"grad_norm": 0.275390625,
"learning_rate": 0.0002,
"loss": 0.763,
"step": 5860
},
{
"epoch": 3.34,
"grad_norm": 0.330078125,
"learning_rate": 0.0002,
"loss": 0.7361,
"step": 5870
},
{
"epoch": 3.35,
"grad_norm": 0.259765625,
"learning_rate": 0.0002,
"loss": 0.9133,
"step": 5880
},
{
"epoch": 3.35,
"grad_norm": 0.2734375,
"learning_rate": 0.0002,
"loss": 0.892,
"step": 5890
},
{
"epoch": 3.36,
"grad_norm": 0.265625,
"learning_rate": 0.0002,
"loss": 0.856,
"step": 5900
},
{
"epoch": 3.36,
"grad_norm": 0.30859375,
"learning_rate": 0.0002,
"loss": 0.8,
"step": 5910
},
{
"epoch": 3.37,
"grad_norm": 0.314453125,
"learning_rate": 0.0002,
"loss": 0.7611,
"step": 5920
},
{
"epoch": 3.38,
"grad_norm": 0.259765625,
"learning_rate": 0.0002,
"loss": 0.8691,
"step": 5930
},
{
"epoch": 3.38,
"grad_norm": 0.287109375,
"learning_rate": 0.0002,
"loss": 0.8909,
"step": 5940
},
{
"epoch": 3.39,
"grad_norm": 0.291015625,
"learning_rate": 0.0002,
"loss": 0.8433,
"step": 5950
},
{
"epoch": 3.39,
"grad_norm": 0.3125,
"learning_rate": 0.0002,
"loss": 0.8023,
"step": 5960
},
{
"epoch": 3.4,
"grad_norm": 0.318359375,
"learning_rate": 0.0002,
"loss": 0.7657,
"step": 5970
},
{
"epoch": 3.4,
"grad_norm": 0.251953125,
"learning_rate": 0.0002,
"loss": 0.8957,
"step": 5980
},
{
"epoch": 3.41,
"grad_norm": 0.263671875,
"learning_rate": 0.0002,
"loss": 0.874,
"step": 5990
},
{
"epoch": 3.41,
"grad_norm": 0.2890625,
"learning_rate": 0.0002,
"loss": 0.8756,
"step": 6000
},
{
"epoch": 3.42,
"grad_norm": 0.28515625,
"learning_rate": 0.0002,
"loss": 0.8275,
"step": 6010
},
{
"epoch": 3.43,
"grad_norm": 0.333984375,
"learning_rate": 0.0002,
"loss": 0.7317,
"step": 6020
},
{
"epoch": 3.43,
"grad_norm": 0.265625,
"learning_rate": 0.0002,
"loss": 0.927,
"step": 6030
},
{
"epoch": 3.44,
"grad_norm": 0.271484375,
"learning_rate": 0.0002,
"loss": 0.8982,
"step": 6040
},
{
"epoch": 3.44,
"grad_norm": 0.296875,
"learning_rate": 0.0002,
"loss": 0.8786,
"step": 6050
},
{
"epoch": 3.45,
"grad_norm": 0.29296875,
"learning_rate": 0.0002,
"loss": 0.7958,
"step": 6060
},
{
"epoch": 3.45,
"grad_norm": 0.31640625,
"learning_rate": 0.0002,
"loss": 0.7241,
"step": 6070
},
{
"epoch": 3.46,
"grad_norm": 0.255859375,
"learning_rate": 0.0002,
"loss": 0.8856,
"step": 6080
},
{
"epoch": 3.47,
"grad_norm": 0.271484375,
"learning_rate": 0.0002,
"loss": 0.9041,
"step": 6090
},
{
"epoch": 3.47,
"grad_norm": 0.29296875,
"learning_rate": 0.0002,
"loss": 0.859,
"step": 6100
},
{
"epoch": 3.48,
"grad_norm": 0.28515625,
"learning_rate": 0.0002,
"loss": 0.8297,
"step": 6110
},
{
"epoch": 3.48,
"grad_norm": 0.3359375,
"learning_rate": 0.0002,
"loss": 0.7489,
"step": 6120
},
{
"epoch": 3.49,
"grad_norm": 0.263671875,
"learning_rate": 0.0002,
"loss": 0.9257,
"step": 6130
},
{
"epoch": 3.49,
"grad_norm": 0.26171875,
"learning_rate": 0.0002,
"loss": 0.8566,
"step": 6140
},
{
"epoch": 3.5,
"grad_norm": 0.283203125,
"learning_rate": 0.0002,
"loss": 0.8396,
"step": 6150
},
{
"epoch": 3.51,
"grad_norm": 0.296875,
"learning_rate": 0.0002,
"loss": 0.7752,
"step": 6160
},
{
"epoch": 3.51,
"grad_norm": 0.318359375,
"learning_rate": 0.0002,
"loss": 0.7169,
"step": 6170
},
{
"epoch": 3.52,
"grad_norm": 0.271484375,
"learning_rate": 0.0002,
"loss": 0.9082,
"step": 6180
},
{
"epoch": 3.52,
"grad_norm": 0.26171875,
"learning_rate": 0.0002,
"loss": 0.8875,
"step": 6190
},
{
"epoch": 3.53,
"grad_norm": 0.275390625,
"learning_rate": 0.0002,
"loss": 0.8636,
"step": 6200
},
{
"epoch": 3.53,
"grad_norm": 0.3046875,
"learning_rate": 0.0002,
"loss": 0.7987,
"step": 6210
},
{
"epoch": 3.54,
"grad_norm": 0.3203125,
"learning_rate": 0.0002,
"loss": 0.7453,
"step": 6220
},
{
"epoch": 3.55,
"grad_norm": 0.26171875,
"learning_rate": 0.0002,
"loss": 0.8934,
"step": 6230
},
{
"epoch": 3.55,
"grad_norm": 0.26953125,
"learning_rate": 0.0002,
"loss": 0.8645,
"step": 6240
},
{
"epoch": 3.56,
"grad_norm": 0.283203125,
"learning_rate": 0.0002,
"loss": 0.8846,
"step": 6250
},
{
"epoch": 3.56,
"grad_norm": 0.306640625,
"learning_rate": 0.0002,
"loss": 0.7788,
"step": 6260
},
{
"epoch": 3.57,
"grad_norm": 0.33203125,
"learning_rate": 0.0002,
"loss": 0.7235,
"step": 6270
},
{
"epoch": 3.57,
"grad_norm": 0.251953125,
"learning_rate": 0.0002,
"loss": 0.9174,
"step": 6280
},
{
"epoch": 3.58,
"grad_norm": 0.27734375,
"learning_rate": 0.0002,
"loss": 0.9251,
"step": 6290
},
{
"epoch": 3.59,
"grad_norm": 0.310546875,
"learning_rate": 0.0002,
"loss": 0.8728,
"step": 6300
},
{
"epoch": 3.59,
"grad_norm": 0.291015625,
"learning_rate": 0.0002,
"loss": 0.8223,
"step": 6310
},
{
"epoch": 3.6,
"grad_norm": 0.322265625,
"learning_rate": 0.0002,
"loss": 0.7791,
"step": 6320
},
{
"epoch": 3.6,
"grad_norm": 0.279296875,
"learning_rate": 0.0002,
"loss": 0.8652,
"step": 6330
},
{
"epoch": 3.61,
"grad_norm": 0.26171875,
"learning_rate": 0.0002,
"loss": 0.8955,
"step": 6340
},
{
"epoch": 3.61,
"grad_norm": 0.2890625,
"learning_rate": 0.0002,
"loss": 0.8949,
"step": 6350
},
{
"epoch": 3.62,
"grad_norm": 0.30078125,
"learning_rate": 0.0002,
"loss": 0.805,
"step": 6360
},
{
"epoch": 3.63,
"grad_norm": 0.345703125,
"learning_rate": 0.0002,
"loss": 0.7788,
"step": 6370
},
{
"epoch": 3.63,
"grad_norm": 0.2578125,
"learning_rate": 0.0002,
"loss": 0.9124,
"step": 6380
},
{
"epoch": 3.64,
"grad_norm": 0.283203125,
"learning_rate": 0.0002,
"loss": 0.9254,
"step": 6390
},
{
"epoch": 3.64,
"grad_norm": 0.306640625,
"learning_rate": 0.0002,
"loss": 0.8955,
"step": 6400
},
{
"epoch": 3.65,
"grad_norm": 0.287109375,
"learning_rate": 0.0002,
"loss": 0.8114,
"step": 6410
},
{
"epoch": 3.65,
"grad_norm": 0.31640625,
"learning_rate": 0.0002,
"loss": 0.8126,
"step": 6420
},
{
"epoch": 3.66,
"grad_norm": 0.291015625,
"learning_rate": 0.0002,
"loss": 0.9208,
"step": 6430
},
{
"epoch": 3.67,
"grad_norm": 0.2734375,
"learning_rate": 0.0002,
"loss": 0.8888,
"step": 6440
},
{
"epoch": 3.67,
"grad_norm": 0.283203125,
"learning_rate": 0.0002,
"loss": 0.8866,
"step": 6450
},
{
"epoch": 3.68,
"grad_norm": 0.310546875,
"learning_rate": 0.0002,
"loss": 0.7911,
"step": 6460
},
{
"epoch": 3.68,
"grad_norm": 0.33984375,
"learning_rate": 0.0002,
"loss": 0.7326,
"step": 6470
},
{
"epoch": 3.69,
"grad_norm": 0.263671875,
"learning_rate": 0.0002,
"loss": 0.9256,
"step": 6480
},
{
"epoch": 3.69,
"grad_norm": 0.267578125,
"learning_rate": 0.0002,
"loss": 0.8476,
"step": 6490
},
{
"epoch": 3.7,
"grad_norm": 0.279296875,
"learning_rate": 0.0002,
"loss": 0.8977,
"step": 6500
},
{
"epoch": 3.71,
"grad_norm": 0.283203125,
"learning_rate": 0.0002,
"loss": 0.7827,
"step": 6510
},
{
"epoch": 3.71,
"grad_norm": 0.330078125,
"learning_rate": 0.0002,
"loss": 0.7202,
"step": 6520
},
{
"epoch": 3.72,
"grad_norm": 0.25390625,
"learning_rate": 0.0002,
"loss": 0.9272,
"step": 6530
},
{
"epoch": 3.72,
"grad_norm": 0.2734375,
"learning_rate": 0.0002,
"loss": 0.933,
"step": 6540
},
{
"epoch": 3.73,
"grad_norm": 0.29296875,
"learning_rate": 0.0002,
"loss": 0.8725,
"step": 6550
},
{
"epoch": 3.73,
"grad_norm": 0.30078125,
"learning_rate": 0.0002,
"loss": 0.8208,
"step": 6560
},
{
"epoch": 3.74,
"grad_norm": 0.3203125,
"learning_rate": 0.0002,
"loss": 0.7662,
"step": 6570
},
{
"epoch": 3.75,
"grad_norm": 0.25390625,
"learning_rate": 0.0002,
"loss": 0.9297,
"step": 6580
},
{
"epoch": 3.75,
"grad_norm": 0.259765625,
"learning_rate": 0.0002,
"loss": 0.9671,
"step": 6590
},
{
"epoch": 3.76,
"grad_norm": 0.287109375,
"learning_rate": 0.0002,
"loss": 0.8773,
"step": 6600
},
{
"epoch": 3.76,
"grad_norm": 0.2890625,
"learning_rate": 0.0002,
"loss": 0.7993,
"step": 6610
},
{
"epoch": 3.77,
"grad_norm": 0.3125,
"learning_rate": 0.0002,
"loss": 0.7291,
"step": 6620
},
{
"epoch": 3.77,
"grad_norm": 0.275390625,
"learning_rate": 0.0002,
"loss": 0.9011,
"step": 6630
},
{
"epoch": 3.78,
"grad_norm": 0.306640625,
"learning_rate": 0.0002,
"loss": 0.8893,
"step": 6640
},
{
"epoch": 3.78,
"grad_norm": 0.275390625,
"learning_rate": 0.0002,
"loss": 0.8976,
"step": 6650
},
{
"epoch": 3.79,
"grad_norm": 0.28515625,
"learning_rate": 0.0002,
"loss": 0.8049,
"step": 6660
},
{
"epoch": 3.8,
"grad_norm": 0.322265625,
"learning_rate": 0.0002,
"loss": 0.7419,
"step": 6670
},
{
"epoch": 3.8,
"grad_norm": 0.259765625,
"learning_rate": 0.0002,
"loss": 0.8809,
"step": 6680
},
{
"epoch": 3.81,
"grad_norm": 0.259765625,
"learning_rate": 0.0002,
"loss": 0.8919,
"step": 6690
},
{
"epoch": 3.81,
"grad_norm": 0.26953125,
"learning_rate": 0.0002,
"loss": 0.9121,
"step": 6700
},
{
"epoch": 3.82,
"grad_norm": 0.275390625,
"learning_rate": 0.0002,
"loss": 0.8353,
"step": 6710
},
{
"epoch": 3.82,
"grad_norm": 0.322265625,
"learning_rate": 0.0002,
"loss": 0.7508,
"step": 6720
},
{
"epoch": 3.83,
"grad_norm": 0.263671875,
"learning_rate": 0.0002,
"loss": 0.8558,
"step": 6730
},
{
"epoch": 3.84,
"grad_norm": 0.27734375,
"learning_rate": 0.0002,
"loss": 0.8868,
"step": 6740
},
{
"epoch": 3.84,
"grad_norm": 0.267578125,
"learning_rate": 0.0002,
"loss": 0.8733,
"step": 6750
},
{
"epoch": 3.85,
"grad_norm": 0.271484375,
"learning_rate": 0.0002,
"loss": 0.8081,
"step": 6760
},
{
"epoch": 3.85,
"grad_norm": 0.302734375,
"learning_rate": 0.0002,
"loss": 0.7546,
"step": 6770
},
{
"epoch": 3.86,
"grad_norm": 0.255859375,
"learning_rate": 0.0002,
"loss": 0.8928,
"step": 6780
},
{
"epoch": 3.86,
"grad_norm": 0.263671875,
"learning_rate": 0.0002,
"loss": 0.8965,
"step": 6790
},
{
"epoch": 3.87,
"grad_norm": 0.27734375,
"learning_rate": 0.0002,
"loss": 0.8731,
"step": 6800
},
{
"epoch": 3.88,
"grad_norm": 0.29296875,
"learning_rate": 0.0002,
"loss": 0.8397,
"step": 6810
},
{
"epoch": 3.88,
"grad_norm": 0.3203125,
"learning_rate": 0.0002,
"loss": 0.703,
"step": 6820
},
{
"epoch": 3.89,
"grad_norm": 0.25390625,
"learning_rate": 0.0002,
"loss": 0.8832,
"step": 6830
},
{
"epoch": 3.89,
"grad_norm": 0.26171875,
"learning_rate": 0.0002,
"loss": 0.9216,
"step": 6840
},
{
"epoch": 3.9,
"grad_norm": 0.279296875,
"learning_rate": 0.0002,
"loss": 0.8275,
"step": 6850
},
{
"epoch": 3.9,
"grad_norm": 0.28515625,
"learning_rate": 0.0002,
"loss": 0.8089,
"step": 6860
},
{
"epoch": 3.91,
"grad_norm": 0.3046875,
"learning_rate": 0.0002,
"loss": 0.7456,
"step": 6870
},
{
"epoch": 3.92,
"grad_norm": 0.26171875,
"learning_rate": 0.0002,
"loss": 0.9036,
"step": 6880
},
{
"epoch": 3.92,
"grad_norm": 0.2734375,
"learning_rate": 0.0002,
"loss": 0.9218,
"step": 6890
},
{
"epoch": 3.93,
"grad_norm": 0.2734375,
"learning_rate": 0.0002,
"loss": 0.8775,
"step": 6900
},
{
"epoch": 3.93,
"grad_norm": 0.291015625,
"learning_rate": 0.0002,
"loss": 0.8538,
"step": 6910
},
{
"epoch": 3.94,
"grad_norm": 0.30859375,
"learning_rate": 0.0002,
"loss": 0.7244,
"step": 6920
},
{
"epoch": 3.94,
"grad_norm": 0.26171875,
"learning_rate": 0.0002,
"loss": 0.9341,
"step": 6930
},
{
"epoch": 3.95,
"grad_norm": 0.28125,
"learning_rate": 0.0002,
"loss": 0.8836,
"step": 6940
},
{
"epoch": 3.96,
"grad_norm": 0.291015625,
"learning_rate": 0.0002,
"loss": 0.843,
"step": 6950
},
{
"epoch": 3.96,
"grad_norm": 0.2890625,
"learning_rate": 0.0002,
"loss": 0.7709,
"step": 6960
},
{
"epoch": 3.97,
"grad_norm": 0.33203125,
"learning_rate": 0.0002,
"loss": 0.7457,
"step": 6970
},
{
"epoch": 3.97,
"grad_norm": 0.25,
"learning_rate": 0.0002,
"loss": 0.9053,
"step": 6980
},
{
"epoch": 3.98,
"grad_norm": 0.25390625,
"learning_rate": 0.0002,
"loss": 0.9145,
"step": 6990
},
{
"epoch": 3.98,
"grad_norm": 0.283203125,
"learning_rate": 0.0002,
"loss": 0.88,
"step": 7000
},
{
"epoch": 3.99,
"grad_norm": 0.291015625,
"learning_rate": 0.0002,
"loss": 0.7534,
"step": 7010
},
{
"epoch": 4.0,
"grad_norm": 0.3046875,
"learning_rate": 0.0002,
"loss": 0.7275,
"step": 7020
},
{
"epoch": 4.0,
"grad_norm": 0.251953125,
"learning_rate": 0.0002,
"loss": 0.8967,
"step": 7030
},
{
"epoch": 4.01,
"grad_norm": 0.271484375,
"learning_rate": 0.0002,
"loss": 0.8987,
"step": 7040
},
{
"epoch": 4.01,
"grad_norm": 0.37109375,
"learning_rate": 0.0002,
"loss": 0.8529,
"step": 7050
},
{
"epoch": 4.02,
"grad_norm": 0.2734375,
"learning_rate": 0.0002,
"loss": 0.7961,
"step": 7060
},
{
"epoch": 4.02,
"grad_norm": 0.3046875,
"learning_rate": 0.0002,
"loss": 0.7495,
"step": 7070
},
{
"epoch": 4.03,
"grad_norm": 0.294921875,
"learning_rate": 0.0002,
"loss": 0.7246,
"step": 7080
},
{
"epoch": 4.04,
"grad_norm": 0.26171875,
"learning_rate": 0.0002,
"loss": 0.8711,
"step": 7090
},
{
"epoch": 4.04,
"grad_norm": 0.291015625,
"learning_rate": 0.0002,
"loss": 0.878,
"step": 7100
},
{
"epoch": 4.05,
"grad_norm": 0.388671875,
"learning_rate": 0.0002,
"loss": 0.7671,
"step": 7110
},
{
"epoch": 4.05,
"grad_norm": 0.302734375,
"learning_rate": 0.0002,
"loss": 0.7497,
"step": 7120
},
{
"epoch": 4.06,
"grad_norm": 0.33984375,
"learning_rate": 0.0002,
"loss": 0.7371,
"step": 7130
},
{
"epoch": 4.06,
"grad_norm": 0.2890625,
"learning_rate": 0.0002,
"loss": 0.8748,
"step": 7140
},
{
"epoch": 4.07,
"grad_norm": 0.28125,
"learning_rate": 0.0002,
"loss": 0.8654,
"step": 7150
},
{
"epoch": 4.08,
"grad_norm": 0.30859375,
"learning_rate": 0.0002,
"loss": 0.8225,
"step": 7160
},
{
"epoch": 4.08,
"grad_norm": 0.314453125,
"learning_rate": 0.0002,
"loss": 0.7118,
"step": 7170
},
{
"epoch": 4.09,
"grad_norm": 0.28125,
"learning_rate": 0.0002,
"loss": 0.7277,
"step": 7180
},
{
"epoch": 4.09,
"grad_norm": 0.26171875,
"learning_rate": 0.0002,
"loss": 0.881,
"step": 7190
},
{
"epoch": 4.1,
"grad_norm": 0.2890625,
"learning_rate": 0.0002,
"loss": 0.882,
"step": 7200
},
{
"epoch": 4.1,
"grad_norm": 0.30078125,
"learning_rate": 0.0002,
"loss": 0.8092,
"step": 7210
},
{
"epoch": 4.11,
"grad_norm": 0.322265625,
"learning_rate": 0.0002,
"loss": 0.7393,
"step": 7220
},
{
"epoch": 4.11,
"grad_norm": 0.283203125,
"learning_rate": 0.0002,
"loss": 0.7119,
"step": 7230
},
{
"epoch": 4.12,
"grad_norm": 0.267578125,
"learning_rate": 0.0002,
"loss": 0.8961,
"step": 7240
},
{
"epoch": 4.13,
"grad_norm": 0.298828125,
"learning_rate": 0.0002,
"loss": 0.8183,
"step": 7250
},
{
"epoch": 4.13,
"grad_norm": 0.2890625,
"learning_rate": 0.0002,
"loss": 0.7742,
"step": 7260
},
{
"epoch": 4.14,
"grad_norm": 0.32421875,
"learning_rate": 0.0002,
"loss": 0.7203,
"step": 7270
},
{
"epoch": 4.14,
"grad_norm": 0.294921875,
"learning_rate": 0.0002,
"loss": 0.7242,
"step": 7280
},
{
"epoch": 4.15,
"grad_norm": 0.275390625,
"learning_rate": 0.0002,
"loss": 0.8495,
"step": 7290
},
{
"epoch": 4.15,
"grad_norm": 0.279296875,
"learning_rate": 0.0002,
"loss": 0.8641,
"step": 7300
},
{
"epoch": 4.16,
"grad_norm": 0.3046875,
"learning_rate": 0.0002,
"loss": 0.8044,
"step": 7310
},
{
"epoch": 4.17,
"grad_norm": 0.3359375,
"learning_rate": 0.0002,
"loss": 0.747,
"step": 7320
},
{
"epoch": 4.17,
"grad_norm": 0.298828125,
"learning_rate": 0.0002,
"loss": 0.7301,
"step": 7330
},
{
"epoch": 4.18,
"grad_norm": 0.283203125,
"learning_rate": 0.0002,
"loss": 0.8736,
"step": 7340
},
{
"epoch": 4.18,
"grad_norm": 0.287109375,
"learning_rate": 0.0002,
"loss": 0.8345,
"step": 7350
},
{
"epoch": 4.19,
"grad_norm": 0.3125,
"learning_rate": 0.0002,
"loss": 0.8308,
"step": 7360
},
{
"epoch": 4.19,
"grad_norm": 0.33203125,
"learning_rate": 0.0002,
"loss": 0.7209,
"step": 7370
},
{
"epoch": 4.2,
"grad_norm": 0.287109375,
"learning_rate": 0.0002,
"loss": 0.7254,
"step": 7380
},
{
"epoch": 4.21,
"grad_norm": 0.546875,
"learning_rate": 0.0002,
"loss": 0.8572,
"step": 7390
},
{
"epoch": 4.21,
"grad_norm": 0.27734375,
"learning_rate": 0.0002,
"loss": 0.8422,
"step": 7400
},
{
"epoch": 4.22,
"grad_norm": 0.361328125,
"learning_rate": 0.0002,
"loss": 0.8001,
"step": 7410
},
{
"epoch": 4.22,
"grad_norm": 0.326171875,
"learning_rate": 0.0002,
"loss": 0.7102,
"step": 7420
},
{
"epoch": 4.23,
"grad_norm": 0.28125,
"learning_rate": 0.0002,
"loss": 0.7417,
"step": 7430
},
{
"epoch": 4.23,
"grad_norm": 0.279296875,
"learning_rate": 0.0002,
"loss": 0.8495,
"step": 7440
},
{
"epoch": 4.24,
"grad_norm": 0.279296875,
"learning_rate": 0.0002,
"loss": 0.8218,
"step": 7450
},
{
"epoch": 4.25,
"grad_norm": 0.318359375,
"learning_rate": 0.0002,
"loss": 0.802,
"step": 7460
},
{
"epoch": 4.25,
"grad_norm": 0.3203125,
"learning_rate": 0.0002,
"loss": 0.7345,
"step": 7470
},
{
"epoch": 4.26,
"grad_norm": 0.298828125,
"learning_rate": 0.0002,
"loss": 0.7279,
"step": 7480
},
{
"epoch": 4.26,
"grad_norm": 0.298828125,
"learning_rate": 0.0002,
"loss": 0.9336,
"step": 7490
},
{
"epoch": 4.27,
"grad_norm": 0.29296875,
"learning_rate": 0.0002,
"loss": 0.8473,
"step": 7500
},
{
"epoch": 4.27,
"grad_norm": 0.30078125,
"learning_rate": 0.0002,
"loss": 0.8213,
"step": 7510
},
{
"epoch": 4.28,
"grad_norm": 0.310546875,
"learning_rate": 0.0002,
"loss": 0.7491,
"step": 7520
},
{
"epoch": 4.29,
"grad_norm": 0.298828125,
"learning_rate": 0.0002,
"loss": 0.77,
"step": 7530
},
{
"epoch": 4.29,
"grad_norm": 0.291015625,
"learning_rate": 0.0002,
"loss": 0.9229,
"step": 7540
},
{
"epoch": 4.3,
"grad_norm": 0.30859375,
"learning_rate": 0.0002,
"loss": 0.8667,
"step": 7550
},
{
"epoch": 4.3,
"grad_norm": 0.310546875,
"learning_rate": 0.0002,
"loss": 0.8125,
"step": 7560
},
{
"epoch": 4.31,
"grad_norm": 0.306640625,
"learning_rate": 0.0002,
"loss": 0.774,
"step": 7570
},
{
"epoch": 4.31,
"grad_norm": 0.306640625,
"learning_rate": 0.0002,
"loss": 0.761,
"step": 7580
},
{
"epoch": 4.32,
"grad_norm": 0.279296875,
"learning_rate": 0.0002,
"loss": 0.8943,
"step": 7590
},
{
"epoch": 4.33,
"grad_norm": 0.298828125,
"learning_rate": 0.0002,
"loss": 0.8648,
"step": 7600
},
{
"epoch": 4.33,
"grad_norm": 0.3125,
"learning_rate": 0.0002,
"loss": 0.7935,
"step": 7610
},
{
"epoch": 4.34,
"grad_norm": 0.3125,
"learning_rate": 0.0002,
"loss": 0.7123,
"step": 7620
},
{
"epoch": 4.34,
"grad_norm": 0.275390625,
"learning_rate": 0.0002,
"loss": 0.7425,
"step": 7630
},
{
"epoch": 4.35,
"grad_norm": 0.296875,
"learning_rate": 0.0002,
"loss": 0.8549,
"step": 7640
},
{
"epoch": 4.35,
"grad_norm": 0.296875,
"learning_rate": 0.0002,
"loss": 0.876,
"step": 7650
},
{
"epoch": 4.36,
"grad_norm": 0.310546875,
"learning_rate": 0.0002,
"loss": 0.8254,
"step": 7660
},
{
"epoch": 4.37,
"grad_norm": 0.328125,
"learning_rate": 0.0002,
"loss": 0.7379,
"step": 7670
},
{
"epoch": 4.37,
"grad_norm": 0.27734375,
"learning_rate": 0.0002,
"loss": 0.7345,
"step": 7680
},
{
"epoch": 4.38,
"grad_norm": 0.283203125,
"learning_rate": 0.0002,
"loss": 0.9001,
"step": 7690
},
{
"epoch": 4.38,
"grad_norm": 0.287109375,
"learning_rate": 0.0002,
"loss": 0.8214,
"step": 7700
},
{
"epoch": 4.39,
"grad_norm": 0.30078125,
"learning_rate": 0.0002,
"loss": 0.7806,
"step": 7710
},
{
"epoch": 4.39,
"grad_norm": 0.322265625,
"learning_rate": 0.0002,
"loss": 0.7122,
"step": 7720
},
{
"epoch": 4.4,
"grad_norm": 0.2890625,
"learning_rate": 0.0002,
"loss": 0.7645,
"step": 7730
},
{
"epoch": 4.41,
"grad_norm": 0.29296875,
"learning_rate": 0.0002,
"loss": 0.9018,
"step": 7740
},
{
"epoch": 4.41,
"grad_norm": 0.298828125,
"learning_rate": 0.0002,
"loss": 0.8552,
"step": 7750
},
{
"epoch": 4.42,
"grad_norm": 0.30859375,
"learning_rate": 0.0002,
"loss": 0.8327,
"step": 7760
},
{
"epoch": 4.42,
"grad_norm": 0.3203125,
"learning_rate": 0.0002,
"loss": 0.7828,
"step": 7770
},
{
"epoch": 4.43,
"grad_norm": 0.306640625,
"learning_rate": 0.0002,
"loss": 0.716,
"step": 7780
},
{
"epoch": 4.43,
"grad_norm": 0.275390625,
"learning_rate": 0.0002,
"loss": 0.911,
"step": 7790
},
{
"epoch": 4.44,
"grad_norm": 0.29296875,
"learning_rate": 0.0002,
"loss": 0.86,
"step": 7800
},
{
"epoch": 4.45,
"grad_norm": 0.30078125,
"learning_rate": 0.0002,
"loss": 0.8629,
"step": 7810
},
{
"epoch": 4.45,
"grad_norm": 0.310546875,
"learning_rate": 0.0002,
"loss": 0.7469,
"step": 7820
},
{
"epoch": 4.46,
"grad_norm": 0.287109375,
"learning_rate": 0.0002,
"loss": 0.7177,
"step": 7830
},
{
"epoch": 4.46,
"grad_norm": 0.2890625,
"learning_rate": 0.0002,
"loss": 0.9086,
"step": 7840
},
{
"epoch": 4.47,
"grad_norm": 0.34765625,
"learning_rate": 0.0002,
"loss": 0.8792,
"step": 7850
},
{
"epoch": 4.47,
"grad_norm": 0.314453125,
"learning_rate": 0.0002,
"loss": 0.8517,
"step": 7860
},
{
"epoch": 4.48,
"grad_norm": 0.353515625,
"learning_rate": 0.0002,
"loss": 0.7711,
"step": 7870
},
{
"epoch": 4.48,
"grad_norm": 0.294921875,
"learning_rate": 0.0002,
"loss": 0.7412,
"step": 7880
},
{
"epoch": 4.49,
"grad_norm": 0.2734375,
"learning_rate": 0.0002,
"loss": 0.8807,
"step": 7890
},
{
"epoch": 4.5,
"grad_norm": 0.341796875,
"learning_rate": 0.0002,
"loss": 0.8501,
"step": 7900
},
{
"epoch": 4.5,
"grad_norm": 0.30859375,
"learning_rate": 0.0002,
"loss": 0.7968,
"step": 7910
},
{
"epoch": 4.51,
"grad_norm": 0.314453125,
"learning_rate": 0.0002,
"loss": 0.7332,
"step": 7920
},
{
"epoch": 4.51,
"grad_norm": 0.298828125,
"learning_rate": 0.0002,
"loss": 0.7492,
"step": 7930
},
{
"epoch": 4.52,
"grad_norm": 0.28515625,
"learning_rate": 0.0002,
"loss": 0.87,
"step": 7940
},
{
"epoch": 4.52,
"grad_norm": 0.326171875,
"learning_rate": 0.0002,
"loss": 0.8625,
"step": 7950
},
{
"epoch": 4.53,
"grad_norm": 0.296875,
"learning_rate": 0.0002,
"loss": 0.8603,
"step": 7960
},
{
"epoch": 4.54,
"grad_norm": 0.33984375,
"learning_rate": 0.0002,
"loss": 0.8038,
"step": 7970
},
{
"epoch": 4.54,
"grad_norm": 0.32421875,
"learning_rate": 0.0002,
"loss": 0.7151,
"step": 7980
},
{
"epoch": 4.55,
"grad_norm": 0.287109375,
"learning_rate": 0.0002,
"loss": 0.8531,
"step": 7990
},
{
"epoch": 4.55,
"grad_norm": 0.296875,
"learning_rate": 0.0002,
"loss": 0.8473,
"step": 8000
},
{
"epoch": 4.56,
"grad_norm": 0.337890625,
"learning_rate": 0.0002,
"loss": 0.8129,
"step": 8010
},
{
"epoch": 4.56,
"grad_norm": 0.337890625,
"learning_rate": 0.0002,
"loss": 0.7445,
"step": 8020
},
{
"epoch": 4.57,
"grad_norm": 0.279296875,
"learning_rate": 0.0002,
"loss": 0.7548,
"step": 8030
},
{
"epoch": 4.58,
"grad_norm": 0.29296875,
"learning_rate": 0.0002,
"loss": 0.8742,
"step": 8040
},
{
"epoch": 4.58,
"grad_norm": 0.27734375,
"learning_rate": 0.0002,
"loss": 0.9106,
"step": 8050
},
{
"epoch": 4.59,
"grad_norm": 0.3203125,
"learning_rate": 0.0002,
"loss": 0.8259,
"step": 8060
},
{
"epoch": 4.59,
"grad_norm": 0.37890625,
"learning_rate": 0.0002,
"loss": 0.7902,
"step": 8070
},
{
"epoch": 4.6,
"grad_norm": 0.318359375,
"learning_rate": 0.0002,
"loss": 0.7934,
"step": 8080
},
{
"epoch": 4.6,
"grad_norm": 0.283203125,
"learning_rate": 0.0002,
"loss": 0.8658,
"step": 8090
},
{
"epoch": 4.61,
"grad_norm": 0.29296875,
"learning_rate": 0.0002,
"loss": 0.862,
"step": 8100
},
{
"epoch": 4.62,
"grad_norm": 0.310546875,
"learning_rate": 0.0002,
"loss": 0.8364,
"step": 8110
},
{
"epoch": 4.62,
"grad_norm": 0.302734375,
"learning_rate": 0.0002,
"loss": 0.7066,
"step": 8120
},
{
"epoch": 4.63,
"grad_norm": 0.2890625,
"learning_rate": 0.0002,
"loss": 0.7342,
"step": 8130
},
{
"epoch": 4.63,
"grad_norm": 0.2890625,
"learning_rate": 0.0002,
"loss": 0.9144,
"step": 8140
},
{
"epoch": 4.64,
"grad_norm": 0.283203125,
"learning_rate": 0.0002,
"loss": 0.8674,
"step": 8150
},
{
"epoch": 4.64,
"grad_norm": 0.287109375,
"learning_rate": 0.0002,
"loss": 0.8151,
"step": 8160
},
{
"epoch": 4.65,
"grad_norm": 0.33203125,
"learning_rate": 0.0002,
"loss": 0.7233,
"step": 8170
},
{
"epoch": 4.66,
"grad_norm": 0.318359375,
"learning_rate": 0.0002,
"loss": 0.7736,
"step": 8180
},
{
"epoch": 4.66,
"grad_norm": 0.267578125,
"learning_rate": 0.0002,
"loss": 0.89,
"step": 8190
},
{
"epoch": 4.67,
"grad_norm": 0.291015625,
"learning_rate": 0.0002,
"loss": 0.8526,
"step": 8200
},
{
"epoch": 4.67,
"grad_norm": 0.302734375,
"learning_rate": 0.0002,
"loss": 0.83,
"step": 8210
},
{
"epoch": 4.68,
"grad_norm": 0.337890625,
"learning_rate": 0.0002,
"loss": 0.7225,
"step": 8220
},
{
"epoch": 4.68,
"grad_norm": 0.283203125,
"learning_rate": 0.0002,
"loss": 0.7638,
"step": 8230
},
{
"epoch": 4.69,
"grad_norm": 0.271484375,
"learning_rate": 0.0002,
"loss": 0.8994,
"step": 8240
},
{
"epoch": 4.7,
"grad_norm": 0.287109375,
"learning_rate": 0.0002,
"loss": 0.8524,
"step": 8250
},
{
"epoch": 4.7,
"grad_norm": 0.29296875,
"learning_rate": 0.0002,
"loss": 0.8493,
"step": 8260
},
{
"epoch": 4.71,
"grad_norm": 0.29296875,
"learning_rate": 0.0002,
"loss": 0.7478,
"step": 8270
},
{
"epoch": 4.71,
"grad_norm": 0.30859375,
"learning_rate": 0.0002,
"loss": 0.7344,
"step": 8280
},
{
"epoch": 4.72,
"grad_norm": 0.283203125,
"learning_rate": 0.0002,
"loss": 0.8714,
"step": 8290
},
{
"epoch": 4.72,
"grad_norm": 0.279296875,
"learning_rate": 0.0002,
"loss": 0.8932,
"step": 8300
},
{
"epoch": 4.73,
"grad_norm": 0.30859375,
"learning_rate": 0.0002,
"loss": 0.8411,
"step": 8310
},
{
"epoch": 4.74,
"grad_norm": 0.318359375,
"learning_rate": 0.0002,
"loss": 0.7606,
"step": 8320
},
{
"epoch": 4.74,
"grad_norm": 0.283203125,
"learning_rate": 0.0002,
"loss": 0.7383,
"step": 8330
},
{
"epoch": 4.75,
"grad_norm": 0.287109375,
"learning_rate": 0.0002,
"loss": 0.8935,
"step": 8340
},
{
"epoch": 4.75,
"grad_norm": 0.27734375,
"learning_rate": 0.0002,
"loss": 0.885,
"step": 8350
},
{
"epoch": 4.76,
"grad_norm": 0.322265625,
"learning_rate": 0.0002,
"loss": 0.8561,
"step": 8360
},
{
"epoch": 4.76,
"grad_norm": 0.337890625,
"learning_rate": 0.0002,
"loss": 0.7718,
"step": 8370
},
{
"epoch": 4.77,
"grad_norm": 0.349609375,
"learning_rate": 0.0002,
"loss": 0.7581,
"step": 8380
},
{
"epoch": 4.78,
"grad_norm": 0.279296875,
"learning_rate": 0.0002,
"loss": 0.8594,
"step": 8390
},
{
"epoch": 4.78,
"grad_norm": 0.298828125,
"learning_rate": 0.0002,
"loss": 0.88,
"step": 8400
},
{
"epoch": 4.79,
"grad_norm": 0.322265625,
"learning_rate": 0.0002,
"loss": 0.8275,
"step": 8410
},
{
"epoch": 4.79,
"grad_norm": 0.31640625,
"learning_rate": 0.0002,
"loss": 0.7273,
"step": 8420
},
{
"epoch": 4.8,
"grad_norm": 0.294921875,
"learning_rate": 0.0002,
"loss": 0.7681,
"step": 8430
},
{
"epoch": 4.8,
"grad_norm": 0.275390625,
"learning_rate": 0.0002,
"loss": 0.9001,
"step": 8440
},
{
"epoch": 4.81,
"grad_norm": 0.306640625,
"learning_rate": 0.0002,
"loss": 0.8591,
"step": 8450
},
{
"epoch": 4.82,
"grad_norm": 0.29296875,
"learning_rate": 0.0002,
"loss": 0.8366,
"step": 8460
},
{
"epoch": 4.82,
"grad_norm": 0.322265625,
"learning_rate": 0.0002,
"loss": 0.7378,
"step": 8470
},
{
"epoch": 4.83,
"grad_norm": 0.29296875,
"learning_rate": 0.0002,
"loss": 0.765,
"step": 8480
},
{
"epoch": 4.83,
"grad_norm": 0.298828125,
"learning_rate": 0.0002,
"loss": 0.8969,
"step": 8490
},
{
"epoch": 4.84,
"grad_norm": 0.283203125,
"learning_rate": 0.0002,
"loss": 0.8693,
"step": 8500
},
{
"epoch": 4.84,
"grad_norm": 0.32421875,
"learning_rate": 0.0002,
"loss": 0.8613,
"step": 8510
},
{
"epoch": 4.85,
"grad_norm": 0.33984375,
"learning_rate": 0.0002,
"loss": 0.779,
"step": 8520
},
{
"epoch": 4.85,
"grad_norm": 0.29296875,
"learning_rate": 0.0002,
"loss": 0.7373,
"step": 8530
},
{
"epoch": 4.86,
"grad_norm": 0.263671875,
"learning_rate": 0.0002,
"loss": 0.8844,
"step": 8540
},
{
"epoch": 4.87,
"grad_norm": 0.30078125,
"learning_rate": 0.0002,
"loss": 0.851,
"step": 8550
},
{
"epoch": 4.87,
"grad_norm": 0.3125,
"learning_rate": 0.0002,
"loss": 0.8388,
"step": 8560
},
{
"epoch": 4.88,
"grad_norm": 0.33984375,
"learning_rate": 0.0002,
"loss": 0.8049,
"step": 8570
},
{
"epoch": 4.88,
"grad_norm": 0.29296875,
"learning_rate": 0.0002,
"loss": 0.728,
"step": 8580
},
{
"epoch": 4.89,
"grad_norm": 0.3046875,
"learning_rate": 0.0002,
"loss": 0.85,
"step": 8590
},
{
"epoch": 4.89,
"grad_norm": 0.298828125,
"learning_rate": 0.0002,
"loss": 0.8518,
"step": 8600
},
{
"epoch": 4.9,
"grad_norm": 0.306640625,
"learning_rate": 0.0002,
"loss": 0.8337,
"step": 8610
},
{
"epoch": 4.91,
"grad_norm": 0.349609375,
"learning_rate": 0.0002,
"loss": 0.7444,
"step": 8620
},
{
"epoch": 4.91,
"grad_norm": 0.2890625,
"learning_rate": 0.0002,
"loss": 0.7731,
"step": 8630
},
{
"epoch": 4.92,
"grad_norm": 0.275390625,
"learning_rate": 0.0002,
"loss": 0.8802,
"step": 8640
},
{
"epoch": 4.92,
"grad_norm": 0.279296875,
"learning_rate": 0.0002,
"loss": 0.9098,
"step": 8650
},
{
"epoch": 4.93,
"grad_norm": 0.30859375,
"learning_rate": 0.0002,
"loss": 0.834,
"step": 8660
},
{
"epoch": 4.93,
"grad_norm": 0.32421875,
"learning_rate": 0.0002,
"loss": 0.7543,
"step": 8670
},
{
"epoch": 4.94,
"grad_norm": 0.302734375,
"learning_rate": 0.0002,
"loss": 0.7567,
"step": 8680
},
{
"epoch": 4.95,
"grad_norm": 0.28515625,
"learning_rate": 0.0002,
"loss": 0.8749,
"step": 8690
},
{
"epoch": 4.95,
"grad_norm": 0.296875,
"learning_rate": 0.0002,
"loss": 0.902,
"step": 8700
},
{
"epoch": 4.96,
"grad_norm": 0.298828125,
"learning_rate": 0.0002,
"loss": 0.8387,
"step": 8710
},
{
"epoch": 4.96,
"grad_norm": 0.33203125,
"learning_rate": 0.0002,
"loss": 0.7634,
"step": 8720
},
{
"epoch": 4.97,
"grad_norm": 0.291015625,
"learning_rate": 0.0002,
"loss": 0.7561,
"step": 8730
},
{
"epoch": 4.97,
"grad_norm": 0.275390625,
"learning_rate": 0.0002,
"loss": 0.8951,
"step": 8740
},
{
"epoch": 4.98,
"grad_norm": 0.29296875,
"learning_rate": 0.0002,
"loss": 0.8968,
"step": 8750
},
{
"epoch": 4.99,
"grad_norm": 0.302734375,
"learning_rate": 0.0002,
"loss": 0.8528,
"step": 8760
},
{
"epoch": 4.99,
"grad_norm": 0.330078125,
"learning_rate": 0.0002,
"loss": 0.7364,
"step": 8770
},
{
"epoch": 5.0,
"grad_norm": 0.298828125,
"learning_rate": 0.0002,
"loss": 0.7572,
"step": 8780
},
{
"epoch": 5.0,
"grad_norm": 0.2734375,
"learning_rate": 0.0002,
"loss": 0.8193,
"step": 8790
},
{
"epoch": 5.01,
"grad_norm": 0.296875,
"learning_rate": 0.0002,
"loss": 0.8147,
"step": 8800
},
{
"epoch": 5.01,
"grad_norm": 0.298828125,
"learning_rate": 0.0002,
"loss": 0.814,
"step": 8810
},
{
"epoch": 5.02,
"grad_norm": 0.3359375,
"learning_rate": 0.0002,
"loss": 0.7789,
"step": 8820
},
{
"epoch": 5.03,
"grad_norm": 0.361328125,
"learning_rate": 0.0002,
"loss": 0.7042,
"step": 8830
},
{
"epoch": 5.03,
"grad_norm": 0.294921875,
"learning_rate": 0.0002,
"loss": 0.7396,
"step": 8840
},
{
"epoch": 5.04,
"grad_norm": 0.380859375,
"learning_rate": 0.0002,
"loss": 0.8916,
"step": 8850
},
{
"epoch": 5.04,
"grad_norm": 0.30859375,
"learning_rate": 0.0002,
"loss": 0.8322,
"step": 8860
},
{
"epoch": 5.05,
"grad_norm": 0.3125,
"learning_rate": 0.0002,
"loss": 0.7591,
"step": 8870
},
{
"epoch": 5.05,
"grad_norm": 0.33984375,
"learning_rate": 0.0002,
"loss": 0.6652,
"step": 8880
},
{
"epoch": 5.06,
"grad_norm": 0.310546875,
"learning_rate": 0.0002,
"loss": 0.7564,
"step": 8890
},
{
"epoch": 5.07,
"grad_norm": 0.30859375,
"learning_rate": 0.0002,
"loss": 0.8046,
"step": 8900
},
{
"epoch": 5.07,
"grad_norm": 0.333984375,
"learning_rate": 0.0002,
"loss": 0.842,
"step": 8910
},
{
"epoch": 5.08,
"grad_norm": 0.330078125,
"learning_rate": 0.0002,
"loss": 0.7599,
"step": 8920
},
{
"epoch": 5.08,
"grad_norm": 0.353515625,
"learning_rate": 0.0002,
"loss": 0.6848,
"step": 8930
},
{
"epoch": 5.09,
"grad_norm": 0.30078125,
"learning_rate": 0.0002,
"loss": 0.7208,
"step": 8940
},
{
"epoch": 5.09,
"grad_norm": 0.294921875,
"learning_rate": 0.0002,
"loss": 0.8479,
"step": 8950
},
{
"epoch": 5.1,
"grad_norm": 0.3125,
"learning_rate": 0.0002,
"loss": 0.8372,
"step": 8960
},
{
"epoch": 5.11,
"grad_norm": 0.333984375,
"learning_rate": 0.0002,
"loss": 0.7274,
"step": 8970
},
{
"epoch": 5.11,
"grad_norm": 0.330078125,
"learning_rate": 0.0002,
"loss": 0.6996,
"step": 8980
},
{
"epoch": 5.12,
"grad_norm": 0.31640625,
"learning_rate": 0.0002,
"loss": 0.7768,
"step": 8990
},
{
"epoch": 5.12,
"grad_norm": 0.30078125,
"learning_rate": 0.0002,
"loss": 0.837,
"step": 9000
},
{
"epoch": 5.13,
"grad_norm": 0.330078125,
"learning_rate": 0.0002,
"loss": 0.8047,
"step": 9010
},
{
"epoch": 5.13,
"grad_norm": 0.328125,
"learning_rate": 0.0002,
"loss": 0.774,
"step": 9020
},
{
"epoch": 5.14,
"grad_norm": 0.341796875,
"learning_rate": 0.0002,
"loss": 0.6529,
"step": 9030
},
{
"epoch": 5.15,
"grad_norm": 0.298828125,
"learning_rate": 0.0002,
"loss": 0.7248,
"step": 9040
},
{
"epoch": 5.15,
"grad_norm": 0.296875,
"learning_rate": 0.0002,
"loss": 0.8272,
"step": 9050
},
{
"epoch": 5.16,
"grad_norm": 0.328125,
"learning_rate": 0.0002,
"loss": 0.8244,
"step": 9060
},
{
"epoch": 5.16,
"grad_norm": 0.31640625,
"learning_rate": 0.0002,
"loss": 0.7324,
"step": 9070
},
{
"epoch": 5.17,
"grad_norm": 0.35546875,
"learning_rate": 0.0002,
"loss": 0.6828,
"step": 9080
},
{
"epoch": 5.17,
"grad_norm": 0.296875,
"learning_rate": 0.0002,
"loss": 0.7614,
"step": 9090
},
{
"epoch": 5.18,
"grad_norm": 0.302734375,
"learning_rate": 0.0002,
"loss": 0.8315,
"step": 9100
},
{
"epoch": 5.18,
"grad_norm": 0.32421875,
"learning_rate": 0.0002,
"loss": 0.8298,
"step": 9110
},
{
"epoch": 5.19,
"grad_norm": 0.33984375,
"learning_rate": 0.0002,
"loss": 0.7865,
"step": 9120
},
{
"epoch": 5.2,
"grad_norm": 0.345703125,
"learning_rate": 0.0002,
"loss": 0.7191,
"step": 9130
},
{
"epoch": 5.2,
"grad_norm": 0.296875,
"learning_rate": 0.0002,
"loss": 0.7779,
"step": 9140
},
{
"epoch": 5.21,
"grad_norm": 0.302734375,
"learning_rate": 0.0002,
"loss": 0.8367,
"step": 9150
},
{
"epoch": 5.21,
"grad_norm": 0.318359375,
"learning_rate": 0.0002,
"loss": 0.8405,
"step": 9160
},
{
"epoch": 5.22,
"grad_norm": 0.32421875,
"learning_rate": 0.0002,
"loss": 0.7552,
"step": 9170
},
{
"epoch": 5.22,
"grad_norm": 0.353515625,
"learning_rate": 0.0002,
"loss": 0.6737,
"step": 9180
},
{
"epoch": 5.23,
"grad_norm": 0.306640625,
"learning_rate": 0.0002,
"loss": 0.7761,
"step": 9190
},
{
"epoch": 5.24,
"grad_norm": 0.302734375,
"learning_rate": 0.0002,
"loss": 0.8427,
"step": 9200
},
{
"epoch": 5.24,
"grad_norm": 0.31640625,
"learning_rate": 0.0002,
"loss": 0.8125,
"step": 9210
},
{
"epoch": 5.25,
"grad_norm": 0.337890625,
"learning_rate": 0.0002,
"loss": 0.8025,
"step": 9220
},
{
"epoch": 5.25,
"grad_norm": 0.3515625,
"learning_rate": 0.0002,
"loss": 0.7217,
"step": 9230
},
{
"epoch": 5.26,
"grad_norm": 0.310546875,
"learning_rate": 0.0002,
"loss": 0.7732,
"step": 9240
},
{
"epoch": 5.26,
"grad_norm": 0.3125,
"learning_rate": 0.0002,
"loss": 0.8663,
"step": 9250
},
{
"epoch": 5.27,
"grad_norm": 0.3046875,
"learning_rate": 0.0002,
"loss": 0.8386,
"step": 9260
},
{
"epoch": 5.28,
"grad_norm": 0.341796875,
"learning_rate": 0.0002,
"loss": 0.7779,
"step": 9270
},
{
"epoch": 5.28,
"grad_norm": 0.357421875,
"learning_rate": 0.0002,
"loss": 0.689,
"step": 9280
},
{
"epoch": 5.29,
"grad_norm": 0.310546875,
"learning_rate": 0.0002,
"loss": 0.7515,
"step": 9290
},
{
"epoch": 5.29,
"grad_norm": 0.31640625,
"learning_rate": 0.0002,
"loss": 0.9001,
"step": 9300
},
{
"epoch": 5.3,
"grad_norm": 0.31640625,
"learning_rate": 0.0002,
"loss": 0.7936,
"step": 9310
},
{
"epoch": 5.3,
"grad_norm": 0.3359375,
"learning_rate": 0.0002,
"loss": 0.7395,
"step": 9320
},
{
"epoch": 5.31,
"grad_norm": 0.3515625,
"learning_rate": 0.0002,
"loss": 0.6828,
"step": 9330
},
{
"epoch": 5.32,
"grad_norm": 0.3125,
"learning_rate": 0.0002,
"loss": 0.766,
"step": 9340
},
{
"epoch": 5.32,
"grad_norm": 0.3046875,
"learning_rate": 0.0002,
"loss": 0.8524,
"step": 9350
},
{
"epoch": 5.33,
"grad_norm": 0.318359375,
"learning_rate": 0.0002,
"loss": 0.8122,
"step": 9360
},
{
"epoch": 5.33,
"grad_norm": 0.34375,
"learning_rate": 0.0002,
"loss": 0.7972,
"step": 9370
},
{
"epoch": 5.34,
"grad_norm": 0.3671875,
"learning_rate": 0.0002,
"loss": 0.7058,
"step": 9380
},
{
"epoch": 5.34,
"grad_norm": 0.318359375,
"learning_rate": 0.0002,
"loss": 0.7869,
"step": 9390
},
{
"epoch": 5.35,
"grad_norm": 0.29296875,
"learning_rate": 0.0002,
"loss": 0.8735,
"step": 9400
},
{
"epoch": 5.36,
"grad_norm": 0.30859375,
"learning_rate": 0.0002,
"loss": 0.8089,
"step": 9410
},
{
"epoch": 5.36,
"grad_norm": 0.33984375,
"learning_rate": 0.0002,
"loss": 0.7725,
"step": 9420
},
{
"epoch": 5.37,
"grad_norm": 0.38671875,
"learning_rate": 0.0002,
"loss": 0.6743,
"step": 9430
},
{
"epoch": 5.37,
"grad_norm": 0.310546875,
"learning_rate": 0.0002,
"loss": 0.7509,
"step": 9440
},
{
"epoch": 5.38,
"grad_norm": 0.369140625,
"learning_rate": 0.0002,
"loss": 0.857,
"step": 9450
},
{
"epoch": 5.38,
"grad_norm": 0.31640625,
"learning_rate": 0.0002,
"loss": 0.8284,
"step": 9460
},
{
"epoch": 5.39,
"grad_norm": 0.345703125,
"learning_rate": 0.0002,
"loss": 0.7789,
"step": 9470
},
{
"epoch": 5.4,
"grad_norm": 0.35546875,
"learning_rate": 0.0002,
"loss": 0.6707,
"step": 9480
},
{
"epoch": 5.4,
"grad_norm": 0.34375,
"learning_rate": 0.0002,
"loss": 0.7618,
"step": 9490
},
{
"epoch": 5.41,
"grad_norm": 0.330078125,
"learning_rate": 0.0002,
"loss": 0.8578,
"step": 9500
},
{
"epoch": 5.41,
"grad_norm": 0.318359375,
"learning_rate": 0.0002,
"loss": 0.8171,
"step": 9510
},
{
"epoch": 5.42,
"grad_norm": 0.361328125,
"learning_rate": 0.0002,
"loss": 0.7501,
"step": 9520
},
{
"epoch": 5.42,
"grad_norm": 0.35546875,
"learning_rate": 0.0002,
"loss": 0.72,
"step": 9530
},
{
"epoch": 5.43,
"grad_norm": 0.30078125,
"learning_rate": 0.0002,
"loss": 0.732,
"step": 9540
},
{
"epoch": 5.44,
"grad_norm": 0.306640625,
"learning_rate": 0.0002,
"loss": 0.8673,
"step": 9550
},
{
"epoch": 5.44,
"grad_norm": 0.31640625,
"learning_rate": 0.0002,
"loss": 0.8511,
"step": 9560
},
{
"epoch": 5.45,
"grad_norm": 0.341796875,
"learning_rate": 0.0002,
"loss": 0.7353,
"step": 9570
},
{
"epoch": 5.45,
"grad_norm": 0.36328125,
"learning_rate": 0.0002,
"loss": 0.7003,
"step": 9580
},
{
"epoch": 5.46,
"grad_norm": 0.3203125,
"learning_rate": 0.0002,
"loss": 0.804,
"step": 9590
},
{
"epoch": 5.46,
"grad_norm": 0.3125,
"learning_rate": 0.0002,
"loss": 0.8475,
"step": 9600
},
{
"epoch": 5.47,
"grad_norm": 0.337890625,
"learning_rate": 0.0002,
"loss": 0.8454,
"step": 9610
},
{
"epoch": 5.48,
"grad_norm": 0.3515625,
"learning_rate": 0.0002,
"loss": 0.7883,
"step": 9620
},
{
"epoch": 5.48,
"grad_norm": 0.357421875,
"learning_rate": 0.0002,
"loss": 0.6765,
"step": 9630
},
{
"epoch": 5.49,
"grad_norm": 0.310546875,
"learning_rate": 0.0002,
"loss": 0.7595,
"step": 9640
},
{
"epoch": 5.49,
"grad_norm": 0.30078125,
"learning_rate": 0.0002,
"loss": 0.849,
"step": 9650
},
{
"epoch": 5.5,
"grad_norm": 0.32421875,
"learning_rate": 0.0002,
"loss": 0.841,
"step": 9660
},
{
"epoch": 5.5,
"grad_norm": 0.33984375,
"learning_rate": 0.0002,
"loss": 0.7854,
"step": 9670
},
{
"epoch": 5.51,
"grad_norm": 0.337890625,
"learning_rate": 0.0002,
"loss": 0.7011,
"step": 9680
},
{
"epoch": 5.52,
"grad_norm": 0.310546875,
"learning_rate": 0.0002,
"loss": 0.7428,
"step": 9690
},
{
"epoch": 5.52,
"grad_norm": 0.287109375,
"learning_rate": 0.0002,
"loss": 0.8701,
"step": 9700
},
{
"epoch": 5.53,
"grad_norm": 0.3125,
"learning_rate": 0.0002,
"loss": 0.7852,
"step": 9710
},
{
"epoch": 5.53,
"grad_norm": 0.32421875,
"learning_rate": 0.0002,
"loss": 0.7718,
"step": 9720
},
{
"epoch": 5.54,
"grad_norm": 0.3828125,
"learning_rate": 0.0002,
"loss": 0.7085,
"step": 9730
},
{
"epoch": 5.54,
"grad_norm": 0.322265625,
"learning_rate": 0.0002,
"loss": 0.7796,
"step": 9740
},
{
"epoch": 5.55,
"grad_norm": 0.296875,
"learning_rate": 0.0002,
"loss": 0.8341,
"step": 9750
},
{
"epoch": 5.55,
"grad_norm": 0.330078125,
"learning_rate": 0.0002,
"loss": 0.8505,
"step": 9760
},
{
"epoch": 5.56,
"grad_norm": 0.33984375,
"learning_rate": 0.0002,
"loss": 0.81,
"step": 9770
},
{
"epoch": 5.57,
"grad_norm": 0.359375,
"learning_rate": 0.0002,
"loss": 0.6939,
"step": 9780
},
{
"epoch": 5.57,
"grad_norm": 0.302734375,
"learning_rate": 0.0002,
"loss": 0.7882,
"step": 9790
},
{
"epoch": 5.58,
"grad_norm": 0.330078125,
"learning_rate": 0.0002,
"loss": 0.8979,
"step": 9800
},
{
"epoch": 5.58,
"grad_norm": 0.310546875,
"learning_rate": 0.0002,
"loss": 0.8045,
"step": 9810
},
{
"epoch": 5.59,
"grad_norm": 0.32421875,
"learning_rate": 0.0002,
"loss": 0.7508,
"step": 9820
},
{
"epoch": 5.59,
"grad_norm": 0.345703125,
"learning_rate": 0.0002,
"loss": 0.7107,
"step": 9830
},
{
"epoch": 5.6,
"grad_norm": 0.333984375,
"learning_rate": 0.0002,
"loss": 0.7851,
"step": 9840
},
{
"epoch": 5.61,
"grad_norm": 0.3203125,
"learning_rate": 0.0002,
"loss": 0.8831,
"step": 9850
},
{
"epoch": 5.61,
"grad_norm": 0.341796875,
"learning_rate": 0.0002,
"loss": 0.7831,
"step": 9860
},
{
"epoch": 5.62,
"grad_norm": 0.3359375,
"learning_rate": 0.0002,
"loss": 0.7963,
"step": 9870
},
{
"epoch": 5.62,
"grad_norm": 0.357421875,
"learning_rate": 0.0002,
"loss": 0.6816,
"step": 9880
},
{
"epoch": 5.63,
"grad_norm": 0.318359375,
"learning_rate": 0.0002,
"loss": 0.7638,
"step": 9890
},
{
"epoch": 5.63,
"grad_norm": 0.294921875,
"learning_rate": 0.0002,
"loss": 0.8935,
"step": 9900
},
{
"epoch": 5.64,
"grad_norm": 0.3203125,
"learning_rate": 0.0002,
"loss": 0.7828,
"step": 9910
},
{
"epoch": 5.65,
"grad_norm": 0.353515625,
"learning_rate": 0.0002,
"loss": 0.7594,
"step": 9920
},
{
"epoch": 5.65,
"grad_norm": 0.390625,
"learning_rate": 0.0002,
"loss": 0.7165,
"step": 9930
},
{
"epoch": 5.66,
"grad_norm": 0.31640625,
"learning_rate": 0.0002,
"loss": 0.7579,
"step": 9940
},
{
"epoch": 5.66,
"grad_norm": 0.3125,
"learning_rate": 0.0002,
"loss": 0.8643,
"step": 9950
},
{
"epoch": 5.67,
"grad_norm": 0.33203125,
"learning_rate": 0.0002,
"loss": 0.8233,
"step": 9960
},
{
"epoch": 5.67,
"grad_norm": 0.341796875,
"learning_rate": 0.0002,
"loss": 0.8198,
"step": 9970
},
{
"epoch": 5.68,
"grad_norm": 0.34375,
"learning_rate": 0.0002,
"loss": 0.6659,
"step": 9980
},
{
"epoch": 5.69,
"grad_norm": 0.302734375,
"learning_rate": 0.0002,
"loss": 0.7583,
"step": 9990
},
{
"epoch": 5.69,
"grad_norm": 0.30859375,
"learning_rate": 0.0002,
"loss": 0.8282,
"step": 10000
}
],
"logging_steps": 10,
"max_steps": 10000,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 500,
"total_flos": 1.3772789232384e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}