{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 15.04,
  "eval_steps": 500000000,
  "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "crossentropy": 2.7819188833236694,
      "epoch": 0.001,
      "grad_norm": 2.296875,
      "learning_rate": 2e-05,
      "loss": 55.4633,
      "step": 1
    },
    {
      "crossentropy": 2.886364698410034,
      "epoch": 0.002,
      "grad_norm": 2.4375,
      "learning_rate": 4e-05,
      "loss": 57.6593,
      "step": 2
    },
    {
      "crossentropy": 2.610915184020996,
      "epoch": 0.003,
      "grad_norm": 2.265625,
      "learning_rate": 6e-05,
      "loss": 54.4112,
      "step": 3
    },
    {
      "crossentropy": 2.8315508365631104,
      "epoch": 0.004,
      "grad_norm": 2.3125,
      "learning_rate": 8e-05,
      "loss": 57.7924,
      "step": 4
    },
    {
      "crossentropy": 2.8231977224349976,
      "epoch": 0.005,
      "grad_norm": 2.3125,
      "learning_rate": 0.0001,
      "loss": 57.4897,
      "step": 5
    },
    {
      "crossentropy": 2.8636255264282227,
      "epoch": 0.006,
      "grad_norm": 2.078125,
      "learning_rate": 0.00012,
      "loss": 57.0583,
      "step": 6
    },
    {
      "crossentropy": 2.8325681686401367,
      "epoch": 0.007,
      "grad_norm": 1.8671875,
      "learning_rate": 0.00014000000000000001,
      "loss": 56.1569,
      "step": 7
    },
    {
      "crossentropy": 2.788058638572693,
      "epoch": 0.008,
      "grad_norm": 1.734375,
      "learning_rate": 0.00016,
      "loss": 56.0723,
      "step": 8
    },
    {
      "crossentropy": 2.860862612724304,
      "epoch": 0.009,
      "grad_norm": 1.5703125,
      "learning_rate": 0.00017999999999999998,
      "loss": 56.7063,
      "step": 9
    },
    {
      "crossentropy": 2.876722812652588,
      "epoch": 0.01,
      "grad_norm": 1.2734375,
      "learning_rate": 0.0002,
      "loss": 56.7833,
      "step": 10
    },
    {
      "crossentropy": 2.8933022022247314,
      "epoch": 0.011,
      "grad_norm": 1.25,
      "learning_rate": 0.00022,
      "loss": 57.416,
      "step": 11
    },
    {
      "crossentropy": 2.9026139974594116,
      "epoch": 0.012,
      "grad_norm": 1.109375,
      "learning_rate": 0.00024,
      "loss": 57.1726,
      "step": 12
    },
    {
      "crossentropy": 2.8697686195373535,
      "epoch": 0.013,
      "grad_norm": 1.015625,
      "learning_rate": 0.00026000000000000003,
      "loss": 57.0761,
      "step": 13
    },
    {
      "crossentropy": 2.823047637939453,
      "epoch": 0.014,
      "grad_norm": 0.91015625,
      "learning_rate": 0.00028000000000000003,
      "loss": 57.1636,
      "step": 14
    },
    {
      "crossentropy": 2.81307590007782,
      "epoch": 0.015,
      "grad_norm": 0.92578125,
      "learning_rate": 0.0003,
      "loss": 56.68,
      "step": 15
    },
    {
      "crossentropy": 2.893505334854126,
      "epoch": 0.016,
      "grad_norm": 0.953125,
      "grad_norm_var": 0.33952433268229165,
      "learning_rate": 0.00032,
      "loss": 56.6504,
      "step": 16
    },
    {
      "crossentropy": 2.7856497764587402,
      "epoch": 0.017,
      "grad_norm": 0.9375,
      "grad_norm_var": 0.33678080240885416,
      "learning_rate": 0.00034,
      "loss": 56.4876,
      "step": 17
    },
    {
      "crossentropy": 2.87225878238678,
      "epoch": 0.018,
      "grad_norm": 0.86328125,
      "grad_norm_var": 0.3073923110961914,
      "learning_rate": 0.00035999999999999997,
      "loss": 57.2457,
      "step": 18
    },
    {
      "crossentropy": 2.8507845401763916,
      "epoch": 0.019,
      "grad_norm": 0.87890625,
      "grad_norm_var": 0.27884089152018227,
      "learning_rate": 0.00038,
      "loss": 56.671,
      "step": 19
    },
    {
      "crossentropy": 2.923235774040222,
      "epoch": 0.02,
      "grad_norm": 0.85546875,
      "grad_norm_var": 0.2293008804321289,
      "learning_rate": 0.0004,
      "loss": 57.6122,
      "step": 20
    },
    {
      "crossentropy": 2.7433438301086426,
      "epoch": 0.021,
      "grad_norm": 0.7734375,
      "grad_norm_var": 0.1661752700805664,
      "learning_rate": 0.00042,
      "loss": 55.3607,
      "step": 21
    },
    {
      "crossentropy": 2.8193527460098267,
      "epoch": 0.022,
      "grad_norm": 0.79296875,
      "grad_norm_var": 0.11674779256184896,
      "learning_rate": 0.00044,
      "loss": 55.6994,
      "step": 22
    },
    {
      "crossentropy": 2.7718470096588135,
      "epoch": 0.023,
      "grad_norm": 0.8125,
      "grad_norm_var": 0.07935994466145833,
      "learning_rate": 0.00046,
      "loss": 56.1888,
      "step": 23
    },
    {
      "crossentropy": 2.887826681137085,
      "epoch": 0.024,
      "grad_norm": 0.7890625,
      "grad_norm_var": 0.047818756103515624,
      "learning_rate": 0.00048,
      "loss": 58.3357,
      "step": 24
    },
    {
      "crossentropy": 2.8159505128860474,
      "epoch": 0.025,
      "grad_norm": 0.7890625,
      "grad_norm_var": 0.02467625935872396,
      "learning_rate": 0.0005,
      "loss": 56.4528,
      "step": 25
    },
    {
      "crossentropy": 2.786083936691284,
      "epoch": 0.026,
      "grad_norm": 0.79296875,
      "grad_norm_var": 0.01730187733968099,
      "learning_rate": 0.0005200000000000001,
      "loss": 55.865,
      "step": 26
    },
    {
      "crossentropy": 2.878562092781067,
      "epoch": 0.027,
      "grad_norm": 0.80078125,
      "grad_norm_var": 0.009134928385416666,
      "learning_rate": 0.00054,
      "loss": 57.3279,
      "step": 27
    },
    {
      "crossentropy": 2.8583030700683594,
      "epoch": 0.028,
      "grad_norm": 0.78515625,
      "grad_norm_var": 0.005572954813639323,
      "learning_rate": 0.0005600000000000001,
      "loss": 56.4947,
      "step": 28
    },
    {
      "crossentropy": 2.867052912712097,
      "epoch": 0.029,
      "grad_norm": 0.828125,
      "grad_norm_var": 0.003748003641764323,
      "learning_rate": 0.00058,
      "loss": 58.2363,
      "step": 29
    },
    {
      "crossentropy": 2.880504012107849,
      "epoch": 0.03,
      "grad_norm": 0.77734375,
      "grad_norm_var": 0.0036615371704101563,
      "learning_rate": 0.0006,
      "loss": 57.2681,
      "step": 30
    },
    {
      "crossentropy": 2.8287140130996704,
      "epoch": 0.031,
      "grad_norm": 0.76953125,
      "grad_norm_var": 0.003290239969889323,
      "learning_rate": 0.00062,
      "loss": 56.6969,
      "step": 31
    },
    {
      "crossentropy": 2.8504087924957275,
      "epoch": 0.032,
      "grad_norm": 0.75,
      "grad_norm_var": 0.002397600809733073,
      "learning_rate": 0.00064,
      "loss": 57.0567,
      "step": 32
    },
    {
      "crossentropy": 2.830446481704712,
      "epoch": 0.033,
      "grad_norm": 0.76171875,
      "grad_norm_var": 0.0013933817545572916,
      "learning_rate": 0.00066,
      "loss": 55.9364,
      "step": 33
    },
    {
      "crossentropy": 2.805998682975769,
      "epoch": 0.034,
      "grad_norm": 0.78515625,
      "grad_norm_var": 0.0011288960774739583,
      "learning_rate": 0.00068,
      "loss": 57.0045,
      "step": 34
    },
    {
      "crossentropy": 2.8322499990463257,
      "epoch": 0.035,
      "grad_norm": 0.765625,
      "grad_norm_var": 0.0006845474243164062,
      "learning_rate": 0.0007,
      "loss": 56.6746,
      "step": 35
    },
    {
      "crossentropy": 2.978684902191162,
      "epoch": 0.036,
      "grad_norm": 0.83203125,
      "grad_norm_var": 0.0005121231079101562,
      "learning_rate": 0.0007199999999999999,
      "loss": 58.4334,
      "step": 36
    },
    {
      "crossentropy": 2.847362518310547,
      "epoch": 0.037,
      "grad_norm": 0.78515625,
      "grad_norm_var": 0.000498199462890625,
      "learning_rate": 0.00074,
      "loss": 56.7639,
      "step": 37
    },
    {
      "crossentropy": 2.8229883909225464,
      "epoch": 0.038,
      "grad_norm": 0.8046875,
      "grad_norm_var": 0.0005136489868164063,
      "learning_rate": 0.00076,
      "loss": 55.6415,
      "step": 38
    },
    {
      "crossentropy": 2.813721776008606,
      "epoch": 0.039,
      "grad_norm": 0.75,
      "grad_norm_var": 0.0005645116170247395,
      "learning_rate": 0.0007800000000000001,
      "loss": 58.0355,
      "step": 39
    },
    {
      "crossentropy": 2.755218267440796,
      "epoch": 0.04,
      "grad_norm": 0.74609375,
      "grad_norm_var": 0.0006589253743489583,
      "learning_rate": 0.0008,
      "loss": 54.858,
      "step": 40
    },
    {
      "crossentropy": 2.842182993888855,
      "epoch": 0.041,
      "grad_norm": 0.76171875,
      "grad_norm_var": 0.0006825129191080729,
      "learning_rate": 0.00082,
      "loss": 56.0784,
      "step": 41
    },
    {
      "crossentropy": 2.9310812950134277,
      "epoch": 0.042,
      "grad_norm": 0.7890625,
      "grad_norm_var": 0.0006772359212239583,
      "learning_rate": 0.00084,
      "loss": 58.2136,
      "step": 42
    },
    {
      "crossentropy": 2.890253186225891,
      "epoch": 0.043,
      "grad_norm": 0.76953125,
      "grad_norm_var": 0.0006548563639322916,
      "learning_rate": 0.00086,
      "loss": 56.9909,
      "step": 43
    },
    {
      "crossentropy": 2.967428684234619,
      "epoch": 0.044,
      "grad_norm": 0.75,
      "grad_norm_var": 0.0007023493448893229,
      "learning_rate": 0.00088,
      "loss": 56.9546,
      "step": 44
    },
    {
      "crossentropy": 2.779818534851074,
      "epoch": 0.045,
      "grad_norm": 0.7890625,
      "grad_norm_var": 0.0005294164021809896,
      "learning_rate": 0.0009000000000000001,
      "loss": 56.4747,
      "step": 45
    },
    {
      "crossentropy": 2.9322011470794678,
      "epoch": 0.046,
      "grad_norm": 0.80078125,
      "grad_norm_var": 0.0005736668904622396,
      "learning_rate": 0.00092,
      "loss": 58.0048,
      "step": 46
    },
    {
      "crossentropy": 2.796338438987732,
      "epoch": 0.047,
      "grad_norm": 0.765625,
      "grad_norm_var": 0.0005777994791666667,
      "learning_rate": 0.00094,
      "loss": 56.074,
      "step": 47
    },
    {
      "crossentropy": 2.8237509727478027,
      "epoch": 0.048,
      "grad_norm": 0.76953125,
      "grad_norm_var": 0.0005355199178059896,
      "learning_rate": 0.00096,
      "loss": 56.0959,
      "step": 48
    },
    {
      "crossentropy": 2.9482001066207886,
      "epoch": 0.049,
      "grad_norm": 0.75,
      "grad_norm_var": 0.0005673726399739583,
      "learning_rate": 0.00098,
      "loss": 57.4375,
      "step": 49
    },
    {
      "crossentropy": 2.8411340713500977,
      "epoch": 0.05,
      "grad_norm": 0.7890625,
      "grad_norm_var": 0.0005731582641601562,
      "learning_rate": 0.001,
      "loss": 56.8031,
      "step": 50
    },
    {
      "crossentropy": 2.8085217475891113,
      "epoch": 0.051,
      "grad_norm": 0.80859375,
      "grad_norm_var": 0.0006284077962239583,
      "learning_rate": 0.0009989473684210526,
      "loss": 56.6429,
      "step": 51
    },
    {
      "crossentropy": 2.884308099746704,
      "epoch": 0.052,
      "grad_norm": 0.78515625,
      "grad_norm_var": 0.0004330952962239583,
      "learning_rate": 0.0009978947368421054,
      "loss": 57.8023,
      "step": 52
    },
    {
      "crossentropy": 2.89777672290802,
      "epoch": 0.053,
      "grad_norm": 0.765625,
      "grad_norm_var": 0.00043277740478515626,
      "learning_rate": 0.000996842105263158,
      "loss": 58.1485,
      "step": 53
    },
    {
      "crossentropy": 2.833405017852783,
      "epoch": 0.054,
      "grad_norm": 0.7734375,
      "grad_norm_var": 0.00036869049072265627,
      "learning_rate": 0.0009957894736842105,
      "loss": 56.4035,
      "step": 54
    },
    {
      "crossentropy": 2.898597002029419,
      "epoch": 0.055,
      "grad_norm": 0.8125,
      "grad_norm_var": 0.00042362213134765624,
      "learning_rate": 0.000994736842105263,
      "loss": 58.3541,
      "step": 55
    },
    {
      "crossentropy": 2.83421790599823,
      "epoch": 0.056,
      "grad_norm": 0.765625,
      "grad_norm_var": 0.0003679911295572917,
      "learning_rate": 0.0009936842105263159,
      "loss": 56.4192,
      "step": 56
    },
    {
      "crossentropy": 2.8999528884887695,
      "epoch": 0.057,
      "grad_norm": 0.80078125,
      "grad_norm_var": 0.0003794352213541667,
      "learning_rate": 0.0009926315789473685,
      "loss": 58.0858,
      "step": 57
    },
    {
      "crossentropy": 2.992220401763916,
      "epoch": 0.058,
      "grad_norm": 0.7578125,
      "grad_norm_var": 0.00040384928385416664,
      "learning_rate": 0.000991578947368421,
      "loss": 59.5004,
      "step": 58
    },
    {
      "crossentropy": 2.8451712131500244,
      "epoch": 0.059,
      "grad_norm": 0.74609375,
      "grad_norm_var": 0.00046564737955729165,
      "learning_rate": 0.0009905263157894738,
      "loss": 56.6674,
      "step": 59
    },
    {
      "crossentropy": 2.8718665838241577,
      "epoch": 0.06,
      "grad_norm": 0.78125,
      "grad_norm_var": 0.00041478474934895834,
      "learning_rate": 0.0009894736842105264,
      "loss": 56.9806,
      "step": 60
    },
    {
      "crossentropy": 2.9184417724609375,
      "epoch": 0.061,
      "grad_norm": 0.765625,
      "grad_norm_var": 0.0004170735677083333,
      "learning_rate": 0.000988421052631579,
      "loss": 57.6043,
      "step": 61
    },
    {
      "crossentropy": 2.834872841835022,
      "epoch": 0.062,
      "grad_norm": 0.7421875,
      "grad_norm_var": 0.00044854482014973957,
      "learning_rate": 0.0009873684210526315,
      "loss": 55.8379,
      "step": 62
    },
    {
      "crossentropy": 2.9252291917800903,
      "epoch": 0.063,
      "grad_norm": 0.7578125,
      "grad_norm_var": 0.0004607518513997396,
      "learning_rate": 0.0009863157894736843,
      "loss": 57.7051,
      "step": 63
    },
    {
      "crossentropy": 2.7089247703552246,
      "epoch": 0.064,
      "grad_norm": 0.78125,
      "grad_norm_var": 0.0004636128743489583,
      "learning_rate": 0.000985263157894737,
      "loss": 55.8377,
      "step": 64
    },
    {
      "crossentropy": 2.8316437005996704,
      "epoch": 1.001,
      "grad_norm": 0.75390625,
      "grad_norm_var": 0.00044040679931640626,
      "learning_rate": 0.0009842105263157895,
      "loss": 56.5235,
      "step": 65
    },
    {
      "crossentropy": 2.881982922554016,
      "epoch": 1.002,
      "grad_norm": 0.7890625,
      "grad_norm_var": 0.00037333170572916664,
      "learning_rate": 0.000983157894736842,
      "loss": 56.9903,
      "step": 66
    },
    {
      "crossentropy": 2.8200029134750366,
      "epoch": 1.003,
      "grad_norm": 0.796875,
      "grad_norm_var": 0.0004017512003580729,
      "learning_rate": 0.0009821052631578948,
      "loss": 56.6703,
      "step": 67
    },
    {
      "crossentropy": 2.7705549001693726,
      "epoch": 1.004,
      "grad_norm": 0.78125,
      "grad_norm_var": 0.0004012425740559896,
      "learning_rate": 0.0009810526315789474,
      "loss": 55.9015,
      "step": 68
    },
    {
      "crossentropy": 2.8487346172332764,
      "epoch": 1.005,
      "grad_norm": 0.73046875,
      "grad_norm_var": 0.0005208333333333333,
      "learning_rate": 0.00098,
      "loss": 56.4856,
      "step": 69
    },
    {
      "crossentropy": 2.809660792350769,
      "epoch": 1.006,
      "grad_norm": 0.78515625,
      "grad_norm_var": 0.00041802724202473957,
      "learning_rate": 0.0009789473684210528,
      "loss": 57.3916,
      "step": 70
    },
    {
      "crossentropy": 2.8564740419387817,
      "epoch": 1.007,
      "grad_norm": 0.76953125,
      "grad_norm_var": 0.00041681925455729166,
      "learning_rate": 0.0009778947368421053,
      "loss": 55.8133,
      "step": 71
    },
    {
      "crossentropy": 2.8648808002471924,
      "epoch": 1.008,
      "grad_norm": 0.79296875,
      "grad_norm_var": 0.00038859049479166664,
      "learning_rate": 0.000976842105263158,
      "loss": 57.4218,
      "step": 72
    },
    {
      "crossentropy": 2.7834339141845703,
      "epoch": 1.009,
      "grad_norm": 0.7734375,
      "grad_norm_var": 0.0003794352213541667,
      "learning_rate": 0.0009757894736842106,
      "loss": 56.7101,
      "step": 73
    },
    {
      "crossentropy": 2.9197006225585938,
      "epoch": 1.01,
      "grad_norm": 0.7734375,
      "grad_norm_var": 0.0003371556599934896,
      "learning_rate": 0.0009747368421052632,
      "loss": 58.2303,
      "step": 74
    },
    {
      "crossentropy": 2.8555904626846313,
      "epoch": 1.011,
      "grad_norm": 0.75390625,
      "grad_norm_var": 0.0003509521484375,
      "learning_rate": 0.0009736842105263158,
      "loss": 56.6662,
      "step": 75
    },
    {
      "crossentropy": 2.833241581916809,
      "epoch": 1.012,
      "grad_norm": 0.76171875,
      "grad_norm_var": 0.00035444895426432293,
      "learning_rate": 0.0009726315789473684,
      "loss": 56.8305,
      "step": 76
    },
    {
      "crossentropy": 2.913132429122925,
      "epoch": 1.013,
      "grad_norm": 0.80859375,
      "grad_norm_var": 0.0003814697265625,
      "learning_rate": 0.0009715789473684211,
      "loss": 57.7847,
      "step": 77
    },
    {
      "crossentropy": 2.794333338737488,
      "epoch": 1.014,
      "grad_norm": 0.73828125,
      "grad_norm_var": 0.00044854482014973957,
      "learning_rate": 0.0009705263157894737,
      "loss": 55.5186,
      "step": 78
    },
    {
      "crossentropy": 2.7918150424957275,
      "epoch": 1.015,
      "grad_norm": 0.77734375,
      "grad_norm_var": 0.00044530232747395834,
      "learning_rate": 0.0009694736842105263,
      "loss": 54.9685,
      "step": 79
    },
    {
      "crossentropy": 2.9973483085632324,
      "epoch": 1.016,
      "grad_norm": 0.79296875,
      "grad_norm_var": 0.0004668553670247396,
      "learning_rate": 0.000968421052631579,
      "loss": 57.5043,
      "step": 80
    },
    {
      "crossentropy": 2.6424334049224854,
      "epoch": 1.017,
      "grad_norm": 0.75390625,
      "grad_norm_var": 0.0004668553670247396,
      "learning_rate": 0.0009673684210526316,
      "loss": 53.7855,
      "step": 81
    },
    {
      "crossentropy": 2.901629686355591,
      "epoch": 1.018,
      "grad_norm": 0.7265625,
      "grad_norm_var": 0.0005828221638997396,
      "learning_rate": 0.0009663157894736843,
      "loss": 56.3773,
      "step": 82
    },
    {
      "crossentropy": 2.9043585062026978,
      "epoch": 1.019,
      "grad_norm": 0.78125,
      "grad_norm_var": 0.0005416234334309895,
      "learning_rate": 0.0009652631578947368,
      "loss": 56.4689,
      "step": 83
    },
    {
      "crossentropy": 2.89982807636261,
      "epoch": 1.02,
      "grad_norm": 0.77734375,
      "grad_norm_var": 0.0005360921223958333,
      "learning_rate": 0.0009642105263157895,
      "loss": 55.5952,
      "step": 84
    },
    {
      "crossentropy": 2.795522093772888,
      "epoch": 1.021,
      "grad_norm": 0.78515625,
      "grad_norm_var": 0.00044530232747395834,
      "learning_rate": 0.0009631578947368421,
      "loss": 55.937,
      "step": 85
    },
    {
      "crossentropy": 2.879517436027527,
      "epoch": 1.022,
      "grad_norm": 0.7578125,
      "grad_norm_var": 0.0004439671834309896,
      "learning_rate": 0.0009621052631578947,
      "loss": 57.3097,
      "step": 86
    },
    {
      "crossentropy": 2.977717161178589,
      "epoch": 1.023,
      "grad_norm": 0.765625,
      "grad_norm_var": 0.00044530232747395834,
      "learning_rate": 0.0009610526315789475,
      "loss": 58.7542,
      "step": 87
    },
    {
      "crossentropy": 2.6625332832336426,
      "epoch": 1.024,
      "grad_norm": 0.74609375,
      "grad_norm_var": 0.00043919881184895836,
      "learning_rate": 0.00096,
      "loss": 55.1611,
      "step": 88
    },
    {
      "crossentropy": 2.8213740587234497,
      "epoch": 1.025,
      "grad_norm": 0.78515625,
      "grad_norm_var": 0.0004577000935872396,
      "learning_rate": 0.0009589473684210527,
      "loss": 56.8185,
      "step": 89
    },
    {
      "crossentropy": 2.746438980102539,
      "epoch": 1.026,
      "grad_norm": 0.78515625,
      "grad_norm_var": 0.0004750569661458333,
      "learning_rate": 0.0009578947368421053,
      "loss": 56.6947,
      "step": 90
    },
    {
      "crossentropy": 2.6565908193588257,
      "epoch": 1.027,
      "grad_norm": 0.7734375,
      "grad_norm_var": 0.0004607518513997396,
      "learning_rate": 0.000956842105263158,
      "loss": 54.1427,
      "step": 91
    },
    {
      "crossentropy": 2.88510262966156,
      "epoch": 1.028,
      "grad_norm": 0.8046875,
      "grad_norm_var": 0.0005299886067708333,
      "learning_rate": 0.0009557894736842105,
      "loss": 57.024,
      "step": 92
    },
    {
      "crossentropy": 2.8993020057678223,
      "epoch": 1.029,
      "grad_norm": 0.78125,
      "grad_norm_var": 0.00044498443603515627,
      "learning_rate": 0.0009547368421052631,
      "loss": 57.8516,
      "step": 93
    },
    {
      "crossentropy": 2.856235146522522,
      "epoch": 1.03,
      "grad_norm": 0.8203125,
      "grad_norm_var": 0.000510406494140625,
      "learning_rate": 0.0009536842105263158,
      "loss": 57.8481,
      "step": 94
    },
    {
      "crossentropy": 2.766387939453125,
      "epoch": 1.031,
      "grad_norm": 0.8359375,
      "grad_norm_var": 0.0007364273071289063,
      "learning_rate": 0.0009526315789473684,
      "loss": 57.7393,
      "step": 95
    },
    {
      "crossentropy": 2.7507987022399902,
      "epoch": 1.032,
      "grad_norm": 0.78515625,
      "grad_norm_var": 0.0007262547810872396,
      "learning_rate": 0.0009515789473684211,
      "loss": 55.9701,
      "step": 96
    },
    {
      "crossentropy": 2.862091898918152,
      "epoch": 1.033,
      "grad_norm": 0.765625,
      "grad_norm_var": 0.0006955464680989584,
      "learning_rate": 0.0009505263157894737,
      "loss": 57.3552,
      "step": 97
    },
    {
      "crossentropy": 2.942552924156189,
      "epoch": 1.034,
      "grad_norm": 0.78515625,
      "grad_norm_var": 0.0004943211873372396,
      "learning_rate": 0.0009494736842105264,
      "loss": 56.6328,
      "step": 98
    },
    {
      "crossentropy": 2.8581180572509766,
      "epoch": 1.035,
      "grad_norm": 0.76953125,
      "grad_norm_var": 0.0005063374837239583,
      "learning_rate": 0.000948421052631579,
      "loss": 57.5714,
      "step": 99
    },
    {
      "crossentropy": 2.811803102493286,
      "epoch": 1.036,
      "grad_norm": 0.765625,
      "grad_norm_var": 0.0005233128865559896,
      "learning_rate": 0.0009473684210526315,
      "loss": 56.7149,
      "step": 100
    },
    {
      "crossentropy": 2.8827518224716187,
      "epoch": 1.037,
      "grad_norm": 0.796875,
      "grad_norm_var": 0.0005368550618489583,
      "learning_rate": 0.0009463157894736842,
      "loss": 56.9192,
      "step": 101
    },
    {
      "crossentropy": 2.907082676887512,
      "epoch": 1.038,
      "grad_norm": 0.79296875,
      "grad_norm_var": 0.0004973729451497396,
      "learning_rate": 0.0009452631578947368,
      "loss": 57.5872,
      "step": 102
    },
    {
      "crossentropy": 2.859190344810486,
      "epoch": 1.039,
      "grad_norm": 0.75390625,
      "grad_norm_var": 0.0005360921223958333,
      "learning_rate": 0.0009442105263157895,
      "loss": 55.8119,
      "step": 103
    },
    {
      "crossentropy": 2.740812063217163,
      "epoch": 1.04,
      "grad_norm": 0.7578125,
      "grad_norm_var": 0.0004851659138997396,
      "learning_rate": 0.0009431578947368421,
      "loss": 55.4499,
      "step": 104
    },
    {
      "crossentropy": 2.786709427833557,
      "epoch": 1.041,
      "grad_norm": 0.74609375,
      "grad_norm_var": 0.0005792617797851562,
      "learning_rate": 0.0009421052631578948,
      "loss": 55.1188,
      "step": 105
    },
    {
      "crossentropy": 2.8872324228286743,
      "epoch": 1.042,
      "grad_norm": 0.828125,
      "grad_norm_var": 0.0007100423177083333,
      "learning_rate": 0.0009410526315789474,
      "loss": 58.2194,
      "step": 106
    },
    {
      "crossentropy": 2.7634663581848145,
      "epoch": 1.043,
      "grad_norm": 0.734375,
      "grad_norm_var": 0.0008664449055989583,
      "learning_rate": 0.00094,
      "loss": 55.4621,
      "step": 107
    },
    {
      "crossentropy": 2.874852418899536,
      "epoch": 1.044,
      "grad_norm": 0.7578125,
      "grad_norm_var": 0.0008664449055989583,
      "learning_rate": 0.0009389473684210527,
      "loss": 57.0353,
      "step": 108
    },
    {
      "crossentropy": 2.775438666343689,
      "epoch": 1.045,
      "grad_norm": 0.76171875,
      "grad_norm_var": 0.0008864720662434896,
      "learning_rate": 0.0009378947368421052,
      "loss": 56.4358,
      "step": 109
    },
    {
      "crossentropy": 2.8654950857162476,
      "epoch": 1.046,
      "grad_norm": 0.80078125,
      "grad_norm_var": 0.0008015950520833333,
      "learning_rate": 0.0009368421052631579,
      "loss": 57.3299,
      "step": 110
    },
    {
      "crossentropy": 2.761886715888977,
      "epoch": 1.047,
      "grad_norm": 0.75390625,
      "grad_norm_var": 0.0005812962849934896,
      "learning_rate": 0.0009357894736842105,
      "loss": 57.1592,
      "step": 111
    },
    {
      "crossentropy": 2.8443450927734375,
      "epoch": 1.048,
      "grad_norm": 0.78515625,
      "grad_norm_var": 0.0005812962849934896,
      "learning_rate": 0.0009347368421052633,
      "loss": 56.3252,
      "step": 112
    },
    {
      "crossentropy": 2.7667864561080933,
      "epoch": 1.049,
      "grad_norm": 0.76171875,
      "grad_norm_var": 0.0005856831868489583,
      "learning_rate": 0.0009336842105263158,
      "loss": 56.8155,
      "step": 113
    },
    {
      "crossentropy": 2.8398473262786865,
      "epoch": 1.05,
      "grad_norm": 0.77734375,
      "grad_norm_var": 0.0005757649739583333,
      "learning_rate": 0.0009326315789473684,
      "loss": 56.5225,
      "step": 114
    },
    {
      "crossentropy": 2.79670786857605,
      "epoch": 1.051,
      "grad_norm": 0.7734375,
      "grad_norm_var": 0.0005757013956705729,
      "learning_rate": 0.0009315789473684211,
      "loss": 57.254,
      "step": 115
    },
    {
      "crossentropy": 2.774913787841797,
      "epoch": 1.052,
      "grad_norm": 0.796875,
      "grad_norm_var": 0.0006113052368164062,
      "learning_rate": 0.0009305263157894737,
      "loss": 57.1821,
      "step": 116
    },
    {
      "crossentropy": 2.73682963848114,
      "epoch": 1.053,
      "grad_norm": 0.765625,
      "grad_norm_var": 0.0005757013956705729,
      "learning_rate": 0.0009294736842105263,
      "loss": 56.0187,
      "step": 117
    },
    {
      "crossentropy": 2.802118182182312,
      "epoch": 1.054,
      "grad_norm": 0.7890625,
      "grad_norm_var": 0.0005655924479166667,
      "learning_rate": 0.0009284210526315789,
      "loss": 57.0246,
      "step": 118
    },
    {
      "crossentropy": 2.833091616630554,
      "epoch": 1.055,
      "grad_norm": 0.71875,
      "grad_norm_var": 0.0007252375284830729,
      "learning_rate": 0.0009273684210526316,
      "loss": 56.1985,
      "step": 119
    },
    {
      "crossentropy": 2.892102599143982,
      "epoch": 1.056,
      "grad_norm": 0.76171875,
      "grad_norm_var": 0.00072021484375,
      "learning_rate": 0.0009263157894736843,
      "loss": 58.5873,
      "step": 120
    },
    {
      "crossentropy": 2.8181869983673096,
      "epoch": 1.057,
      "grad_norm": 0.73828125,
      "grad_norm_var": 0.000748443603515625,
      "learning_rate": 0.0009252631578947368,
      "loss": 55.6273,
      "step": 121
    },
    {
      "crossentropy": 2.8839467763900757,
      "epoch": 1.058,
      "grad_norm": 0.796875,
      "grad_norm_var": 0.0005633036295572916,
      "learning_rate": 0.0009242105263157895,
      "loss": 57.4793,
      "step": 122
    },
    {
      "crossentropy": 2.82876455783844,
      "epoch": 1.059,
      "grad_norm": 0.7578125,
      "grad_norm_var": 0.0004954020182291667,
      "learning_rate": 0.0009231578947368421,
      "loss": 56.5058,
      "step": 123
    },
    {
      "crossentropy": 2.8746414184570312,
      "epoch": 1.06,
      "grad_norm": 0.79296875,
      "grad_norm_var": 0.0005222956339518229,
      "learning_rate": 0.0009221052631578948,
      "loss": 56.7531,
      "step": 124
    },
    {
      "crossentropy": 2.8503822088241577,
      "epoch": 1.061,
      "grad_norm": 0.76171875,
      "grad_norm_var": 0.0005222956339518229,
      "learning_rate": 0.0009210526315789473,
      "loss": 56.2036,
      "step": 125
    },
    {
      "crossentropy": 2.736844062805176,
      "epoch": 1.062,
      "grad_norm": 0.73046875,
      "grad_norm_var": 0.0005497614542643229,
      "learning_rate": 0.00092,
      "loss": 56.6791,
      "step": 126
    },
    {
      "crossentropy": 2.8095351457595825,
      "epoch": 1.063,
      "grad_norm": 0.76171875,
      "grad_norm_var": 0.0005406061808268229,
      "learning_rate": 0.0009189473684210526,
      "loss": 57.0038,
      "step": 127
    },
    {
      "crossentropy": 2.8585771322250366,
      "epoch": 1.064,
      "grad_norm": 0.82421875,
      "grad_norm_var": 0.0007313410441080729,
      "learning_rate": 0.0009178947368421053,
      "loss": 57.2698,
      "step": 128
    },
    {
      "crossentropy": 2.827934741973877,
      "epoch": 2.001,
      "grad_norm": 0.7578125,
      "grad_norm_var": 0.000925445556640625,
      "learning_rate": 0.000916842105263158,
      "loss": 55.3524,
      "step": 129
    },
    {
      "crossentropy": 2.834007978439331,
      "epoch": 2.002,
      "grad_norm": 0.796875,
      "grad_norm_var": 0.00096435546875,
      "learning_rate": 0.0009157894736842105,
      "loss": 55.6883,
      "step": 130
    },
    {
      "crossentropy": 2.752839684486389,
      "epoch": 2.003,
      "grad_norm": 0.7578125,
      "grad_norm_var": 0.000937652587890625,
      "learning_rate": 0.0009147368421052632,
      "loss": 56.9396,
      "step": 131
    },
    {
      "crossentropy": 2.780048131942749,
      "epoch": 2.004,
      "grad_norm": 0.765625,
      "grad_norm_var": 0.000937652587890625,
      "learning_rate": 0.0009136842105263158,
      "loss": 55.8962,
      "step": 132
    },
    {
      "crossentropy": 2.7902239561080933,
      "epoch": 2.005,
      "grad_norm": 0.7890625,
      "grad_norm_var": 0.000937652587890625,
      "learning_rate": 0.0009126315789473685,
      "loss": 56.2156,
      "step": 133
    },
    {
      "crossentropy": 2.856339693069458,
      "epoch": 2.006,
      "grad_norm": 0.74609375,
      "grad_norm_var": 0.0007939020792643229,
      "learning_rate": 0.000911578947368421,
      "loss": 57.4976,
      "step": 134
    },
    {
      "crossentropy": 2.8085330724716187,
      "epoch": 2.007,
      "grad_norm": 0.76953125,
      "grad_norm_var": 0.0007862726847330729,
      "learning_rate": 0.0009105263157894737,
      "loss": 56.5116,
      "step": 135
    },
    {
      "crossentropy": 2.7982712984085083,
      "epoch": 2.008,
      "grad_norm": 0.77734375,
      "grad_norm_var": 0.0006998062133789062,
      "learning_rate": 0.0009094736842105264,
      "loss": 56.9157,
      "step": 136
    },
    {
      "crossentropy": 2.8740073442459106,
      "epoch": 2.009,
      "grad_norm": 0.73828125,
      "grad_norm_var": 0.000748443603515625,
      "learning_rate": 0.000908421052631579,
      "loss": 56.9175,
      "step": 137
    },
    {
      "crossentropy": 2.849576234817505,
      "epoch": 2.01,
      "grad_norm": 0.79296875,
      "grad_norm_var": 0.0007593154907226563,
      "learning_rate": 0.0009073684210526316,
      "loss": 56.2422,
      "step": 138
    },
    {
      "crossentropy": 2.764275550842285,
      "epoch": 2.011,
      "grad_norm": 0.76171875,
      "grad_norm_var": 0.000742022196451823,
      "learning_rate": 0.0009063157894736842,
      "loss": 55.6254,
      "step": 139
    },
    {
      "crossentropy": 2.868152141571045,
      "epoch": 2.012,
      "grad_norm": 0.79296875,
      "grad_norm_var": 0.0007593154907226563,
      "learning_rate": 0.0009052631578947369,
      "loss": 55.5984,
      "step": 140
    },
    {
      "crossentropy": 2.7295836210250854,
      "epoch": 2.013,
      "grad_norm": 0.79296875,
      "grad_norm_var": 0.0006392796834309896,
      "learning_rate": 0.0009042105263157895,
      "loss": 55.8961,
      "step": 141
    },
    {
      "crossentropy": 2.8953765630722046,
      "epoch": 2.014,
      "grad_norm": 0.7265625,
      "grad_norm_var": 0.0007932027180989583,
      "learning_rate": 0.0009031578947368422,
      "loss": 57.4738,
      "step": 142
    },
    {
      "crossentropy": 2.8890058994293213,
      "epoch": 2.015,
      "grad_norm": 0.79296875,
      "grad_norm_var": 0.0006528218587239583,
      "learning_rate": 0.0009021052631578947,
      "loss": 56.5425,
      "step": 143
    },
    {
      "crossentropy": 2.7584519386291504,
      "epoch": 2.016,
      "grad_norm": 0.74609375,
      "grad_norm_var": 0.000510406494140625,
      "learning_rate": 0.0009010526315789473,
      "loss": 55.3745,
      "step": 144
    },
    {
      "crossentropy": 2.9137370586395264,
      "epoch": 2.017,
      "grad_norm": 0.74609375,
      "grad_norm_var": 0.0005365371704101563,
      "learning_rate": 0.0009000000000000001,
      "loss": 57.0554,
      "step": 145
    },
    {
      "crossentropy": 2.836198091506958,
      "epoch": 2.018,
      "grad_norm": 0.7734375,
      "grad_norm_var": 0.00048160552978515625,
      "learning_rate": 0.0008989473684210527,
      "loss": 55.6138,
      "step": 146
    },
    {
      "crossentropy": 2.8125778436660767,
      "epoch": 2.019,
      "grad_norm": 0.7578125,
      "grad_norm_var": 0.00048160552978515625,
      "learning_rate": 0.0008978947368421053,
      "loss": 56.4268,
      "step": 147
    },
    {
      "crossentropy": 2.9599599838256836,
      "epoch": 2.02,
      "grad_norm": 0.78125,
      "grad_norm_var": 0.0004943211873372396,
      "learning_rate": 0.0008968421052631579,
      "loss": 57.8099,
      "step": 148
    },
    {
      "crossentropy": 2.9571365118026733,
      "epoch": 2.021,
      "grad_norm": 0.77734375,
      "grad_norm_var": 0.00046971638997395836,
      "learning_rate": 0.0008957894736842106,
      "loss": 58.1597,
      "step": 149
    },
    {
      "crossentropy": 2.8628395795822144,
      "epoch": 2.022,
      "grad_norm": 0.80078125,
      "grad_norm_var": 0.0005035400390625,
      "learning_rate": 0.0008947368421052632,
      "loss": 58.4094,
      "step": 150
    },
    {
      "crossentropy": 2.8677598237991333,
      "epoch": 2.023,
      "grad_norm": 0.7578125,
      "grad_norm_var": 0.0005136489868164063,
      "learning_rate": 0.0008936842105263157,
      "loss": 57.5826,
      "step": 151
    },
    {
      "crossentropy": 2.8962247371673584,
      "epoch": 2.024,
      "grad_norm": 0.78125,
      "grad_norm_var": 0.0005185445149739583,
      "learning_rate": 0.0008926315789473684,
      "loss": 57.425,
      "step": 152
    },
    {
      "crossentropy": 2.8157013654708862,
      "epoch": 2.025,
      "grad_norm": 0.765625,
      "grad_norm_var": 0.00044956207275390623,
      "learning_rate": 0.0008915789473684211,
      "loss": 54.5755,
      "step": 153
    },
    {
      "crossentropy": 2.874367117881775,
      "epoch": 2.026,
      "grad_norm": 0.74609375,
      "grad_norm_var": 0.00045413970947265624,
      "learning_rate": 0.0008905263157894738,
      "loss": 55.1411,
      "step": 154
    },
    {
      "crossentropy": 2.78195583820343,
      "epoch": 2.027,
      "grad_norm": 0.7734375,
      "grad_norm_var": 0.00045166015625,
      "learning_rate": 0.0008894736842105263,
      "loss": 55.6419,
      "step": 155
    },
    {
      "crossentropy": 2.7602696418762207,
      "epoch": 2.028,
      "grad_norm": 0.76171875,
      "grad_norm_var": 0.0004150390625,
      "learning_rate": 0.000888421052631579,
      "loss": 55.7835,
      "step": 156
    },
    {
      "crossentropy": 2.785650372505188,
      "epoch": 2.029,
      "grad_norm": 0.77734375,
      "grad_norm_var": 0.00037740071614583335,
      "learning_rate": 0.0008873684210526316,
      "loss": 56.6855,
      "step": 157
    },
    {
      "crossentropy": 2.9064029455184937,
      "epoch": 2.03,
      "grad_norm": 0.8671875,
      "grad_norm_var": 0.0008626302083333333,
      "learning_rate": 0.0008863157894736842,
      "loss": 57.8286,
      "step": 158
    },
    {
      "crossentropy": 2.751049518585205,
      "epoch": 2.031,
      "grad_norm": 0.75,
      "grad_norm_var": 0.0008773167928059895,
      "learning_rate": 0.0008852631578947368,
      "loss": 55.0993,
      "step": 159
    },
    {
      "crossentropy": 2.914052367210388,
      "epoch": 2.032,
      "grad_norm": 0.78125,
      "grad_norm_var": 0.0008298238118489583,
      "learning_rate": 0.0008842105263157894,
      "loss": 57.5343,
      "step": 160
    },
    {
      "crossentropy": 2.799962878227234,
      "epoch": 2.033,
      "grad_norm": 0.7265625,
      "grad_norm_var": 0.0009286880493164062,
      "learning_rate": 0.0008831578947368422,
      "loss": 55.8085,
      "step": 161
    },
    {
      "crossentropy": 2.844335198402405,
      "epoch": 2.034,
      "grad_norm": 0.76953125,
      "grad_norm_var": 0.0009297688802083333,
      "learning_rate": 0.0008821052631578948,
      "loss": 57.7179,
      "step": 162
    },
    {
      "crossentropy": 2.914214015007019,
      "epoch": 2.035,
      "grad_norm": 0.8125,
      "grad_norm_var": 0.0010027567545572917,
      "learning_rate": 0.0008810526315789475,
      "loss": 56.2728,
      "step": 163
    },
    {
      "crossentropy": 2.8970110416412354,
      "epoch": 2.036,
      "grad_norm": 0.80859375,
      "grad_norm_var": 0.001065508524576823,
      "learning_rate": 0.00088,
      "loss": 57.5116,
      "step": 164
    },
    {
      "crossentropy": 2.953248143196106,
      "epoch": 2.037,
      "grad_norm": 0.765625,
      "grad_norm_var": 0.0010759989420572917,
      "learning_rate": 0.0008789473684210526,
      "loss": 59.0574,
      "step": 165
    },
    {
      "crossentropy": 2.8602688312530518,
      "epoch": 2.038,
      "grad_norm": 0.7734375,
      "grad_norm_var": 0.0010390599568684896,
      "learning_rate": 0.0008778947368421053,
      "loss": 56.9093,
      "step": 166
    },
    {
      "crossentropy": 2.8289544582366943,
      "epoch": 2.039,
      "grad_norm": 0.765625,
      "grad_norm_var": 0.0010238011678059896,
      "learning_rate": 0.0008768421052631579,
      "loss": 55.8569,
      "step": 167
    },
    {
      "crossentropy": 2.809374690055847,
      "epoch": 2.04,
      "grad_norm": 0.76171875,
      "grad_norm_var": 0.0010355631510416666,
      "learning_rate": 0.0008757894736842105,
      "loss": 57.3663,
      "step": 168
    },
    {
      "crossentropy": 2.753124713897705,
      "epoch": 2.041,
      "grad_norm": 0.82421875,
      "grad_norm_var": 0.0011738459269205728,
      "learning_rate": 0.0008747368421052632,
      "loss": 55.8763,
      "step": 169
    },
    {
      "crossentropy": 2.842271566390991,
      "epoch": 2.042,
      "grad_norm": 0.78125,
      "grad_norm_var": 0.0010965983072916666,
      "learning_rate": 0.0008736842105263159,
      "loss": 56.9834,
      "step": 170
    },
    {
      "crossentropy": 2.672803044319153,
      "epoch": 2.043,
      "grad_norm": 0.7578125,
      "grad_norm_var": 0.0011281331380208334,
      "learning_rate": 0.0008726315789473685,
      "loss": 54.2653,
      "step": 171
    },
    {
      "crossentropy": 2.887640357017517,
      "epoch": 2.044,
      "grad_norm": 0.83984375,
      "grad_norm_var": 0.0013163248697916667,
      "learning_rate": 0.000871578947368421,
      "loss": 57.3367,
      "step": 172
    },
    {
      "crossentropy": 2.95772647857666,
      "epoch": 2.045,
      "grad_norm": 0.78125,
      "grad_norm_var": 0.0013132095336914062,
      "learning_rate": 0.0008705263157894737,
      "loss": 58.1996,
      "step": 173
    },
    {
      "crossentropy": 2.9062867164611816,
      "epoch": 2.046,
      "grad_norm": 0.80078125,
      "grad_norm_var": 0.0008646647135416666,
      "learning_rate": 0.0008694736842105263,
      "loss": 56.2754,
      "step": 174
    },
    {
      "crossentropy": 2.816689133644104,
      "epoch": 2.047,
      "grad_norm": 0.7578125,
      "grad_norm_var": 0.0008359273274739583,
      "learning_rate": 0.000868421052631579,
      "loss": 56.8248,
      "step": 175
    },
    {
      "crossentropy": 2.7304184436798096,
      "epoch": 2.048,
      "grad_norm": 0.75,
      "grad_norm_var": 0.0008989969889322916,
      "learning_rate": 0.0008673684210526315,
      "loss": 56.0621,
      "step": 176
    },
    {
      "crossentropy": 2.8292678594589233,
      "epoch": 2.049,
      "grad_norm": 0.77734375,
      "grad_norm_var": 0.0006998062133789062,
      "learning_rate": 0.0008663157894736843,
      "loss": 57.5406,
      "step": 177
    },
    {
      "crossentropy": 2.7578535079956055,
      "epoch": 2.05,
      "grad_norm": 0.7578125,
      "grad_norm_var": 0.0007293701171875,
      "learning_rate": 0.0008652631578947369,
      "loss": 55.5626,
      "step": 178
    },
    {
      "crossentropy": 2.8397985696792603,
      "epoch": 2.051,
      "grad_norm": 0.7578125,
      "grad_norm_var": 0.0006955464680989584,
      "learning_rate": 0.0008642105263157895,
      "loss": 56.7329,
      "step": 179
    },
    {
      "crossentropy": 2.8631927967071533,
      "epoch": 2.052,
      "grad_norm": 0.76171875,
      "grad_norm_var": 0.0006467183430989583,
      "learning_rate": 0.0008631578947368422,
      "loss": 56.4729,
      "step": 180
    },
    {
      "crossentropy": 2.829651117324829,
      "epoch": 2.053,
      "grad_norm": 0.7734375,
      "grad_norm_var": 0.0006398518880208333,
      "learning_rate": 0.0008621052631578947,
      "loss": 55.3689,
      "step": 181
    },
    {
      "crossentropy": 2.866907000541687,
      "epoch": 2.054,
      "grad_norm": 0.796875,
      "grad_norm_var": 0.0006650288899739584,
      "learning_rate": 0.0008610526315789474,
      "loss": 57.1882,
      "step": 182
    },
    {
      "crossentropy": 2.833262324333191,
      "epoch": 2.055,
      "grad_norm": 0.7421875,
      "grad_norm_var": 0.0007375081380208334,
      "learning_rate": 0.00086,
      "loss": 57.2365,
      "step": 183
    },
    {
      "crossentropy": 2.846835970878601,
      "epoch": 2.056,
      "grad_norm": 0.77734375,
      "grad_norm_var": 0.0007222493489583334,
      "learning_rate": 0.0008589473684210527,
      "loss": 57.233,
      "step": 184
    },
    {
      "crossentropy": 2.7716782093048096,
      "epoch": 2.057,
      "grad_norm": 0.78125,
      "grad_norm_var": 0.0005690892537434896,
      "learning_rate": 0.0008578947368421052,
      "loss": 55.5457,
      "step": 185
    },
    {
      "crossentropy": 2.8875041007995605,
      "epoch": 2.058,
      "grad_norm": 0.77734375,
      "grad_norm_var": 0.0005666097005208333,
      "learning_rate": 0.0008568421052631579,
      "loss": 57.5646,
      "step": 186
    },
    {
      "crossentropy": 2.9292314052581787,
      "epoch": 2.059,
      "grad_norm": 0.76953125,
      "grad_norm_var": 0.0005492528279622395,
      "learning_rate": 0.0008557894736842106,
      "loss": 58.1659,
      "step": 187
    },
    {
      "crossentropy": 2.877217411994934,
      "epoch": 2.06,
      "grad_norm": 0.77734375,
      "grad_norm_var": 0.00025424957275390627,
      "learning_rate": 0.0008547368421052632,
      "loss": 57.4952,
      "step": 188
    },
    {
      "crossentropy": 2.9632803201675415,
      "epoch": 2.061,
      "grad_norm": 0.7890625,
      "grad_norm_var": 0.0002684911092122396,
      "learning_rate": 0.0008536842105263158,
      "loss": 56.5015,
      "step": 189
    },
    {
      "crossentropy": 2.754631280899048,
      "epoch": 2.062,
      "grad_norm": 0.734375,
      "grad_norm_var": 0.000286865234375,
      "learning_rate": 0.0008526315789473684,
      "loss": 56.8266,
      "step": 190
    },
    {
      "crossentropy": 2.7981419563293457,
      "epoch": 2.063,
      "grad_norm": 0.8046875,
      "grad_norm_var": 0.0003631591796875,
      "learning_rate": 0.0008515789473684211,
      "loss": 55.2874,
      "step": 191
    },
    {
      "crossentropy": 2.779536008834839,
      "epoch": 2.064,
      "grad_norm": 0.77734375,
      "grad_norm_var": 0.00033512115478515623,
      "learning_rate": 0.0008505263157894737,
      "loss": 56.5862,
      "step": 192
    },
    {
      "crossentropy": 2.8000186681747437,
      "epoch": 3.001,
      "grad_norm": 0.7734375,
      "grad_norm_var": 0.0003203709920247396,
      "learning_rate": 0.0008494736842105262,
      "loss": 55.9334,
      "step": 193
    },
    {
      "crossentropy": 2.8197896480560303,
      "epoch": 3.002,
      "grad_norm": 0.8046875,
      "grad_norm_var": 0.0003615697224934896,
      "learning_rate": 0.000848421052631579,
      "loss": 56.534,
      "step": 194
    },
    {
      "crossentropy": 2.788080930709839,
      "epoch": 3.003,
      "grad_norm": 0.75,
      "grad_norm_var": 0.00039265950520833335,
      "learning_rate": 0.0008473684210526316,
      "loss": 55.8295,
      "step": 195
    },
    {
      "crossentropy": 2.658084750175476,
      "epoch": 3.004,
      "grad_norm": 0.7421875,
      "grad_norm_var": 0.00046183268229166665,
      "learning_rate": 0.0008463157894736843,
      "loss": 54.5686,
      "step": 196
    },
    {
      "crossentropy": 2.9385114908218384,
      "epoch": 3.005,
      "grad_norm": 0.828125,
      "grad_norm_var": 0.0006205240885416666,
      "learning_rate": 0.0008452631578947369,
      "loss": 59.5615,
      "step": 197
    },
    {
      "crossentropy": 2.836456775665283,
      "epoch": 3.006,
      "grad_norm": 0.7578125,
      "grad_norm_var": 0.0005666097005208333,
      "learning_rate": 0.0008442105263157895,
      "loss": 55.7351,
      "step": 198
    },
    {
      "crossentropy": 2.8391683101654053,
      "epoch": 3.007,
      "grad_norm": 0.7578125,
      "grad_norm_var": 0.0005879084269205729,
      "learning_rate": 0.0008431578947368421,
      "loss": 56.6525,
      "step": 199
    },
    {
      "crossentropy": 2.840343952178955,
      "epoch": 3.008,
      "grad_norm": 0.72265625,
      "grad_norm_var": 0.0007548014322916667,
      "learning_rate": 0.0008421052631578947,
      "loss": 56.1,
      "step": 200
    },
    {
      "crossentropy": 2.7855753898620605,
      "epoch": 3.009,
      "grad_norm": 0.76953125,
      "grad_norm_var": 0.0007525126139322917,
      "learning_rate": 0.0008410526315789474,
      "loss": 55.2499,
      "step": 201
    },
    {
      "crossentropy": 2.915839195251465,
      "epoch": 3.01,
      "grad_norm": 0.8359375,
      "grad_norm_var": 0.001015154520670573,
      "learning_rate": 0.00084,
      "loss": 58.2427,
      "step": 202
    },
    {
      "crossentropy": 2.785078763961792,
      "epoch": 3.011,
      "grad_norm": 0.77734375,
      "grad_norm_var": 0.001015154520670573,
      "learning_rate": 0.0008389473684210527,
      "loss": 57.127,
      "step": 203
    },
    {
      "crossentropy": 2.7886747121810913,
      "epoch": 3.012,
      "grad_norm": 0.76953125,
      "grad_norm_var": 0.0010027567545572917,
      "learning_rate": 0.0008378947368421053,
      "loss": 55.473,
      "step": 204
    },
    {
      "crossentropy": 2.8934919834136963,
      "epoch": 3.013,
      "grad_norm": 0.76171875,
      "grad_norm_var": 0.0009052912394205729,
      "learning_rate": 0.000836842105263158,
      "loss": 56.9954,
      "step": 205
    },
    {
      "crossentropy": 2.8888269662857056,
      "epoch": 3.014,
      "grad_norm": 0.75390625,
      "grad_norm_var": 0.0008697509765625,
      "learning_rate": 0.0008357894736842105,
      "loss": 56.4975,
      "step": 206
    },
    {
      "crossentropy": 2.8736512660980225,
      "epoch": 3.015,
      "grad_norm": 0.75390625,
      "grad_norm_var": 0.000888824462890625,
      "learning_rate": 0.0008347368421052631,
      "loss": 57.8767,
      "step": 207
    },
    {
      "crossentropy": 2.808267593383789,
      "epoch": 3.016,
      "grad_norm": 0.78515625,
      "grad_norm_var": 0.0008992513020833333,
      "learning_rate": 0.0008336842105263158,
      "loss": 55.929,
      "step": 208
    },
    {
      "crossentropy": 2.927086591720581,
      "epoch": 3.017,
      "grad_norm": 0.8046875,
      "grad_norm_var": 0.0009684244791666667,
      "learning_rate": 0.0008326315789473684,
      "loss": 57.214,
      "step": 209
    },
    {
      "crossentropy": 2.742948889732361,
      "epoch": 3.018,
      "grad_norm": 0.77734375,
      "grad_norm_var": 0.0009012222290039062,
      "learning_rate": 0.0008315789473684212,
      "loss": 55.9185,
      "step": 210
    },
    {
      "crossentropy": 2.879304528236389,
      "epoch": 3.019,
      "grad_norm": 0.80859375,
      "grad_norm_var": 0.000946044921875,
      "learning_rate": 0.0008305263157894737,
      "loss": 57.5414,
      "step": 211
    },
    {
      "crossentropy": 2.866540312767029,
      "epoch": 3.02,
      "grad_norm": 0.75390625,
      "grad_norm_var": 0.0009027481079101563,
      "learning_rate": 0.0008294736842105264,
      "loss": 55.6362,
      "step": 212
    },
    {
      "crossentropy": 2.8433879613876343,
      "epoch": 3.021,
      "grad_norm": 0.7734375,
      "grad_norm_var": 0.0007104873657226562,
      "learning_rate": 0.000828421052631579,
      "loss": 56.9735,
      "step": 213
    },
    {
      "crossentropy": 2.8319772481918335,
      "epoch": 3.022,
      "grad_norm": 0.7578125,
      "grad_norm_var": 0.0007104873657226562,
      "learning_rate": 0.0008273684210526315,
      "loss": 56.2473,
      "step": 214
    },
    {
      "crossentropy": 2.781643509864807,
      "epoch": 3.023,
      "grad_norm": 0.77734375,
      "grad_norm_var": 0.0006955464680989584,
      "learning_rate": 0.0008263157894736842,
      "loss": 55.011,
      "step": 215
    },
    {
      "crossentropy": 2.8087105751037598,
      "epoch": 3.024,
      "grad_norm": 0.80859375,
      "grad_norm_var": 0.0005696614583333334,
      "learning_rate": 0.0008252631578947368,
      "loss": 56.3954,
      "step": 216
    },
    {
      "crossentropy": 3.0052173137664795,
      "epoch": 3.025,
      "grad_norm": 0.80078125,
      "grad_norm_var": 0.0005900065104166666,
      "learning_rate": 0.0008242105263157895,
      "loss": 58.9994,
      "step": 217
    },
    {
      "crossentropy": 2.844808340072632,
      "epoch": 3.026,
      "grad_norm": 0.7578125,
      "grad_norm_var": 0.0004018147786458333,
      "learning_rate": 0.0008231578947368422,
      "loss": 56.4327,
      "step": 218
    },
    {
      "crossentropy": 2.7618932723999023,
      "epoch": 3.027,
      "grad_norm": 0.73046875,
      "grad_norm_var": 0.0005330403645833333,
      "learning_rate": 0.0008221052631578948,
      "loss": 55.3659,
      "step": 219
    },
    {
      "crossentropy": 2.800921320915222,
      "epoch": 3.028,
      "grad_norm": 0.73828125,
      "grad_norm_var": 0.0006103515625,
      "learning_rate": 0.0008210526315789474,
      "loss": 55.6769,
      "step": 220
    },
    {
      "crossentropy": 2.724760413169861,
      "epoch": 3.029,
      "grad_norm": 0.75390625,
      "grad_norm_var": 0.0006243387858072916,
      "learning_rate": 0.00082,
      "loss": 54.9201,
      "step": 221
    },
    {
      "crossentropy": 2.781510591506958,
      "epoch": 3.03,
      "grad_norm": 0.76953125,
      "grad_norm_var": 0.0006039937337239584,
      "learning_rate": 0.0008189473684210527,
      "loss": 55.7819,
      "step": 222
    },
    {
      "crossentropy": 2.776448607444763,
      "epoch": 3.031,
      "grad_norm": 0.7421875,
      "grad_norm_var": 0.0006408055623372395,
      "learning_rate": 0.0008178947368421052,
      "loss": 55.0119,
      "step": 223
    },
    {
      "crossentropy": 2.7627722024917603,
      "epoch": 3.032,
      "grad_norm": 0.7734375,
      "grad_norm_var": 0.0006276448567708333,
      "learning_rate": 0.0008168421052631579,
      "loss": 55.1523,
      "step": 224
    },
    {
      "crossentropy": 2.800988793373108,
      "epoch": 3.033,
      "grad_norm": 0.796875,
      "grad_norm_var": 0.000595855712890625,
      "learning_rate": 0.0008157894736842105,
      "loss": 56.915,
      "step": 225
    },
    {
      "crossentropy": 2.8703144788742065,
      "epoch": 3.034,
      "grad_norm": 0.8046875,
      "grad_norm_var": 0.0006692886352539062,
      "learning_rate": 0.0008147368421052633,
      "loss": 56.9158,
      "step": 226
    },
    {
      "crossentropy": 2.82321560382843,
      "epoch": 3.035,
      "grad_norm": 0.7890625,
      "grad_norm_var": 0.0005971272786458333,
      "learning_rate": 0.0008136842105263158,
      "loss": 56.0276,
      "step": 227
    },
    {
      "crossentropy": 2.7239447832107544,
      "epoch": 3.036,
      "grad_norm": 0.76953125,
      "grad_norm_var": 0.0005777994791666667,
      "learning_rate": 0.0008126315789473684,
      "loss": 55.3433,
      "step": 228
    },
    {
      "crossentropy": 2.8542327880859375,
      "epoch": 3.037,
      "grad_norm": 0.7890625,
      "grad_norm_var": 0.0005971272786458333,
      "learning_rate": 0.0008115789473684211,
      "loss": 56.0984,
      "step": 229
    },
    {
      "crossentropy": 2.6775788068771362,
      "epoch": 3.038,
      "grad_norm": 0.7734375,
      "grad_norm_var": 0.0005818684895833333,
      "learning_rate": 0.0008105263157894737,
      "loss": 56.2637,
      "step": 230
    },
    {
      "crossentropy": 2.818870425224304,
      "epoch": 3.039,
      "grad_norm": 0.74609375,
      "grad_norm_var": 0.0006266276041666667,
      "learning_rate": 0.0008094736842105264,
      "loss": 56.7463,
      "step": 231
    },
    {
      "crossentropy": 2.831796646118164,
      "epoch": 3.04,
      "grad_norm": 0.796875,
      "grad_norm_var": 0.0005772272745768229,
      "learning_rate": 0.0008084210526315789,
      "loss": 57.8099,
      "step": 232
    },
    {
      "crossentropy": 2.789393901824951,
      "epoch": 3.041,
      "grad_norm": 0.74609375,
      "grad_norm_var": 0.0005451838175455729,
      "learning_rate": 0.0008073684210526316,
      "loss": 55.6014,
      "step": 233
    },
    {
      "crossentropy": 2.873471736907959,
      "epoch": 3.042,
      "grad_norm": 0.76953125,
      "grad_norm_var": 0.0005388895670572917,
      "learning_rate": 0.0008063157894736842,
      "loss": 55.5173,
      "step": 234
    },
    {
      "crossentropy": 2.758022665977478,
      "epoch": 3.043,
      "grad_norm": 0.75390625,
      "grad_norm_var": 0.0004557291666666667,
      "learning_rate": 0.0008052631578947369,
      "loss": 55.6085,
      "step": 235
    },
    {
      "crossentropy": 2.940276026725769,
      "epoch": 3.044,
      "grad_norm": 0.76953125,
      "grad_norm_var": 0.0003865559895833333,
      "learning_rate": 0.0008042105263157895,
      "loss": 57.4984,
      "step": 236
    },
    {
      "crossentropy": 2.8459670543670654,
      "epoch": 3.045,
      "grad_norm": 0.8203125,
      "grad_norm_var": 0.0005065282185872396,
      "learning_rate": 0.0008031578947368421,
      "loss": 57.8657,
      "step": 237
    },
    {
      "crossentropy": 2.82193660736084,
      "epoch": 3.046,
      "grad_norm": 0.7734375,
      "grad_norm_var": 0.000504302978515625,
      "learning_rate": 0.0008021052631578948,
      "loss": 57.3414,
      "step": 238
    },
    {
      "crossentropy": 2.692602515220642,
      "epoch": 3.047,
      "grad_norm": 0.74609375,
      "grad_norm_var": 0.00048770904541015623,
      "learning_rate": 0.0008010526315789474,
      "loss": 54.8326,
      "step": 239
    },
    {
      "crossentropy": 2.7393832206726074,
      "epoch": 3.048,
      "grad_norm": 0.796875,
      "grad_norm_var": 0.0005136489868164063,
      "learning_rate": 0.0008,
      "loss": 55.2464,
      "step": 240
    },
    {
      "crossentropy": 2.8725425004959106,
      "epoch": 3.049,
      "grad_norm": 0.73046875,
      "grad_norm_var": 0.0006184895833333333,
      "learning_rate": 0.0007989473684210526,
      "loss": 56.4413,
      "step": 241
    },
    {
      "crossentropy": 3.001668334007263,
      "epoch": 3.05,
      "grad_norm": 0.78125,
      "grad_norm_var": 0.0005551656087239583,
      "learning_rate": 0.0007978947368421052,
      "loss": 57.9582,
      "step": 242
    },
    {
      "crossentropy": 2.92042338848114,
      "epoch": 3.051,
      "grad_norm": 0.78125,
      "grad_norm_var": 0.0005411783854166667,
      "learning_rate": 0.000796842105263158,
      "loss": 58.8934,
      "step": 243
    },
    {
      "crossentropy": 2.869771957397461,
      "epoch": 3.052,
      "grad_norm": 0.76171875,
      "grad_norm_var": 0.000547027587890625,
      "learning_rate": 0.0007957894736842105,
      "loss": 56.5323,
      "step": 244
    },
    {
      "crossentropy": 3.0153883695602417,
      "epoch": 3.053,
      "grad_norm": 0.76171875,
      "grad_norm_var": 0.0005278905232747396,
      "learning_rate": 0.0007947368421052632,
      "loss": 57.5091,
      "step": 245
    },
    {
      "crossentropy": 2.887974262237549,
      "epoch": 3.054,
      "grad_norm": 0.78515625,
      "grad_norm_var": 0.0005429585774739584,
      "learning_rate": 0.0007936842105263158,
      "loss": 57.8712,
      "step": 246
    },
    {
      "crossentropy": 2.864806890487671,
      "epoch": 3.055,
      "grad_norm": 0.765625,
      "grad_norm_var": 0.0005044937133789062,
      "learning_rate": 0.0007926315789473685,
      "loss": 56.7738,
      "step": 247
    },
    {
      "crossentropy": 2.8489774465560913,
      "epoch": 3.056,
      "grad_norm": 0.7734375,
      "grad_norm_var": 0.00045871734619140625,
      "learning_rate": 0.000791578947368421,
      "loss": 57.5012,
      "step": 248
    },
    {
      "crossentropy": 2.867731213569641,
      "epoch": 3.057,
      "grad_norm": 0.78515625,
      "grad_norm_var": 0.00043074289957682293,
      "learning_rate": 0.0007905263157894736,
      "loss": 57.2708,
      "step": 249
    },
    {
      "crossentropy": 2.8339585065841675,
      "epoch": 3.058,
      "grad_norm": 0.81640625,
      "grad_norm_var": 0.000551287333170573,
      "learning_rate": 0.0007894736842105263,
      "loss": 56.8616,
      "step": 250
    },
    {
      "crossentropy": 2.9042646884918213,
      "epoch": 3.059,
      "grad_norm": 0.78515625,
      "grad_norm_var": 0.0005238215128580729,
      "learning_rate": 0.000788421052631579,
      "loss": 57.7641,
      "step": 251
    },
    {
      "crossentropy": 2.86965274810791,
      "epoch": 3.06,
      "grad_norm": 0.77734375,
      "grad_norm_var": 0.0005197525024414062,
      "learning_rate": 0.0007873684210526317,
      "loss": 56.565,
      "step": 252
    },
    {
      "crossentropy": 2.9171911478042603,
      "epoch": 3.061,
      "grad_norm": 0.76953125,
      "grad_norm_var": 0.0003916422526041667,
      "learning_rate": 0.0007863157894736842,
      "loss": 59.3709,
      "step": 253
    },
    {
      "crossentropy": 2.7907087802886963,
      "epoch": 3.062,
      "grad_norm": 0.82421875,
      "grad_norm_var": 0.0005462010701497396,
      "learning_rate": 0.0007852631578947369,
      "loss": 56.1077,
| "step": 254 | |
| }, | |
| { | |
| "crossentropy": 2.810297727584839, | |
| "epoch": 3.063, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.0004821141560872396, | |
| "learning_rate": 0.0007842105263157895, | |
| "loss": 57.7457, | |
| "step": 255 | |
| }, | |
| { | |
| "crossentropy": 2.8475112915039062, | |
| "epoch": 3.064, | |
| "grad_norm": 0.82421875, | |
| "grad_norm_var": 0.0005938212076822916, | |
| "learning_rate": 0.000783157894736842, | |
| "loss": 55.2254, | |
| "step": 256 | |
| }, | |
| { | |
| "crossentropy": 2.777912735939026, | |
| "epoch": 4.001, | |
| "grad_norm": 0.78125, | |
| "grad_norm_var": 0.0005144755045572917, | |
| "learning_rate": 0.0007821052631578947, | |
| "loss": 56.0005, | |
| "step": 257 | |
| }, | |
| { | |
| "crossentropy": 2.8968677520751953, | |
| "epoch": 4.002, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.000540924072265625, | |
| "learning_rate": 0.0007810526315789473, | |
| "loss": 55.7583, | |
| "step": 258 | |
| }, | |
| { | |
| "crossentropy": 2.9626487493515015, | |
| "epoch": 4.003, | |
| "grad_norm": 0.78515625, | |
| "grad_norm_var": 0.00050048828125, | |
| "learning_rate": 0.0007800000000000001, | |
| "loss": 57.9443, | |
| "step": 259 | |
| }, | |
| { | |
| "crossentropy": 2.8472225666046143, | |
| "epoch": 4.004, | |
| "grad_norm": 0.77734375, | |
| "grad_norm_var": 0.0004628499348958333, | |
| "learning_rate": 0.0007789473684210527, | |
| "loss": 56.6347, | |
| "step": 260 | |
| }, | |
| { | |
| "crossentropy": 2.9540131092071533, | |
| "epoch": 4.005, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0005538304646809896, | |
| "learning_rate": 0.0007778947368421053, | |
| "loss": 57.5122, | |
| "step": 261 | |
| }, | |
| { | |
| "crossentropy": 2.801051616668701, | |
| "epoch": 4.006, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.0005787531534830729, | |
| "learning_rate": 0.0007768421052631579, | |
| "loss": 56.3798, | |
| "step": 262 | |
| }, | |
| { | |
| "crossentropy": 2.816064238548279, | |
| "epoch": 4.007, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0005950291951497395, | |
| "learning_rate": 0.0007757894736842105, | |
| "loss": 55.7209, | |
| "step": 263 | |
| }, | |
| { | |
| "crossentropy": 2.7781593799591064, | |
| "epoch": 4.008, | |
| "grad_norm": 0.76171875, | |
| "grad_norm_var": 0.0006285985310872396, | |
| "learning_rate": 0.0007747368421052632, | |
| "loss": 56.1839, | |
| "step": 264 | |
| }, | |
| { | |
| "crossentropy": 2.8623517751693726, | |
| "epoch": 4.009, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0006123860677083333, | |
| "learning_rate": 0.0007736842105263157, | |
| "loss": 57.0193, | |
| "step": 265 | |
| }, | |
| { | |
| "crossentropy": 2.950491189956665, | |
| "epoch": 4.01, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0006209691365559896, | |
| "learning_rate": 0.0007726315789473684, | |
| "loss": 58.5925, | |
| "step": 266 | |
| }, | |
| { | |
| "crossentropy": 2.855417013168335, | |
| "epoch": 4.011, | |
| "grad_norm": 0.8046875, | |
| "grad_norm_var": 0.0006650288899739584, | |
| "learning_rate": 0.000771578947368421, | |
| "loss": 56.9977, | |
| "step": 267 | |
| }, | |
| { | |
| "crossentropy": 2.7262661457061768, | |
| "epoch": 4.012, | |
| "grad_norm": 0.7890625, | |
| "grad_norm_var": 0.0006621678670247396, | |
| "learning_rate": 0.0007705263157894738, | |
| "loss": 55.8765, | |
| "step": 268 | |
| }, | |
| { | |
| "crossentropy": 2.7855740785598755, | |
| "epoch": 4.013, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.0005935033162434896, | |
| "learning_rate": 0.0007694736842105264, | |
| "loss": 55.4868, | |
| "step": 269 | |
| }, | |
| { | |
| "crossentropy": 2.743911862373352, | |
| "epoch": 4.014, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.0006820042928059896, | |
| "learning_rate": 0.0007684210526315789, | |
| "loss": 56.2341, | |
| "step": 270 | |
| }, | |
| { | |
| "crossentropy": 2.7292779684066772, | |
| "epoch": 4.015, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0005055745442708333, | |
| "learning_rate": 0.0007673684210526316, | |
| "loss": 54.8426, | |
| "step": 271 | |
| }, | |
| { | |
| "crossentropy": 2.8533518314361572, | |
| "epoch": 4.016, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.00030307769775390626, | |
| "learning_rate": 0.0007663157894736842, | |
| "loss": 56.84, | |
| "step": 272 | |
| }, | |
| { | |
| "crossentropy": 2.7164812088012695, | |
| "epoch": 4.017, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.0003387451171875, | |
| "learning_rate": 0.0007652631578947369, | |
| "loss": 54.748, | |
| "step": 273 | |
| }, | |
| { | |
| "crossentropy": 2.742064118385315, | |
| "epoch": 4.018, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.0003819147745768229, | |
| "learning_rate": 0.0007642105263157894, | |
| "loss": 57.1381, | |
| "step": 274 | |
| }, | |
| { | |
| "crossentropy": 2.8081696033477783, | |
| "epoch": 4.019, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.00039265950520833335, | |
| "learning_rate": 0.0007631578947368421, | |
| "loss": 54.7163, | |
| "step": 275 | |
| }, | |
| { | |
| "crossentropy": 2.995293974876404, | |
| "epoch": 4.02, | |
| "grad_norm": 0.8046875, | |
| "grad_norm_var": 0.0005034764607747395, | |
| "learning_rate": 0.0007621052631578948, | |
| "loss": 58.3995, | |
| "step": 276 | |
| }, | |
| { | |
| "crossentropy": 2.789522409439087, | |
| "epoch": 4.021, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.0004984537760416666, | |
| "learning_rate": 0.0007610526315789474, | |
| "loss": 56.2995, | |
| "step": 277 | |
| }, | |
| { | |
| "crossentropy": 2.9243152141571045, | |
| "epoch": 4.022, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0005063374837239583, | |
| "learning_rate": 0.00076, | |
| "loss": 56.1988, | |
| "step": 278 | |
| }, | |
| { | |
| "crossentropy": 2.75484561920166, | |
| "epoch": 4.023, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0005269368489583333, | |
| "learning_rate": 0.0007589473684210526, | |
| "loss": 55.6807, | |
| "step": 279 | |
| }, | |
| { | |
| "crossentropy": 2.753888249397278, | |
| "epoch": 4.024, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0005456924438476563, | |
| "learning_rate": 0.0007578947368421053, | |
| "loss": 55.653, | |
| "step": 280 | |
| }, | |
| { | |
| "crossentropy": 2.889884352684021, | |
| "epoch": 4.025, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.0005533218383789063, | |
| "learning_rate": 0.0007568421052631579, | |
| "loss": 56.7904, | |
| "step": 281 | |
| }, | |
| { | |
| "crossentropy": 2.790767788887024, | |
| "epoch": 4.026, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.0006194432576497396, | |
| "learning_rate": 0.0007557894736842105, | |
| "loss": 54.6775, | |
| "step": 282 | |
| }, | |
| { | |
| "crossentropy": 2.7210811376571655, | |
| "epoch": 4.027, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.00047092437744140626, | |
| "learning_rate": 0.0007547368421052631, | |
| "loss": 54.3834, | |
| "step": 283 | |
| }, | |
| { | |
| "crossentropy": 2.862104892730713, | |
| "epoch": 4.028, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.0004124323527018229, | |
| "learning_rate": 0.0007536842105263158, | |
| "loss": 56.5897, | |
| "step": 284 | |
| }, | |
| { | |
| "crossentropy": 2.861825942993164, | |
| "epoch": 4.029, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.0004261652628580729, | |
| "learning_rate": 0.0007526315789473685, | |
| "loss": 57.199, | |
| "step": 285 | |
| }, | |
| { | |
| "crossentropy": 2.738445281982422, | |
| "epoch": 4.03, | |
| "grad_norm": 0.828125, | |
| "grad_norm_var": 0.0007405598958333333, | |
| "learning_rate": 0.000751578947368421, | |
| "loss": 56.5317, | |
| "step": 286 | |
| }, | |
| { | |
| "crossentropy": 2.8645286560058594, | |
| "epoch": 4.031, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.0007445653279622396, | |
| "learning_rate": 0.0007505263157894737, | |
| "loss": 56.3045, | |
| "step": 287 | |
| }, | |
| { | |
| "crossentropy": 2.903935194015503, | |
| "epoch": 4.032, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0007369359334309896, | |
| "learning_rate": 0.0007494736842105263, | |
| "loss": 56.0398, | |
| "step": 288 | |
| }, | |
| { | |
| "crossentropy": 2.8562066555023193, | |
| "epoch": 4.033, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.0007196426391601563, | |
| "learning_rate": 0.000748421052631579, | |
| "loss": 55.6453, | |
| "step": 289 | |
| }, | |
| { | |
| "crossentropy": 2.7645561695098877, | |
| "epoch": 4.034, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.0006916681925455729, | |
| "learning_rate": 0.0007473684210526316, | |
| "loss": 55.2077, | |
| "step": 290 | |
| }, | |
| { | |
| "crossentropy": 2.8362536430358887, | |
| "epoch": 4.035, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.0006792704264322917, | |
| "learning_rate": 0.0007463157894736842, | |
| "loss": 56.4424, | |
| "step": 291 | |
| }, | |
| { | |
| "crossentropy": 2.9669147729873657, | |
| "epoch": 4.036, | |
| "grad_norm": 0.796875, | |
| "grad_norm_var": 0.0006368001302083333, | |
| "learning_rate": 0.0007452631578947369, | |
| "loss": 58.4369, | |
| "step": 292 | |
| }, | |
| { | |
| "crossentropy": 2.751739501953125, | |
| "epoch": 4.037, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0006408055623372395, | |
| "learning_rate": 0.0007442105263157895, | |
| "loss": 55.4938, | |
| "step": 293 | |
| }, | |
| { | |
| "crossentropy": 2.7845678329467773, | |
| "epoch": 4.038, | |
| "grad_norm": 0.78125, | |
| "grad_norm_var": 0.0006621678670247396, | |
| "learning_rate": 0.0007431578947368422, | |
| "loss": 57.6227, | |
| "step": 294 | |
| }, | |
| { | |
| "crossentropy": 2.894593834877014, | |
| "epoch": 4.039, | |
| "grad_norm": 0.76171875, | |
| "grad_norm_var": 0.0006357828776041666, | |
| "learning_rate": 0.0007421052631578947, | |
| "loss": 56.1408, | |
| "step": 295 | |
| }, | |
| { | |
| "crossentropy": 2.8750534057617188, | |
| "epoch": 4.04, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.0006260553995768229, | |
| "learning_rate": 0.0007410526315789474, | |
| "loss": 57.3143, | |
| "step": 296 | |
| }, | |
| { | |
| "crossentropy": 2.828782796859741, | |
| "epoch": 4.041, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.0006260553995768229, | |
| "learning_rate": 0.00074, | |
| "loss": 56.8303, | |
| "step": 297 | |
| }, | |
| { | |
| "crossentropy": 2.830629348754883, | |
| "epoch": 4.042, | |
| "grad_norm": 0.7890625, | |
| "grad_norm_var": 0.0005670547485351563, | |
| "learning_rate": 0.0007389473684210527, | |
| "loss": 57.1108, | |
| "step": 298 | |
| }, | |
| { | |
| "crossentropy": 2.8372979164123535, | |
| "epoch": 4.043, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.0005551656087239583, | |
| "learning_rate": 0.0007378947368421052, | |
| "loss": 56.3579, | |
| "step": 299 | |
| }, | |
| { | |
| "crossentropy": 2.7618184089660645, | |
| "epoch": 4.044, | |
| "grad_norm": 0.76171875, | |
| "grad_norm_var": 0.0005538304646809896, | |
| "learning_rate": 0.0007368421052631579, | |
| "loss": 56.3516, | |
| "step": 300 | |
| }, | |
| { | |
| "crossentropy": 2.8434700965881348, | |
| "epoch": 4.045, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0005694071451822917, | |
| "learning_rate": 0.0007357894736842106, | |
| "loss": 57.8224, | |
| "step": 301 | |
| }, | |
| { | |
| "crossentropy": 2.845301866531372, | |
| "epoch": 4.046, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0002886454264322917, | |
| "learning_rate": 0.0007347368421052632, | |
| "loss": 56.0516, | |
| "step": 302 | |
| }, | |
| { | |
| "crossentropy": 2.902227759361267, | |
| "epoch": 4.047, | |
| "grad_norm": 0.80078125, | |
| "grad_norm_var": 0.00038426717122395834, | |
| "learning_rate": 0.0007336842105263159, | |
| "loss": 57.105, | |
| "step": 303 | |
| }, | |
| { | |
| "crossentropy": 2.8356798887252808, | |
| "epoch": 4.048, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.00037740071614583335, | |
| "learning_rate": 0.0007326315789473684, | |
| "loss": 57.0535, | |
| "step": 304 | |
| }, | |
| { | |
| "crossentropy": 2.9245163202285767, | |
| "epoch": 4.049, | |
| "grad_norm": 0.8046875, | |
| "grad_norm_var": 0.0004470189412434896, | |
| "learning_rate": 0.0007315789473684211, | |
| "loss": 58.2829, | |
| "step": 305 | |
| }, | |
| { | |
| "crossentropy": 2.773668646812439, | |
| "epoch": 4.05, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.0005462646484375, | |
| "learning_rate": 0.0007305263157894737, | |
| "loss": 56.9855, | |
| "step": 306 | |
| }, | |
| { | |
| "crossentropy": 2.974271535873413, | |
| "epoch": 4.051, | |
| "grad_norm": 0.8046875, | |
| "grad_norm_var": 0.0005711237589518229, | |
| "learning_rate": 0.0007294736842105262, | |
| "loss": 58.2544, | |
| "step": 307 | |
| }, | |
| { | |
| "crossentropy": 2.7676045894622803, | |
| "epoch": 4.052, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.0005304336547851563, | |
| "learning_rate": 0.000728421052631579, | |
| "loss": 55.4997, | |
| "step": 308 | |
| }, | |
| { | |
| "crossentropy": 2.8458242416381836, | |
| "epoch": 4.053, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.000540924072265625, | |
| "learning_rate": 0.0007273684210526316, | |
| "loss": 56.7489, | |
| "step": 309 | |
| }, | |
| { | |
| "crossentropy": 2.9176676273345947, | |
| "epoch": 4.054, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.0005395889282226562, | |
| "learning_rate": 0.0007263157894736843, | |
| "loss": 57.7019, | |
| "step": 310 | |
| }, | |
| { | |
| "crossentropy": 2.7517309188842773, | |
| "epoch": 4.055, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0005381266276041667, | |
| "learning_rate": 0.0007252631578947369, | |
| "loss": 56.0364, | |
| "step": 311 | |
| }, | |
| { | |
| "crossentropy": 2.8117847442626953, | |
| "epoch": 4.056, | |
| "grad_norm": 0.80078125, | |
| "grad_norm_var": 0.0005755106608072917, | |
| "learning_rate": 0.0007242105263157895, | |
| "loss": 57.0578, | |
| "step": 312 | |
| }, | |
| { | |
| "crossentropy": 2.7207109928131104, | |
| "epoch": 4.057, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.0005904515584309896, | |
| "learning_rate": 0.0007231578947368421, | |
| "loss": 55.5513, | |
| "step": 313 | |
| }, | |
| { | |
| "crossentropy": 2.798499345779419, | |
| "epoch": 4.058, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.0006144205729166667, | |
| "learning_rate": 0.0007221052631578947, | |
| "loss": 56.0931, | |
| "step": 314 | |
| }, | |
| { | |
| "crossentropy": 2.8496850728988647, | |
| "epoch": 4.059, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.0005877176920572917, | |
| "learning_rate": 0.0007210526315789474, | |
| "loss": 56.5729, | |
| "step": 315 | |
| }, | |
| { | |
| "crossentropy": 2.739889979362488, | |
| "epoch": 4.06, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.0005879084269205729, | |
| "learning_rate": 0.0007199999999999999, | |
| "loss": 57.048, | |
| "step": 316 | |
| }, | |
| { | |
| "crossentropy": 2.8727529048919678, | |
| "epoch": 4.061, | |
| "grad_norm": 0.8203125, | |
| "grad_norm_var": 0.0007298151652018229, | |
| "learning_rate": 0.0007189473684210527, | |
| "loss": 57.4175, | |
| "step": 317 | |
| }, | |
| { | |
| "crossentropy": 2.9316142797470093, | |
| "epoch": 4.062, | |
| "grad_norm": 0.83203125, | |
| "grad_norm_var": 0.0009470621744791667, | |
| "learning_rate": 0.0007178947368421053, | |
| "loss": 57.4141, | |
| "step": 318 | |
| }, | |
| { | |
| "crossentropy": 2.8334540128707886, | |
| "epoch": 4.063, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.000909868876139323, | |
| "learning_rate": 0.000716842105263158, | |
| "loss": 56.4528, | |
| "step": 319 | |
| }, | |
| { | |
| "crossentropy": 2.8760459423065186, | |
| "epoch": 4.064, | |
| "grad_norm": 0.76171875, | |
| "grad_norm_var": 0.0009195963541666667, | |
| "learning_rate": 0.0007157894736842105, | |
| "loss": 57.3916, | |
| "step": 320 | |
| }, | |
| { | |
| "crossentropy": 2.7978386878967285, | |
| "epoch": 5.001, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.0007181803385416667, | |
| "learning_rate": 0.0007147368421052631, | |
| "loss": 56.2145, | |
| "step": 321 | |
| }, | |
| { | |
| "crossentropy": 2.744502544403076, | |
| "epoch": 5.002, | |
| "grad_norm": 0.7890625, | |
| "grad_norm_var": 0.0006683349609375, | |
| "learning_rate": 0.0007136842105263158, | |
| "loss": 56.6854, | |
| "step": 322 | |
| }, | |
| { | |
| "crossentropy": 2.8497555255889893, | |
| "epoch": 5.003, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.0006540298461914062, | |
| "learning_rate": 0.0007126315789473684, | |
| "loss": 55.9165, | |
| "step": 323 | |
| }, | |
| { | |
| "crossentropy": 2.7810473442077637, | |
| "epoch": 5.004, | |
| "grad_norm": 0.8515625, | |
| "grad_norm_var": 0.000968170166015625, | |
| "learning_rate": 0.000711578947368421, | |
| "loss": 56.7632, | |
| "step": 324 | |
| }, | |
| { | |
| "crossentropy": 2.9091150760650635, | |
| "epoch": 5.005, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0010171890258789062, | |
| "learning_rate": 0.0007105263157894737, | |
| "loss": 57.0648, | |
| "step": 325 | |
| }, | |
| { | |
| "crossentropy": 2.9585464000701904, | |
| "epoch": 5.006, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.001034990946451823, | |
| "learning_rate": 0.0007094736842105264, | |
| "loss": 57.6818, | |
| "step": 326 | |
| }, | |
| { | |
| "crossentropy": 2.7995084524154663, | |
| "epoch": 5.007, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0010457356770833333, | |
| "learning_rate": 0.000708421052631579, | |
| "loss": 55.2677, | |
| "step": 327 | |
| }, | |
| { | |
| "crossentropy": 2.8381704092025757, | |
| "epoch": 5.008, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0010207494099934895, | |
| "learning_rate": 0.0007073684210526316, | |
| "loss": 56.1405, | |
| "step": 328 | |
| }, | |
| { | |
| "crossentropy": 2.8964773416519165, | |
| "epoch": 5.009, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.0009571711222330729, | |
| "learning_rate": 0.0007063157894736842, | |
| "loss": 57.4197, | |
| "step": 329 | |
| }, | |
| { | |
| "crossentropy": 2.8280014991760254, | |
| "epoch": 5.01, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.0009571711222330729, | |
| "learning_rate": 0.0007052631578947368, | |
| "loss": 56.3693, | |
| "step": 330 | |
| }, | |
| { | |
| "crossentropy": 2.792478084564209, | |
| "epoch": 5.011, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.0009600321451822916, | |
| "learning_rate": 0.0007042105263157895, | |
| "loss": 56.2133, | |
| "step": 331 | |
| }, | |
| { | |
| "crossentropy": 2.8384552001953125, | |
| "epoch": 5.012, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.0009052912394205729, | |
| "learning_rate": 0.0007031578947368421, | |
| "loss": 57.0165, | |
| "step": 332 | |
| }, | |
| { | |
| "crossentropy": 2.888237237930298, | |
| "epoch": 5.013, | |
| "grad_norm": 0.796875, | |
| "grad_norm_var": 0.0006998697916666667, | |
| "learning_rate": 0.0007021052631578948, | |
| "loss": 58.966, | |
| "step": 333 | |
| }, | |
| { | |
| "crossentropy": 2.834162950515747, | |
| "epoch": 5.014, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.0007338841756184896, | |
| "learning_rate": 0.0007010526315789474, | |
| "loss": 55.0934, | |
| "step": 334 | |
| }, | |
| { | |
| "crossentropy": 2.7016130685806274, | |
| "epoch": 5.015, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.0007382710774739583, | |
| "learning_rate": 0.0007, | |
| "loss": 55.4662, | |
| "step": 335 | |
| }, | |
| { | |
| "crossentropy": 2.894522786140442, | |
| "epoch": 5.016, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.0007369359334309896, | |
| "learning_rate": 0.0006989473684210527, | |
| "loss": 55.544, | |
| "step": 336 | |
| }, | |
| { | |
| "crossentropy": 2.891346573829651, | |
| "epoch": 5.017, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0007375081380208334, | |
| "learning_rate": 0.0006978947368421052, | |
| "loss": 57.4768, | |
| "step": 337 | |
| }, | |
| { | |
| "crossentropy": 2.8023122549057007, | |
| "epoch": 5.018, | |
| "grad_norm": 0.79296875, | |
| "grad_norm_var": 0.0007491429646809896, | |
| "learning_rate": 0.0006968421052631579, | |
| "loss": 56.9822, | |
| "step": 338 | |
| }, | |
| { | |
| "crossentropy": 2.893723964691162, | |
| "epoch": 5.019, | |
| "grad_norm": 0.78515625, | |
| "grad_norm_var": 0.0007659276326497396, | |
| "learning_rate": 0.0006957894736842105, | |
| "loss": 56.84, | |
| "step": 339 | |
| }, | |
| { | |
| "crossentropy": 2.8015278577804565, | |
| "epoch": 5.02, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.0003326416015625, | |
| "learning_rate": 0.0006947368421052632, | |
| "loss": 55.3613, | |
| "step": 340 | |
| }, | |
| { | |
| "crossentropy": 2.868039131164551, | |
| "epoch": 5.021, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.00030517578125, | |
| "learning_rate": 0.0006936842105263159, | |
| "loss": 57.3593, | |
| "step": 341 | |
| }, | |
| { | |
| "crossentropy": 2.800724148750305, | |
| "epoch": 5.022, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.000390625, | |
| "learning_rate": 0.0006926315789473684, | |
| "loss": 55.3344, | |
| "step": 342 | |
| }, | |
| { | |
| "crossentropy": 2.791234016418457, | |
| "epoch": 5.023, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.00039768218994140625, | |
| "learning_rate": 0.0006915789473684211, | |
| "loss": 55.2978, | |
| "step": 343 | |
| }, | |
| { | |
| "crossentropy": 2.718875527381897, | |
| "epoch": 5.024, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0004042943318684896, | |
| "learning_rate": 0.0006905263157894737, | |
| "loss": 54.9774, | |
| "step": 344 | |
| }, | |
| { | |
| "crossentropy": 2.888947367668152, | |
| "epoch": 5.025, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0004025777180989583, | |
| "learning_rate": 0.0006894736842105264, | |
| "loss": 56.9891, | |
| "step": 345 | |
| }, | |
| { | |
| "crossentropy": 2.7386454343795776, | |
| "epoch": 5.026, | |
| "grad_norm": 0.78125, | |
| "grad_norm_var": 0.0004241307576497396, | |
| "learning_rate": 0.0006884210526315789, | |
| "loss": 55.8273, | |
| "step": 346 | |
| }, | |
| { | |
| "crossentropy": 2.703773021697998, | |
| "epoch": 5.027, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.000421142578125, | |
| "learning_rate": 0.0006873684210526316, | |
| "loss": 55.5817, | |
| "step": 347 | |
| }, | |
| { | |
| "crossentropy": 2.821714162826538, | |
| "epoch": 5.028, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.000384521484375, | |
| "learning_rate": 0.0006863157894736842, | |
| "loss": 55.9298, | |
| "step": 348 | |
| }, | |
| { | |
| "crossentropy": 2.8079909086227417, | |
| "epoch": 5.029, | |
| "grad_norm": 0.71875, | |
| "grad_norm_var": 0.00042012532552083336, | |
| "learning_rate": 0.0006852631578947368, | |
| "loss": 55.0177, | |
| "step": 349 | |
| }, | |
| { | |
| "crossentropy": 2.694362759590149, | |
| "epoch": 5.03, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.00042057037353515625, | |
| "learning_rate": 0.0006842105263157895, | |
| "loss": 54.1136, | |
| "step": 350 | |
| }, | |
| { | |
| "crossentropy": 2.8693727254867554, | |
| "epoch": 5.031, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.0004302342732747396, | |
| "learning_rate": 0.0006831578947368421, | |
| "loss": 57.0075, | |
| "step": 351 | |
| }, | |
| { | |
| "crossentropy": 2.8346846103668213, | |
| "epoch": 5.032, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.000433349609375, | |
| "learning_rate": 0.0006821052631578948, | |
| "loss": 56.9684, | |
| "step": 352 | |
| }, | |
| { | |
| "crossentropy": 2.9195793867111206, | |
| "epoch": 5.033, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.00044193267822265623, | |
| "learning_rate": 0.0006810526315789474, | |
| "loss": 57.0331, | |
| "step": 353 | |
| }, | |
| { | |
| "crossentropy": 2.8132762908935547, | |
| "epoch": 5.034, | |
| "grad_norm": 0.77734375, | |
| "grad_norm_var": 0.00038547515869140624, | |
| "learning_rate": 0.00068, | |
| "loss": 55.508, | |
| "step": 354 | |
| }, | |
| { | |
| "crossentropy": 2.7499265670776367, | |
| "epoch": 5.035, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.00035037994384765623, | |
| "learning_rate": 0.0006789473684210526, | |
| "loss": 56.0205, | |
| "step": 355 | |
| }, | |
| { | |
| "crossentropy": 2.800133466720581, | |
| "epoch": 5.036, | |
| "grad_norm": 0.78125, | |
| "grad_norm_var": 0.0003720601399739583, | |
| "learning_rate": 0.0006778947368421052, | |
| "loss": 55.9033, | |
| "step": 356 | |
| }, | |
| { | |
| "crossentropy": 2.820741653442383, | |
| "epoch": 5.037, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.0003720601399739583, | |
| "learning_rate": 0.0006768421052631579, | |
| "loss": 55.9092, | |
| "step": 357 | |
| }, | |
| { | |
| "crossentropy": 2.781237006187439, | |
| "epoch": 5.038, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0003102620442708333, | |
| "learning_rate": 0.0006757894736842106, | |
| "loss": 56.8959, | |
| "step": 358 | |
| }, | |
| { | |
| "crossentropy": 2.8900152444839478, | |
| "epoch": 5.039, | |
| "grad_norm": 0.78125, | |
| "grad_norm_var": 0.0003280003865559896, | |
| "learning_rate": 0.0006747368421052632, | |
| "loss": 56.5975, | |
| "step": 359 | |
| }, | |
| { | |
| "crossentropy": 2.6693739891052246, | |
| "epoch": 5.04, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0003280003865559896, | |
| "learning_rate": 0.0006736842105263158, | |
| "loss": 55.8081, | |
| "step": 360 | |
| }, | |
| { | |
| "crossentropy": 2.8285491466522217, | |
| "epoch": 5.041, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0003280003865559896, | |
| "learning_rate": 0.0006726315789473685, | |
| "loss": 55.7024, | |
| "step": 361 | |
| }, | |
| { | |
| "crossentropy": 2.970876455307007, | |
| "epoch": 5.042, | |
| "grad_norm": 0.76171875, | |
| "grad_norm_var": 0.000299072265625, | |
| "learning_rate": 0.0006715789473684211, | |
| "loss": 57.7093, | |
| "step": 362 | |
| }, | |
| { | |
| "crossentropy": 2.902941942214966, | |
| "epoch": 5.043, | |
| "grad_norm": 0.78125, | |
| "grad_norm_var": 0.0003265380859375, | |
| "learning_rate": 0.0006705263157894736, | |
| "loss": 58.8785, | |
| "step": 363 | |
| }, | |
| { | |
| "crossentropy": 2.888626217842102, | |
| "epoch": 5.044, | |
| "grad_norm": 0.76171875, | |
| "grad_norm_var": 0.000321197509765625, | |
| "learning_rate": 0.0006694736842105263, | |
| "loss": 56.7212, | |
| "step": 364 | |
| }, | |
| { | |
| "crossentropy": 2.9147382974624634, | |
| "epoch": 5.045, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.000199127197265625, | |
| "learning_rate": 0.0006684210526315789, | |
| "loss": 57.0614, | |
| "step": 365 | |
| }, | |
| { | |
| "crossentropy": 2.816585063934326, | |
| "epoch": 5.046, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.00019474029541015624, | |
| "learning_rate": 0.0006673684210526317, | |
| "loss": 57.5533, | |
| "step": 366 | |
| }, | |
| { | |
| "crossentropy": 2.801294684410095, | |
| "epoch": 5.047, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.00018761952718098957, | |
| "learning_rate": 0.0006663157894736842, | |
| "loss": 56.8426, | |
| "step": 367 | |
| }, | |
| { | |
| "crossentropy": 2.8105653524398804, | |
| "epoch": 5.048, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.00017439524332682292, | |
| "learning_rate": 0.0006652631578947369, | |
| "loss": 56.7316, | |
| "step": 368 | |
| }, | |
| { | |
| "crossentropy": 2.8542263507843018, | |
| "epoch": 5.049, | |
| "grad_norm": 0.78515625, | |
| "grad_norm_var": 0.00018202463785807292, | |
| "learning_rate": 0.0006642105263157895, | |
| "loss": 57.1404, | |
| "step": 369 | |
| }, | |
| { | |
| "crossentropy": 2.75753116607666, | |
| "epoch": 5.05, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.00019321441650390624, | |
| "learning_rate": 0.0006631578947368421, | |
| "loss": 56.3615, | |
| "step": 370 | |
| }, | |
| { | |
| "crossentropy": 2.7114880084991455, | |
| "epoch": 5.051, | |
| "grad_norm": 0.76171875, | |
| "grad_norm_var": 0.00014896392822265624, | |
| "learning_rate": 0.0006621052631578947, | |
| "loss": 55.6979, | |
| "step": 371 | |
| }, | |
| { | |
| "crossentropy": 2.8140069246292114, | |
| "epoch": 5.052, | |
| "grad_norm": 0.796875, | |
| "grad_norm_var": 0.0001983006795247396, | |
| "learning_rate": 0.0006610526315789473, | |
| "loss": 57.4654, | |
| "step": 372 | |
| }, | |
| { | |
| "crossentropy": 2.9149471521377563, | |
| "epoch": 5.053, | |
| "grad_norm": 0.796875, | |
| "grad_norm_var": 0.0002517064412434896, | |
| "learning_rate": 0.00066, | |
| "loss": 58.3265, | |
| "step": 373 | |
| }, | |
| { | |
| "crossentropy": 2.922295570373535, | |
| "epoch": 5.054, | |
| "grad_norm": 0.77734375, | |
| "grad_norm_var": 0.00023167928059895834, | |
| "learning_rate": 0.0006589473684210527, | |
| "loss": 58.1646, | |
| "step": 374 | |
| }, | |
| { | |
| "crossentropy": 2.7302639484405518, | |
| "epoch": 5.055, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0002459208170572917, | |
| "learning_rate": 0.0006578947368421054, | |
| "loss": 56.4477, | |
| "step": 375 | |
| }, | |
| { | |
| "crossentropy": 2.849370002746582, | |
| "epoch": 5.056, | |
| "grad_norm": 0.76171875, | |
| "grad_norm_var": 0.00022627512613932293, | |
| "learning_rate": 0.0006568421052631579, | |
| "loss": 56.5778, | |
| "step": 376 | |
| }, | |
| { | |
| "crossentropy": 2.7923754453659058, | |
| "epoch": 5.057, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.000284576416015625, | |
| "learning_rate": 0.0006557894736842105, | |
| "loss": 56.2568, | |
| "step": 377 | |
| }, | |
| { | |
| "crossentropy": 2.807488203048706, | |
| "epoch": 5.058, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0002827326456705729, | |
| "learning_rate": 0.0006547368421052632, | |
| "loss": 55.3052, | |
| "step": 378 | |
| }, | |
| { | |
| "crossentropy": 2.947903275489807, | |
| "epoch": 5.059, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.00027872721354166665, | |
| "learning_rate": 0.0006536842105263158, | |
| "loss": 58.4199, | |
| "step": 379 | |
| }, | |
| { | |
| "crossentropy": 2.838424563407898, | |
| "epoch": 5.06, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.0002812067667643229, | |
| "learning_rate": 0.0006526315789473684, | |
| "loss": 55.4688, | |
| "step": 380 | |
| }, | |
| { | |
| "crossentropy": 2.931127429008484, | |
| "epoch": 5.061, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.00028578440348307293, | |
| "learning_rate": 0.000651578947368421, | |
| "loss": 57.8073, | |
| "step": 381 | |
| }, | |
| { | |
| "crossentropy": 2.776180386543274, | |
| "epoch": 5.062, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0002848307291666667, | |
| "learning_rate": 0.0006505263157894738, | |
| "loss": 55.5867, | |
| "step": 382 | |
| }, | |
| { | |
| "crossentropy": 2.95345675945282, | |
| "epoch": 5.063, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0002848307291666667, | |
| "learning_rate": 0.0006494736842105264, | |
| "loss": 57.5454, | |
| "step": 383 | |
| }, | |
| { | |
| "crossentropy": 2.890448570251465, | |
| "epoch": 5.064, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.00027815500895182293, | |
| "learning_rate": 0.0006484210526315789, | |
| "loss": 57.2167, | |
| "step": 384 | |
| }, | |
| { | |
| "crossentropy": 2.896027088165283, | |
| "epoch": 6.001, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0002486546834309896, | |
| "learning_rate": 0.0006473684210526316, | |
| "loss": 56.383, | |
| "step": 385 | |
| }, | |
| { | |
| "crossentropy": 2.7579907178878784, | |
| "epoch": 6.002, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0002471923828125, | |
| "learning_rate": 0.0006463157894736842, | |
| "loss": 54.8391, | |
| "step": 386 | |
| }, | |
| { | |
| "crossentropy": 2.7551032304763794, | |
| "epoch": 6.003, | |
| "grad_norm": 0.76171875, | |
| "grad_norm_var": 0.00018253326416015626, | |
| "learning_rate": 0.0006452631578947369, | |
| "loss": 56.4615, | |
| "step": 387 | |
| }, | |
| { | |
| "crossentropy": 2.7534666061401367, | |
| "epoch": 6.004, | |
| "grad_norm": 0.78515625, | |
| "grad_norm_var": 0.000140380859375, | |
| "learning_rate": 0.0006442105263157894, | |
| "loss": 55.3748, | |
| "step": 388 | |
| }, | |
| { | |
| "crossentropy": 2.878955602645874, | |
| "epoch": 6.005, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.000131988525390625, | |
| "learning_rate": 0.0006431578947368421, | |
| "loss": 56.7231, | |
| "step": 389 | |
| }, | |
| { | |
| "crossentropy": 2.7627967596054077, | |
| "epoch": 6.006, | |
| "grad_norm": 0.78125, | |
| "grad_norm_var": 0.00014216105143229166, | |
| "learning_rate": 0.0006421052631578948, | |
| "loss": 55.1437, | |
| "step": 390 | |
| }, | |
| { | |
| "crossentropy": 2.7477593421936035, | |
| "epoch": 6.007, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0001545588175455729, | |
| "learning_rate": 0.0006410526315789474, | |
| "loss": 55.0148, | |
| "step": 391 | |
| }, | |
| { | |
| "crossentropy": 2.919530153274536, | |
| "epoch": 6.008, | |
| "grad_norm": 0.80078125, | |
| "grad_norm_var": 0.0001891454060872396, | |
| "learning_rate": 0.00064, | |
| "loss": 57.6885, | |
| "step": 392 | |
| }, | |
| { | |
| "crossentropy": 2.819299101829529, | |
| "epoch": 6.009, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0001891454060872396, | |
| "learning_rate": 0.0006389473684210526, | |
| "loss": 56.0235, | |
| "step": 393 | |
| }, | |
| { | |
| "crossentropy": 2.9445995092391968, | |
| "epoch": 6.01, | |
| "grad_norm": 0.796875, | |
| "grad_norm_var": 0.00022761027018229166, | |
| "learning_rate": 0.0006378947368421053, | |
| "loss": 58.1254, | |
| "step": 394 | |
| }, | |
| { | |
| "crossentropy": 2.777245879173279, | |
| "epoch": 6.011, | |
| "grad_norm": 0.796875, | |
| "grad_norm_var": 0.0002726236979166667, | |
| "learning_rate": 0.0006368421052631579, | |
| "loss": 55.7731, | |
| "step": 395 | |
| }, | |
| { | |
| "crossentropy": 2.983623743057251, | |
| "epoch": 6.012, | |
| "grad_norm": 0.78515625, | |
| "grad_norm_var": 0.00026950836181640627, | |
| "learning_rate": 0.0006357894736842106, | |
| "loss": 58.7228, | |
| "step": 396 | |
| }, | |
| { | |
| "crossentropy": 2.9414658546447754, | |
| "epoch": 6.013, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.0002899169921875, | |
| "learning_rate": 0.0006347368421052631, | |
| "loss": 56.5824, | |
| "step": 397 | |
| }, | |
| { | |
| "crossentropy": 2.8643269538879395, | |
| "epoch": 6.014, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.00033156077067057293, | |
| "learning_rate": 0.0006336842105263157, | |
| "loss": 57.9013, | |
| "step": 398 | |
| }, | |
| { | |
| "crossentropy": 2.71036159992218, | |
| "epoch": 6.015, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0003590265909830729, | |
| "learning_rate": 0.0006326315789473685, | |
| "loss": 54.2278, | |
| "step": 399 | |
| }, | |
| { | |
| "crossentropy": 2.782063364982605, | |
| "epoch": 6.016, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0003590265909830729, | |
| "learning_rate": 0.0006315789473684211, | |
| "loss": 55.5568, | |
| "step": 400 | |
| }, | |
| { | |
| "crossentropy": 2.783318877220154, | |
| "epoch": 6.017, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0003829320271809896, | |
| "learning_rate": 0.0006305263157894737, | |
| "loss": 56.6671, | |
| "step": 401 | |
| }, | |
| { | |
| "crossentropy": 2.9031968116760254, | |
| "epoch": 6.018, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.0004412333170572917, | |
| "learning_rate": 0.0006294736842105263, | |
| "loss": 55.2412, | |
| "step": 402 | |
| }, | |
| { | |
| "crossentropy": 2.8032177686691284, | |
| "epoch": 6.019, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.00045064290364583336, | |
| "learning_rate": 0.000628421052631579, | |
| "loss": 55.8731, | |
| "step": 403 | |
| }, | |
| { | |
| "crossentropy": 2.816411256790161, | |
| "epoch": 6.02, | |
| "grad_norm": 0.78125, | |
| "grad_norm_var": 0.00044193267822265623, | |
| "learning_rate": 0.0006273684210526316, | |
| "loss": 56.364, | |
| "step": 404 | |
| }, | |
| { | |
| "crossentropy": 2.7340190410614014, | |
| "epoch": 6.021, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.0005151748657226563, | |
| "learning_rate": 0.0006263157894736841, | |
| "loss": 54.9243, | |
| "step": 405 | |
| }, | |
| { | |
| "crossentropy": 2.8077826499938965, | |
| "epoch": 6.022, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0004963556925455729, | |
| "learning_rate": 0.0006252631578947368, | |
| "loss": 56.1388, | |
| "step": 406 | |
| }, | |
| { | |
| "crossentropy": 2.8767653703689575, | |
| "epoch": 6.023, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.000574175516764323, | |
| "learning_rate": 0.0006242105263157895, | |
| "loss": 56.2231, | |
| "step": 407 | |
| }, | |
| { | |
| "crossentropy": 2.6605910062789917, | |
| "epoch": 6.024, | |
| "grad_norm": 0.77734375, | |
| "grad_norm_var": 0.0004887262980143229, | |
| "learning_rate": 0.0006231578947368422, | |
| "loss": 54.9768, | |
| "step": 408 | |
| }, | |
| { | |
| "crossentropy": 2.831836223602295, | |
| "epoch": 6.025, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.0004900614420572917, | |
| "learning_rate": 0.0006221052631578947, | |
| "loss": 55.0988, | |
| "step": 409 | |
| }, | |
| { | |
| "crossentropy": 2.700801968574524, | |
| "epoch": 6.026, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0004099527994791667, | |
| "learning_rate": 0.0006210526315789474, | |
| "loss": 54.9491, | |
| "step": 410 | |
| }, | |
| { | |
| "crossentropy": 2.933374762535095, | |
| "epoch": 6.027, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.0003191630045572917, | |
| "learning_rate": 0.00062, | |
| "loss": 56.9396, | |
| "step": 411 | |
| }, | |
| { | |
| "crossentropy": 2.74997615814209, | |
| "epoch": 6.028, | |
| "grad_norm": 0.70703125, | |
| "grad_norm_var": 0.0003903706868489583, | |
| "learning_rate": 0.0006189473684210526, | |
| "loss": 55.024, | |
| "step": 412 | |
| }, | |
| { | |
| "crossentropy": 2.916796088218689, | |
| "epoch": 6.029, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.00039310455322265624, | |
| "learning_rate": 0.0006178947368421053, | |
| "loss": 57.4697, | |
| "step": 413 | |
| }, | |
| { | |
| "crossentropy": 2.900514841079712, | |
| "epoch": 6.03, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0003916422526041667, | |
| "learning_rate": 0.0006168421052631578, | |
| "loss": 57.3729, | |
| "step": 414 | |
| }, | |
| { | |
| "crossentropy": 2.8999792337417603, | |
| "epoch": 6.031, | |
| "grad_norm": 0.76171875, | |
| "grad_norm_var": 0.0003986994425455729, | |
| "learning_rate": 0.0006157894736842106, | |
| "loss": 57.0101, | |
| "step": 415 | |
| }, | |
| { | |
| "crossentropy": 2.8940166234970093, | |
| "epoch": 6.032, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.0004170099894205729, | |
| "learning_rate": 0.0006147368421052632, | |
| "loss": 59.3612, | |
| "step": 416 | |
| }, | |
| { | |
| "crossentropy": 2.7649847269058228, | |
| "epoch": 6.033, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.00042317708333333335, | |
| "learning_rate": 0.0006136842105263159, | |
| "loss": 55.2584, | |
| "step": 417 | |
| }, | |
| { | |
| "crossentropy": 2.8447442054748535, | |
| "epoch": 6.034, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0004200617472330729, | |
| "learning_rate": 0.0006126315789473684, | |
| "loss": 56.9598, | |
| "step": 418 | |
| }, | |
| { | |
| "crossentropy": 2.9044229984283447, | |
| "epoch": 6.035, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.0004445393880208333, | |
| "learning_rate": 0.000611578947368421, | |
| "loss": 57.1472, | |
| "step": 419 | |
| }, | |
| { | |
| "crossentropy": 2.858897089958191, | |
| "epoch": 6.036, | |
| "grad_norm": 0.78515625, | |
| "grad_norm_var": 0.0004592259724934896, | |
| "learning_rate": 0.0006105263157894737, | |
| "loss": 58.3656, | |
| "step": 420 | |
| }, | |
| { | |
| "crossentropy": 2.781478762626648, | |
| "epoch": 6.037, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0004208882649739583, | |
| "learning_rate": 0.0006094736842105263, | |
| "loss": 55.7423, | |
| "step": 421 | |
| }, | |
| { | |
| "crossentropy": 2.7873969078063965, | |
| "epoch": 6.038, | |
| "grad_norm": 0.76171875, | |
| "grad_norm_var": 0.00041751861572265626, | |
| "learning_rate": 0.0006084210526315789, | |
| "loss": 56.0058, | |
| "step": 422 | |
| }, | |
| { | |
| "crossentropy": 2.7320199012756348, | |
| "epoch": 6.039, | |
| "grad_norm": 0.80078125, | |
| "grad_norm_var": 0.0004597981770833333, | |
| "learning_rate": 0.0006073684210526316, | |
| "loss": 55.8398, | |
| "step": 423 | |
| }, | |
| { | |
| "crossentropy": 2.859510898590088, | |
| "epoch": 6.04, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.0004856745402018229, | |
| "learning_rate": 0.0006063157894736843, | |
| "loss": 56.2963, | |
| "step": 424 | |
| }, | |
| { | |
| "crossentropy": 2.756445050239563, | |
| "epoch": 6.041, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.00048395792643229164, | |
| "learning_rate": 0.0006052631578947369, | |
| "loss": 55.111, | |
| "step": 425 | |
| }, | |
| { | |
| "crossentropy": 2.632467031478882, | |
| "epoch": 6.042, | |
| "grad_norm": 0.71484375, | |
| "grad_norm_var": 0.0005929946899414062, | |
| "learning_rate": 0.0006042105263157894, | |
| "loss": 53.8061, | |
| "step": 426 | |
| }, | |
| { | |
| "crossentropy": 2.8806999921798706, | |
| "epoch": 6.043, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.0005755106608072917, | |
| "learning_rate": 0.0006031578947368421, | |
| "loss": 57.9055, | |
| "step": 427 | |
| }, | |
| { | |
| "crossentropy": 2.818897008895874, | |
| "epoch": 6.044, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.00047092437744140626, | |
| "learning_rate": 0.0006021052631578947, | |
| "loss": 56.0326, | |
| "step": 428 | |
| }, | |
| { | |
| "crossentropy": 2.8834354877471924, | |
| "epoch": 6.045, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0004749933878580729, | |
| "learning_rate": 0.0006010526315789474, | |
| "loss": 57.5019, | |
| "step": 429 | |
| }, | |
| { | |
| "crossentropy": 2.856741189956665, | |
| "epoch": 6.046, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.0005070368448893229, | |
| "learning_rate": 0.0006, | |
| "loss": 56.8689, | |
| "step": 430 | |
| }, | |
| { | |
| "crossentropy": 2.8887990713119507, | |
| "epoch": 6.047, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.000510406494140625, | |
| "learning_rate": 0.0005989473684210527, | |
| "loss": 57.7479, | |
| "step": 431 | |
| }, | |
| { | |
| "crossentropy": 2.8145207166671753, | |
| "epoch": 6.048, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.0004922866821289063, | |
| "learning_rate": 0.0005978947368421053, | |
| "loss": 56.8016, | |
| "step": 432 | |
| }, | |
| { | |
| "crossentropy": 2.841512441635132, | |
| "epoch": 6.049, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.000485992431640625, | |
| "learning_rate": 0.0005968421052631579, | |
| "loss": 57.1147, | |
| "step": 433 | |
| }, | |
| { | |
| "crossentropy": 2.8247647285461426, | |
| "epoch": 6.05, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.000482177734375, | |
| "learning_rate": 0.0005957894736842106, | |
| "loss": 57.3273, | |
| "step": 434 | |
| }, | |
| { | |
| "crossentropy": 2.7687268257141113, | |
| "epoch": 6.051, | |
| "grad_norm": 0.796875, | |
| "grad_norm_var": 0.000565338134765625, | |
| "learning_rate": 0.0005947368421052631, | |
| "loss": 54.6459, | |
| "step": 435 | |
| }, | |
| { | |
| "crossentropy": 2.9791083335876465, | |
| "epoch": 6.052, | |
| "grad_norm": 0.79296875, | |
| "grad_norm_var": 0.0005961100260416667, | |
| "learning_rate": 0.0005936842105263158, | |
| "loss": 59.038, | |
| "step": 436 | |
| }, | |
| { | |
| "crossentropy": 2.80897319316864, | |
| "epoch": 6.053, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0005991617838541667, | |
| "learning_rate": 0.0005926315789473684, | |
| "loss": 56.0703, | |
| "step": 437 | |
| }, | |
| { | |
| "crossentropy": 2.8237274885177612, | |
| "epoch": 6.054, | |
| "grad_norm": 0.78125, | |
| "grad_norm_var": 0.0006306330362955729, | |
| "learning_rate": 0.0005915789473684211, | |
| "loss": 56.6772, | |
| "step": 438 | |
| }, | |
| { | |
| "crossentropy": 2.7993067502975464, | |
| "epoch": 6.055, | |
| "grad_norm": 0.77734375, | |
| "grad_norm_var": 0.0005375544230143229, | |
| "learning_rate": 0.0005905263157894736, | |
| "loss": 56.7413, | |
| "step": 439 | |
| }, | |
| { | |
| "crossentropy": 2.8471277952194214, | |
| "epoch": 6.056, | |
| "grad_norm": 0.76171875, | |
| "grad_norm_var": 0.0004961649576822917, | |
| "learning_rate": 0.0005894736842105263, | |
| "loss": 56.4865, | |
| "step": 440 | |
| }, | |
| { | |
| "crossentropy": 2.8962905406951904, | |
| "epoch": 6.057, | |
| "grad_norm": 0.71875, | |
| "grad_norm_var": 0.000604248046875, | |
| "learning_rate": 0.000588421052631579, | |
| "loss": 56.8234, | |
| "step": 441 | |
| }, | |
| { | |
| "crossentropy": 2.8405044078826904, | |
| "epoch": 6.058, | |
| "grad_norm": 0.77734375, | |
| "grad_norm_var": 0.0004903157552083333, | |
| "learning_rate": 0.0005873684210526316, | |
| "loss": 57.3965, | |
| "step": 442 | |
| }, | |
| { | |
| "crossentropy": 2.89117431640625, | |
| "epoch": 6.059, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.0004938125610351562, | |
| "learning_rate": 0.0005863157894736842, | |
| "loss": 57.63, | |
| "step": 443 | |
| }, | |
| { | |
| "crossentropy": 2.7827476263046265, | |
| "epoch": 6.06, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.00042292277018229164, | |
| "learning_rate": 0.0005852631578947368, | |
| "loss": 56.2097, | |
| "step": 444 | |
| }, | |
| { | |
| "crossentropy": 2.8849616050720215, | |
| "epoch": 6.061, | |
| "grad_norm": 0.796875, | |
| "grad_norm_var": 0.0004900614420572917, | |
| "learning_rate": 0.0005842105263157895, | |
| "loss": 58.1, | |
| "step": 445 | |
| }, | |
| { | |
| "crossentropy": 2.994400978088379, | |
| "epoch": 6.062, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.00042012532552083336, | |
| "learning_rate": 0.0005831578947368421, | |
| "loss": 58.3295, | |
| "step": 446 | |
| }, | |
| { | |
| "crossentropy": 2.806332230567932, | |
| "epoch": 6.063, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.00044148763020833334, | |
| "learning_rate": 0.0005821052631578948, | |
| "loss": 56.9378, | |
| "step": 447 | |
| }, | |
| { | |
| "crossentropy": 2.8283108472824097, | |
| "epoch": 6.064, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.0004353205362955729, | |
| "learning_rate": 0.0005810526315789474, | |
| "loss": 57.3855, | |
| "step": 448 | |
| }, | |
| { | |
| "crossentropy": 2.7301007509231567, | |
| "epoch": 7.001, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0004729588826497396, | |
| "learning_rate": 0.00058, | |
| "loss": 54.0905, | |
| "step": 449 | |
| }, | |
| { | |
| "crossentropy": 2.741808295249939, | |
| "epoch": 7.002, | |
| "grad_norm": 0.81640625, | |
| "grad_norm_var": 0.000577545166015625, | |
| "learning_rate": 0.0005789473684210527, | |
| "loss": 55.7839, | |
| "step": 450 | |
| }, | |
| { | |
| "crossentropy": 2.9567712545394897, | |
| "epoch": 7.003, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.000531005859375, | |
| "learning_rate": 0.0005778947368421053, | |
| "loss": 58.1478, | |
| "step": 451 | |
| }, | |
| { | |
| "crossentropy": 2.740614175796509, | |
| "epoch": 7.004, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.000614166259765625, | |
| "learning_rate": 0.0005768421052631579, | |
| "loss": 54.8335, | |
| "step": 452 | |
| }, | |
| { | |
| "crossentropy": 2.791291117668152, | |
| "epoch": 7.005, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.0006316502888997396, | |
| "learning_rate": 0.0005757894736842105, | |
| "loss": 57.0646, | |
| "step": 453 | |
| }, | |
| { | |
| "crossentropy": 2.830601930618286, | |
| "epoch": 7.006, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.0006243387858072916, | |
| "learning_rate": 0.0005747368421052632, | |
| "loss": 56.4297, | |
| "step": 454 | |
| }, | |
| { | |
| "crossentropy": 2.766074061393738, | |
| "epoch": 7.007, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0006469090779622396, | |
| "learning_rate": 0.0005736842105263158, | |
| "loss": 55.315, | |
| "step": 455 | |
| }, | |
| { | |
| "crossentropy": 2.8447988033294678, | |
| "epoch": 7.008, | |
| "grad_norm": 0.8046875, | |
| "grad_norm_var": 0.0006357192993164063, | |
| "learning_rate": 0.0005726315789473684, | |
| "loss": 56.7864, | |
| "step": 456 | |
| }, | |
| { | |
| "crossentropy": 2.8546324968338013, | |
| "epoch": 7.009, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0006388346354166667, | |
| "learning_rate": 0.0005715789473684211, | |
| "loss": 56.896, | |
| "step": 457 | |
| }, | |
| { | |
| "crossentropy": 2.8077392578125, | |
| "epoch": 7.01, | |
| "grad_norm": 0.78125, | |
| "grad_norm_var": 0.0006528218587239583, | |
| "learning_rate": 0.0005705263157894737, | |
| "loss": 55.2438, | |
| "step": 458 | |
| }, | |
| { | |
| "crossentropy": 2.8590996265411377, | |
| "epoch": 7.011, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0006296157836914063, | |
| "learning_rate": 0.0005694736842105264, | |
| "loss": 56.4089, | |
| "step": 459 | |
| }, | |
| { | |
| "crossentropy": 2.6845264434814453, | |
| "epoch": 7.012, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.00059814453125, | |
| "learning_rate": 0.0005684210526315789, | |
| "loss": 54.5441, | |
| "step": 460 | |
| }, | |
| { | |
| "crossentropy": 2.821642518043518, | |
| "epoch": 7.013, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.0005889892578125, | |
| "learning_rate": 0.0005673684210526316, | |
| "loss": 57.5324, | |
| "step": 461 | |
| }, | |
| { | |
| "crossentropy": 2.8966156244277954, | |
| "epoch": 7.014, | |
| "grad_norm": 0.78125, | |
| "grad_norm_var": 0.0006052652994791666, | |
| "learning_rate": 0.0005663157894736842, | |
| "loss": 57.1761, | |
| "step": 462 | |
| }, | |
| { | |
| "crossentropy": 2.9717679023742676, | |
| "epoch": 7.015, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.0006062189737955729, | |
| "learning_rate": 0.0005652631578947368, | |
| "loss": 57.733, | |
| "step": 463 | |
| }, | |
| { | |
| "crossentropy": 2.8593705892562866, | |
| "epoch": 7.016, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.0006060282389322917, | |
| "learning_rate": 0.0005642105263157896, | |
| "loss": 56.4377, | |
| "step": 464 | |
| }, | |
| { | |
| "crossentropy": 2.8285802602767944, | |
| "epoch": 7.017, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0005869547526041667, | |
| "learning_rate": 0.0005631578947368421, | |
| "loss": 56.9182, | |
| "step": 465 | |
| }, | |
| { | |
| "crossentropy": 2.80751633644104, | |
| "epoch": 7.018, | |
| "grad_norm": 0.78125, | |
| "grad_norm_var": 0.0004215876261393229, | |
| "learning_rate": 0.0005621052631578948, | |
| "loss": 58.1339, | |
| "step": 466 | |
| }, | |
| { | |
| "crossentropy": 2.880888819694519, | |
| "epoch": 7.019, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.000418853759765625, | |
| "learning_rate": 0.0005610526315789474, | |
| "loss": 57.298, | |
| "step": 467 | |
| }, | |
| { | |
| "crossentropy": 2.818486213684082, | |
| "epoch": 7.02, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.0004012425740559896, | |
| "learning_rate": 0.0005600000000000001, | |
| "loss": 55.9215, | |
| "step": 468 | |
| }, | |
| { | |
| "crossentropy": 2.765039563179016, | |
| "epoch": 7.021, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.0003615697224934896, | |
| "learning_rate": 0.0005589473684210526, | |
| "loss": 56.3919, | |
| "step": 469 | |
| }, | |
| { | |
| "crossentropy": 2.765025496482849, | |
| "epoch": 7.022, | |
| "grad_norm": 0.7890625, | |
| "grad_norm_var": 0.0003956476847330729, | |
| "learning_rate": 0.0005578947368421052, | |
| "loss": 56.4707, | |
| "step": 470 | |
| }, | |
| { | |
| "crossentropy": 2.6866267919540405, | |
| "epoch": 7.023, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.0003679911295572917, | |
| "learning_rate": 0.0005568421052631579, | |
| "loss": 54.7156, | |
| "step": 471 | |
| }, | |
| { | |
| "crossentropy": 2.7979997396469116, | |
| "epoch": 7.024, | |
| "grad_norm": 0.7109375, | |
| "grad_norm_var": 0.00043512980143229165, | |
| "learning_rate": 0.0005557894736842106, | |
| "loss": 56.7267, | |
| "step": 472 | |
| }, | |
| { | |
| "crossentropy": 2.851110816001892, | |
| "epoch": 7.025, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.00043074289957682293, | |
| "learning_rate": 0.0005547368421052632, | |
| "loss": 56.6983, | |
| "step": 473 | |
| }, | |
| { | |
| "crossentropy": 2.7913917303085327, | |
| "epoch": 7.026, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.0004107157389322917, | |
| "learning_rate": 0.0005536842105263158, | |
| "loss": 55.4107, | |
| "step": 474 | |
| }, | |
| { | |
| "crossentropy": 2.8196051120758057, | |
| "epoch": 7.027, | |
| "grad_norm": 0.7890625, | |
| "grad_norm_var": 0.0004679361979166667, | |
| "learning_rate": 0.0005526315789473685, | |
| "loss": 57.6855, | |
| "step": 475 | |
| }, | |
| { | |
| "crossentropy": 2.7812657356262207, | |
| "epoch": 7.028, | |
| "grad_norm": 0.78515625, | |
| "grad_norm_var": 0.0004709879557291667, | |
| "learning_rate": 0.0005515789473684211, | |
| "loss": 56.5311, | |
| "step": 476 | |
| }, | |
| { | |
| "crossentropy": 2.884629964828491, | |
| "epoch": 7.029, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.0005205790201822917, | |
| "learning_rate": 0.0005505263157894736, | |
| "loss": 56.0411, | |
| "step": 477 | |
| }, | |
| { | |
| "crossentropy": 2.770102024078369, | |
| "epoch": 7.03, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.0005039850870768229, | |
| "learning_rate": 0.0005494736842105263, | |
| "loss": 55.6079, | |
| "step": 478 | |
| }, | |
| { | |
| "crossentropy": 2.7415486574172974, | |
| "epoch": 7.031, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.000510406494140625, | |
| "learning_rate": 0.0005484210526315789, | |
| "loss": 56.3453, | |
| "step": 479 | |
| }, | |
| { | |
| "crossentropy": 2.850640296936035, | |
| "epoch": 7.032, | |
| "grad_norm": 0.76171875, | |
| "grad_norm_var": 0.0005035400390625, | |
| "learning_rate": 0.0005473684210526317, | |
| "loss": 56.8195, | |
| "step": 480 | |
| }, | |
| { | |
| "crossentropy": 2.7248213291168213, | |
| "epoch": 7.033, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.000565338134765625, | |
| "learning_rate": 0.0005463157894736843, | |
| "loss": 54.4893, | |
| "step": 481 | |
| }, | |
| { | |
| "crossentropy": 2.8087538480758667, | |
| "epoch": 7.034, | |
| "grad_norm": 0.7890625, | |
| "grad_norm_var": 0.0005940755208333333, | |
| "learning_rate": 0.0005452631578947369, | |
| "loss": 56.456, | |
| "step": 482 | |
| }, | |
| { | |
| "crossentropy": 2.7221765518188477, | |
| "epoch": 7.035, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.0005904515584309896, | |
| "learning_rate": 0.0005442105263157895, | |
| "loss": 56.3871, | |
| "step": 483 | |
| }, | |
| { | |
| "crossentropy": 2.897437572479248, | |
| "epoch": 7.036, | |
| "grad_norm": 0.78515625, | |
| "grad_norm_var": 0.0005833307902018229, | |
| "learning_rate": 0.0005431578947368421, | |
| "loss": 56.8503, | |
| "step": 484 | |
| }, | |
| { | |
| "crossentropy": 2.9512122869491577, | |
| "epoch": 7.037, | |
| "grad_norm": 0.8359375, | |
| "grad_norm_var": 0.0009389241536458333, | |
| "learning_rate": 0.0005421052631578948, | |
| "loss": 57.7135, | |
| "step": 485 | |
| }, | |
| { | |
| "crossentropy": 2.768813133239746, | |
| "epoch": 7.038, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0009236653645833333, | |
| "learning_rate": 0.0005410526315789473, | |
| "loss": 55.8437, | |
| "step": 486 | |
| }, | |
| { | |
| "crossentropy": 2.994757056236267, | |
| "epoch": 7.039, | |
| "grad_norm": 0.76171875, | |
| "grad_norm_var": 0.000919342041015625, | |
| "learning_rate": 0.00054, | |
| "loss": 57.5798, | |
| "step": 487 | |
| }, | |
| { | |
| "crossentropy": 2.804057478904724, | |
| "epoch": 7.04, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.0007410049438476562, | |
| "learning_rate": 0.0005389473684210526, | |
| "loss": 56.4264, | |
| "step": 488 | |
| }, | |
| { | |
| "crossentropy": 2.8012847900390625, | |
| "epoch": 7.041, | |
| "grad_norm": 0.77734375, | |
| "grad_norm_var": 0.0007410049438476562, | |
| "learning_rate": 0.0005378947368421054, | |
| "loss": 55.1591, | |
| "step": 489 | |
| }, | |
| { | |
| "crossentropy": 2.927749991416931, | |
| "epoch": 7.042, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.0007138570149739583, | |
| "learning_rate": 0.0005368421052631579, | |
| "loss": 56.8244, | |
| "step": 490 | |
| }, | |
| { | |
| "crossentropy": 2.840681791305542, | |
| "epoch": 7.043, | |
| "grad_norm": 0.8203125, | |
| "grad_norm_var": 0.0008623758951822917, | |
| "learning_rate": 0.0005357894736842105, | |
| "loss": 56.2898, | |
| "step": 491 | |
| }, | |
| { | |
| "crossentropy": 2.743423581123352, | |
| "epoch": 7.044, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.00087890625, | |
| "learning_rate": 0.0005347368421052632, | |
| "loss": 56.1168, | |
| "step": 492 | |
| }, | |
| { | |
| "crossentropy": 2.800847887992859, | |
| "epoch": 7.045, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0008481343587239584, | |
| "learning_rate": 0.0005336842105263158, | |
| "loss": 55.5752, | |
| "step": 493 | |
| }, | |
| { | |
| "crossentropy": 2.6926422119140625, | |
| "epoch": 7.046, | |
| "grad_norm": 0.71875, | |
| "grad_norm_var": 0.0009749730428059896, | |
| "learning_rate": 0.0005326315789473684, | |
| "loss": 54.8747, | |
| "step": 494 | |
| }, | |
| { | |
| "crossentropy": 2.8151170015335083, | |
| "epoch": 7.047, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.000980377197265625, | |
| "learning_rate": 0.000531578947368421, | |
| "loss": 55.6196, | |
| "step": 495 | |
| }, | |
| { | |
| "crossentropy": 2.9150657653808594, | |
| "epoch": 7.048, | |
| "grad_norm": 0.8046875, | |
| "grad_norm_var": 0.001076189676920573, | |
| "learning_rate": 0.0005305263157894737, | |
| "loss": 57.0052, | |
| "step": 496 | |
| }, | |
| { | |
| "crossentropy": 2.840682625770569, | |
| "epoch": 7.049, | |
| "grad_norm": 0.80078125, | |
| "grad_norm_var": 0.0010121663411458333, | |
| "learning_rate": 0.0005294736842105264, | |
| "loss": 58.2104, | |
| "step": 497 | |
| }, | |
| { | |
| "crossentropy": 2.8163187503814697, | |
| "epoch": 7.05, | |
| "grad_norm": 0.7890625, | |
| "grad_norm_var": 0.0010121663411458333, | |
| "learning_rate": 0.000528421052631579, | |
| "loss": 56.675, | |
| "step": 498 | |
| }, | |
| { | |
| "crossentropy": 2.9374297857284546, | |
| "epoch": 7.051, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.0010660807291666666, | |
| "learning_rate": 0.0005273684210526316, | |
| "loss": 57.6299, | |
| "step": 499 | |
| }, | |
| { | |
| "crossentropy": 2.9131165742874146, | |
| "epoch": 7.052, | |
| "grad_norm": 0.828125, | |
| "grad_norm_var": 0.0012598037719726562, | |
| "learning_rate": 0.0005263157894736842, | |
| "loss": 58.4242, | |
| "step": 500 | |
| }, | |
| { | |
| "crossentropy": 2.900420308113098, | |
| "epoch": 7.053, | |
| "grad_norm": 0.8203125, | |
| "grad_norm_var": 0.0011463801066080729, | |
| "learning_rate": 0.0005252631578947369, | |
| "loss": 57.3699, | |
| "step": 501 | |
| }, | |
| { | |
| "crossentropy": 2.9151841402053833, | |
| "epoch": 7.054, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0011463801066080729, | |
| "learning_rate": 0.0005242105263157895, | |
| "loss": 56.6537, | |
| "step": 502 | |
| }, | |
| { | |
| "crossentropy": 2.9279205799102783, | |
| "epoch": 7.055, | |
| "grad_norm": 0.78515625, | |
| "grad_norm_var": 0.001144854227701823, | |
| "learning_rate": 0.0005231578947368421, | |
| "loss": 57.4459, | |
| "step": 503 | |
| }, | |
| { | |
| "crossentropy": 2.9333810806274414, | |
| "epoch": 7.056, | |
| "grad_norm": 0.77734375, | |
| "grad_norm_var": 0.001114336649576823, | |
| "learning_rate": 0.0005221052631578947, | |
| "loss": 57.2862, | |
| "step": 504 | |
| }, | |
| { | |
| "crossentropy": 2.805972099304199, | |
| "epoch": 7.057, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0011566162109375, | |
| "learning_rate": 0.0005210526315789474, | |
| "loss": 56.7537, | |
| "step": 505 | |
| }, | |
| { | |
| "crossentropy": 2.8755249977111816, | |
| "epoch": 7.058, | |
| "grad_norm": 0.82421875, | |
| "grad_norm_var": 0.0013111750284830728, | |
| "learning_rate": 0.0005200000000000001, | |
| "loss": 58.3636, | |
| "step": 506 | |
| }, | |
| { | |
| "crossentropy": 2.9246153831481934, | |
| "epoch": 7.059, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.0012326558430989583, | |
| "learning_rate": 0.0005189473684210526, | |
| "loss": 57.2294, | |
| "step": 507 | |
| }, | |
| { | |
| "crossentropy": 2.8592110872268677, | |
| "epoch": 7.06, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.00120849609375, | |
| "learning_rate": 0.0005178947368421053, | |
| "loss": 57.1909, | |
| "step": 508 | |
| }, | |
| { | |
| "crossentropy": 2.755800485610962, | |
| "epoch": 7.061, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.0011682510375976562, | |
| "learning_rate": 0.0005168421052631579, | |
| "loss": 55.5823, | |
| "step": 509 | |
| }, | |
| { | |
| "crossentropy": 2.7610713243484497, | |
| "epoch": 7.062, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.0010129292805989584, | |
| "learning_rate": 0.0005157894736842106, | |
| "loss": 55.2065, | |
| "step": 510 | |
| }, | |
| { | |
| "crossentropy": 2.8712987899780273, | |
| "epoch": 7.063, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.0010129292805989584, | |
| "learning_rate": 0.0005147368421052631, | |
| "loss": 56.8722, | |
| "step": 511 | |
| }, | |
| { | |
| "crossentropy": 2.7960678339004517, | |
| "epoch": 7.064, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.0009702046712239584, | |
| "learning_rate": 0.0005136842105263157, | |
| "loss": 55.4123, | |
| "step": 512 | |
| }, | |
| { | |
| "crossentropy": 2.8373619318008423, | |
| "epoch": 8.001, | |
| "grad_norm": 0.78125, | |
| "grad_norm_var": 0.0009103775024414063, | |
| "learning_rate": 0.0005126315789473685, | |
| "loss": 57.6566, | |
| "step": 513 | |
| }, | |
| { | |
| "crossentropy": 2.7967774868011475, | |
| "epoch": 8.002, | |
| "grad_norm": 0.78515625, | |
| "grad_norm_var": 0.0008508682250976563, | |
| "learning_rate": 0.0005115789473684211, | |
| "loss": 57.0669, | |
| "step": 514 | |
| }, | |
| { | |
| "crossentropy": 2.795915961265564, | |
| "epoch": 8.003, | |
| "grad_norm": 0.80078125, | |
| "grad_norm_var": 0.0006955464680989584, | |
| "learning_rate": 0.0005105263157894738, | |
| "loss": 54.5675, | |
| "step": 515 | |
| }, | |
| { | |
| "crossentropy": 2.754665493965149, | |
| "epoch": 8.004, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.0005246480305989583, | |
| "learning_rate": 0.0005094736842105263, | |
| "loss": 56.3108, | |
| "step": 516 | |
| }, | |
| { | |
| "crossentropy": 2.8175487518310547, | |
| "epoch": 8.005, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.0005121231079101562, | |
| "learning_rate": 0.000508421052631579, | |
| "loss": 56.5216, | |
| "step": 517 | |
| }, | |
| { | |
| "crossentropy": 2.884229898452759, | |
| "epoch": 8.006, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.000531005859375, | |
| "learning_rate": 0.0005073684210526316, | |
| "loss": 56.2322, | |
| "step": 518 | |
| }, | |
| { | |
| "crossentropy": 2.8160749673843384, | |
| "epoch": 8.007, | |
| "grad_norm": 0.7890625, | |
| "grad_norm_var": 0.0005578994750976562, | |
| "learning_rate": 0.0005063157894736841, | |
| "loss": 56.09, | |
| "step": 519 | |
| }, | |
| { | |
| "crossentropy": 2.8993260860443115, | |
| "epoch": 8.008, | |
| "grad_norm": 0.8125, | |
| "grad_norm_var": 0.000665728251139323, | |
| "learning_rate": 0.0005052631578947368, | |
| "loss": 56.7618, | |
| "step": 520 | |
| }, | |
| { | |
| "crossentropy": 2.8044674396514893, | |
| "epoch": 8.009, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.00045878092447916666, | |
| "learning_rate": 0.0005042105263157895, | |
| "loss": 56.7056, | |
| "step": 521 | |
| }, | |
| { | |
| "crossentropy": 2.688468098640442, | |
| "epoch": 8.01, | |
| "grad_norm": 0.7109375, | |
| "grad_norm_var": 0.0006321589152018229, | |
| "learning_rate": 0.0005031578947368422, | |
| "loss": 54.6439, | |
| "step": 522 | |
| }, | |
| { | |
| "crossentropy": 2.77541720867157, | |
| "epoch": 8.011, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.0006469090779622396, | |
| "learning_rate": 0.0005021052631578948, | |
| "loss": 55.0559, | |
| "step": 523 | |
| }, | |
| { | |
| "crossentropy": 2.9500958919525146, | |
| "epoch": 8.012, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0006398518880208333, | |
| "learning_rate": 0.0005010526315789474, | |
| "loss": 58.3708, | |
| "step": 524 | |
| }, | |
| { | |
| "crossentropy": 2.7789478302001953, | |
| "epoch": 8.013, | |
| "grad_norm": 0.7109375, | |
| "grad_norm_var": 0.0008040746053059896, | |
| "learning_rate": 0.0005, | |
| "loss": 56.1477, | |
| "step": 525 | |
| }, | |
| { | |
| "crossentropy": 2.89370858669281, | |
| "epoch": 8.014, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.0008819580078125, | |
| "learning_rate": 0.0004989473684210527, | |
| "loss": 57.8208, | |
| "step": 526 | |
| }, | |
| { | |
| "crossentropy": 2.829719305038452, | |
| "epoch": 8.015, | |
| "grad_norm": 0.828125, | |
| "grad_norm_var": 0.001163482666015625, | |
| "learning_rate": 0.0004978947368421053, | |
| "loss": 57.3503, | |
| "step": 527 | |
| }, | |
| { | |
| "crossentropy": 2.8827792406082153, | |
| "epoch": 8.016, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.001163482666015625, | |
| "learning_rate": 0.0004968421052631579, | |
| "loss": 57.5831, | |
| "step": 528 | |
| }, | |
| { | |
| "crossentropy": 2.839862585067749, | |
| "epoch": 8.017, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.0011484146118164063, | |
| "learning_rate": 0.0004957894736842105, | |
| "loss": 55.7402, | |
| "step": 529 | |
| }, | |
| { | |
| "crossentropy": 2.7569580078125, | |
| "epoch": 8.018, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.0011270523071289062, | |
| "learning_rate": 0.0004947368421052632, | |
| "loss": 56.3813, | |
| "step": 530 | |
| }, | |
| { | |
| "crossentropy": 2.771231174468994, | |
| "epoch": 8.019, | |
| "grad_norm": 0.78125, | |
| "grad_norm_var": 0.001053619384765625, | |
| "learning_rate": 0.0004936842105263158, | |
| "loss": 57.1672, | |
| "step": 531 | |
| }, | |
| { | |
| "crossentropy": 2.787784457206726, | |
| "epoch": 8.02, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0010528564453125, | |
| "learning_rate": 0.0004926315789473684, | |
| "loss": 56.1965, | |
| "step": 532 | |
| }, | |
| { | |
| "crossentropy": 2.716630458831787, | |
| "epoch": 8.021, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0010461807250976562, | |
| "learning_rate": 0.000491578947368421, | |
| "loss": 55.2343, | |
| "step": 533 | |
| }, | |
| { | |
| "crossentropy": 2.8388242721557617, | |
| "epoch": 8.022, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.001084136962890625, | |
| "learning_rate": 0.0004905263157894737, | |
| "loss": 56.8292, | |
| "step": 534 | |
| }, | |
| { | |
| "crossentropy": 2.8070307970046997, | |
| "epoch": 8.023, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0010294596354166667, | |
| "learning_rate": 0.0004894736842105264, | |
| "loss": 55.36, | |
| "step": 535 | |
| }, | |
| { | |
| "crossentropy": 2.865221381187439, | |
| "epoch": 8.024, | |
| "grad_norm": 0.79296875, | |
| "grad_norm_var": 0.0009108861287434896, | |
| "learning_rate": 0.000488421052631579, | |
| "loss": 56.6106, | |
| "step": 536 | |
| }, | |
| { | |
| "crossentropy": 2.9020869731903076, | |
| "epoch": 8.025, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.0009165445963541667, | |
| "learning_rate": 0.0004873684210526316, | |
| "loss": 56.5227, | |
| "step": 537 | |
| }, | |
| { | |
| "crossentropy": 2.7119977474212646, | |
| "epoch": 8.026, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.0007786432902018229, | |
| "learning_rate": 0.0004863157894736842, | |
| "loss": 55.6506, | |
| "step": 538 | |
| }, | |
| { | |
| "crossentropy": 2.7730578184127808, | |
| "epoch": 8.027, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.0007781982421875, | |
| "learning_rate": 0.00048526315789473683, | |
| "loss": 56.5655, | |
| "step": 539 | |
| }, | |
| { | |
| "crossentropy": 2.736594557762146, | |
| "epoch": 8.028, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.0007769266764322916, | |
| "learning_rate": 0.0004842105263157895, | |
| "loss": 55.2709, | |
| "step": 540 | |
| }, | |
| { | |
| "crossentropy": 2.87870717048645, | |
| "epoch": 8.029, | |
| "grad_norm": 0.77734375, | |
| "grad_norm_var": 0.0006158828735351562, | |
| "learning_rate": 0.00048315789473684213, | |
| "loss": 56.1277, | |
| "step": 541 | |
| }, | |
| { | |
| "crossentropy": 2.8086968660354614, | |
| "epoch": 8.03, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.0006158828735351562, | |
| "learning_rate": 0.00048210526315789476, | |
| "loss": 56.0955, | |
| "step": 542 | |
| }, | |
| { | |
| "crossentropy": 2.807764172554016, | |
| "epoch": 8.031, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.00036869049072265627, | |
| "learning_rate": 0.00048105263157894733, | |
| "loss": 55.8439, | |
| "step": 543 | |
| }, | |
| { | |
| "crossentropy": 2.887509346008301, | |
| "epoch": 8.032, | |
| "grad_norm": 0.70703125, | |
| "grad_norm_var": 0.0005124409993489584, | |
| "learning_rate": 0.00048, | |
| "loss": 57.691, | |
| "step": 544 | |
| }, | |
| { | |
| "crossentropy": 2.8629679679870605, | |
| "epoch": 8.033, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.0004961649576822917, | |
| "learning_rate": 0.00047894736842105264, | |
| "loss": 57.1656, | |
| "step": 545 | |
| }, | |
| { | |
| "crossentropy": 2.749383807182312, | |
| "epoch": 8.034, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0004968643188476562, | |
| "learning_rate": 0.00047789473684210526, | |
| "loss": 54.5942, | |
| "step": 546 | |
| }, | |
| { | |
| "crossentropy": 2.771678328514099, | |
| "epoch": 8.035, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.00044091542561848957, | |
| "learning_rate": 0.0004768421052631579, | |
| "loss": 56.419, | |
| "step": 547 | |
| }, | |
| { | |
| "crossentropy": 2.6900097131729126, | |
| "epoch": 8.036, | |
| "grad_norm": 0.78125, | |
| "grad_norm_var": 0.0004968643188476562, | |
| "learning_rate": 0.00047578947368421057, | |
| "loss": 55.0504, | |
| "step": 548 | |
| }, | |
| { | |
| "crossentropy": 2.769201636314392, | |
| "epoch": 8.037, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.000510406494140625, | |
| "learning_rate": 0.0004747368421052632, | |
| "loss": 55.9095, | |
| "step": 549 | |
| }, | |
| { | |
| "crossentropy": 2.9785958528518677, | |
| "epoch": 8.038, | |
| "grad_norm": 0.80078125, | |
| "grad_norm_var": 0.0006134033203125, | |
| "learning_rate": 0.00047368421052631577, | |
| "loss": 57.2934, | |
| "step": 550 | |
| }, | |
| { | |
| "crossentropy": 2.833102226257324, | |
| "epoch": 8.039, | |
| "grad_norm": 0.77734375, | |
| "grad_norm_var": 0.000635210673014323, | |
| "learning_rate": 0.0004726315789473684, | |
| "loss": 57.6031, | |
| "step": 551 | |
| }, | |
| { | |
| "crossentropy": 2.8962067365646362, | |
| "epoch": 8.04, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.0005619684855143229, | |
| "learning_rate": 0.0004715789473684211, | |
| "loss": 56.1431, | |
| "step": 552 | |
| }, | |
| { | |
| "crossentropy": 2.8563923835754395, | |
| "epoch": 8.041, | |
| "grad_norm": 0.82421875, | |
| "grad_norm_var": 0.0008396784464518229, | |
| "learning_rate": 0.0004705263157894737, | |
| "loss": 58.5512, | |
| "step": 553 | |
| }, | |
| { | |
| "crossentropy": 2.8052828311920166, | |
| "epoch": 8.042, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.0008849461873372395, | |
| "learning_rate": 0.0004694736842105263, | |
| "loss": 54.951, | |
| "step": 554 | |
| }, | |
| { | |
| "crossentropy": 2.726549983024597, | |
| "epoch": 8.043, | |
| "grad_norm": 0.72265625, | |
| "grad_norm_var": 0.0009518941243489583, | |
| "learning_rate": 0.00046842105263157895, | |
| "loss": 55.8634, | |
| "step": 555 | |
| }, | |
| { | |
| "crossentropy": 2.8912360668182373, | |
| "epoch": 8.044, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.0009719212849934896, | |
| "learning_rate": 0.00046736842105263163, | |
| "loss": 56.6226, | |
| "step": 556 | |
| }, | |
| { | |
| "crossentropy": 2.8774843215942383, | |
| "epoch": 8.045, | |
| "grad_norm": 0.78125, | |
| "grad_norm_var": 0.0009844462076822917, | |
| "learning_rate": 0.0004663157894736842, | |
| "loss": 57.6825, | |
| "step": 557 | |
| }, | |
| { | |
| "crossentropy": 2.7926796674728394, | |
| "epoch": 8.046, | |
| "grad_norm": 0.76171875, | |
| "grad_norm_var": 0.0009266535441080729, | |
| "learning_rate": 0.00046526315789473683, | |
| "loss": 56.6319, | |
| "step": 558 | |
| }, | |
| { | |
| "crossentropy": 2.8781834840774536, | |
| "epoch": 8.047, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.0008900960286458334, | |
| "learning_rate": 0.00046421052631578946, | |
| "loss": 57.2263, | |
| "step": 559 | |
| }, | |
| { | |
| "crossentropy": 2.768616557121277, | |
| "epoch": 8.048, | |
| "grad_norm": 0.77734375, | |
| "grad_norm_var": 0.0007138570149739583, | |
| "learning_rate": 0.00046315789473684214, | |
| "loss": 55.393, | |
| "step": 560 | |
| }, | |
| { | |
| "crossentropy": 2.8414195775985718, | |
| "epoch": 8.049, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0007196426391601563, | |
| "learning_rate": 0.00046210526315789476, | |
| "loss": 56.6017, | |
| "step": 561 | |
| }, | |
| { | |
| "crossentropy": 2.886254072189331, | |
| "epoch": 8.05, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0007079442342122396, | |
| "learning_rate": 0.0004610526315789474, | |
| "loss": 57.1002, | |
| "step": 562 | |
| }, | |
| { | |
| "crossentropy": 2.882540702819824, | |
| "epoch": 8.051, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.0007521947224934896, | |
| "learning_rate": 0.00046, | |
| "loss": 57.4033, | |
| "step": 563 | |
| }, | |
| { | |
| "crossentropy": 2.8176023960113525, | |
| "epoch": 8.052, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0007369359334309896, | |
| "learning_rate": 0.00045894736842105264, | |
| "loss": 55.8245, | |
| "step": 564 | |
| }, | |
| { | |
| "crossentropy": 2.8578507900238037, | |
| "epoch": 8.053, | |
| "grad_norm": 0.72265625, | |
| "grad_norm_var": 0.0007994969685872396, | |
| "learning_rate": 0.00045789473684210527, | |
| "loss": 55.7208, | |
| "step": 565 | |
| }, | |
| { | |
| "crossentropy": 2.907991409301758, | |
| "epoch": 8.054, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.0006820042928059896, | |
| "learning_rate": 0.0004568421052631579, | |
| "loss": 57.6047, | |
| "step": 566 | |
| }, | |
| { | |
| "crossentropy": 2.9070833921432495, | |
| "epoch": 8.055, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.0006647109985351562, | |
| "learning_rate": 0.0004557894736842105, | |
| "loss": 56.8514, | |
| "step": 567 | |
| }, | |
| { | |
| "crossentropy": 2.8488681316375732, | |
| "epoch": 8.056, | |
| "grad_norm": 0.71484375, | |
| "grad_norm_var": 0.000757280985514323, | |
| "learning_rate": 0.0004547368421052632, | |
| "loss": 55.2764, | |
| "step": 568 | |
| }, | |
| { | |
| "crossentropy": 2.843410849571228, | |
| "epoch": 8.057, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.0003986994425455729, | |
| "learning_rate": 0.0004536842105263158, | |
| "loss": 57.3239, | |
| "step": 569 | |
| }, | |
| { | |
| "crossentropy": 2.7495408058166504, | |
| "epoch": 8.058, | |
| "grad_norm": 0.8203125, | |
| "grad_norm_var": 0.000689697265625, | |
| "learning_rate": 0.00045263157894736845, | |
| "loss": 56.725, | |
| "step": 570 | |
| }, | |
| { | |
| "crossentropy": 2.8769084215164185, | |
| "epoch": 8.059, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.0006743748982747396, | |
| "learning_rate": 0.0004515789473684211, | |
| "loss": 58.1138, | |
| "step": 571 | |
| }, | |
| { | |
| "crossentropy": 2.865220785140991, | |
| "epoch": 8.06, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0006632486979166667, | |
| "learning_rate": 0.00045052631578947365, | |
| "loss": 57.7083, | |
| "step": 572 | |
| }, | |
| { | |
| "crossentropy": 2.7678266763687134, | |
| "epoch": 8.061, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.0006331761678059895, | |
| "learning_rate": 0.00044947368421052633, | |
| "loss": 56.7169, | |
| "step": 573 | |
| }, | |
| { | |
| "crossentropy": 2.716267228126526, | |
| "epoch": 8.062, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.0006306330362955729, | |
| "learning_rate": 0.00044842105263157895, | |
| "loss": 55.4713, | |
| "step": 574 | |
| }, | |
| { | |
| "crossentropy": 2.746058225631714, | |
| "epoch": 8.063, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.000632476806640625, | |
| "learning_rate": 0.0004473684210526316, | |
| "loss": 56.1347, | |
| "step": 575 | |
| }, | |
| { | |
| "crossentropy": 2.8452965021133423, | |
| "epoch": 8.064, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.0005889892578125, | |
| "learning_rate": 0.0004463157894736842, | |
| "loss": 57.0829, | |
| "step": 576 | |
| }, | |
| { | |
| "crossentropy": 2.7510355710983276, | |
| "epoch": 9.001, | |
| "grad_norm": 0.71875, | |
| "grad_norm_var": 0.0006367365519205729, | |
| "learning_rate": 0.0004452631578947369, | |
| "loss": 56.112, | |
| "step": 577 | |
| }, | |
| { | |
| "crossentropy": 2.735834240913391, | |
| "epoch": 9.002, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.0006235758463541667, | |
| "learning_rate": 0.0004442105263157895, | |
| "loss": 55.181, | |
| "step": 578 | |
| }, | |
| { | |
| "crossentropy": 2.731901526451111, | |
| "epoch": 9.003, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.000640869140625, | |
| "learning_rate": 0.0004431578947368421, | |
| "loss": 56.0573, | |
| "step": 579 | |
| }, | |
| { | |
| "crossentropy": 2.9925543069839478, | |
| "epoch": 9.004, | |
| "grad_norm": 0.78125, | |
| "grad_norm_var": 0.0006418228149414062, | |
| "learning_rate": 0.0004421052631578947, | |
| "loss": 57.7087, | |
| "step": 580 | |
| }, | |
| { | |
| "crossentropy": 2.794908046722412, | |
| "epoch": 9.005, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.0006575902303059896, | |
| "learning_rate": 0.0004410526315789474, | |
| "loss": 55.5605, | |
| "step": 581 | |
| }, | |
| { | |
| "crossentropy": 2.9317067861557007, | |
| "epoch": 9.006, | |
| "grad_norm": 0.76171875, | |
| "grad_norm_var": 0.0006458918253580729, | |
| "learning_rate": 0.00044, | |
| "loss": 58.2652, | |
| "step": 582 | |
| }, | |
| { | |
| "crossentropy": 2.9609873294830322, | |
| "epoch": 9.007, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.0005431493123372396, | |
| "learning_rate": 0.00043894736842105264, | |
| "loss": 58.6218, | |
| "step": 583 | |
| }, | |
| { | |
| "crossentropy": 2.7990305423736572, | |
| "epoch": 9.008, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.0005462010701497396, | |
| "learning_rate": 0.00043789473684210527, | |
| "loss": 57.2674, | |
| "step": 584 | |
| }, | |
| { | |
| "crossentropy": 2.7703919410705566, | |
| "epoch": 9.009, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.0002913792928059896, | |
| "learning_rate": 0.00043684210526315795, | |
| "loss": 55.587, | |
| "step": 585 | |
| }, | |
| { | |
| "crossentropy": 2.887115478515625, | |
| "epoch": 9.01, | |
| "grad_norm": 0.80859375, | |
| "grad_norm_var": 0.00040486653645833336, | |
| "learning_rate": 0.0004357894736842105, | |
| "loss": 56.62, | |
| "step": 586 | |
| }, | |
| { | |
| "crossentropy": 2.9152616262435913, | |
| "epoch": 9.011, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.0004546483357747396, | |
| "learning_rate": 0.00043473684210526315, | |
| "loss": 56.9964, | |
| "step": 587 | |
| }, | |
| { | |
| "crossentropy": 2.8132872581481934, | |
| "epoch": 9.012, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.00042972564697265627, | |
| "learning_rate": 0.00043368421052631577, | |
| "loss": 55.0605, | |
| "step": 588 | |
| }, | |
| { | |
| "crossentropy": 2.7516273260116577, | |
| "epoch": 9.013, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.000437164306640625, | |
| "learning_rate": 0.00043263157894736845, | |
| "loss": 56.6974, | |
| "step": 589 | |
| }, | |
| { | |
| "crossentropy": 2.7954412698745728, | |
| "epoch": 9.014, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.00043837229410807293, | |
| "learning_rate": 0.0004315789473684211, | |
| "loss": 56.952, | |
| "step": 590 | |
| }, | |
| { | |
| "crossentropy": 2.7435046434402466, | |
| "epoch": 9.015, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.0004514058430989583, | |
| "learning_rate": 0.0004305263157894737, | |
| "loss": 56.8651, | |
| "step": 591 | |
| }, | |
| { | |
| "crossentropy": 2.85153329372406, | |
| "epoch": 9.016, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.0004892349243164062, | |
| "learning_rate": 0.00042947368421052633, | |
| "loss": 55.8497, | |
| "step": 592 | |
| }, | |
| { | |
| "crossentropy": 2.698570728302002, | |
| "epoch": 9.017, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.00039265950520833335, | |
| "learning_rate": 0.00042842105263157896, | |
| "loss": 55.0285, | |
| "step": 593 | |
| }, | |
| { | |
| "crossentropy": 2.8842196464538574, | |
| "epoch": 9.018, | |
| "grad_norm": 0.78515625, | |
| "grad_norm_var": 0.00041681925455729166, | |
| "learning_rate": 0.0004273684210526316, | |
| "loss": 56.7253, | |
| "step": 594 | |
| }, | |
| { | |
| "crossentropy": 2.703991651535034, | |
| "epoch": 9.019, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.000494384765625, | |
| "learning_rate": 0.0004263157894736842, | |
| "loss": 54.9482, | |
| "step": 595 | |
| }, | |
| { | |
| "crossentropy": 2.828360438346863, | |
| "epoch": 9.02, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.00047092437744140626, | |
| "learning_rate": 0.00042526315789473683, | |
| "loss": 56.6368, | |
| "step": 596 | |
| }, | |
| { | |
| "crossentropy": 2.7593008279800415, | |
| "epoch": 9.021, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.0005039850870768229, | |
| "learning_rate": 0.0004242105263157895, | |
| "loss": 56.4671, | |
| "step": 597 | |
| }, | |
| { | |
| "crossentropy": 2.716967463493347, | |
| "epoch": 9.022, | |
| "grad_norm": 0.78125, | |
| "grad_norm_var": 0.0005449930826822917, | |
| "learning_rate": 0.00042315789473684214, | |
| "loss": 55.5724, | |
| "step": 598 | |
| }, | |
| { | |
| "crossentropy": 2.774570941925049, | |
| "epoch": 9.023, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.0005816141764322917, | |
| "learning_rate": 0.00042210526315789477, | |
| "loss": 56.5112, | |
| "step": 599 | |
| }, | |
| { | |
| "crossentropy": 2.72983181476593, | |
| "epoch": 9.024, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.0005899429321289063, | |
| "learning_rate": 0.00042105263157894734, | |
| "loss": 55.1346, | |
| "step": 600 | |
| }, | |
| { | |
| "crossentropy": 2.8420827388763428, | |
| "epoch": 9.025, | |
| "grad_norm": 0.76171875, | |
| "grad_norm_var": 0.0005706787109375, | |
| "learning_rate": 0.00042, | |
| "loss": 56.3536, | |
| "step": 601 | |
| }, | |
| { | |
| "crossentropy": 2.880441188812256, | |
| "epoch": 9.026, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.00036722819010416666, | |
| "learning_rate": 0.00041894736842105264, | |
| "loss": 57.4732, | |
| "step": 602 | |
| }, | |
| { | |
| "crossentropy": 2.8829102516174316, | |
| "epoch": 9.027, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.0003374735514322917, | |
| "learning_rate": 0.00041789473684210527, | |
| "loss": 57.025, | |
| "step": 603 | |
| }, | |
| { | |
| "crossentropy": 2.7579070329666138, | |
| "epoch": 9.028, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.00033976236979166665, | |
| "learning_rate": 0.0004168421052631579, | |
| "loss": 54.3299, | |
| "step": 604 | |
| }, | |
| { | |
| "crossentropy": 2.79787278175354, | |
| "epoch": 9.029, | |
| "grad_norm": 0.72265625, | |
| "grad_norm_var": 0.0003890355428059896, | |
| "learning_rate": 0.0004157894736842106, | |
| "loss": 56.4175, | |
| "step": 605 | |
| }, | |
| { | |
| "crossentropy": 2.814871907234192, | |
| "epoch": 9.03, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.0004109064737955729, | |
| "learning_rate": 0.0004147368421052632, | |
| "loss": 56.6412, | |
| "step": 606 | |
| }, | |
| { | |
| "crossentropy": 2.9164552688598633, | |
| "epoch": 9.031, | |
| "grad_norm": 0.76171875, | |
| "grad_norm_var": 0.00038553873697916665, | |
| "learning_rate": 0.0004136842105263158, | |
| "loss": 56.5248, | |
| "step": 607 | |
| }, | |
| { | |
| "crossentropy": 2.851742148399353, | |
| "epoch": 9.032, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.000394439697265625, | |
| "learning_rate": 0.0004126315789473684, | |
| "loss": 57.026, | |
| "step": 608 | |
| }, | |
| { | |
| "crossentropy": 2.8921687602996826, | |
| "epoch": 9.033, | |
| "grad_norm": 0.77734375, | |
| "grad_norm_var": 0.000424957275390625, | |
| "learning_rate": 0.0004115789473684211, | |
| "loss": 56.07, | |
| "step": 609 | |
| }, | |
| { | |
| "crossentropy": 2.748379707336426, | |
| "epoch": 9.034, | |
| "grad_norm": 0.71484375, | |
| "grad_norm_var": 0.0004547119140625, | |
| "learning_rate": 0.0004105263157894737, | |
| "loss": 54.6257, | |
| "step": 610 | |
| }, | |
| { | |
| "crossentropy": 2.8249024152755737, | |
| "epoch": 9.035, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.0004124323527018229, | |
| "learning_rate": 0.00040947368421052633, | |
| "loss": 55.8105, | |
| "step": 611 | |
| }, | |
| { | |
| "crossentropy": 2.793903946876526, | |
| "epoch": 9.036, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.0004124323527018229, | |
| "learning_rate": 0.00040842105263157896, | |
| "loss": 56.7126, | |
| "step": 612 | |
| }, | |
| { | |
| "crossentropy": 2.8926647901535034, | |
| "epoch": 9.037, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.0004018147786458333, | |
| "learning_rate": 0.00040736842105263164, | |
| "loss": 57.0243, | |
| "step": 613 | |
| }, | |
| { | |
| "crossentropy": 2.722157835960388, | |
| "epoch": 9.038, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0003448486328125, | |
| "learning_rate": 0.0004063157894736842, | |
| "loss": 56.15, | |
| "step": 614 | |
| }, | |
| { | |
| "crossentropy": 2.879178524017334, | |
| "epoch": 9.039, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.000333404541015625, | |
| "learning_rate": 0.00040526315789473684, | |
| "loss": 56.5565, | |
| "step": 615 | |
| }, | |
| { | |
| "crossentropy": 2.870532989501953, | |
| "epoch": 9.04, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.00032450358072916665, | |
| "learning_rate": 0.00040421052631578946, | |
| "loss": 57.2944, | |
| "step": 616 | |
| }, | |
| { | |
| "crossentropy": 2.6587058305740356, | |
| "epoch": 9.041, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.0003513971964518229, | |
| "learning_rate": 0.0004031578947368421, | |
| "loss": 55.9194, | |
| "step": 617 | |
| }, | |
| { | |
| "crossentropy": 2.8166728019714355, | |
| "epoch": 9.042, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.00035800933837890623, | |
| "learning_rate": 0.00040210526315789477, | |
| "loss": 56.4466, | |
| "step": 618 | |
| }, | |
| { | |
| "crossentropy": 2.7822681665420532, | |
| "epoch": 9.043, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0003753662109375, | |
| "learning_rate": 0.0004010526315789474, | |
| "loss": 55.9381, | |
| "step": 619 | |
| }, | |
| { | |
| "crossentropy": 2.7443701028823853, | |
| "epoch": 9.044, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.00037784576416015624, | |
| "learning_rate": 0.0004, | |
| "loss": 55.3256, | |
| "step": 620 | |
| }, | |
| { | |
| "crossentropy": 2.77031409740448, | |
| "epoch": 9.045, | |
| "grad_norm": 0.76171875, | |
| "grad_norm_var": 0.0003371556599934896, | |
| "learning_rate": 0.0003989473684210526, | |
| "loss": 55.9228, | |
| "step": 621 | |
| }, | |
| { | |
| "crossentropy": 2.7815150022506714, | |
| "epoch": 9.046, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.0003371556599934896, | |
| "learning_rate": 0.00039789473684210527, | |
| "loss": 55.1725, | |
| "step": 622 | |
| }, | |
| { | |
| "crossentropy": 2.8636655807495117, | |
| "epoch": 9.047, | |
| "grad_norm": 0.76171875, | |
| "grad_norm_var": 0.0003371556599934896, | |
| "learning_rate": 0.0003968421052631579, | |
| "loss": 56.2236, | |
| "step": 623 | |
| }, | |
| { | |
| "crossentropy": 2.8161659240722656, | |
| "epoch": 9.048, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.0003356297810872396, | |
| "learning_rate": 0.0003957894736842105, | |
| "loss": 56.7137, | |
| "step": 624 | |
| }, | |
| { | |
| "crossentropy": 2.873541831970215, | |
| "epoch": 9.049, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.00027561187744140625, | |
| "learning_rate": 0.00039473684210526315, | |
| "loss": 58.0352, | |
| "step": 625 | |
| }, | |
| { | |
| "crossentropy": 2.830682158470154, | |
| "epoch": 9.05, | |
| "grad_norm": 0.78515625, | |
| "grad_norm_var": 0.00028934478759765623, | |
| "learning_rate": 0.00039368421052631583, | |
| "loss": 56.4589, | |
| "step": 626 | |
| }, | |
| { | |
| "crossentropy": 2.8676915168762207, | |
| "epoch": 9.051, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.0002979914347330729, | |
| "learning_rate": 0.00039263157894736846, | |
| "loss": 57.3925, | |
| "step": 627 | |
| }, | |
| { | |
| "crossentropy": 2.846311330795288, | |
| "epoch": 9.052, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.00031229654947916664, | |
| "learning_rate": 0.000391578947368421, | |
| "loss": 57.5314, | |
| "step": 628 | |
| }, | |
| { | |
| "crossentropy": 2.8266266584396362, | |
| "epoch": 9.053, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.00029474894205729166, | |
| "learning_rate": 0.00039052631578947365, | |
| "loss": 57.0562, | |
| "step": 629 | |
| }, | |
| { | |
| "crossentropy": 2.8677502870559692, | |
| "epoch": 9.054, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.00029474894205729166, | |
| "learning_rate": 0.00038947368421052633, | |
| "loss": 56.3468, | |
| "step": 630 | |
| }, | |
| { | |
| "crossentropy": 2.819862723350525, | |
| "epoch": 9.055, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.00029474894205729166, | |
| "learning_rate": 0.00038842105263157896, | |
| "loss": 57.1646, | |
| "step": 631 | |
| }, | |
| { | |
| "crossentropy": 2.8373594284057617, | |
| "epoch": 9.056, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.0002873102823893229, | |
| "learning_rate": 0.0003873684210526316, | |
| "loss": 57.8049, | |
| "step": 632 | |
| }, | |
| { | |
| "crossentropy": 2.844488739967346, | |
| "epoch": 9.057, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0002440770467122396, | |
| "learning_rate": 0.0003863157894736842, | |
| "loss": 57.1337, | |
| "step": 633 | |
| }, | |
| { | |
| "crossentropy": 2.8051549196243286, | |
| "epoch": 9.058, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.0002227783203125, | |
| "learning_rate": 0.0003852631578947369, | |
| "loss": 55.1455, | |
| "step": 634 | |
| }, | |
| { | |
| "crossentropy": 2.889759659767151, | |
| "epoch": 9.059, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.00022373199462890624, | |
| "learning_rate": 0.00038421052631578946, | |
| "loss": 57.317, | |
| "step": 635 | |
| }, | |
| { | |
| "crossentropy": 2.9444185495376587, | |
| "epoch": 9.06, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0002135594685872396, | |
| "learning_rate": 0.0003831578947368421, | |
| "loss": 57.7695, | |
| "step": 636 | |
| }, | |
| { | |
| "crossentropy": 2.893734097480774, | |
| "epoch": 9.061, | |
| "grad_norm": 0.78515625, | |
| "grad_norm_var": 0.00026543935139973957, | |
| "learning_rate": 0.0003821052631578947, | |
| "loss": 56.677, | |
| "step": 637 | |
| }, | |
| { | |
| "crossentropy": 2.8171656131744385, | |
| "epoch": 9.062, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.00028661092122395836, | |
| "learning_rate": 0.0003810526315789474, | |
| "loss": 55.8621, | |
| "step": 638 | |
| }, | |
| { | |
| "crossentropy": 2.9001771211624146, | |
| "epoch": 9.063, | |
| "grad_norm": 0.76171875, | |
| "grad_norm_var": 0.00028661092122395836, | |
| "learning_rate": 0.00038, | |
| "loss": 58.2953, | |
| "step": 639 | |
| }, | |
| { | |
| "crossentropy": 2.8124544620513916, | |
| "epoch": 9.064, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.000241851806640625, | |
| "learning_rate": 0.00037894736842105265, | |
| "loss": 56.2274, | |
| "step": 640 | |
| }, | |
| { | |
| "crossentropy": 2.691709280014038, | |
| "epoch": 10.001, | |
| "grad_norm": 0.7109375, | |
| "grad_norm_var": 0.00030492146809895835, | |
| "learning_rate": 0.00037789473684210527, | |
| "loss": 53.1371, | |
| "step": 641 | |
| }, | |
| { | |
| "crossentropy": 2.748944401741028, | |
| "epoch": 10.002, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.00030358632405598957, | |
| "learning_rate": 0.0003768421052631579, | |
| "loss": 55.4782, | |
| "step": 642 | |
| }, | |
| { | |
| "crossentropy": 2.8715380430221558, | |
| "epoch": 10.003, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.0003127415974934896, | |
| "learning_rate": 0.0003757894736842105, | |
| "loss": 56.7533, | |
| "step": 643 | |
| }, | |
| { | |
| "crossentropy": 2.7285449504852295, | |
| "epoch": 10.004, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.0003110249837239583, | |
| "learning_rate": 0.00037473684210526315, | |
| "loss": 56.2589, | |
| "step": 644 | |
| }, | |
| { | |
| "crossentropy": 2.9220064878463745, | |
| "epoch": 10.005, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0003110249837239583, | |
| "learning_rate": 0.0003736842105263158, | |
| "loss": 55.9243, | |
| "step": 645 | |
| }, | |
| { | |
| "crossentropy": 2.886649250984192, | |
| "epoch": 10.006, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.0002898534138997396, | |
| "learning_rate": 0.00037263157894736846, | |
| "loss": 58.1926, | |
| "step": 646 | |
| }, | |
| { | |
| "crossentropy": 2.7266123294830322, | |
| "epoch": 10.007, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.0002898534138997396, | |
| "learning_rate": 0.0003715789473684211, | |
| "loss": 56.4874, | |
| "step": 647 | |
| }, | |
| { | |
| "crossentropy": 2.8823474645614624, | |
| "epoch": 10.008, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.0003193537394205729, | |
| "learning_rate": 0.0003705263157894737, | |
| "loss": 56.2834, | |
| "step": 648 | |
| }, | |
| { | |
| "crossentropy": 2.8225646018981934, | |
| "epoch": 10.009, | |
| "grad_norm": 0.7890625, | |
| "grad_norm_var": 0.0004058202107747396, | |
| "learning_rate": 0.00036947368421052633, | |
| "loss": 56.0732, | |
| "step": 649 | |
| }, | |
| { | |
| "crossentropy": 2.7276248931884766, | |
| "epoch": 10.01, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.00042622884114583334, | |
| "learning_rate": 0.00036842105263157896, | |
| "loss": 54.2119, | |
| "step": 650 | |
| }, | |
| { | |
| "crossentropy": 2.851339101791382, | |
| "epoch": 10.011, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.00046971638997395836, | |
| "learning_rate": 0.0003673684210526316, | |
| "loss": 57.2307, | |
| "step": 651 | |
| }, | |
| { | |
| "crossentropy": 2.81039559841156, | |
| "epoch": 10.012, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0003890355428059896, | |
| "learning_rate": 0.0003663157894736842, | |
| "loss": 55.5788, | |
| "step": 652 | |
| }, | |
| { | |
| "crossentropy": 2.917648434638977, | |
| "epoch": 10.013, | |
| "grad_norm": 0.79296875, | |
| "grad_norm_var": 0.00048726399739583334, | |
| "learning_rate": 0.00036526315789473684, | |
| "loss": 58.2029, | |
| "step": 653 | |
| }, | |
| { | |
| "crossentropy": 2.8261080980300903, | |
| "epoch": 10.014, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.0005116780598958333, | |
| "learning_rate": 0.0003642105263157895, | |
| "loss": 56.476, | |
| "step": 654 | |
| }, | |
| { | |
| "crossentropy": 2.7618919610977173, | |
| "epoch": 10.015, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.0005080540974934896, | |
| "learning_rate": 0.00036315789473684214, | |
| "loss": 55.7106, | |
| "step": 655 | |
| }, | |
| { | |
| "crossentropy": 2.804520845413208, | |
| "epoch": 10.016, | |
| "grad_norm": 0.7890625, | |
| "grad_norm_var": 0.0005889256795247396, | |
| "learning_rate": 0.00036210526315789477, | |
| "loss": 56.374, | |
| "step": 656 | |
| }, | |
| { | |
| "crossentropy": 2.751194953918457, | |
| "epoch": 10.017, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.0005141576131184896, | |
| "learning_rate": 0.00036105263157894734, | |
| "loss": 55.439, | |
| "step": 657 | |
| }, | |
| { | |
| "crossentropy": 2.893984794616699, | |
| "epoch": 10.018, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0005156834920247396, | |
| "learning_rate": 0.00035999999999999997, | |
| "loss": 59.3323, | |
| "step": 658 | |
| }, | |
| { | |
| "crossentropy": 2.833603858947754, | |
| "epoch": 10.019, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.0005395889282226562, | |
| "learning_rate": 0.00035894736842105265, | |
| "loss": 56.9289, | |
| "step": 659 | |
| }, | |
| { | |
| "crossentropy": 2.7936803102493286, | |
| "epoch": 10.02, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0005368550618489583, | |
| "learning_rate": 0.0003578947368421053, | |
| "loss": 55.5988, | |
| "step": 660 | |
| }, | |
| { | |
| "crossentropy": 2.797438144683838, | |
| "epoch": 10.021, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.000537109375, | |
| "learning_rate": 0.0003568421052631579, | |
| "loss": 56.618, | |
| "step": 661 | |
| }, | |
| { | |
| "crossentropy": 2.6816130876541138, | |
| "epoch": 10.022, | |
| "grad_norm": 0.79296875, | |
| "grad_norm_var": 0.0006326675415039062, | |
| "learning_rate": 0.0003557894736842105, | |
| "loss": 55.4491, | |
| "step": 662 | |
| }, | |
| { | |
| "crossentropy": 2.775187849998474, | |
| "epoch": 10.023, | |
| "grad_norm": 0.71875, | |
| "grad_norm_var": 0.0007029215494791667, | |
| "learning_rate": 0.0003547368421052632, | |
| "loss": 54.9814, | |
| "step": 663 | |
| }, | |
| { | |
| "crossentropy": 2.9283132553100586, | |
| "epoch": 10.024, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.0006931940714518229, | |
| "learning_rate": 0.0003536842105263158, | |
| "loss": 57.377, | |
| "step": 664 | |
| }, | |
| { | |
| "crossentropy": 2.754921555519104, | |
| "epoch": 10.025, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.000608062744140625, | |
| "learning_rate": 0.0003526315789473684, | |
| "loss": 55.0399, | |
| "step": 665 | |
| }, | |
| { | |
| "crossentropy": 2.87182080745697, | |
| "epoch": 10.026, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.0005950927734375, | |
| "learning_rate": 0.00035157894736842103, | |
| "loss": 56.8635, | |
| "step": 666 | |
| }, | |
| { | |
| "crossentropy": 2.787114143371582, | |
| "epoch": 10.027, | |
| "grad_norm": 0.79296875, | |
| "grad_norm_var": 0.0006545384724934896, | |
| "learning_rate": 0.0003505263157894737, | |
| "loss": 56.3069, | |
| "step": 667 | |
| }, | |
| { | |
| "crossentropy": 2.770491600036621, | |
| "epoch": 10.028, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.0006581624348958333, | |
| "learning_rate": 0.00034947368421052634, | |
| "loss": 56.8387, | |
| "step": 668 | |
| }, | |
| { | |
| "crossentropy": 2.8516522645950317, | |
| "epoch": 10.029, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0005614598592122396, | |
| "learning_rate": 0.00034842105263157896, | |
| "loss": 56.9971, | |
| "step": 669 | |
| }, | |
| { | |
| "crossentropy": 2.750314712524414, | |
| "epoch": 10.03, | |
| "grad_norm": 0.69921875, | |
| "grad_norm_var": 0.0007109959920247396, | |
| "learning_rate": 0.0003473684210526316, | |
| "loss": 54.2891, | |
| "step": 670 | |
| }, | |
| { | |
| "crossentropy": 2.7711139917373657, | |
| "epoch": 10.031, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.0007138570149739583, | |
| "learning_rate": 0.0003463157894736842, | |
| "loss": 55.5093, | |
| "step": 671 | |
| }, | |
| { | |
| "crossentropy": 2.7986541986465454, | |
| "epoch": 10.032, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.0006138483683268229, | |
| "learning_rate": 0.00034526315789473684, | |
| "loss": 56.5041, | |
| "step": 672 | |
| }, | |
| { | |
| "crossentropy": 2.8304941654205322, | |
| "epoch": 10.033, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.0005960464477539062, | |
| "learning_rate": 0.00034421052631578947, | |
| "loss": 56.5678, | |
| "step": 673 | |
| }, | |
| { | |
| "crossentropy": 2.748772144317627, | |
| "epoch": 10.034, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.0006336847941080729, | |
| "learning_rate": 0.0003431578947368421, | |
| "loss": 56.2156, | |
| "step": 674 | |
| }, | |
| { | |
| "crossentropy": 2.911113739013672, | |
| "epoch": 10.035, | |
| "grad_norm": 0.80859375, | |
| "grad_norm_var": 0.0008005777994791667, | |
| "learning_rate": 0.00034210526315789477, | |
| "loss": 57.6437, | |
| "step": 675 | |
| }, | |
| { | |
| "crossentropy": 2.8851195573806763, | |
| "epoch": 10.036, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.0008117039998372396, | |
| "learning_rate": 0.0003410526315789474, | |
| "loss": 57.8075, | |
| "step": 676 | |
| }, | |
| { | |
| "crossentropy": 2.8386086225509644, | |
| "epoch": 10.037, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.000852203369140625, | |
| "learning_rate": 0.00034, | |
| "loss": 56.9642, | |
| "step": 677 | |
| }, | |
| { | |
| "crossentropy": 2.837552309036255, | |
| "epoch": 10.038, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.0007862726847330729, | |
| "learning_rate": 0.0003389473684210526, | |
| "loss": 56.0464, | |
| "step": 678 | |
| }, | |
| { | |
| "crossentropy": 2.7876830101013184, | |
| "epoch": 10.039, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.0007181803385416667, | |
| "learning_rate": 0.0003378947368421053, | |
| "loss": 57.0013, | |
| "step": 679 | |
| }, | |
| { | |
| "crossentropy": 2.6957716941833496, | |
| "epoch": 10.04, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0007008234659830729, | |
| "learning_rate": 0.0003368421052631579, | |
| "loss": 54.8812, | |
| "step": 680 | |
| }, | |
| { | |
| "crossentropy": 2.858887195587158, | |
| "epoch": 10.041, | |
| "grad_norm": 0.78125, | |
| "grad_norm_var": 0.0007476806640625, | |
| "learning_rate": 0.00033578947368421053, | |
| "loss": 57.4649, | |
| "step": 681 | |
| }, | |
| { | |
| "crossentropy": 2.835731267929077, | |
| "epoch": 10.042, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.0007766087849934896, | |
| "learning_rate": 0.00033473684210526315, | |
| "loss": 57.3096, | |
| "step": 682 | |
| }, | |
| { | |
| "crossentropy": 2.7914698123931885, | |
| "epoch": 10.043, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0006528218587239583, | |
| "learning_rate": 0.00033368421052631583, | |
| "loss": 54.5885, | |
| "step": 683 | |
| }, | |
| { | |
| "crossentropy": 2.7969202995300293, | |
| "epoch": 10.044, | |
| "grad_norm": 0.72265625, | |
| "grad_norm_var": 0.0006947835286458333, | |
| "learning_rate": 0.00033263157894736846, | |
| "loss": 55.199, | |
| "step": 684 | |
| }, | |
| { | |
| "crossentropy": 2.7273718118667603, | |
| "epoch": 10.045, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0006947835286458333, | |
| "learning_rate": 0.00033157894736842103, | |
| "loss": 54.9177, | |
| "step": 685 | |
| }, | |
| { | |
| "crossentropy": 2.9325153827667236, | |
| "epoch": 10.046, | |
| "grad_norm": 0.77734375, | |
| "grad_norm_var": 0.0005777994791666667, | |
| "learning_rate": 0.00033052631578947366, | |
| "loss": 57.8565, | |
| "step": 686 | |
| }, | |
| { | |
| "crossentropy": 2.8253042697906494, | |
| "epoch": 10.047, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.0005955378214518229, | |
| "learning_rate": 0.00032947368421052634, | |
| "loss": 56.367, | |
| "step": 687 | |
| }, | |
| { | |
| "crossentropy": 2.9794070720672607, | |
| "epoch": 10.048, | |
| "grad_norm": 0.80078125, | |
| "grad_norm_var": 0.0007196426391601563, | |
| "learning_rate": 0.00032842105263157896, | |
| "loss": 58.8961, | |
| "step": 688 | |
| }, | |
| { | |
| "crossentropy": 2.8630117177963257, | |
| "epoch": 10.049, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.000693511962890625, | |
| "learning_rate": 0.0003273684210526316, | |
| "loss": 55.8875, | |
| "step": 689 | |
| }, | |
| { | |
| "crossentropy": 2.885907292366028, | |
| "epoch": 10.05, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.0007049560546875, | |
| "learning_rate": 0.0003263157894736842, | |
| "loss": 57.5656, | |
| "step": 690 | |
| }, | |
| { | |
| "crossentropy": 2.809995174407959, | |
| "epoch": 10.051, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0005050023396809896, | |
| "learning_rate": 0.0003252631578947369, | |
| "loss": 57.0467, | |
| "step": 691 | |
| }, | |
| { | |
| "crossentropy": 2.9418861865997314, | |
| "epoch": 10.052, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.00048421223958333335, | |
| "learning_rate": 0.00032421052631578947, | |
| "loss": 57.9401, | |
| "step": 692 | |
| }, | |
| { | |
| "crossentropy": 2.795152425765991, | |
| "epoch": 10.053, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.0004597345987955729, | |
| "learning_rate": 0.0003231578947368421, | |
| "loss": 56.1651, | |
| "step": 693 | |
| }, | |
| { | |
| "crossentropy": 2.81339955329895, | |
| "epoch": 10.054, | |
| "grad_norm": 0.8515625, | |
| "grad_norm_var": 0.0010171890258789062, | |
| "learning_rate": 0.0003221052631578947, | |
| "loss": 56.595, | |
| "step": 694 | |
| }, | |
| { | |
| "crossentropy": 2.9207112789154053, | |
| "epoch": 10.055, | |
| "grad_norm": 0.76171875, | |
| "grad_norm_var": 0.001004473368326823, | |
| "learning_rate": 0.0003210526315789474, | |
| "loss": 57.6239, | |
| "step": 695 | |
| }, | |
| { | |
| "crossentropy": 2.7555843591690063, | |
| "epoch": 10.056, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.0009958902994791667, | |
| "learning_rate": 0.00032, | |
| "loss": 55.6938, | |
| "step": 696 | |
| }, | |
| { | |
| "crossentropy": 2.7774757146835327, | |
| "epoch": 10.057, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.0009724299112955729, | |
| "learning_rate": 0.00031894736842105265, | |
| "loss": 56.9648, | |
| "step": 697 | |
| }, | |
| { | |
| "crossentropy": 2.934939742088318, | |
| "epoch": 10.058, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.0009144465128580729, | |
| "learning_rate": 0.0003178947368421053, | |
| "loss": 57.7034, | |
| "step": 698 | |
| }, | |
| { | |
| "crossentropy": 2.8009008169174194, | |
| "epoch": 10.059, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.0009213765462239583, | |
| "learning_rate": 0.00031684210526315785, | |
| "loss": 56.4452, | |
| "step": 699 | |
| }, | |
| { | |
| "crossentropy": 2.9469012022018433, | |
| "epoch": 10.06, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0008447647094726562, | |
| "learning_rate": 0.00031578947368421053, | |
| "loss": 58.8681, | |
| "step": 700 | |
| }, | |
| { | |
| "crossentropy": 2.7601804733276367, | |
| "epoch": 10.061, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.0008562723795572917, | |
| "learning_rate": 0.00031473684210526316, | |
| "loss": 56.4526, | |
| "step": 701 | |
| }, | |
| { | |
| "crossentropy": 2.8851720094680786, | |
| "epoch": 10.062, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.0008406956990559896, | |
| "learning_rate": 0.0003136842105263158, | |
| "loss": 56.0664, | |
| "step": 702 | |
| }, | |
| { | |
| "crossentropy": 2.7828023433685303, | |
| "epoch": 10.063, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.0008483250935872396, | |
| "learning_rate": 0.0003126315789473684, | |
| "loss": 55.3705, | |
| "step": 703 | |
| }, | |
| { | |
| "crossentropy": 2.9105297327041626, | |
| "epoch": 10.064, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.0007705052693684895, | |
| "learning_rate": 0.0003115789473684211, | |
| "loss": 57.2773, | |
| "step": 704 | |
| }, | |
| { | |
| "crossentropy": 2.706776738166809, | |
| "epoch": 11.001, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.0008045832316080729, | |
| "learning_rate": 0.0003105263157894737, | |
| "loss": 54.4294, | |
| "step": 705 | |
| }, | |
| { | |
| "crossentropy": 2.785434126853943, | |
| "epoch": 11.002, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.000811767578125, | |
| "learning_rate": 0.0003094736842105263, | |
| "loss": 56.077, | |
| "step": 706 | |
| }, | |
| { | |
| "crossentropy": 2.708212375640869, | |
| "epoch": 11.003, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.0008254369099934896, | |
| "learning_rate": 0.0003084210526315789, | |
| "loss": 53.1839, | |
| "step": 707 | |
| }, | |
| { | |
| "crossentropy": 2.7735451459884644, | |
| "epoch": 11.004, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0008300145467122396, | |
| "learning_rate": 0.0003073684210526316, | |
| "loss": 56.1457, | |
| "step": 708 | |
| }, | |
| { | |
| "crossentropy": 2.8523166179656982, | |
| "epoch": 11.005, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.0001713434855143229, | |
| "learning_rate": 0.0003063157894736842, | |
| "loss": 57.1347, | |
| "step": 709 | |
| }, | |
| { | |
| "crossentropy": 2.914563536643982, | |
| "epoch": 11.006, | |
| "grad_norm": 0.78515625, | |
| "grad_norm_var": 0.0002522150675455729, | |
| "learning_rate": 0.00030526315789473684, | |
| "loss": 57.723, | |
| "step": 710 | |
| }, | |
| { | |
| "crossentropy": 2.7213577032089233, | |
| "epoch": 11.007, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.00027033487955729164, | |
| "learning_rate": 0.00030421052631578947, | |
| "loss": 55.6901, | |
| "step": 711 | |
| }, | |
| { | |
| "crossentropy": 2.912691831588745, | |
| "epoch": 11.008, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0002608617146809896, | |
| "learning_rate": 0.00030315789473684215, | |
| "loss": 57.1725, | |
| "step": 712 | |
| }, | |
| { | |
| "crossentropy": 2.8348140716552734, | |
| "epoch": 11.009, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0002766927083333333, | |
| "learning_rate": 0.0003021052631578947, | |
| "loss": 56.1903, | |
| "step": 713 | |
| }, | |
| { | |
| "crossentropy": 2.8859379291534424, | |
| "epoch": 11.01, | |
| "grad_norm": 0.77734375, | |
| "grad_norm_var": 0.00032145182291666666, | |
| "learning_rate": 0.00030105263157894735, | |
| "loss": 58.3572, | |
| "step": 714 | |
| }, | |
| { | |
| "crossentropy": 2.778756260871887, | |
| "epoch": 11.011, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.00032526652018229166, | |
| "learning_rate": 0.0003, | |
| "loss": 56.3487, | |
| "step": 715 | |
| }, | |
| { | |
| "crossentropy": 2.982430934906006, | |
| "epoch": 11.012, | |
| "grad_norm": 0.78125, | |
| "grad_norm_var": 0.0003539403279622396, | |
| "learning_rate": 0.00029894736842105265, | |
| "loss": 57.9075, | |
| "step": 716 | |
| }, | |
| { | |
| "crossentropy": 2.8630356788635254, | |
| "epoch": 11.013, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.000394439697265625, | |
| "learning_rate": 0.0002978947368421053, | |
| "loss": 56.721, | |
| "step": 717 | |
| }, | |
| { | |
| "crossentropy": 2.6543571949005127, | |
| "epoch": 11.014, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.000394439697265625, | |
| "learning_rate": 0.0002968421052631579, | |
| "loss": 54.8384, | |
| "step": 718 | |
| }, | |
| { | |
| "crossentropy": 2.869687557220459, | |
| "epoch": 11.015, | |
| "grad_norm": 0.77734375, | |
| "grad_norm_var": 0.000382232666015625, | |
| "learning_rate": 0.00029578947368421053, | |
| "loss": 57.8614, | |
| "step": 719 | |
| }, | |
| { | |
| "crossentropy": 2.846762180328369, | |
| "epoch": 11.016, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.00034885406494140626, | |
| "learning_rate": 0.00029473684210526316, | |
| "loss": 57.1553, | |
| "step": 720 | |
| }, | |
| { | |
| "crossentropy": 2.7584152221679688, | |
| "epoch": 11.017, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0003280003865559896, | |
| "learning_rate": 0.0002936842105263158, | |
| "loss": 55.2654, | |
| "step": 721 | |
| }, | |
| { | |
| "crossentropy": 2.8962432146072388, | |
| "epoch": 11.018, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.0003682454427083333, | |
| "learning_rate": 0.0002926315789473684, | |
| "loss": 56.6584, | |
| "step": 722 | |
| }, | |
| { | |
| "crossentropy": 2.7644320726394653, | |
| "epoch": 11.019, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0003590265909830729, | |
| "learning_rate": 0.00029157894736842104, | |
| "loss": 55.4874, | |
| "step": 723 | |
| }, | |
| { | |
| "crossentropy": 2.6588387489318848, | |
| "epoch": 11.02, | |
| "grad_norm": 0.72265625, | |
| "grad_norm_var": 0.0004241943359375, | |
| "learning_rate": 0.0002905263157894737, | |
| "loss": 54.2554, | |
| "step": 724 | |
| }, | |
| { | |
| "crossentropy": 2.8226871490478516, | |
| "epoch": 11.021, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.0004330952962239583, | |
| "learning_rate": 0.00028947368421052634, | |
| "loss": 56.599, | |
| "step": 725 | |
| }, | |
| { | |
| "crossentropy": 2.8901766538619995, | |
| "epoch": 11.022, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.0003859837849934896, | |
| "learning_rate": 0.00028842105263157897, | |
| "loss": 56.2554, | |
| "step": 726 | |
| }, | |
| { | |
| "crossentropy": 2.772639751434326, | |
| "epoch": 11.023, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0003712336222330729, | |
| "learning_rate": 0.0002873684210526316, | |
| "loss": 56.7091, | |
| "step": 727 | |
| }, | |
| { | |
| "crossentropy": 2.8242876529693604, | |
| "epoch": 11.024, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.00035502115885416665, | |
| "learning_rate": 0.0002863157894736842, | |
| "loss": 55.5862, | |
| "step": 728 | |
| }, | |
| { | |
| "crossentropy": 2.9208909273147583, | |
| "epoch": 11.025, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.00034122467041015626, | |
| "learning_rate": 0.00028526315789473685, | |
| "loss": 56.8342, | |
| "step": 729 | |
| }, | |
| { | |
| "crossentropy": 2.6857054233551025, | |
| "epoch": 11.026, | |
| "grad_norm": 0.71875, | |
| "grad_norm_var": 0.000321197509765625, | |
| "learning_rate": 0.00028421052631578947, | |
| "loss": 54.6538, | |
| "step": 730 | |
| }, | |
| { | |
| "crossentropy": 2.8337063789367676, | |
| "epoch": 11.027, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.0003021240234375, | |
| "learning_rate": 0.0002831578947368421, | |
| "loss": 57.5415, | |
| "step": 731 | |
| }, | |
| { | |
| "crossentropy": 2.786600351333618, | |
| "epoch": 11.028, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.00020084381103515624, | |
| "learning_rate": 0.0002821052631578948, | |
| "loss": 55.5913, | |
| "step": 732 | |
| }, | |
| { | |
| "crossentropy": 2.690516948699951, | |
| "epoch": 11.029, | |
| "grad_norm": 0.77734375, | |
| "grad_norm_var": 0.00027256011962890626, | |
| "learning_rate": 0.0002810526315789474, | |
| "loss": 53.9815, | |
| "step": 733 | |
| }, | |
| { | |
| "crossentropy": 2.7604899406433105, | |
| "epoch": 11.03, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.00028324127197265625, | |
| "learning_rate": 0.00028000000000000003, | |
| "loss": 54.824, | |
| "step": 734 | |
| }, | |
| { | |
| "crossentropy": 2.7863725423812866, | |
| "epoch": 11.031, | |
| "grad_norm": 0.70703125, | |
| "grad_norm_var": 0.00026950836181640627, | |
| "learning_rate": 0.0002789473684210526, | |
| "loss": 55.8541, | |
| "step": 735 | |
| }, | |
| { | |
| "crossentropy": 2.8232542276382446, | |
| "epoch": 11.032, | |
| "grad_norm": 0.76171875, | |
| "grad_norm_var": 0.000302886962890625, | |
| "learning_rate": 0.0002778947368421053, | |
| "loss": 56.7185, | |
| "step": 736 | |
| }, | |
| { | |
| "crossentropy": 2.7417391538619995, | |
| "epoch": 11.033, | |
| "grad_norm": 0.76171875, | |
| "grad_norm_var": 0.0003330866495768229, | |
| "learning_rate": 0.0002768421052631579, | |
| "loss": 57.2269, | |
| "step": 737 | |
| }, | |
| { | |
| "crossentropy": 2.819975972175598, | |
| "epoch": 11.034, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.0003265380859375, | |
| "learning_rate": 0.00027578947368421053, | |
| "loss": 56.6551, | |
| "step": 738 | |
| }, | |
| { | |
| "crossentropy": 2.9095921516418457, | |
| "epoch": 11.035, | |
| "grad_norm": 0.77734375, | |
| "grad_norm_var": 0.00040836334228515624, | |
| "learning_rate": 0.00027473684210526316, | |
| "loss": 56.8075, | |
| "step": 739 | |
| }, | |
| { | |
| "crossentropy": 2.91646945476532, | |
| "epoch": 11.036, | |
| "grad_norm": 0.76171875, | |
| "grad_norm_var": 0.0003956476847330729, | |
| "learning_rate": 0.00027368421052631584, | |
| "loss": 57.2162, | |
| "step": 740 | |
| }, | |
| { | |
| "crossentropy": 2.9138376712799072, | |
| "epoch": 11.037, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0004088719685872396, | |
| "learning_rate": 0.00027263157894736847, | |
| "loss": 56.7284, | |
| "step": 741 | |
| }, | |
| { | |
| "crossentropy": 2.84540057182312, | |
| "epoch": 11.038, | |
| "grad_norm": 0.70703125, | |
| "grad_norm_var": 0.0005045572916666667, | |
| "learning_rate": 0.00027157894736842104, | |
| "loss": 55.8447, | |
| "step": 742 | |
| }, | |
| { | |
| "crossentropy": 2.7151050567626953, | |
| "epoch": 11.039, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.0005116780598958333, | |
| "learning_rate": 0.00027052631578947366, | |
| "loss": 55.9625, | |
| "step": 743 | |
| }, | |
| { | |
| "crossentropy": 2.9206697940826416, | |
| "epoch": 11.04, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.0005619684855143229, | |
| "learning_rate": 0.0002694736842105263, | |
| "loss": 57.6432, | |
| "step": 744 | |
| }, | |
| { | |
| "crossentropy": 2.8370519876480103, | |
| "epoch": 11.041, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.000577545166015625, | |
| "learning_rate": 0.00026842105263157897, | |
| "loss": 55.4367, | |
| "step": 745 | |
| }, | |
| { | |
| "crossentropy": 2.8395227193832397, | |
| "epoch": 11.042, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.0005156834920247396, | |
| "learning_rate": 0.0002673684210526316, | |
| "loss": 56.171, | |
| "step": 746 | |
| }, | |
| { | |
| "crossentropy": 2.786525845527649, | |
| "epoch": 11.043, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.0005467096964518229, | |
| "learning_rate": 0.0002663157894736842, | |
| "loss": 57.2315, | |
| "step": 747 | |
| }, | |
| { | |
| "crossentropy": 2.9187395572662354, | |
| "epoch": 11.044, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.0005472819010416667, | |
| "learning_rate": 0.00026526315789473685, | |
| "loss": 57.2511, | |
| "step": 748 | |
| }, | |
| { | |
| "crossentropy": 2.7996715307235718, | |
| "epoch": 11.045, | |
| "grad_norm": 0.70703125, | |
| "grad_norm_var": 0.0006182352701822917, | |
| "learning_rate": 0.0002642105263157895, | |
| "loss": 57.045, | |
| "step": 749 | |
| }, | |
| { | |
| "crossentropy": 2.8842201232910156, | |
| "epoch": 11.046, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0006001154581705729, | |
| "learning_rate": 0.0002631578947368421, | |
| "loss": 57.6562, | |
| "step": 750 | |
| }, | |
| { | |
| "crossentropy": 2.805160880088806, | |
| "epoch": 11.047, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.0004795710245768229, | |
| "learning_rate": 0.0002621052631578947, | |
| "loss": 56.6321, | |
| "step": 751 | |
| }, | |
| { | |
| "crossentropy": 2.9018155336380005, | |
| "epoch": 11.048, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.00048802693684895835, | |
| "learning_rate": 0.00026105263157894735, | |
| "loss": 57.7864, | |
| "step": 752 | |
| }, | |
| { | |
| "crossentropy": 2.8916486501693726, | |
| "epoch": 11.049, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.0005156834920247396, | |
| "learning_rate": 0.00026000000000000003, | |
| "loss": 56.4045, | |
| "step": 753 | |
| }, | |
| { | |
| "crossentropy": 2.8715128898620605, | |
| "epoch": 11.05, | |
| "grad_norm": 0.78125, | |
| "grad_norm_var": 0.0005429585774739584, | |
| "learning_rate": 0.00025894736842105266, | |
| "loss": 57.9977, | |
| "step": 754 | |
| }, | |
| { | |
| "crossentropy": 2.798703193664551, | |
| "epoch": 11.051, | |
| "grad_norm": 0.76171875, | |
| "grad_norm_var": 0.0005083719889322917, | |
| "learning_rate": 0.0002578947368421053, | |
| "loss": 57.3152, | |
| "step": 755 | |
| }, | |
| { | |
| "crossentropy": 2.9445585012435913, | |
| "epoch": 11.052, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0005024592081705729, | |
| "learning_rate": 0.00025684210526315786, | |
| "loss": 57.4762, | |
| "step": 756 | |
| }, | |
| { | |
| "crossentropy": 2.8702229261398315, | |
| "epoch": 11.053, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.0004900614420572917, | |
| "learning_rate": 0.00025578947368421054, | |
| "loss": 57.5399, | |
| "step": 757 | |
| }, | |
| { | |
| "crossentropy": 2.8421502113342285, | |
| "epoch": 11.054, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0003636042277018229, | |
| "learning_rate": 0.00025473684210526316, | |
| "loss": 58.3388, | |
| "step": 758 | |
| }, | |
| { | |
| "crossentropy": 2.9485079050064087, | |
| "epoch": 11.055, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0003483454386393229, | |
| "learning_rate": 0.0002536842105263158, | |
| "loss": 58.1595, | |
| "step": 759 | |
| }, | |
| { | |
| "crossentropy": 2.7640316486358643, | |
| "epoch": 11.056, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0003193537394205729, | |
| "learning_rate": 0.0002526315789473684, | |
| "loss": 55.4479, | |
| "step": 760 | |
| }, | |
| { | |
| "crossentropy": 2.9783310890197754, | |
| "epoch": 11.057, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.00032755533854166664, | |
| "learning_rate": 0.0002515789473684211, | |
| "loss": 58.1017, | |
| "step": 761 | |
| }, | |
| { | |
| "crossentropy": 2.8926788568496704, | |
| "epoch": 11.058, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.00034357706705729165, | |
| "learning_rate": 0.0002505263157894737, | |
| "loss": 56.6529, | |
| "step": 762 | |
| }, | |
| { | |
| "crossentropy": 2.82997465133667, | |
| "epoch": 11.059, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.00034357706705729165, | |
| "learning_rate": 0.00024947368421052635, | |
| "loss": 56.8071, | |
| "step": 763 | |
| }, | |
| { | |
| "crossentropy": 2.813700556755066, | |
| "epoch": 11.06, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.0003742853800455729, | |
| "learning_rate": 0.00024842105263157897, | |
| "loss": 55.8355, | |
| "step": 764 | |
| }, | |
| { | |
| "crossentropy": 2.7938244342803955, | |
| "epoch": 11.061, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.0002491633097330729, | |
| "learning_rate": 0.0002473684210526316, | |
| "loss": 55.8545, | |
| "step": 765 | |
| }, | |
| { | |
| "crossentropy": 2.8322267532348633, | |
| "epoch": 11.062, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.00024102528889973957, | |
| "learning_rate": 0.0002463157894736842, | |
| "loss": 57.2321, | |
| "step": 766 | |
| }, | |
| { | |
| "crossentropy": 2.7156633138656616, | |
| "epoch": 11.063, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.00024102528889973957, | |
| "learning_rate": 0.00024526315789473685, | |
| "loss": 54.8035, | |
| "step": 767 | |
| }, | |
| { | |
| "crossentropy": 2.634229063987732, | |
| "epoch": 11.064, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.00023167928059895834, | |
| "learning_rate": 0.0002442105263157895, | |
| "loss": 54.4477, | |
| "step": 768 | |
| }, | |
| { | |
| "crossentropy": 2.7296793460845947, | |
| "epoch": 12.001, | |
| "grad_norm": 0.78125, | |
| "grad_norm_var": 0.0002196629842122396, | |
| "learning_rate": 0.0002431578947368421, | |
| "loss": 56.2054, | |
| "step": 769 | |
| }, | |
| { | |
| "crossentropy": 2.8033626079559326, | |
| "epoch": 12.002, | |
| "grad_norm": 0.76171875, | |
| "grad_norm_var": 0.0002196629842122396, | |
| "learning_rate": 0.00024210526315789475, | |
| "loss": 56.5435, | |
| "step": 770 | |
| }, | |
| { | |
| "crossentropy": 2.8827661275863647, | |
| "epoch": 12.003, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.0002608617146809896, | |
| "learning_rate": 0.00024105263157894738, | |
| "loss": 56.553, | |
| "step": 771 | |
| }, | |
| { | |
| "crossentropy": 2.798128128051758, | |
| "epoch": 12.004, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.0002598444620768229, | |
| "learning_rate": 0.00024, | |
| "loss": 56.1054, | |
| "step": 772 | |
| }, | |
| { | |
| "crossentropy": 2.79728901386261, | |
| "epoch": 12.005, | |
| "grad_norm": 0.72265625, | |
| "grad_norm_var": 0.0003072102864583333, | |
| "learning_rate": 0.00023894736842105263, | |
| "loss": 57.2369, | |
| "step": 773 | |
| }, | |
| { | |
| "crossentropy": 2.758781909942627, | |
| "epoch": 12.006, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.0003191630045572917, | |
| "learning_rate": 0.00023789473684210529, | |
| "loss": 54.4973, | |
| "step": 774 | |
| }, | |
| { | |
| "crossentropy": 2.9363932609558105, | |
| "epoch": 12.007, | |
| "grad_norm": 0.8046875, | |
| "grad_norm_var": 0.0005096435546875, | |
| "learning_rate": 0.00023684210526315788, | |
| "loss": 57.2391, | |
| "step": 775 | |
| }, | |
| { | |
| "crossentropy": 2.8779611587524414, | |
| "epoch": 12.008, | |
| "grad_norm": 0.76171875, | |
| "grad_norm_var": 0.0004961649576822917, | |
| "learning_rate": 0.00023578947368421054, | |
| "loss": 56.8895, | |
| "step": 776 | |
| }, | |
| { | |
| "crossentropy": 2.824180841445923, | |
| "epoch": 12.009, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.00047550201416015627, | |
| "learning_rate": 0.00023473684210526316, | |
| "loss": 57.5237, | |
| "step": 777 | |
| }, | |
| { | |
| "crossentropy": 2.8232827186584473, | |
| "epoch": 12.01, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.00047397613525390625, | |
| "learning_rate": 0.00023368421052631582, | |
| "loss": 56.4012, | |
| "step": 778 | |
| }, | |
| { | |
| "crossentropy": 2.817031145095825, | |
| "epoch": 12.011, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.0004521052042643229, | |
| "learning_rate": 0.00023263157894736841, | |
| "loss": 55.2773, | |
| "step": 779 | |
| }, | |
| { | |
| "crossentropy": 2.731096386909485, | |
| "epoch": 12.012, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.00044733683268229166, | |
| "learning_rate": 0.00023157894736842107, | |
| "loss": 55.6655, | |
| "step": 780 | |
| }, | |
| { | |
| "crossentropy": 2.7005906105041504, | |
| "epoch": 12.013, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.00045166015625, | |
| "learning_rate": 0.0002305263157894737, | |
| "loss": 55.448, | |
| "step": 781 | |
| }, | |
| { | |
| "crossentropy": 2.7934017181396484, | |
| "epoch": 12.014, | |
| "grad_norm": 0.71484375, | |
| "grad_norm_var": 0.0005266825358072917, | |
| "learning_rate": 0.00022947368421052632, | |
| "loss": 54.7745, | |
| "step": 782 | |
| }, | |
| { | |
| "crossentropy": 2.8664556741714478, | |
| "epoch": 12.015, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0005228042602539063, | |
| "learning_rate": 0.00022842105263157895, | |
| "loss": 57.5525, | |
| "step": 783 | |
| }, | |
| { | |
| "crossentropy": 2.797169327735901, | |
| "epoch": 12.016, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.000516510009765625, | |
| "learning_rate": 0.0002273684210526316, | |
| "loss": 56.7851, | |
| "step": 784 | |
| }, | |
| { | |
| "crossentropy": 2.8436896800994873, | |
| "epoch": 12.017, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.00045617421468098957, | |
| "learning_rate": 0.00022631578947368422, | |
| "loss": 56.0802, | |
| "step": 785 | |
| }, | |
| { | |
| "crossentropy": 2.664197087287903, | |
| "epoch": 12.018, | |
| "grad_norm": 0.79296875, | |
| "grad_norm_var": 0.0005853652954101562, | |
| "learning_rate": 0.00022526315789473682, | |
| "loss": 54.7135, | |
| "step": 786 | |
| }, | |
| { | |
| "crossentropy": 2.6850061416625977, | |
| "epoch": 12.019, | |
| "grad_norm": 0.78125, | |
| "grad_norm_var": 0.0006209691365559896, | |
| "learning_rate": 0.00022421052631578948, | |
| "loss": 55.2802, | |
| "step": 787 | |
| }, | |
| { | |
| "crossentropy": 2.7967450618743896, | |
| "epoch": 12.02, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0006245930989583333, | |
| "learning_rate": 0.0002231578947368421, | |
| "loss": 57.1233, | |
| "step": 788 | |
| }, | |
| { | |
| "crossentropy": 2.872409701347351, | |
| "epoch": 12.021, | |
| "grad_norm": 0.72265625, | |
| "grad_norm_var": 0.0006245930989583333, | |
| "learning_rate": 0.00022210526315789476, | |
| "loss": 56.7216, | |
| "step": 789 | |
| }, | |
| { | |
| "crossentropy": 2.8131096363067627, | |
| "epoch": 12.022, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.0006245930989583333, | |
| "learning_rate": 0.00022105263157894735, | |
| "loss": 55.5856, | |
| "step": 790 | |
| }, | |
| { | |
| "crossentropy": 2.847323417663574, | |
| "epoch": 12.023, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.00041300455729166666, | |
| "learning_rate": 0.00022, | |
| "loss": 56.5617, | |
| "step": 791 | |
| }, | |
| { | |
| "crossentropy": 2.8835920095443726, | |
| "epoch": 12.024, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.00039615631103515623, | |
| "learning_rate": 0.00021894736842105263, | |
| "loss": 57.2592, | |
| "step": 792 | |
| }, | |
| { | |
| "crossentropy": 2.830420136451721, | |
| "epoch": 12.025, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.00040531158447265625, | |
| "learning_rate": 0.00021789473684210526, | |
| "loss": 56.2414, | |
| "step": 793 | |
| }, | |
| { | |
| "crossentropy": 2.755424976348877, | |
| "epoch": 12.026, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.0003895441691080729, | |
| "learning_rate": 0.00021684210526315789, | |
| "loss": 55.1508, | |
| "step": 794 | |
| }, | |
| { | |
| "crossentropy": 2.737109422683716, | |
| "epoch": 12.027, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0003903706868489583, | |
| "learning_rate": 0.00021578947368421054, | |
| "loss": 55.5294, | |
| "step": 795 | |
| }, | |
| { | |
| "crossentropy": 2.8102558851242065, | |
| "epoch": 12.028, | |
| "grad_norm": 0.71875, | |
| "grad_norm_var": 0.0004221598307291667, | |
| "learning_rate": 0.00021473684210526316, | |
| "loss": 55.6168, | |
| "step": 796 | |
| }, | |
| { | |
| "crossentropy": 2.8057838678359985, | |
| "epoch": 12.029, | |
| "grad_norm": 0.72265625, | |
| "grad_norm_var": 0.00044854482014973957, | |
| "learning_rate": 0.0002136842105263158, | |
| "loss": 56.8656, | |
| "step": 797 | |
| }, | |
| { | |
| "crossentropy": 2.7969436645507812, | |
| "epoch": 12.03, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.0004514058430989583, | |
| "learning_rate": 0.00021263157894736842, | |
| "loss": 56.5856, | |
| "step": 798 | |
| }, | |
| { | |
| "crossentropy": 2.854472517967224, | |
| "epoch": 12.031, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.0004595438639322917, | |
| "learning_rate": 0.00021157894736842107, | |
| "loss": 57.0911, | |
| "step": 799 | |
| }, | |
| { | |
| "crossentropy": 2.705453634262085, | |
| "epoch": 12.032, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.0004622777303059896, | |
| "learning_rate": 0.00021052631578947367, | |
| "loss": 53.9657, | |
| "step": 800 | |
| }, | |
| { | |
| "crossentropy": 2.823037266731262, | |
| "epoch": 12.033, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.0004622777303059896, | |
| "learning_rate": 0.00020947368421052632, | |
| "loss": 56.4739, | |
| "step": 801 | |
| }, | |
| { | |
| "crossentropy": 2.826178789138794, | |
| "epoch": 12.034, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0003407796223958333, | |
| "learning_rate": 0.00020842105263157895, | |
| "loss": 57.7884, | |
| "step": 802 | |
| }, | |
| { | |
| "crossentropy": 2.868850827217102, | |
| "epoch": 12.035, | |
| "grad_norm": 0.7109375, | |
| "grad_norm_var": 0.0003110249837239583, | |
| "learning_rate": 0.0002073684210526316, | |
| "loss": 56.8438, | |
| "step": 803 | |
| }, | |
| { | |
| "crossentropy": 2.870843529701233, | |
| "epoch": 12.036, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.0003218968709309896, | |
| "learning_rate": 0.0002063157894736842, | |
| "loss": 57.301, | |
| "step": 804 | |
| }, | |
| { | |
| "crossentropy": 2.8651864528656006, | |
| "epoch": 12.037, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.00031305948893229165, | |
| "learning_rate": 0.00020526315789473685, | |
| "loss": 57.0191, | |
| "step": 805 | |
| }, | |
| { | |
| "crossentropy": 2.9826983213424683, | |
| "epoch": 12.038, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.0003559748331705729, | |
| "learning_rate": 0.00020421052631578948, | |
| "loss": 57.7126, | |
| "step": 806 | |
| }, | |
| { | |
| "crossentropy": 2.7978984117507935, | |
| "epoch": 12.039, | |
| "grad_norm": 0.71875, | |
| "grad_norm_var": 0.0003956476847330729, | |
| "learning_rate": 0.0002031578947368421, | |
| "loss": 55.453, | |
| "step": 807 | |
| }, | |
| { | |
| "crossentropy": 2.668187379837036, | |
| "epoch": 12.04, | |
| "grad_norm": 0.71484375, | |
| "grad_norm_var": 0.000443267822265625, | |
| "learning_rate": 0.00020210526315789473, | |
| "loss": 53.316, | |
| "step": 808 | |
| }, | |
| { | |
| "crossentropy": 2.7966721057891846, | |
| "epoch": 12.041, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.00042292277018229164, | |
| "learning_rate": 0.00020105263157894738, | |
| "loss": 55.6218, | |
| "step": 809 | |
| }, | |
| { | |
| "crossentropy": 2.739244222640991, | |
| "epoch": 12.042, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.0004404703776041667, | |
| "learning_rate": 0.0002, | |
| "loss": 56.3849, | |
| "step": 810 | |
| }, | |
| { | |
| "crossentropy": 2.783705234527588, | |
| "epoch": 12.043, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.00045369466145833335, | |
| "learning_rate": 0.00019894736842105264, | |
| "loss": 57.1809, | |
| "step": 811 | |
| }, | |
| { | |
| "crossentropy": 2.8895143270492554, | |
| "epoch": 12.044, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.000437164306640625, | |
| "learning_rate": 0.00019789473684210526, | |
| "loss": 58.2889, | |
| "step": 812 | |
| }, | |
| { | |
| "crossentropy": 2.8347127437591553, | |
| "epoch": 12.045, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.00041478474934895834, | |
| "learning_rate": 0.00019684210526315791, | |
| "loss": 56.1132, | |
| "step": 813 | |
| }, | |
| { | |
| "crossentropy": 2.8544065952301025, | |
| "epoch": 12.046, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.0003651301066080729, | |
| "learning_rate": 0.0001957894736842105, | |
| "loss": 56.9511, | |
| "step": 814 | |
| }, | |
| { | |
| "crossentropy": 2.7046180963516235, | |
| "epoch": 12.047, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.0003600438435872396, | |
| "learning_rate": 0.00019473684210526317, | |
| "loss": 54.0523, | |
| "step": 815 | |
| }, | |
| { | |
| "crossentropy": 2.912235379219055, | |
| "epoch": 12.048, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.0003560384114583333, | |
| "learning_rate": 0.0001936842105263158, | |
| "loss": 57.7615, | |
| "step": 816 | |
| }, | |
| { | |
| "crossentropy": 2.8122063875198364, | |
| "epoch": 12.049, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.00035953521728515625, | |
| "learning_rate": 0.00019263157894736845, | |
| "loss": 57.801, | |
| "step": 817 | |
| }, | |
| { | |
| "crossentropy": 2.74889874458313, | |
| "epoch": 12.05, | |
| "grad_norm": 0.76171875, | |
| "grad_norm_var": 0.00034688313802083335, | |
| "learning_rate": 0.00019157894736842104, | |
| "loss": 57.198, | |
| "step": 818 | |
| }, | |
| { | |
| "crossentropy": 2.9050692319869995, | |
| "epoch": 12.051, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.00029697418212890623, | |
| "learning_rate": 0.0001905263157894737, | |
| "loss": 56.6941, | |
| "step": 819 | |
| }, | |
| { | |
| "crossentropy": 2.7928906679153442, | |
| "epoch": 12.052, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.00028584798177083335, | |
| "learning_rate": 0.00018947368421052632, | |
| "loss": 56.1308, | |
| "step": 820 | |
| }, | |
| { | |
| "crossentropy": 2.7914161682128906, | |
| "epoch": 12.053, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.00034383138020833336, | |
| "learning_rate": 0.00018842105263157895, | |
| "loss": 56.491, | |
| "step": 821 | |
| }, | |
| { | |
| "crossentropy": 2.7516348361968994, | |
| "epoch": 12.054, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.0002929051717122396, | |
| "learning_rate": 0.00018736842105263158, | |
| "loss": 56.2496, | |
| "step": 822 | |
| }, | |
| { | |
| "crossentropy": 2.7576178312301636, | |
| "epoch": 12.055, | |
| "grad_norm": 0.71875, | |
| "grad_norm_var": 0.0002929051717122396, | |
| "learning_rate": 0.00018631578947368423, | |
| "loss": 56.6343, | |
| "step": 823 | |
| }, | |
| { | |
| "crossentropy": 2.8195626735687256, | |
| "epoch": 12.056, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.0002486546834309896, | |
| "learning_rate": 0.00018526315789473685, | |
| "loss": 57.3709, | |
| "step": 824 | |
| }, | |
| { | |
| "crossentropy": 2.7876213788986206, | |
| "epoch": 12.057, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0002486546834309896, | |
| "learning_rate": 0.00018421052631578948, | |
| "loss": 55.6639, | |
| "step": 825 | |
| }, | |
| { | |
| "crossentropy": 2.88731849193573, | |
| "epoch": 12.058, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.00024153391520182293, | |
| "learning_rate": 0.0001831578947368421, | |
| "loss": 57.4164, | |
| "step": 826 | |
| }, | |
| { | |
| "crossentropy": 2.9272228479385376, | |
| "epoch": 12.059, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.00028279622395833336, | |
| "learning_rate": 0.00018210526315789476, | |
| "loss": 57.3789, | |
| "step": 827 | |
| }, | |
| { | |
| "crossentropy": 2.833387613296509, | |
| "epoch": 12.06, | |
| "grad_norm": 0.78515625, | |
| "grad_norm_var": 0.0003864924112955729, | |
| "learning_rate": 0.00018105263157894739, | |
| "loss": 57.4141, | |
| "step": 828 | |
| }, | |
| { | |
| "crossentropy": 2.896293044090271, | |
| "epoch": 12.061, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.0003895441691080729, | |
| "learning_rate": 0.00017999999999999998, | |
| "loss": 58.0117, | |
| "step": 829 | |
| }, | |
| { | |
| "crossentropy": 2.831289052963257, | |
| "epoch": 12.062, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.00039265950520833335, | |
| "learning_rate": 0.00017894736842105264, | |
| "loss": 56.8003, | |
| "step": 830 | |
| }, | |
| { | |
| "crossentropy": 2.8674780130386353, | |
| "epoch": 12.063, | |
| "grad_norm": 0.69140625, | |
| "grad_norm_var": 0.0005523045857747396, | |
| "learning_rate": 0.00017789473684210526, | |
| "loss": 56.9051, | |
| "step": 831 | |
| }, | |
| { | |
| "crossentropy": 2.9149069786071777, | |
| "epoch": 12.064, | |
| "grad_norm": 0.78515625, | |
| "grad_norm_var": 0.0006467183430989583, | |
| "learning_rate": 0.0001768421052631579, | |
| "loss": 57.5528, | |
| "step": 832 | |
| }, | |
| { | |
| "crossentropy": 2.824967622756958, | |
| "epoch": 13.001, | |
| "grad_norm": 0.77734375, | |
| "grad_norm_var": 0.0007888158162434896, | |
| "learning_rate": 0.00017578947368421052, | |
| "loss": 56.2444, | |
| "step": 833 | |
| }, | |
| { | |
| "crossentropy": 2.784726858139038, | |
| "epoch": 13.002, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0007802327473958333, | |
| "learning_rate": 0.00017473684210526317, | |
| "loss": 54.6207, | |
| "step": 834 | |
| }, | |
| { | |
| "crossentropy": 2.924685478210449, | |
| "epoch": 13.003, | |
| "grad_norm": 0.76171875, | |
| "grad_norm_var": 0.0007664362589518229, | |
| "learning_rate": 0.0001736842105263158, | |
| "loss": 57.5746, | |
| "step": 835 | |
| }, | |
| { | |
| "crossentropy": 2.897099494934082, | |
| "epoch": 13.004, | |
| "grad_norm": 0.71484375, | |
| "grad_norm_var": 0.0008188883463541666, | |
| "learning_rate": 0.00017263157894736842, | |
| "loss": 56.227, | |
| "step": 836 | |
| }, | |
| { | |
| "crossentropy": 2.7194520235061646, | |
| "epoch": 13.005, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.0008188883463541666, | |
| "learning_rate": 0.00017157894736842105, | |
| "loss": 54.1649, | |
| "step": 837 | |
| }, | |
| { | |
| "crossentropy": 2.7639235258102417, | |
| "epoch": 13.006, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.000791168212890625, | |
| "learning_rate": 0.0001705263157894737, | |
| "loss": 55.093, | |
| "step": 838 | |
| }, | |
| { | |
| "crossentropy": 2.7386432886123657, | |
| "epoch": 13.007, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.0008066813151041667, | |
| "learning_rate": 0.0001694736842105263, | |
| "loss": 54.6862, | |
| "step": 839 | |
| }, | |
| { | |
| "crossentropy": 2.814002513885498, | |
| "epoch": 13.008, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0008066813151041667, | |
| "learning_rate": 0.00016842105263157895, | |
| "loss": 56.0865, | |
| "step": 840 | |
| }, | |
| { | |
| "crossentropy": 2.8583909273147583, | |
| "epoch": 13.009, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.0007933934529622395, | |
| "learning_rate": 0.00016736842105263158, | |
| "loss": 56.4426, | |
| "step": 841 | |
| }, | |
| { | |
| "crossentropy": 2.79672372341156, | |
| "epoch": 13.01, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.000775146484375, | |
| "learning_rate": 0.00016631578947368423, | |
| "loss": 55.4208, | |
| "step": 842 | |
| }, | |
| { | |
| "crossentropy": 2.8741607666015625, | |
| "epoch": 13.011, | |
| "grad_norm": 0.7109375, | |
| "grad_norm_var": 0.0007715225219726562, | |
| "learning_rate": 0.00016526315789473683, | |
| "loss": 56.4795, | |
| "step": 843 | |
| }, | |
| { | |
| "crossentropy": 2.7828328609466553, | |
| "epoch": 13.012, | |
| "grad_norm": 0.71875, | |
| "grad_norm_var": 0.000803375244140625, | |
| "learning_rate": 0.00016421052631578948, | |
| "loss": 54.505, | |
| "step": 844 | |
| }, | |
| { | |
| "crossentropy": 2.7500873804092407, | |
| "epoch": 13.013, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.0007974624633789063, | |
| "learning_rate": 0.0001631578947368421, | |
| "loss": 54.9745, | |
| "step": 845 | |
| }, | |
| { | |
| "crossentropy": 2.7167465686798096, | |
| "epoch": 13.014, | |
| "grad_norm": 0.71875, | |
| "grad_norm_var": 0.0006510416666666666, | |
| "learning_rate": 0.00016210526315789473, | |
| "loss": 54.637, | |
| "step": 846 | |
| }, | |
| { | |
| "crossentropy": 2.838550090789795, | |
| "epoch": 13.015, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.000553131103515625, | |
| "learning_rate": 0.00016105263157894736, | |
| "loss": 56.9125, | |
| "step": 847 | |
| }, | |
| { | |
| "crossentropy": 2.728639602661133, | |
| "epoch": 13.016, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.00042667388916015623, | |
| "learning_rate": 0.00016, | |
| "loss": 55.2942, | |
| "step": 848 | |
| }, | |
| { | |
| "crossentropy": 2.8884626626968384, | |
| "epoch": 13.017, | |
| "grad_norm": 0.81640625, | |
| "grad_norm_var": 0.000719134012858073, | |
| "learning_rate": 0.00015894736842105264, | |
| "loss": 58.9905, | |
| "step": 849 | |
| }, | |
| { | |
| "crossentropy": 2.6712993383407593, | |
| "epoch": 13.018, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.000681304931640625, | |
| "learning_rate": 0.00015789473684210527, | |
| "loss": 54.4483, | |
| "step": 850 | |
| }, | |
| { | |
| "crossentropy": 2.8165026903152466, | |
| "epoch": 13.019, | |
| "grad_norm": 0.71484375, | |
| "grad_norm_var": 0.000687408447265625, | |
| "learning_rate": 0.0001568421052631579, | |
| "loss": 56.7351, | |
| "step": 851 | |
| }, | |
| { | |
| "crossentropy": 2.9343361854553223, | |
| "epoch": 13.02, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.0006713231404622395, | |
| "learning_rate": 0.00015578947368421054, | |
| "loss": 58.3815, | |
| "step": 852 | |
| }, | |
| { | |
| "crossentropy": 2.931985378265381, | |
| "epoch": 13.021, | |
| "grad_norm": 0.70703125, | |
| "grad_norm_var": 0.0007403055826822917, | |
| "learning_rate": 0.00015473684210526314, | |
| "loss": 57.2322, | |
| "step": 853 | |
| }, | |
| { | |
| "crossentropy": 2.8127466440200806, | |
| "epoch": 13.022, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.0007314046223958333, | |
| "learning_rate": 0.0001536842105263158, | |
| "loss": 55.7789, | |
| "step": 854 | |
| }, | |
| { | |
| "crossentropy": 2.8048046827316284, | |
| "epoch": 13.023, | |
| "grad_norm": 0.70703125, | |
| "grad_norm_var": 0.0007932027180989583, | |
| "learning_rate": 0.00015263157894736842, | |
| "loss": 56.2506, | |
| "step": 855 | |
| }, | |
| { | |
| "crossentropy": 2.787923812866211, | |
| "epoch": 13.024, | |
| "grad_norm": 0.703125, | |
| "grad_norm_var": 0.0008656819661458333, | |
| "learning_rate": 0.00015157894736842108, | |
| "loss": 54.8036, | |
| "step": 856 | |
| }, | |
| { | |
| "crossentropy": 2.829706907272339, | |
| "epoch": 13.025, | |
| "grad_norm": 0.72265625, | |
| "grad_norm_var": 0.0007893880208333333, | |
| "learning_rate": 0.00015052631578947367, | |
| "loss": 56.4524, | |
| "step": 857 | |
| }, | |
| { | |
| "crossentropy": 2.810854196548462, | |
| "epoch": 13.026, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0007893880208333333, | |
| "learning_rate": 0.00014947368421052633, | |
| "loss": 56.9006, | |
| "step": 858 | |
| }, | |
| { | |
| "crossentropy": 2.7314698696136475, | |
| "epoch": 13.027, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0007609049479166667, | |
| "learning_rate": 0.00014842105263157895, | |
| "loss": 55.7139, | |
| "step": 859 | |
| }, | |
| { | |
| "crossentropy": 2.747408866882324, | |
| "epoch": 13.028, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.0007440567016601563, | |
| "learning_rate": 0.00014736842105263158, | |
| "loss": 54.023, | |
| "step": 860 | |
| }, | |
| { | |
| "crossentropy": 2.8353370428085327, | |
| "epoch": 13.029, | |
| "grad_norm": 0.72265625, | |
| "grad_norm_var": 0.0007287979125976563, | |
| "learning_rate": 0.0001463157894736842, | |
| "loss": 55.9548, | |
| "step": 861 | |
| }, | |
| { | |
| "crossentropy": 2.757072687149048, | |
| "epoch": 13.03, | |
| "grad_norm": 0.7109375, | |
| "grad_norm_var": 0.0007481257120768229, | |
| "learning_rate": 0.00014526315789473686, | |
| "loss": 54.7874, | |
| "step": 862 | |
| }, | |
| { | |
| "crossentropy": 2.8938580751419067, | |
| "epoch": 13.031, | |
| "grad_norm": 0.77734375, | |
| "grad_norm_var": 0.0008686701456705729, | |
| "learning_rate": 0.00014421052631578948, | |
| "loss": 58.1519, | |
| "step": 863 | |
| }, | |
| { | |
| "crossentropy": 2.7633159160614014, | |
| "epoch": 13.032, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0008707046508789062, | |
| "learning_rate": 0.0001431578947368421, | |
| "loss": 56.3047, | |
| "step": 864 | |
| }, | |
| { | |
| "crossentropy": 2.810257911682129, | |
| "epoch": 13.033, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.000528717041015625, | |
| "learning_rate": 0.00014210526315789474, | |
| "loss": 57.6018, | |
| "step": 865 | |
| }, | |
| { | |
| "crossentropy": 2.753029227256775, | |
| "epoch": 13.034, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.000520769755045573, | |
| "learning_rate": 0.0001410526315789474, | |
| "loss": 56.7448, | |
| "step": 866 | |
| }, | |
| { | |
| "crossentropy": 2.7661619186401367, | |
| "epoch": 13.035, | |
| "grad_norm": 0.71875, | |
| "grad_norm_var": 0.0005124409993489584, | |
| "learning_rate": 0.00014000000000000001, | |
| "loss": 56.7924, | |
| "step": 867 | |
| }, | |
| { | |
| "crossentropy": 2.915842056274414, | |
| "epoch": 13.036, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.00047143300374348957, | |
| "learning_rate": 0.00013894736842105264, | |
| "loss": 57.8143, | |
| "step": 868 | |
| }, | |
| { | |
| "crossentropy": 2.838146448135376, | |
| "epoch": 13.037, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.000445556640625, | |
| "learning_rate": 0.00013789473684210527, | |
| "loss": 57.094, | |
| "step": 869 | |
| }, | |
| { | |
| "crossentropy": 2.917192220687866, | |
| "epoch": 13.038, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0004608154296875, | |
| "learning_rate": 0.00013684210526315792, | |
| "loss": 58.5578, | |
| "step": 870 | |
| }, | |
| { | |
| "crossentropy": 2.8409940004348755, | |
| "epoch": 13.039, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.00040384928385416664, | |
| "learning_rate": 0.00013578947368421052, | |
| "loss": 55.9825, | |
| "step": 871 | |
| }, | |
| { | |
| "crossentropy": 2.70103657245636, | |
| "epoch": 13.04, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.0003208796183268229, | |
| "learning_rate": 0.00013473684210526314, | |
| "loss": 55.2032, | |
| "step": 872 | |
| }, | |
| { | |
| "crossentropy": 2.7406742572784424, | |
| "epoch": 13.041, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.0003031412760416667, | |
| "learning_rate": 0.0001336842105263158, | |
| "loss": 56.4148, | |
| "step": 873 | |
| }, | |
| { | |
| "crossentropy": 2.919757843017578, | |
| "epoch": 13.042, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.00034357706705729165, | |
| "learning_rate": 0.00013263157894736842, | |
| "loss": 57.4301, | |
| "step": 874 | |
| }, | |
| { | |
| "crossentropy": 2.7461795806884766, | |
| "epoch": 13.043, | |
| "grad_norm": 0.70703125, | |
| "grad_norm_var": 0.0004185358683268229, | |
| "learning_rate": 0.00013157894736842105, | |
| "loss": 54.4688, | |
| "step": 875 | |
| }, | |
| { | |
| "crossentropy": 2.7317864894866943, | |
| "epoch": 13.044, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0004252115885416667, | |
| "learning_rate": 0.00013052631578947368, | |
| "loss": 55.3466, | |
| "step": 876 | |
| }, | |
| { | |
| "crossentropy": 2.761776566505432, | |
| "epoch": 13.045, | |
| "grad_norm": 0.71875, | |
| "grad_norm_var": 0.0004353205362955729, | |
| "learning_rate": 0.00012947368421052633, | |
| "loss": 56.4126, | |
| "step": 877 | |
| }, | |
| { | |
| "crossentropy": 2.8709728717803955, | |
| "epoch": 13.046, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.0003788630167643229, | |
| "learning_rate": 0.00012842105263157893, | |
| "loss": 55.527, | |
| "step": 878 | |
| }, | |
| { | |
| "crossentropy": 2.9433149099349976, | |
| "epoch": 13.047, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0002878824869791667, | |
| "learning_rate": 0.00012736842105263158, | |
| "loss": 56.7329, | |
| "step": 879 | |
| }, | |
| { | |
| "crossentropy": 2.7954342365264893, | |
| "epoch": 13.048, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.0003611246744791667, | |
| "learning_rate": 0.0001263157894736842, | |
| "loss": 56.6293, | |
| "step": 880 | |
| }, | |
| { | |
| "crossentropy": 2.9298503398895264, | |
| "epoch": 13.049, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.00029703776041666664, | |
| "learning_rate": 0.00012526315789473686, | |
| "loss": 57.9576, | |
| "step": 881 | |
| }, | |
| { | |
| "crossentropy": 2.910145401954651, | |
| "epoch": 13.05, | |
| "grad_norm": 0.71484375, | |
| "grad_norm_var": 0.00032393137613932293, | |
| "learning_rate": 0.00012421052631578949, | |
| "loss": 57.0496, | |
| "step": 882 | |
| }, | |
| { | |
| "crossentropy": 2.80023729801178, | |
| "epoch": 13.051, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.00032138824462890625, | |
| "learning_rate": 0.0001231578947368421, | |
| "loss": 56.4184, | |
| "step": 883 | |
| }, | |
| { | |
| "crossentropy": 2.8765305280685425, | |
| "epoch": 13.052, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.0003326416015625, | |
| "learning_rate": 0.00012210526315789474, | |
| "loss": 57.1571, | |
| "step": 884 | |
| }, | |
| { | |
| "crossentropy": 2.877085328102112, | |
| "epoch": 13.053, | |
| "grad_norm": 0.72265625, | |
| "grad_norm_var": 0.0003402074178059896, | |
| "learning_rate": 0.00012105263157894738, | |
| "loss": 56.3713, | |
| "step": 885 | |
| }, | |
| { | |
| "crossentropy": 2.825315833091736, | |
| "epoch": 13.054, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.00033162434895833335, | |
| "learning_rate": 0.00012, | |
| "loss": 58.7512, | |
| "step": 886 | |
| }, | |
| { | |
| "crossentropy": 2.9117591381073, | |
| "epoch": 13.055, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.0003371556599934896, | |
| "learning_rate": 0.00011894736842105264, | |
| "loss": 57.4971, | |
| "step": 887 | |
| }, | |
| { | |
| "crossentropy": 2.95816707611084, | |
| "epoch": 13.056, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.0003371556599934896, | |
| "learning_rate": 0.00011789473684210527, | |
| "loss": 58.199, | |
| "step": 888 | |
| }, | |
| { | |
| "crossentropy": 2.877577304840088, | |
| "epoch": 13.057, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.00033969879150390624, | |
| "learning_rate": 0.00011684210526315791, | |
| "loss": 57.5653, | |
| "step": 889 | |
| }, | |
| { | |
| "crossentropy": 2.839280843734741, | |
| "epoch": 13.058, | |
| "grad_norm": 0.796875, | |
| "grad_norm_var": 0.0005238215128580729, | |
| "learning_rate": 0.00011578947368421053, | |
| "loss": 55.8712, | |
| "step": 890 | |
| }, | |
| { | |
| "crossentropy": 2.9066461324691772, | |
| "epoch": 13.059, | |
| "grad_norm": 0.78515625, | |
| "grad_norm_var": 0.0005823135375976563, | |
| "learning_rate": 0.00011473684210526316, | |
| "loss": 57.5233, | |
| "step": 891 | |
| }, | |
| { | |
| "crossentropy": 2.963346004486084, | |
| "epoch": 13.06, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.0005877176920572917, | |
| "learning_rate": 0.0001136842105263158, | |
| "loss": 58.7213, | |
| "step": 892 | |
| }, | |
| { | |
| "crossentropy": 2.887979745864868, | |
| "epoch": 13.061, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.0005551656087239583, | |
| "learning_rate": 0.00011263157894736841, | |
| "loss": 57.1343, | |
| "step": 893 | |
| }, | |
| { | |
| "crossentropy": 2.8576492071151733, | |
| "epoch": 13.062, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0005816141764322917, | |
| "learning_rate": 0.00011157894736842105, | |
| "loss": 57.0907, | |
| "step": 894 | |
| }, | |
| { | |
| "crossentropy": 2.950958728790283, | |
| "epoch": 13.063, | |
| "grad_norm": 0.71484375, | |
| "grad_norm_var": 0.0006372451782226563, | |
| "learning_rate": 0.00011052631578947368, | |
| "loss": 56.6607, | |
| "step": 895 | |
| }, | |
| { | |
| "crossentropy": 2.8504726886749268, | |
| "epoch": 13.064, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.000571441650390625, | |
| "learning_rate": 0.00010947368421052632, | |
| "loss": 57.7151, | |
| "step": 896 | |
| }, | |
| { | |
| "crossentropy": 2.878578782081604, | |
| "epoch": 14.001, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.0005177179972330729, | |
| "learning_rate": 0.00010842105263157894, | |
| "loss": 55.9718, | |
| "step": 897 | |
| }, | |
| { | |
| "crossentropy": 2.782250165939331, | |
| "epoch": 14.002, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.0005083719889322917, | |
| "learning_rate": 0.00010736842105263158, | |
| "loss": 57.0449, | |
| "step": 898 | |
| }, | |
| { | |
| "crossentropy": 2.8027374744415283, | |
| "epoch": 14.003, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.0004974365234375, | |
| "learning_rate": 0.00010631578947368421, | |
| "loss": 55.2197, | |
| "step": 899 | |
| }, | |
| { | |
| "crossentropy": 2.7636749744415283, | |
| "epoch": 14.004, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.0004819234212239583, | |
| "learning_rate": 0.00010526315789473683, | |
| "loss": 55.1626, | |
| "step": 900 | |
| }, | |
| { | |
| "crossentropy": 2.7231706380844116, | |
| "epoch": 14.005, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.00047702789306640624, | |
| "learning_rate": 0.00010421052631578947, | |
| "loss": 54.001, | |
| "step": 901 | |
| }, | |
| { | |
| "crossentropy": 2.883240580558777, | |
| "epoch": 14.006, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.000469970703125, | |
| "learning_rate": 0.0001031578947368421, | |
| "loss": 58.3301, | |
| "step": 902 | |
| }, | |
| { | |
| "crossentropy": 2.6990073919296265, | |
| "epoch": 14.007, | |
| "grad_norm": 0.703125, | |
| "grad_norm_var": 0.0005655288696289062, | |
| "learning_rate": 0.00010210526315789474, | |
| "loss": 55.7926, | |
| "step": 903 | |
| }, | |
| { | |
| "crossentropy": 2.786812424659729, | |
| "epoch": 14.008, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.0005706787109375, | |
| "learning_rate": 0.00010105263157894737, | |
| "loss": 56.0858, | |
| "step": 904 | |
| }, | |
| { | |
| "crossentropy": 2.8873369693756104, | |
| "epoch": 14.009, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.000347900390625, | |
| "learning_rate": 0.0001, | |
| "loss": 57.1081, | |
| "step": 905 | |
| }, | |
| { | |
| "crossentropy": 2.8812583684921265, | |
| "epoch": 14.01, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.00028018951416015626, | |
| "learning_rate": 9.894736842105263e-05, | |
| "loss": 56.194, | |
| "step": 906 | |
| }, | |
| { | |
| "crossentropy": 2.73581063747406, | |
| "epoch": 14.011, | |
| "grad_norm": 0.77734375, | |
| "grad_norm_var": 0.00038547515869140624, | |
| "learning_rate": 9.789473684210526e-05, | |
| "loss": 56.1636, | |
| "step": 907 | |
| }, | |
| { | |
| "crossentropy": 2.7859939336776733, | |
| "epoch": 14.012, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.00038859049479166664, | |
| "learning_rate": 9.68421052631579e-05, | |
| "loss": 56.4249, | |
| "step": 908 | |
| }, | |
| { | |
| "crossentropy": 2.8880062103271484, | |
| "epoch": 14.013, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.00034122467041015626, | |
| "learning_rate": 9.578947368421052e-05, | |
| "loss": 58.172, | |
| "step": 909 | |
| }, | |
| { | |
| "crossentropy": 2.772728443145752, | |
| "epoch": 14.014, | |
| "grad_norm": 0.72265625, | |
| "grad_norm_var": 0.0003218968709309896, | |
| "learning_rate": 9.473684210526316e-05, | |
| "loss": 55.1069, | |
| "step": 910 | |
| }, | |
| { | |
| "crossentropy": 2.77231502532959, | |
| "epoch": 14.015, | |
| "grad_norm": 0.7734375, | |
| "grad_norm_var": 0.0004025777180989583, | |
| "learning_rate": 9.368421052631579e-05, | |
| "loss": 54.3101, | |
| "step": 911 | |
| }, | |
| { | |
| "crossentropy": 2.7247848510742188, | |
| "epoch": 14.016, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.00040791829427083335, | |
| "learning_rate": 9.263157894736843e-05, | |
| "loss": 55.4261, | |
| "step": 912 | |
| }, | |
| { | |
| "crossentropy": 2.907187581062317, | |
| "epoch": 14.017, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.00040486653645833336, | |
| "learning_rate": 9.157894736842105e-05, | |
| "loss": 57.3701, | |
| "step": 913 | |
| }, | |
| { | |
| "crossentropy": 2.816227078437805, | |
| "epoch": 14.018, | |
| "grad_norm": 0.69921875, | |
| "grad_norm_var": 0.000506591796875, | |
| "learning_rate": 9.052631578947369e-05, | |
| "loss": 55.6567, | |
| "step": 914 | |
| }, | |
| { | |
| "crossentropy": 2.8363900184631348, | |
| "epoch": 14.019, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.0005202611287434896, | |
| "learning_rate": 8.947368421052632e-05, | |
| "loss": 57.0809, | |
| "step": 915 | |
| }, | |
| { | |
| "crossentropy": 2.856706380844116, | |
| "epoch": 14.02, | |
| "grad_norm": 0.69921875, | |
| "grad_norm_var": 0.0006189346313476562, | |
| "learning_rate": 8.842105263157894e-05, | |
| "loss": 57.1048, | |
| "step": 916 | |
| }, | |
| { | |
| "crossentropy": 2.8060814142227173, | |
| "epoch": 14.021, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.0006182352701822917, | |
| "learning_rate": 8.736842105263158e-05, | |
| "loss": 55.9918, | |
| "step": 917 | |
| }, | |
| { | |
| "crossentropy": 2.956759810447693, | |
| "epoch": 14.022, | |
| "grad_norm": 0.71875, | |
| "grad_norm_var": 0.0006382624308268229, | |
| "learning_rate": 8.631578947368421e-05, | |
| "loss": 57.2669, | |
| "step": 918 | |
| }, | |
| { | |
| "crossentropy": 2.754731297492981, | |
| "epoch": 14.023, | |
| "grad_norm": 0.7109375, | |
| "grad_norm_var": 0.0006067276000976563, | |
| "learning_rate": 8.526315789473685e-05, | |
| "loss": 53.8387, | |
| "step": 919 | |
| }, | |
| { | |
| "crossentropy": 2.7637789249420166, | |
| "epoch": 14.024, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.0006652196248372396, | |
| "learning_rate": 8.421052631578948e-05, | |
| "loss": 55.6373, | |
| "step": 920 | |
| }, | |
| { | |
| "crossentropy": 2.721121907234192, | |
| "epoch": 14.025, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0006586074829101563, | |
| "learning_rate": 8.315789473684212e-05, | |
| "loss": 54.8625, | |
| "step": 921 | |
| }, | |
| { | |
| "crossentropy": 2.8451952934265137, | |
| "epoch": 14.026, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0005782445271809896, | |
| "learning_rate": 8.210526315789474e-05, | |
| "loss": 57.2868, | |
| "step": 922 | |
| }, | |
| { | |
| "crossentropy": 2.796187996864319, | |
| "epoch": 14.027, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.0004668553670247396, | |
| "learning_rate": 8.105263157894737e-05, | |
| "loss": 56.2234, | |
| "step": 923 | |
| }, | |
| { | |
| "crossentropy": 2.824811339378357, | |
| "epoch": 14.028, | |
| "grad_norm": 0.703125, | |
| "grad_norm_var": 0.000528717041015625, | |
| "learning_rate": 8e-05, | |
| "loss": 56.7024, | |
| "step": 924 | |
| }, | |
| { | |
| "crossentropy": 2.7085684537887573, | |
| "epoch": 14.029, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0005868911743164062, | |
| "learning_rate": 7.894736842105263e-05, | |
| "loss": 56.1883, | |
| "step": 925 | |
| }, | |
| { | |
| "crossentropy": 2.7962204217910767, | |
| "epoch": 14.03, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.0005782445271809896, | |
| "learning_rate": 7.789473684210527e-05, | |
| "loss": 57.1829, | |
| "step": 926 | |
| }, | |
| { | |
| "crossentropy": 2.796944737434387, | |
| "epoch": 14.031, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.0005136489868164063, | |
| "learning_rate": 7.68421052631579e-05, | |
| "loss": 56.7551, | |
| "step": 927 | |
| }, | |
| { | |
| "crossentropy": 2.7813810110092163, | |
| "epoch": 14.032, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.0005212783813476563, | |
| "learning_rate": 7.578947368421054e-05, | |
| "loss": 56.2743, | |
| "step": 928 | |
| }, | |
| { | |
| "crossentropy": 2.8674248456954956, | |
| "epoch": 14.033, | |
| "grad_norm": 0.72265625, | |
| "grad_norm_var": 0.0005212783813476563, | |
| "learning_rate": 7.473684210526316e-05, | |
| "loss": 58.4157, | |
| "step": 929 | |
| }, | |
| { | |
| "crossentropy": 2.7428762912750244, | |
| "epoch": 14.034, | |
| "grad_norm": 0.69140625, | |
| "grad_norm_var": 0.0005609512329101563, | |
| "learning_rate": 7.368421052631579e-05, | |
| "loss": 55.4523, | |
| "step": 930 | |
| }, | |
| { | |
| "crossentropy": 2.7889604568481445, | |
| "epoch": 14.035, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.0005304336547851563, | |
| "learning_rate": 7.263157894736843e-05, | |
| "loss": 56.8295, | |
| "step": 931 | |
| }, | |
| { | |
| "crossentropy": 2.78811252117157, | |
| "epoch": 14.036, | |
| "grad_norm": 0.76171875, | |
| "grad_norm_var": 0.0005039850870768229, | |
| "learning_rate": 7.157894736842105e-05, | |
| "loss": 56.2246, | |
| "step": 932 | |
| }, | |
| { | |
| "crossentropy": 2.797974109649658, | |
| "epoch": 14.037, | |
| "grad_norm": 0.69921875, | |
| "grad_norm_var": 0.0005853652954101562, | |
| "learning_rate": 7.05263157894737e-05, | |
| "loss": 56.1583, | |
| "step": 933 | |
| }, | |
| { | |
| "crossentropy": 2.9307310581207275, | |
| "epoch": 14.038, | |
| "grad_norm": 0.71875, | |
| "grad_norm_var": 0.0005853652954101562, | |
| "learning_rate": 6.947368421052632e-05, | |
| "loss": 57.1375, | |
| "step": 934 | |
| }, | |
| { | |
| "crossentropy": 2.8903194665908813, | |
| "epoch": 14.039, | |
| "grad_norm": 0.72265625, | |
| "grad_norm_var": 0.000559234619140625, | |
| "learning_rate": 6.842105263157896e-05, | |
| "loss": 57.0463, | |
| "step": 935 | |
| }, | |
| { | |
| "crossentropy": 2.865442395210266, | |
| "epoch": 14.04, | |
| "grad_norm": 0.69921875, | |
| "grad_norm_var": 0.0005340576171875, | |
| "learning_rate": 6.736842105263157e-05, | |
| "loss": 56.6052, | |
| "step": 936 | |
| }, | |
| { | |
| "crossentropy": 2.9218584299087524, | |
| "epoch": 14.041, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.0005228678385416667, | |
| "learning_rate": 6.631578947368421e-05, | |
| "loss": 57.2276, | |
| "step": 937 | |
| }, | |
| { | |
| "crossentropy": 2.819886326789856, | |
| "epoch": 14.042, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.0005167007446289062, | |
| "learning_rate": 6.526315789473684e-05, | |
| "loss": 57.4423, | |
| "step": 938 | |
| }, | |
| { | |
| "crossentropy": 2.6768345832824707, | |
| "epoch": 14.043, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.0005228042602539063, | |
| "learning_rate": 6.421052631578946e-05, | |
| "loss": 54.1839, | |
| "step": 939 | |
| }, | |
| { | |
| "crossentropy": 2.7807631492614746, | |
| "epoch": 14.044, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.00047607421875, | |
| "learning_rate": 6.31578947368421e-05, | |
| "loss": 55.5346, | |
| "step": 940 | |
| }, | |
| { | |
| "crossentropy": 2.8685619831085205, | |
| "epoch": 14.045, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.000400543212890625, | |
| "learning_rate": 6.210526315789474e-05, | |
| "loss": 57.8767, | |
| "step": 941 | |
| }, | |
| { | |
| "crossentropy": 2.8203389644622803, | |
| "epoch": 14.046, | |
| "grad_norm": 0.72265625, | |
| "grad_norm_var": 0.00039647420247395835, | |
| "learning_rate": 6.105263157894737e-05, | |
| "loss": 56.1573, | |
| "step": 942 | |
| }, | |
| { | |
| "crossentropy": 2.8836749792099, | |
| "epoch": 14.047, | |
| "grad_norm": 0.71484375, | |
| "grad_norm_var": 0.00034122467041015626, | |
| "learning_rate": 6e-05, | |
| "loss": 56.838, | |
| "step": 943 | |
| }, | |
| { | |
| "crossentropy": 2.9158384799957275, | |
| "epoch": 14.048, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.000382232666015625, | |
| "learning_rate": 5.8947368421052634e-05, | |
| "loss": 58.3756, | |
| "step": 944 | |
| }, | |
| { | |
| "crossentropy": 2.7543821334838867, | |
| "epoch": 14.049, | |
| "grad_norm": 0.6953125, | |
| "grad_norm_var": 0.0004414240519205729, | |
| "learning_rate": 5.789473684210527e-05, | |
| "loss": 55.6096, | |
| "step": 945 | |
| }, | |
| { | |
| "crossentropy": 2.918057680130005, | |
| "epoch": 14.05, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0003985087076822917, | |
| "learning_rate": 5.68421052631579e-05, | |
| "loss": 56.9547, | |
| "step": 946 | |
| }, | |
| { | |
| "crossentropy": 2.8211581707000732, | |
| "epoch": 14.051, | |
| "grad_norm": 0.71484375, | |
| "grad_norm_var": 0.00040868123372395836, | |
| "learning_rate": 5.5789473684210526e-05, | |
| "loss": 55.7815, | |
| "step": 947 | |
| }, | |
| { | |
| "crossentropy": 2.7923535108566284, | |
| "epoch": 14.052, | |
| "grad_norm": 0.72265625, | |
| "grad_norm_var": 0.000323486328125, | |
| "learning_rate": 5.473684210526316e-05, | |
| "loss": 55.9897, | |
| "step": 948 | |
| }, | |
| { | |
| "crossentropy": 2.8318454027175903, | |
| "epoch": 14.053, | |
| "grad_norm": 0.7109375, | |
| "grad_norm_var": 0.00029239654541015627, | |
| "learning_rate": 5.368421052631579e-05, | |
| "loss": 57.443, | |
| "step": 949 | |
| }, | |
| { | |
| "crossentropy": 2.7717995643615723, | |
| "epoch": 14.054, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.000290679931640625, | |
| "learning_rate": 5.263157894736842e-05, | |
| "loss": 55.1007, | |
| "step": 950 | |
| }, | |
| { | |
| "crossentropy": 2.7858974933624268, | |
| "epoch": 14.055, | |
| "grad_norm": 0.71875, | |
| "grad_norm_var": 0.00029341379801432293, | |
| "learning_rate": 5.157894736842105e-05, | |
| "loss": 57.4654, | |
| "step": 951 | |
| }, | |
| { | |
| "crossentropy": 2.9458006620407104, | |
| "epoch": 14.056, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.00025018056233723957, | |
| "learning_rate": 5.052631578947368e-05, | |
| "loss": 57.8048, | |
| "step": 952 | |
| }, | |
| { | |
| "crossentropy": 2.7548274993896484, | |
| "epoch": 14.057, | |
| "grad_norm": 0.72265625, | |
| "grad_norm_var": 0.00025202433268229165, | |
| "learning_rate": 4.9473684210526315e-05, | |
| "loss": 56.2672, | |
| "step": 953 | |
| }, | |
| { | |
| "crossentropy": 2.8330026865005493, | |
| "epoch": 14.058, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.00024763743082682293, | |
| "learning_rate": 4.842105263157895e-05, | |
| "loss": 56.3621, | |
| "step": 954 | |
| }, | |
| { | |
| "crossentropy": 2.775303602218628, | |
| "epoch": 14.059, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.00024051666259765624, | |
| "learning_rate": 4.736842105263158e-05, | |
| "loss": 56.4681, | |
| "step": 955 | |
| }, | |
| { | |
| "crossentropy": 2.84080708026886, | |
| "epoch": 14.06, | |
| "grad_norm": 0.76953125, | |
| "grad_norm_var": 0.0003524144490559896, | |
| "learning_rate": 4.6315789473684214e-05, | |
| "loss": 57.448, | |
| "step": 956 | |
| }, | |
| { | |
| "crossentropy": 2.891412377357483, | |
| "epoch": 14.061, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0003524144490559896, | |
| "learning_rate": 4.5263157894736846e-05, | |
| "loss": 57.3745, | |
| "step": 957 | |
| }, | |
| { | |
| "crossentropy": 2.778434157371521, | |
| "epoch": 14.062, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.00034885406494140626, | |
| "learning_rate": 4.421052631578947e-05, | |
| "loss": 55.2243, | |
| "step": 958 | |
| }, | |
| { | |
| "crossentropy": 2.949416756629944, | |
| "epoch": 14.063, | |
| "grad_norm": 0.7578125, | |
| "grad_norm_var": 0.000376129150390625, | |
| "learning_rate": 4.3157894736842105e-05, | |
| "loss": 59.2026, | |
| "step": 959 | |
| }, | |
| { | |
| "crossentropy": 2.902771472930908, | |
| "epoch": 14.064, | |
| "grad_norm": 0.76171875, | |
| "grad_norm_var": 0.00039005279541015625, | |
| "learning_rate": 4.210526315789474e-05, | |
| "loss": 56.5269, | |
| "step": 960 | |
| }, | |
| { | |
| "crossentropy": 2.698053002357483, | |
| "epoch": 15.001, | |
| "grad_norm": 0.72265625, | |
| "grad_norm_var": 0.0003310521443684896, | |
| "learning_rate": 4.105263157894737e-05, | |
| "loss": 54.2453, | |
| "step": 961 | |
| }, | |
| { | |
| "crossentropy": 2.75066876411438, | |
| "epoch": 15.002, | |
| "grad_norm": 0.7421875, | |
| "grad_norm_var": 0.0003021240234375, | |
| "learning_rate": 4e-05, | |
| "loss": 56.7785, | |
| "step": 962 | |
| }, | |
| { | |
| "crossentropy": 2.881886601448059, | |
| "epoch": 15.003, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.00028781890869140626, | |
| "learning_rate": 3.8947368421052636e-05, | |
| "loss": 56.9223, | |
| "step": 963 | |
| }, | |
| { | |
| "crossentropy": 2.8759313821792603, | |
| "epoch": 15.004, | |
| "grad_norm": 0.72265625, | |
| "grad_norm_var": 0.000254058837890625, | |
| "learning_rate": 3.789473684210527e-05, | |
| "loss": 56.867, | |
| "step": 964 | |
| }, | |
| { | |
| "crossentropy": 2.783510446548462, | |
| "epoch": 15.005, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.00025202433268229165, | |
| "learning_rate": 3.6842105263157895e-05, | |
| "loss": 56.8626, | |
| "step": 965 | |
| }, | |
| { | |
| "crossentropy": 2.778250217437744, | |
| "epoch": 15.006, | |
| "grad_norm": 0.6953125, | |
| "grad_norm_var": 0.00035196940104166666, | |
| "learning_rate": 3.578947368421053e-05, | |
| "loss": 54.6814, | |
| "step": 966 | |
| }, | |
| { | |
| "crossentropy": 2.755368709564209, | |
| "epoch": 15.007, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.00035196940104166666, | |
| "learning_rate": 3.473684210526316e-05, | |
| "loss": 56.5905, | |
| "step": 967 | |
| }, | |
| { | |
| "crossentropy": 2.8220062255859375, | |
| "epoch": 15.008, | |
| "grad_norm": 0.72265625, | |
| "grad_norm_var": 0.00035196940104166666, | |
| "learning_rate": 3.3684210526315786e-05, | |
| "loss": 56.3069, | |
| "step": 968 | |
| }, | |
| { | |
| "crossentropy": 2.8596099615097046, | |
| "epoch": 15.009, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.0003598531087239583, | |
| "learning_rate": 3.263157894736842e-05, | |
| "loss": 56.5936, | |
| "step": 969 | |
| }, | |
| { | |
| "crossentropy": 2.797579288482666, | |
| "epoch": 15.01, | |
| "grad_norm": 0.71875, | |
| "grad_norm_var": 0.00037988026936848957, | |
| "learning_rate": 3.157894736842105e-05, | |
| "loss": 55.846, | |
| "step": 970 | |
| }, | |
| { | |
| "crossentropy": 2.6855785846710205, | |
| "epoch": 15.011, | |
| "grad_norm": 0.72265625, | |
| "grad_norm_var": 0.0003142674763997396, | |
| "learning_rate": 3.0526315789473684e-05, | |
| "loss": 55.2178, | |
| "step": 971 | |
| }, | |
| { | |
| "crossentropy": 2.7928699254989624, | |
| "epoch": 15.012, | |
| "grad_norm": 0.6953125, | |
| "grad_norm_var": 0.0004012425740559896, | |
| "learning_rate": 2.9473684210526317e-05, | |
| "loss": 54.3003, | |
| "step": 972 | |
| }, | |
| { | |
| "crossentropy": 2.7457377910614014, | |
| "epoch": 15.013, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.0004042943318684896, | |
| "learning_rate": 2.842105263157895e-05, | |
| "loss": 54.456, | |
| "step": 973 | |
| }, | |
| { | |
| "crossentropy": 2.825121760368347, | |
| "epoch": 15.014, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.0003916422526041667, | |
| "learning_rate": 2.736842105263158e-05, | |
| "loss": 56.7055, | |
| "step": 974 | |
| }, | |
| { | |
| "crossentropy": 2.6830825805664062, | |
| "epoch": 15.015, | |
| "grad_norm": 0.75390625, | |
| "grad_norm_var": 0.000363922119140625, | |
| "learning_rate": 2.631578947368421e-05, | |
| "loss": 55.219, | |
| "step": 975 | |
| }, | |
| { | |
| "crossentropy": 2.712595582008362, | |
| "epoch": 15.016, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.00029850006103515625, | |
| "learning_rate": 2.526315789473684e-05, | |
| "loss": 55.8602, | |
| "step": 976 | |
| }, | |
| { | |
| "crossentropy": 2.9039396047592163, | |
| "epoch": 15.017, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.000296783447265625, | |
| "learning_rate": 2.4210526315789474e-05, | |
| "loss": 56.9117, | |
| "step": 977 | |
| }, | |
| { | |
| "crossentropy": 2.8268353939056396, | |
| "epoch": 15.018, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0003692626953125, | |
| "learning_rate": 2.3157894736842107e-05, | |
| "loss": 55.0809, | |
| "step": 978 | |
| }, | |
| { | |
| "crossentropy": 2.846464157104492, | |
| "epoch": 15.019, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.00037174224853515626, | |
| "learning_rate": 2.2105263157894736e-05, | |
| "loss": 57.271, | |
| "step": 979 | |
| }, | |
| { | |
| "crossentropy": 2.920137643814087, | |
| "epoch": 15.02, | |
| "grad_norm": 0.72265625, | |
| "grad_norm_var": 0.00037174224853515626, | |
| "learning_rate": 2.105263157894737e-05, | |
| "loss": 58.0873, | |
| "step": 980 | |
| }, | |
| { | |
| "crossentropy": 2.5910059213638306, | |
| "epoch": 15.021, | |
| "grad_norm": 0.703125, | |
| "grad_norm_var": 0.00040461222330729165, | |
| "learning_rate": 2e-05, | |
| "loss": 54.132, | |
| "step": 981 | |
| }, | |
| { | |
| "crossentropy": 2.8294392824172974, | |
| "epoch": 15.022, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.0003458658854166667, | |
| "learning_rate": 1.8947368421052634e-05, | |
| "loss": 57.1192, | |
| "step": 982 | |
| }, | |
| { | |
| "crossentropy": 2.8623571395874023, | |
| "epoch": 15.023, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.00035578409830729166, | |
| "learning_rate": 1.7894736842105264e-05, | |
| "loss": 56.702, | |
| "step": 983 | |
| }, | |
| { | |
| "crossentropy": 2.8070725202560425, | |
| "epoch": 15.024, | |
| "grad_norm": 0.69921875, | |
| "grad_norm_var": 0.0004221598307291667, | |
| "learning_rate": 1.6842105263157893e-05, | |
| "loss": 55.9888, | |
| "step": 984 | |
| }, | |
| { | |
| "crossentropy": 2.7253717184066772, | |
| "epoch": 15.025, | |
| "grad_norm": 0.73046875, | |
| "grad_norm_var": 0.00042057037353515625, | |
| "learning_rate": 1.5789473684210526e-05, | |
| "loss": 56.3073, | |
| "step": 985 | |
| }, | |
| { | |
| "crossentropy": 2.8384543657302856, | |
| "epoch": 15.026, | |
| "grad_norm": 0.71875, | |
| "grad_norm_var": 0.00042057037353515625, | |
| "learning_rate": 1.4736842105263159e-05, | |
| "loss": 56.2086, | |
| "step": 986 | |
| }, | |
| { | |
| "crossentropy": 2.8448028564453125, | |
| "epoch": 15.027, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.00041681925455729166, | |
| "learning_rate": 1.368421052631579e-05, | |
| "loss": 57.0379, | |
| "step": 987 | |
| }, | |
| { | |
| "crossentropy": 2.7504059076309204, | |
| "epoch": 15.028, | |
| "grad_norm": 0.75, | |
| "grad_norm_var": 0.00033671061197916666, | |
| "learning_rate": 1.263157894736842e-05, | |
| "loss": 56.9316, | |
| "step": 988 | |
| }, | |
| { | |
| "crossentropy": 2.8394349813461304, | |
| "epoch": 15.029, | |
| "grad_norm": 0.765625, | |
| "grad_norm_var": 0.0003941218058268229, | |
| "learning_rate": 1.1578947368421053e-05, | |
| "loss": 57.3295, | |
| "step": 989 | |
| }, | |
| { | |
| "crossentropy": 2.8349748849868774, | |
| "epoch": 15.03, | |
| "grad_norm": 0.70703125, | |
| "grad_norm_var": 0.0004261652628580729, | |
| "learning_rate": 1.0526315789473684e-05, | |
| "loss": 56.5169, | |
| "step": 990 | |
| }, | |
| { | |
| "crossentropy": 2.8274471759796143, | |
| "epoch": 15.031, | |
| "grad_norm": 0.74609375, | |
| "grad_norm_var": 0.0004093805948893229, | |
| "learning_rate": 9.473684210526317e-06, | |
| "loss": 56.015, | |
| "step": 991 | |
| }, | |
| { | |
| "crossentropy": 2.8977288007736206, | |
| "epoch": 15.032, | |
| "grad_norm": 0.72265625, | |
| "grad_norm_var": 0.00041681925455729166, | |
| "learning_rate": 8.421052631578947e-06, | |
| "loss": 56.5352, | |
| "step": 992 | |
| }, | |
| { | |
| "crossentropy": 2.9102202653884888, | |
| "epoch": 15.033, | |
| "grad_norm": 0.71875, | |
| "grad_norm_var": 0.0004290262858072917, | |
| "learning_rate": 7.368421052631579e-06, | |
| "loss": 57.9547, | |
| "step": 993 | |
| }, | |
| { | |
| "crossentropy": 2.9221819639205933, | |
| "epoch": 15.034, | |
| "grad_norm": 0.734375, | |
| "grad_norm_var": 0.0003496805826822917, | |
| "learning_rate": 6.31578947368421e-06, | |
| "loss": 56.7362, | |
| "step": 994 | |
| }, | |
| { | |
| "crossentropy": 2.800140857696533, | |
| "epoch": 15.035, | |
| "grad_norm": 0.73828125, | |
| "grad_norm_var": 0.0003496805826822917, | |
| "learning_rate": 5.263157894736842e-06, | |
| "loss": 56.3905, | |
| "step": 995 | |
| }, | |
| { | |
| "crossentropy": 2.6751104593276978, | |
| "epoch": 15.036, | |
| "grad_norm": 0.7109375, | |
| "grad_norm_var": 0.00036970774332682293, | |
| "learning_rate": 4.210526315789473e-06, | |
| "loss": 55.695, | |
| "step": 996 | |
| }, | |
| { | |
| "crossentropy": 2.7358882427215576, | |
| "epoch": 15.037, | |
| "grad_norm": 0.72265625, | |
| "grad_norm_var": 0.0003255208333333333, | |
| "learning_rate": 3.157894736842105e-06, | |
| "loss": 57.0907, | |
| "step": 997 | |
| }, | |
| { | |
| "crossentropy": 2.7598263025283813, | |
| "epoch": 15.038, | |
| "grad_norm": 0.7109375, | |
| "grad_norm_var": 0.0003191630045572917, | |
| "learning_rate": 2.1052631578947366e-06, | |
| "loss": 55.385, | |
| "step": 998 | |
| }, | |
| { | |
| "crossentropy": 2.8410521745681763, | |
| "epoch": 15.039, | |
| "grad_norm": 0.6953125, | |
| "grad_norm_var": 0.00035800933837890623, | |
| "learning_rate": 1.0526315789473683e-06, | |
| "loss": 57.0506, | |
| "step": 999 | |
| }, | |
| { | |
| "crossentropy": 2.7283369302749634, | |
| "epoch": 15.04, | |
| "grad_norm": 0.7265625, | |
| "grad_norm_var": 0.000311279296875, | |
| "learning_rate": 0.0, | |
| "loss": 57.0943, | |
| "step": 1000 | |
| } | |
| ], | |
| "logging_steps": 1, | |
| "max_steps": 1000, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 9223372036854775807, | |
| "save_steps": 250, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 1.033762523578368e+18, | |
| "train_batch_size": 16, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |
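
For reference, a minimal sketch of how a state file like the one above can be inspected offline. This assumes the JSON is a Hugging Face `trainer_state.json` saved alongside a checkpoint; the filename and the plotting choices here are illustrative assumptions, not part of the log itself.

```python
# Minimal sketch: load a trainer_state.json and plot the logged curves.
# The file path is a hypothetical; adjust to the actual checkpoint directory.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:  # hypothetical path
    state = json.load(f)

# Keep only per-step training entries (each carries a "loss" key).
history = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in history]

fig, (ax_loss, ax_lr) = plt.subplots(2, 1, sharex=True, figsize=(8, 6))
ax_loss.plot(steps, [e["loss"] for e in history], label="loss")
ax_loss.set_ylabel("loss")
ax_loss.legend()

ax_lr.plot(steps, [e["learning_rate"] for e in history], label="learning_rate")
ax_lr.set_ylabel("learning rate")
ax_lr.set_xlabel("global step")
ax_lr.legend()

fig.tight_layout()
plt.show()
```

Read this way, the tail of the log is consistent with a linear decay schedule: `learning_rate` falls by roughly 1.05e-06 per step over the final entries and reaches exactly 0.0 at step 1000, matching `max_steps: 1000`, while `should_training_stop: true` records that the run terminated there.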