|
{ |
|
"best_metric": 6.406748294830322, |
|
"best_model_checkpoint": "HBERTv1_emb_compress_48_L12_H64_A2/checkpoint-290000", |
|
"epoch": 5.0, |
|
"eval_steps": 10000, |
|
"global_step": 305145, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 5.000000000000001e-07, |
|
"loss": 10.3179, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 1.0000000000000002e-06, |
|
"loss": 10.2814, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 1.5e-06, |
|
"loss": 10.2576, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 2.0000000000000003e-06, |
|
"loss": 10.2271, |
|
"step": 2000 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 2.5e-06, |
|
"loss": 10.1895, |
|
"step": 2500 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 3e-06, |
|
"loss": 10.1441, |
|
"step": 3000 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 3.5e-06, |
|
"loss": 10.0896, |
|
"step": 3500 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 4.000000000000001e-06, |
|
"loss": 10.0257, |
|
"step": 4000 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 4.5e-06, |
|
"loss": 9.9517, |
|
"step": 4500 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 5e-06, |
|
"loss": 9.8689, |
|
"step": 5000 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 5.500000000000001e-06, |
|
"loss": 9.7764, |
|
"step": 5500 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 6e-06, |
|
"loss": 9.6759, |
|
"step": 6000 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 6.5000000000000004e-06, |
|
"loss": 9.5681, |
|
"step": 6500 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 7e-06, |
|
"loss": 9.4531, |
|
"step": 7000 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.500000000000001e-06, |
|
"loss": 9.3312, |
|
"step": 7500 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 8.000000000000001e-06, |
|
"loss": 9.2039, |
|
"step": 8000 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 8.5e-06, |
|
"loss": 9.0723, |
|
"step": 8500 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 9e-06, |
|
"loss": 8.9366, |
|
"step": 9000 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 9.5e-06, |
|
"loss": 8.7964, |
|
"step": 9500 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1e-05, |
|
"loss": 8.6554, |
|
"step": 10000 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"eval_accuracy": 0.048290781267576265, |
|
"eval_loss": 8.584630012512207, |
|
"eval_runtime": 600.2506, |
|
"eval_samples_per_second": 513.712, |
|
"eval_steps_per_second": 5.353, |
|
"step": 10000 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 9.983059174304157e-06, |
|
"loss": 8.5166, |
|
"step": 10500 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 9.966118348608312e-06, |
|
"loss": 8.3802, |
|
"step": 11000 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 9.949177522912468e-06, |
|
"loss": 8.2563, |
|
"step": 11500 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 9.932236697216624e-06, |
|
"loss": 8.1368, |
|
"step": 12000 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 9.91529587152078e-06, |
|
"loss": 8.0261, |
|
"step": 12500 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 9.898355045824935e-06, |
|
"loss": 7.9253, |
|
"step": 13000 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 9.881414220129089e-06, |
|
"loss": 7.8284, |
|
"step": 13500 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 9.864473394433246e-06, |
|
"loss": 7.7407, |
|
"step": 14000 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 9.847532568737402e-06, |
|
"loss": 7.6611, |
|
"step": 14500 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 9.830591743041556e-06, |
|
"loss": 7.59, |
|
"step": 15000 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 9.813650917345712e-06, |
|
"loss": 7.5243, |
|
"step": 15500 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 9.796710091649867e-06, |
|
"loss": 7.4708, |
|
"step": 16000 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 9.779769265954023e-06, |
|
"loss": 7.4185, |
|
"step": 16500 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 9.762828440258179e-06, |
|
"loss": 7.3726, |
|
"step": 17000 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 9.745887614562334e-06, |
|
"loss": 7.3442, |
|
"step": 17500 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 9.72894678886649e-06, |
|
"loss": 7.3171, |
|
"step": 18000 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 9.712005963170646e-06, |
|
"loss": 7.2839, |
|
"step": 18500 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 9.695065137474802e-06, |
|
"loss": 7.2703, |
|
"step": 19000 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 9.678124311778957e-06, |
|
"loss": 7.2537, |
|
"step": 19500 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 9.661183486083113e-06, |
|
"loss": 7.2331, |
|
"step": 20000 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"eval_accuracy": 0.05423351427871706, |
|
"eval_loss": 7.227975845336914, |
|
"eval_runtime": 605.972, |
|
"eval_samples_per_second": 508.862, |
|
"eval_steps_per_second": 5.302, |
|
"step": 20000 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 9.644242660387269e-06, |
|
"loss": 7.2207, |
|
"step": 20500 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 9.627301834691424e-06, |
|
"loss": 7.2044, |
|
"step": 21000 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 9.610361008995578e-06, |
|
"loss": 7.1869, |
|
"step": 21500 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 9.593420183299736e-06, |
|
"loss": 7.1736, |
|
"step": 22000 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 9.576479357603891e-06, |
|
"loss": 7.1594, |
|
"step": 22500 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 9.559538531908045e-06, |
|
"loss": 7.1396, |
|
"step": 23000 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 9.542597706212201e-06, |
|
"loss": 7.1306, |
|
"step": 23500 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 9.525656880516357e-06, |
|
"loss": 7.1185, |
|
"step": 24000 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 9.508716054820512e-06, |
|
"loss": 7.1029, |
|
"step": 24500 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 9.491775229124668e-06, |
|
"loss": 7.0927, |
|
"step": 25000 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 9.474834403428824e-06, |
|
"loss": 7.0818, |
|
"step": 25500 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 9.45789357773298e-06, |
|
"loss": 7.0696, |
|
"step": 26000 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 9.440952752037135e-06, |
|
"loss": 7.0553, |
|
"step": 26500 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 9.424011926341291e-06, |
|
"loss": 7.0496, |
|
"step": 27000 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 9.407071100645447e-06, |
|
"loss": 7.038, |
|
"step": 27500 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 9.390130274949602e-06, |
|
"loss": 7.0289, |
|
"step": 28000 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 9.373189449253758e-06, |
|
"loss": 7.0244, |
|
"step": 28500 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 9.356248623557914e-06, |
|
"loss": 7.0145, |
|
"step": 29000 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 9.339307797862068e-06, |
|
"loss": 7.0029, |
|
"step": 29500 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 9.322366972166225e-06, |
|
"loss": 7.0014, |
|
"step": 30000 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"eval_accuracy": 0.06774155914288765, |
|
"eval_loss": 6.992653846740723, |
|
"eval_runtime": 600.1356, |
|
"eval_samples_per_second": 513.811, |
|
"eval_steps_per_second": 5.354, |
|
"step": 30000 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 9.305426146470379e-06, |
|
"loss": 6.9903, |
|
"step": 30500 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 9.288485320774535e-06, |
|
"loss": 6.9807, |
|
"step": 31000 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 9.27154449507869e-06, |
|
"loss": 6.9704, |
|
"step": 31500 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 9.254603669382846e-06, |
|
"loss": 6.9613, |
|
"step": 32000 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 9.237662843687002e-06, |
|
"loss": 6.9601, |
|
"step": 32500 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 9.220722017991158e-06, |
|
"loss": 6.9596, |
|
"step": 33000 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 9.203781192295313e-06, |
|
"loss": 6.9484, |
|
"step": 33500 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 9.186840366599469e-06, |
|
"loss": 6.9372, |
|
"step": 34000 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 9.169899540903625e-06, |
|
"loss": 6.932, |
|
"step": 34500 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 9.15295871520778e-06, |
|
"loss": 6.925, |
|
"step": 35000 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 9.136017889511936e-06, |
|
"loss": 6.9252, |
|
"step": 35500 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 9.11907706381609e-06, |
|
"loss": 6.9177, |
|
"step": 36000 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 9.102136238120247e-06, |
|
"loss": 6.906, |
|
"step": 36500 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 9.085195412424403e-06, |
|
"loss": 6.8972, |
|
"step": 37000 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 9.068254586728557e-06, |
|
"loss": 6.8942, |
|
"step": 37500 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 9.051313761032714e-06, |
|
"loss": 6.8912, |
|
"step": 38000 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 9.034372935336868e-06, |
|
"loss": 6.8861, |
|
"step": 38500 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 9.017432109641024e-06, |
|
"loss": 6.8749, |
|
"step": 39000 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 9.00049128394518e-06, |
|
"loss": 6.8718, |
|
"step": 39500 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 8.983550458249335e-06, |
|
"loss": 6.8699, |
|
"step": 40000 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"eval_accuracy": 0.08556003293560886, |
|
"eval_loss": 6.8636794090271, |
|
"eval_runtime": 598.5444, |
|
"eval_samples_per_second": 515.176, |
|
"eval_steps_per_second": 5.368, |
|
"step": 40000 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 8.966609632553491e-06, |
|
"loss": 6.8626, |
|
"step": 40500 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 8.949668806857647e-06, |
|
"loss": 6.8559, |
|
"step": 41000 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 8.932727981161803e-06, |
|
"loss": 6.8509, |
|
"step": 41500 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 8.915787155465958e-06, |
|
"loss": 6.844, |
|
"step": 42000 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 8.898846329770114e-06, |
|
"loss": 6.8383, |
|
"step": 42500 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 8.88190550407427e-06, |
|
"loss": 6.8398, |
|
"step": 43000 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 8.864964678378425e-06, |
|
"loss": 6.8233, |
|
"step": 43500 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 8.84802385268258e-06, |
|
"loss": 6.8242, |
|
"step": 44000 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 8.831083026986737e-06, |
|
"loss": 6.82, |
|
"step": 44500 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 8.814142201290892e-06, |
|
"loss": 6.8142, |
|
"step": 45000 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 8.797201375595046e-06, |
|
"loss": 6.8122, |
|
"step": 45500 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 8.780260549899204e-06, |
|
"loss": 6.8017, |
|
"step": 46000 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 8.763319724203358e-06, |
|
"loss": 6.8021, |
|
"step": 46500 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 8.746378898507513e-06, |
|
"loss": 6.8006, |
|
"step": 47000 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 8.729438072811671e-06, |
|
"loss": 6.7897, |
|
"step": 47500 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 8.712497247115825e-06, |
|
"loss": 6.7932, |
|
"step": 48000 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 8.69555642141998e-06, |
|
"loss": 6.7881, |
|
"step": 48500 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 8.678615595724136e-06, |
|
"loss": 6.7793, |
|
"step": 49000 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 8.661674770028292e-06, |
|
"loss": 6.7823, |
|
"step": 49500 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 8.644733944332448e-06, |
|
"loss": 6.7777, |
|
"step": 50000 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"eval_accuracy": 0.09222163830820224, |
|
"eval_loss": 6.772622108459473, |
|
"eval_runtime": 600.0695, |
|
"eval_samples_per_second": 513.867, |
|
"eval_steps_per_second": 5.354, |
|
"step": 50000 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 8.627793118636603e-06, |
|
"loss": 6.7669, |
|
"step": 50500 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 8.610852292940759e-06, |
|
"loss": 6.7646, |
|
"step": 51000 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 8.593911467244915e-06, |
|
"loss": 6.7643, |
|
"step": 51500 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 8.576970641549069e-06, |
|
"loss": 6.7579, |
|
"step": 52000 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 8.560029815853226e-06, |
|
"loss": 6.753, |
|
"step": 52500 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 8.543088990157382e-06, |
|
"loss": 6.7522, |
|
"step": 53000 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 8.526148164461536e-06, |
|
"loss": 6.7583, |
|
"step": 53500 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 8.509207338765693e-06, |
|
"loss": 6.747, |
|
"step": 54000 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 8.492266513069847e-06, |
|
"loss": 6.7433, |
|
"step": 54500 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 8.475325687374003e-06, |
|
"loss": 6.7394, |
|
"step": 55000 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 8.45838486167816e-06, |
|
"loss": 6.7346, |
|
"step": 55500 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 8.441444035982314e-06, |
|
"loss": 6.7408, |
|
"step": 56000 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 8.42450321028647e-06, |
|
"loss": 6.7341, |
|
"step": 56500 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 8.407562384590626e-06, |
|
"loss": 6.7339, |
|
"step": 57000 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 8.390621558894781e-06, |
|
"loss": 6.7284, |
|
"step": 57500 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 8.373680733198937e-06, |
|
"loss": 6.732, |
|
"step": 58000 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 8.356739907503093e-06, |
|
"loss": 6.7212, |
|
"step": 58500 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 8.339799081807248e-06, |
|
"loss": 6.7156, |
|
"step": 59000 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 8.322858256111404e-06, |
|
"loss": 6.7182, |
|
"step": 59500 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 8.305917430415558e-06, |
|
"loss": 6.7091, |
|
"step": 60000 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"eval_accuracy": 0.09736718543824835, |
|
"eval_loss": 6.7100605964660645, |
|
"eval_runtime": 601.1731, |
|
"eval_samples_per_second": 512.924, |
|
"eval_steps_per_second": 5.345, |
|
"step": 60000 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 8.288976604719715e-06, |
|
"loss": 6.7102, |
|
"step": 60500 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 8.272035779023871e-06, |
|
"loss": 6.7105, |
|
"step": 61000 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"learning_rate": 8.255094953328025e-06, |
|
"loss": 6.7048, |
|
"step": 61500 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"learning_rate": 8.238154127632183e-06, |
|
"loss": 6.7056, |
|
"step": 62000 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"learning_rate": 8.221213301936337e-06, |
|
"loss": 6.6984, |
|
"step": 62500 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"learning_rate": 8.204272476240492e-06, |
|
"loss": 6.6931, |
|
"step": 63000 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"learning_rate": 8.18733165054465e-06, |
|
"loss": 6.6938, |
|
"step": 63500 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"learning_rate": 8.170390824848804e-06, |
|
"loss": 6.6919, |
|
"step": 64000 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"learning_rate": 8.15344999915296e-06, |
|
"loss": 6.6935, |
|
"step": 64500 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"learning_rate": 8.136509173457115e-06, |
|
"loss": 6.6856, |
|
"step": 65000 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"learning_rate": 8.11956834776127e-06, |
|
"loss": 6.6865, |
|
"step": 65500 |
|
}, |
|
{ |
|
"epoch": 1.08, |
|
"learning_rate": 8.102627522065426e-06, |
|
"loss": 6.6818, |
|
"step": 66000 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"learning_rate": 8.085686696369582e-06, |
|
"loss": 6.6852, |
|
"step": 66500 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"learning_rate": 8.068745870673738e-06, |
|
"loss": 6.6818, |
|
"step": 67000 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"learning_rate": 8.051805044977893e-06, |
|
"loss": 6.6742, |
|
"step": 67500 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"learning_rate": 8.034864219282047e-06, |
|
"loss": 6.6729, |
|
"step": 68000 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"learning_rate": 8.017923393586205e-06, |
|
"loss": 6.6691, |
|
"step": 68500 |
|
}, |
|
{ |
|
"epoch": 1.13, |
|
"learning_rate": 8.00098256789036e-06, |
|
"loss": 6.6618, |
|
"step": 69000 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"learning_rate": 7.984041742194514e-06, |
|
"loss": 6.6685, |
|
"step": 69500 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"learning_rate": 7.967100916498672e-06, |
|
"loss": 6.6626, |
|
"step": 70000 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"eval_accuracy": 0.1015238380889028, |
|
"eval_loss": 6.661967754364014, |
|
"eval_runtime": 605.0085, |
|
"eval_samples_per_second": 509.672, |
|
"eval_steps_per_second": 5.311, |
|
"step": 70000 |
|
}, |
|
{ |
|
"epoch": 1.16, |
|
"learning_rate": 7.950160090802826e-06, |
|
"loss": 6.6613, |
|
"step": 70500 |
|
}, |
|
{ |
|
"epoch": 1.16, |
|
"learning_rate": 7.933219265106982e-06, |
|
"loss": 6.6616, |
|
"step": 71000 |
|
}, |
|
{ |
|
"epoch": 1.17, |
|
"learning_rate": 7.916278439411137e-06, |
|
"loss": 6.6625, |
|
"step": 71500 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"learning_rate": 7.899337613715293e-06, |
|
"loss": 6.6572, |
|
"step": 72000 |
|
}, |
|
{ |
|
"epoch": 1.19, |
|
"learning_rate": 7.882396788019449e-06, |
|
"loss": 6.652, |
|
"step": 72500 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"learning_rate": 7.865455962323604e-06, |
|
"loss": 6.6498, |
|
"step": 73000 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"learning_rate": 7.84851513662776e-06, |
|
"loss": 6.6477, |
|
"step": 73500 |
|
}, |
|
{ |
|
"epoch": 1.21, |
|
"learning_rate": 7.831574310931916e-06, |
|
"loss": 6.6456, |
|
"step": 74000 |
|
}, |
|
{ |
|
"epoch": 1.22, |
|
"learning_rate": 7.814633485236071e-06, |
|
"loss": 6.6418, |
|
"step": 74500 |
|
}, |
|
{ |
|
"epoch": 1.23, |
|
"learning_rate": 7.797692659540227e-06, |
|
"loss": 6.6447, |
|
"step": 75000 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"learning_rate": 7.780751833844383e-06, |
|
"loss": 6.6428, |
|
"step": 75500 |
|
}, |
|
{ |
|
"epoch": 1.25, |
|
"learning_rate": 7.763811008148537e-06, |
|
"loss": 6.635, |
|
"step": 76000 |
|
}, |
|
{ |
|
"epoch": 1.25, |
|
"learning_rate": 7.746870182452694e-06, |
|
"loss": 6.6335, |
|
"step": 76500 |
|
}, |
|
{ |
|
"epoch": 1.26, |
|
"learning_rate": 7.729929356756848e-06, |
|
"loss": 6.637, |
|
"step": 77000 |
|
}, |
|
{ |
|
"epoch": 1.27, |
|
"learning_rate": 7.712988531061004e-06, |
|
"loss": 6.6324, |
|
"step": 77500 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"learning_rate": 7.696047705365161e-06, |
|
"loss": 6.6344, |
|
"step": 78000 |
|
}, |
|
{ |
|
"epoch": 1.29, |
|
"learning_rate": 7.679106879669315e-06, |
|
"loss": 6.629, |
|
"step": 78500 |
|
}, |
|
{ |
|
"epoch": 1.29, |
|
"learning_rate": 7.662166053973471e-06, |
|
"loss": 6.6266, |
|
"step": 79000 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"learning_rate": 7.645225228277627e-06, |
|
"loss": 6.6295, |
|
"step": 79500 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"learning_rate": 7.628284402581782e-06, |
|
"loss": 6.6279, |
|
"step": 80000 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"eval_accuracy": 0.10400515267929009, |
|
"eval_loss": 6.625459671020508, |
|
"eval_runtime": 603.1684, |
|
"eval_samples_per_second": 511.227, |
|
"eval_steps_per_second": 5.327, |
|
"step": 80000 |
|
}, |
|
{ |
|
"epoch": 1.32, |
|
"learning_rate": 7.611343576885938e-06, |
|
"loss": 6.6241, |
|
"step": 80500 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"learning_rate": 7.5944027511900945e-06, |
|
"loss": 6.6207, |
|
"step": 81000 |
|
}, |
|
{ |
|
"epoch": 1.34, |
|
"learning_rate": 7.577461925494249e-06, |
|
"loss": 6.6216, |
|
"step": 81500 |
|
}, |
|
{ |
|
"epoch": 1.34, |
|
"learning_rate": 7.560521099798405e-06, |
|
"loss": 6.619, |
|
"step": 82000 |
|
}, |
|
{ |
|
"epoch": 1.35, |
|
"learning_rate": 7.54358027410256e-06, |
|
"loss": 6.6153, |
|
"step": 82500 |
|
}, |
|
{ |
|
"epoch": 1.36, |
|
"learning_rate": 7.5266394484067164e-06, |
|
"loss": 6.6181, |
|
"step": 83000 |
|
}, |
|
{ |
|
"epoch": 1.37, |
|
"learning_rate": 7.509698622710871e-06, |
|
"loss": 6.6137, |
|
"step": 83500 |
|
}, |
|
{ |
|
"epoch": 1.38, |
|
"learning_rate": 7.492757797015027e-06, |
|
"loss": 6.6157, |
|
"step": 84000 |
|
}, |
|
{ |
|
"epoch": 1.38, |
|
"learning_rate": 7.475816971319183e-06, |
|
"loss": 6.6114, |
|
"step": 84500 |
|
}, |
|
{ |
|
"epoch": 1.39, |
|
"learning_rate": 7.458876145623338e-06, |
|
"loss": 6.6068, |
|
"step": 85000 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"learning_rate": 7.441935319927493e-06, |
|
"loss": 6.6081, |
|
"step": 85500 |
|
}, |
|
{ |
|
"epoch": 1.41, |
|
"learning_rate": 7.42499449423165e-06, |
|
"loss": 6.6026, |
|
"step": 86000 |
|
}, |
|
{ |
|
"epoch": 1.42, |
|
"learning_rate": 7.4080536685358054e-06, |
|
"loss": 6.6031, |
|
"step": 86500 |
|
}, |
|
{ |
|
"epoch": 1.43, |
|
"learning_rate": 7.39111284283996e-06, |
|
"loss": 6.6086, |
|
"step": 87000 |
|
}, |
|
{ |
|
"epoch": 1.43, |
|
"learning_rate": 7.374172017144117e-06, |
|
"loss": 6.6048, |
|
"step": 87500 |
|
}, |
|
{ |
|
"epoch": 1.44, |
|
"learning_rate": 7.357231191448272e-06, |
|
"loss": 6.6031, |
|
"step": 88000 |
|
}, |
|
{ |
|
"epoch": 1.45, |
|
"learning_rate": 7.340290365752427e-06, |
|
"loss": 6.5961, |
|
"step": 88500 |
|
}, |
|
{ |
|
"epoch": 1.46, |
|
"learning_rate": 7.323349540056584e-06, |
|
"loss": 6.5977, |
|
"step": 89000 |
|
}, |
|
{ |
|
"epoch": 1.47, |
|
"learning_rate": 7.306408714360739e-06, |
|
"loss": 6.598, |
|
"step": 89500 |
|
}, |
|
{ |
|
"epoch": 1.47, |
|
"learning_rate": 7.2894678886648936e-06, |
|
"loss": 6.5917, |
|
"step": 90000 |
|
}, |
|
{ |
|
"epoch": 1.47, |
|
"eval_accuracy": 0.10684875378443538, |
|
"eval_loss": 6.594837188720703, |
|
"eval_runtime": 601.7144, |
|
"eval_samples_per_second": 512.462, |
|
"eval_steps_per_second": 5.34, |
|
"step": 90000 |
|
}, |
|
{ |
|
"epoch": 1.48, |
|
"learning_rate": 7.272527062969049e-06, |
|
"loss": 6.5912, |
|
"step": 90500 |
|
}, |
|
{ |
|
"epoch": 1.49, |
|
"learning_rate": 7.255586237273206e-06, |
|
"loss": 6.5903, |
|
"step": 91000 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 7.238645411577361e-06, |
|
"loss": 6.5949, |
|
"step": 91500 |
|
}, |
|
{ |
|
"epoch": 1.51, |
|
"learning_rate": 7.221704585881516e-06, |
|
"loss": 6.5932, |
|
"step": 92000 |
|
}, |
|
{ |
|
"epoch": 1.52, |
|
"learning_rate": 7.204763760185672e-06, |
|
"loss": 6.5887, |
|
"step": 92500 |
|
}, |
|
{ |
|
"epoch": 1.52, |
|
"learning_rate": 7.187822934489828e-06, |
|
"loss": 6.5879, |
|
"step": 93000 |
|
}, |
|
{ |
|
"epoch": 1.53, |
|
"learning_rate": 7.1708821087939826e-06, |
|
"loss": 6.5892, |
|
"step": 93500 |
|
}, |
|
{ |
|
"epoch": 1.54, |
|
"learning_rate": 7.153941283098139e-06, |
|
"loss": 6.5873, |
|
"step": 94000 |
|
}, |
|
{ |
|
"epoch": 1.55, |
|
"learning_rate": 7.137000457402295e-06, |
|
"loss": 6.5908, |
|
"step": 94500 |
|
}, |
|
{ |
|
"epoch": 1.56, |
|
"learning_rate": 7.12005963170645e-06, |
|
"loss": 6.5839, |
|
"step": 95000 |
|
}, |
|
{ |
|
"epoch": 1.56, |
|
"learning_rate": 7.103118806010606e-06, |
|
"loss": 6.5803, |
|
"step": 95500 |
|
}, |
|
{ |
|
"epoch": 1.57, |
|
"learning_rate": 7.086177980314761e-06, |
|
"loss": 6.5832, |
|
"step": 96000 |
|
}, |
|
{ |
|
"epoch": 1.58, |
|
"learning_rate": 7.069237154618917e-06, |
|
"loss": 6.5801, |
|
"step": 96500 |
|
}, |
|
{ |
|
"epoch": 1.59, |
|
"learning_rate": 7.052296328923072e-06, |
|
"loss": 6.5743, |
|
"step": 97000 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"learning_rate": 7.035355503227228e-06, |
|
"loss": 6.576, |
|
"step": 97500 |
|
}, |
|
{ |
|
"epoch": 1.61, |
|
"learning_rate": 7.018414677531383e-06, |
|
"loss": 6.5739, |
|
"step": 98000 |
|
}, |
|
{ |
|
"epoch": 1.61, |
|
"learning_rate": 7.001473851835539e-06, |
|
"loss": 6.5728, |
|
"step": 98500 |
|
}, |
|
{ |
|
"epoch": 1.62, |
|
"learning_rate": 6.984533026139695e-06, |
|
"loss": 6.5703, |
|
"step": 99000 |
|
}, |
|
{ |
|
"epoch": 1.63, |
|
"learning_rate": 6.96759220044385e-06, |
|
"loss": 6.5732, |
|
"step": 99500 |
|
}, |
|
{ |
|
"epoch": 1.64, |
|
"learning_rate": 6.950651374748006e-06, |
|
"loss": 6.5691, |
|
"step": 100000 |
|
}, |
|
{ |
|
"epoch": 1.64, |
|
"eval_accuracy": 0.10944561377176425, |
|
"eval_loss": 6.569516181945801, |
|
"eval_runtime": 601.8291, |
|
"eval_samples_per_second": 512.365, |
|
"eval_steps_per_second": 5.339, |
|
"step": 100000 |
|
}, |
|
{ |
|
"epoch": 1.65, |
|
"learning_rate": 6.933710549052161e-06, |
|
"loss": 6.5714, |
|
"step": 100500 |
|
}, |
|
{ |
|
"epoch": 1.65, |
|
"learning_rate": 6.916769723356317e-06, |
|
"loss": 6.5635, |
|
"step": 101000 |
|
}, |
|
{ |
|
"epoch": 1.66, |
|
"learning_rate": 6.899828897660472e-06, |
|
"loss": 6.572, |
|
"step": 101500 |
|
}, |
|
{ |
|
"epoch": 1.67, |
|
"learning_rate": 6.8828880719646285e-06, |
|
"loss": 6.5643, |
|
"step": 102000 |
|
}, |
|
{ |
|
"epoch": 1.68, |
|
"learning_rate": 6.865947246268783e-06, |
|
"loss": 6.5663, |
|
"step": 102500 |
|
}, |
|
{ |
|
"epoch": 1.69, |
|
"learning_rate": 6.849006420572939e-06, |
|
"loss": 6.5591, |
|
"step": 103000 |
|
}, |
|
{ |
|
"epoch": 1.7, |
|
"learning_rate": 6.8320655948770955e-06, |
|
"loss": 6.5602, |
|
"step": 103500 |
|
}, |
|
{ |
|
"epoch": 1.7, |
|
"learning_rate": 6.81512476918125e-06, |
|
"loss": 6.5603, |
|
"step": 104000 |
|
}, |
|
{ |
|
"epoch": 1.71, |
|
"learning_rate": 6.798183943485406e-06, |
|
"loss": 6.5588, |
|
"step": 104500 |
|
}, |
|
{ |
|
"epoch": 1.72, |
|
"learning_rate": 6.781243117789562e-06, |
|
"loss": 6.5568, |
|
"step": 105000 |
|
}, |
|
{ |
|
"epoch": 1.73, |
|
"learning_rate": 6.7643022920937174e-06, |
|
"loss": 6.5571, |
|
"step": 105500 |
|
}, |
|
{ |
|
"epoch": 1.74, |
|
"learning_rate": 6.747361466397872e-06, |
|
"loss": 6.5582, |
|
"step": 106000 |
|
}, |
|
{ |
|
"epoch": 1.75, |
|
"learning_rate": 6.730420640702028e-06, |
|
"loss": 6.5519, |
|
"step": 106500 |
|
}, |
|
{ |
|
"epoch": 1.75, |
|
"learning_rate": 6.7134798150061845e-06, |
|
"loss": 6.5571, |
|
"step": 107000 |
|
}, |
|
{ |
|
"epoch": 1.76, |
|
"learning_rate": 6.696538989310339e-06, |
|
"loss": 6.5534, |
|
"step": 107500 |
|
}, |
|
{ |
|
"epoch": 1.77, |
|
"learning_rate": 6.679598163614495e-06, |
|
"loss": 6.5516, |
|
"step": 108000 |
|
}, |
|
{ |
|
"epoch": 1.78, |
|
"learning_rate": 6.662657337918651e-06, |
|
"loss": 6.5497, |
|
"step": 108500 |
|
}, |
|
{ |
|
"epoch": 1.79, |
|
"learning_rate": 6.6457165122228064e-06, |
|
"loss": 6.5465, |
|
"step": 109000 |
|
}, |
|
{ |
|
"epoch": 1.79, |
|
"learning_rate": 6.628775686526961e-06, |
|
"loss": 6.5517, |
|
"step": 109500 |
|
}, |
|
{ |
|
"epoch": 1.8, |
|
"learning_rate": 6.611834860831118e-06, |
|
"loss": 6.5486, |
|
"step": 110000 |
|
}, |
|
{ |
|
"epoch": 1.8, |
|
"eval_accuracy": 0.11219145006851503, |
|
"eval_loss": 6.546039581298828, |
|
"eval_runtime": 601.6578, |
|
"eval_samples_per_second": 512.511, |
|
"eval_steps_per_second": 5.34, |
|
"step": 110000 |
|
}, |
|
{ |
|
"epoch": 1.81, |
|
"learning_rate": 6.594894035135273e-06, |
|
"loss": 6.5533, |
|
"step": 110500 |
|
}, |
|
{ |
|
"epoch": 1.82, |
|
"learning_rate": 6.577953209439428e-06, |
|
"loss": 6.5467, |
|
"step": 111000 |
|
}, |
|
{ |
|
"epoch": 1.83, |
|
"learning_rate": 6.561012383743585e-06, |
|
"loss": 6.5454, |
|
"step": 111500 |
|
}, |
|
{ |
|
"epoch": 1.84, |
|
"learning_rate": 6.54407155804774e-06, |
|
"loss": 6.5416, |
|
"step": 112000 |
|
}, |
|
{ |
|
"epoch": 1.84, |
|
"learning_rate": 6.5271307323518954e-06, |
|
"loss": 6.5416, |
|
"step": 112500 |
|
}, |
|
{ |
|
"epoch": 1.85, |
|
"learning_rate": 6.510189906656051e-06, |
|
"loss": 6.543, |
|
"step": 113000 |
|
}, |
|
{ |
|
"epoch": 1.86, |
|
"learning_rate": 6.493249080960207e-06, |
|
"loss": 6.5443, |
|
"step": 113500 |
|
}, |
|
{ |
|
"epoch": 1.87, |
|
"learning_rate": 6.476308255264362e-06, |
|
"loss": 6.534, |
|
"step": 114000 |
|
}, |
|
{ |
|
"epoch": 1.88, |
|
"learning_rate": 6.459367429568517e-06, |
|
"loss": 6.5395, |
|
"step": 114500 |
|
}, |
|
{ |
|
"epoch": 1.88, |
|
"learning_rate": 6.442426603872674e-06, |
|
"loss": 6.5386, |
|
"step": 115000 |
|
}, |
|
{ |
|
"epoch": 1.89, |
|
"learning_rate": 6.425485778176829e-06, |
|
"loss": 6.5399, |
|
"step": 115500 |
|
}, |
|
{ |
|
"epoch": 1.9, |
|
"learning_rate": 6.4085449524809836e-06, |
|
"loss": 6.5382, |
|
"step": 116000 |
|
}, |
|
{ |
|
"epoch": 1.91, |
|
"learning_rate": 6.39160412678514e-06, |
|
"loss": 6.5309, |
|
"step": 116500 |
|
}, |
|
{ |
|
"epoch": 1.92, |
|
"learning_rate": 6.374663301089296e-06, |
|
"loss": 6.533, |
|
"step": 117000 |
|
}, |
|
{ |
|
"epoch": 1.93, |
|
"learning_rate": 6.357722475393451e-06, |
|
"loss": 6.5269, |
|
"step": 117500 |
|
}, |
|
{ |
|
"epoch": 1.93, |
|
"learning_rate": 6.340781649697607e-06, |
|
"loss": 6.533, |
|
"step": 118000 |
|
}, |
|
{ |
|
"epoch": 1.94, |
|
"learning_rate": 6.323840824001762e-06, |
|
"loss": 6.5268, |
|
"step": 118500 |
|
}, |
|
{ |
|
"epoch": 1.95, |
|
"learning_rate": 6.306899998305918e-06, |
|
"loss": 6.5326, |
|
"step": 119000 |
|
}, |
|
{ |
|
"epoch": 1.96, |
|
"learning_rate": 6.289959172610074e-06, |
|
"loss": 6.5306, |
|
"step": 119500 |
|
}, |
|
{ |
|
"epoch": 1.97, |
|
"learning_rate": 6.273018346914229e-06, |
|
"loss": 6.5246, |
|
"step": 120000 |
|
}, |
|
{ |
|
"epoch": 1.97, |
|
"eval_accuracy": 0.11442907817610445, |
|
"eval_loss": 6.527458667755127, |
|
"eval_runtime": 601.5697, |
|
"eval_samples_per_second": 512.586, |
|
"eval_steps_per_second": 5.341, |
|
"step": 120000 |
|
}, |
|
{ |
|
"epoch": 1.97, |
|
"learning_rate": 6.256077521218385e-06, |
|
"loss": 6.5326, |
|
"step": 120500 |
|
}, |
|
{ |
|
"epoch": 1.98, |
|
"learning_rate": 6.2391366955225405e-06, |
|
"loss": 6.535, |
|
"step": 121000 |
|
}, |
|
{ |
|
"epoch": 1.99, |
|
"learning_rate": 6.222195869826696e-06, |
|
"loss": 6.5223, |
|
"step": 121500 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 6.205255044130851e-06, |
|
"loss": 6.5225, |
|
"step": 122000 |
|
}, |
|
{ |
|
"epoch": 2.01, |
|
"learning_rate": 6.1883142184350076e-06, |
|
"loss": 6.518, |
|
"step": 122500 |
|
}, |
|
{ |
|
"epoch": 2.02, |
|
"learning_rate": 6.171373392739162e-06, |
|
"loss": 6.5249, |
|
"step": 123000 |
|
}, |
|
{ |
|
"epoch": 2.02, |
|
"learning_rate": 6.154432567043318e-06, |
|
"loss": 6.5216, |
|
"step": 123500 |
|
}, |
|
{ |
|
"epoch": 2.03, |
|
"learning_rate": 6.137491741347473e-06, |
|
"loss": 6.5238, |
|
"step": 124000 |
|
}, |
|
{ |
|
"epoch": 2.04, |
|
"learning_rate": 6.1205509156516295e-06, |
|
"loss": 6.5162, |
|
"step": 124500 |
|
}, |
|
{ |
|
"epoch": 2.05, |
|
"learning_rate": 6.103610089955785e-06, |
|
"loss": 6.5211, |
|
"step": 125000 |
|
}, |
|
{ |
|
"epoch": 2.06, |
|
"learning_rate": 6.08666926425994e-06, |
|
"loss": 6.5196, |
|
"step": 125500 |
|
}, |
|
{ |
|
"epoch": 2.06, |
|
"learning_rate": 6.0697284385640965e-06, |
|
"loss": 6.5158, |
|
"step": 126000 |
|
}, |
|
{ |
|
"epoch": 2.07, |
|
"learning_rate": 6.052787612868251e-06, |
|
"loss": 6.5173, |
|
"step": 126500 |
|
}, |
|
{ |
|
"epoch": 2.08, |
|
"learning_rate": 6.035846787172407e-06, |
|
"loss": 6.5195, |
|
"step": 127000 |
|
}, |
|
{ |
|
"epoch": 2.09, |
|
"learning_rate": 6.018905961476564e-06, |
|
"loss": 6.525, |
|
"step": 127500 |
|
}, |
|
{ |
|
"epoch": 2.1, |
|
"learning_rate": 6.0019651357807185e-06, |
|
"loss": 6.5169, |
|
"step": 128000 |
|
}, |
|
{ |
|
"epoch": 2.11, |
|
"learning_rate": 5.985024310084873e-06, |
|
"loss": 6.5191, |
|
"step": 128500 |
|
}, |
|
{ |
|
"epoch": 2.11, |
|
"learning_rate": 5.96808348438903e-06, |
|
"loss": 6.5111, |
|
"step": 129000 |
|
}, |
|
{ |
|
"epoch": 2.12, |
|
"learning_rate": 5.9511426586931855e-06, |
|
"loss": 6.5128, |
|
"step": 129500 |
|
}, |
|
{ |
|
"epoch": 2.13, |
|
"learning_rate": 5.93420183299734e-06, |
|
"loss": 6.5069, |
|
"step": 130000 |
|
}, |
|
{ |
|
"epoch": 2.13, |
|
"eval_accuracy": 0.11622728049791442, |
|
"eval_loss": 6.511504650115967, |
|
"eval_runtime": 605.5053, |
|
"eval_samples_per_second": 509.254, |
|
"eval_steps_per_second": 5.306, |
|
"step": 130000 |
|
}, |
|
{ |
|
"epoch": 2.14, |
|
"learning_rate": 5.917261007301497e-06, |
|
"loss": 6.5104, |
|
"step": 130500 |
|
}, |
|
{ |
|
"epoch": 2.15, |
|
"learning_rate": 5.900320181605652e-06, |
|
"loss": 6.5106, |
|
"step": 131000 |
|
}, |
|
{ |
|
"epoch": 2.15, |
|
"learning_rate": 5.8833793559098074e-06, |
|
"loss": 6.5054, |
|
"step": 131500 |
|
}, |
|
{ |
|
"epoch": 2.16, |
|
"learning_rate": 5.866438530213962e-06, |
|
"loss": 6.5124, |
|
"step": 132000 |
|
}, |
|
{ |
|
"epoch": 2.17, |
|
"learning_rate": 5.849497704518119e-06, |
|
"loss": 6.508, |
|
"step": 132500 |
|
}, |
|
{ |
|
"epoch": 2.18, |
|
"learning_rate": 5.8325568788222745e-06, |
|
"loss": 6.5066, |
|
"step": 133000 |
|
}, |
|
{ |
|
"epoch": 2.19, |
|
"learning_rate": 5.815616053126429e-06, |
|
"loss": 6.5102, |
|
"step": 133500 |
|
}, |
|
{ |
|
"epoch": 2.2, |
|
"learning_rate": 5.798675227430586e-06, |
|
"loss": 6.5045, |
|
"step": 134000 |
|
}, |
|
{ |
|
"epoch": 2.2, |
|
"learning_rate": 5.781734401734741e-06, |
|
"loss": 6.5027, |
|
"step": 134500 |
|
}, |
|
{ |
|
"epoch": 2.21, |
|
"learning_rate": 5.7647935760388964e-06, |
|
"loss": 6.502, |
|
"step": 135000 |
|
}, |
|
{ |
|
"epoch": 2.22, |
|
"learning_rate": 5.747852750343053e-06, |
|
"loss": 6.4952, |
|
"step": 135500 |
|
}, |
|
{ |
|
"epoch": 2.23, |
|
"learning_rate": 5.730911924647208e-06, |
|
"loss": 6.5016, |
|
"step": 136000 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"learning_rate": 5.713971098951363e-06, |
|
"loss": 6.5039, |
|
"step": 136500 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"learning_rate": 5.697030273255519e-06, |
|
"loss": 6.5077, |
|
"step": 137000 |
|
}, |
|
{ |
|
"epoch": 2.25, |
|
"learning_rate": 5.680089447559675e-06, |
|
"loss": 6.5012, |
|
"step": 137500 |
|
}, |
|
{ |
|
"epoch": 2.26, |
|
"learning_rate": 5.66314862186383e-06, |
|
"loss": 6.4987, |
|
"step": 138000 |
|
}, |
|
{ |
|
"epoch": 2.27, |
|
"learning_rate": 5.646207796167986e-06, |
|
"loss": 6.4977, |
|
"step": 138500 |
|
}, |
|
{ |
|
"epoch": 2.28, |
|
"learning_rate": 5.629266970472141e-06, |
|
"loss": 6.4992, |
|
"step": 139000 |
|
}, |
|
{ |
|
"epoch": 2.29, |
|
"learning_rate": 5.612326144776297e-06, |
|
"loss": 6.498, |
|
"step": 139500 |
|
}, |
|
{ |
|
"epoch": 2.29, |
|
"learning_rate": 5.595385319080452e-06, |
|
"loss": 6.5001, |
|
"step": 140000 |
|
}, |
|
{ |
|
"epoch": 2.29, |
|
"eval_accuracy": 0.1180354638977201, |
|
"eval_loss": 6.496232509613037, |
|
"eval_runtime": 603.9932, |
|
"eval_samples_per_second": 510.529, |
|
"eval_steps_per_second": 5.32, |
|
"step": 140000 |
|
}, |
|
{ |
|
"epoch": 2.3, |
|
"learning_rate": 5.578444493384608e-06, |
|
"loss": 6.4969, |
|
"step": 140500 |
|
}, |
|
{ |
|
"epoch": 2.31, |
|
"learning_rate": 5.561503667688764e-06, |
|
"loss": 6.4959, |
|
"step": 141000 |
|
}, |
|
{ |
|
"epoch": 2.32, |
|
"learning_rate": 5.544562841992919e-06, |
|
"loss": 6.4939, |
|
"step": 141500 |
|
}, |
|
{ |
|
"epoch": 2.33, |
|
"learning_rate": 5.527622016297075e-06, |
|
"loss": 6.4908, |
|
"step": 142000 |
|
}, |
|
{ |
|
"epoch": 2.33, |
|
"learning_rate": 5.51068119060123e-06, |
|
"loss": 6.4954, |
|
"step": 142500 |
|
}, |
|
{ |
|
"epoch": 2.34, |
|
"learning_rate": 5.493740364905386e-06, |
|
"loss": 6.4911, |
|
"step": 143000 |
|
}, |
|
{ |
|
"epoch": 2.35, |
|
"learning_rate": 5.4767995392095415e-06, |
|
"loss": 6.4886, |
|
"step": 143500 |
|
}, |
|
{ |
|
"epoch": 2.36, |
|
"learning_rate": 5.459858713513697e-06, |
|
"loss": 6.492, |
|
"step": 144000 |
|
}, |
|
{ |
|
"epoch": 2.37, |
|
"learning_rate": 5.442917887817852e-06, |
|
"loss": 6.4917, |
|
"step": 144500 |
|
}, |
|
{ |
|
"epoch": 2.38, |
|
"learning_rate": 5.4259770621220086e-06, |
|
"loss": 6.491, |
|
"step": 145000 |
|
}, |
|
{ |
|
"epoch": 2.38, |
|
"learning_rate": 5.409036236426164e-06, |
|
"loss": 6.4872, |
|
"step": 145500 |
|
}, |
|
{ |
|
"epoch": 2.39, |
|
"learning_rate": 5.392095410730319e-06, |
|
"loss": 6.4833, |
|
"step": 146000 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"learning_rate": 5.375154585034476e-06, |
|
"loss": 6.4912, |
|
"step": 146500 |
|
}, |
|
{ |
|
"epoch": 2.41, |
|
"learning_rate": 5.3582137593386305e-06, |
|
"loss": 6.4895, |
|
"step": 147000 |
|
}, |
|
{ |
|
"epoch": 2.42, |
|
"learning_rate": 5.341272933642786e-06, |
|
"loss": 6.4869, |
|
"step": 147500 |
|
}, |
|
{ |
|
"epoch": 2.43, |
|
"learning_rate": 5.324332107946941e-06, |
|
"loss": 6.4875, |
|
"step": 148000 |
|
}, |
|
{ |
|
"epoch": 2.43, |
|
"learning_rate": 5.3073912822510975e-06, |
|
"loss": 6.4895, |
|
"step": 148500 |
|
}, |
|
{ |
|
"epoch": 2.44, |
|
"learning_rate": 5.290450456555252e-06, |
|
"loss": 6.48, |
|
"step": 149000 |
|
}, |
|
{ |
|
"epoch": 2.45, |
|
"learning_rate": 5.273509630859408e-06, |
|
"loss": 6.486, |
|
"step": 149500 |
|
}, |
|
{ |
|
"epoch": 2.46, |
|
"learning_rate": 5.256568805163565e-06, |
|
"loss": 6.4785, |
|
"step": 150000 |
|
}, |
|
{ |
|
"epoch": 2.46, |
|
"eval_accuracy": 0.11971311116288914, |
|
"eval_loss": 6.4821858406066895, |
|
"eval_runtime": 600.3142, |
|
"eval_samples_per_second": 513.658, |
|
"eval_steps_per_second": 5.352, |
|
"step": 150000 |
|
}, |
|
{ |
|
"epoch": 2.47, |
|
"learning_rate": 5.2396279794677195e-06, |
|
"loss": 6.4843, |
|
"step": 150500 |
|
}, |
|
{ |
|
"epoch": 2.47, |
|
"learning_rate": 5.222687153771875e-06, |
|
"loss": 6.4887, |
|
"step": 151000 |
|
}, |
|
{ |
|
"epoch": 2.48, |
|
"learning_rate": 5.205746328076031e-06, |
|
"loss": 6.4861, |
|
"step": 151500 |
|
}, |
|
{ |
|
"epoch": 2.49, |
|
"learning_rate": 5.1888055023801865e-06, |
|
"loss": 6.4865, |
|
"step": 152000 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 5.171864676684341e-06, |
|
"loss": 6.4767, |
|
"step": 152500 |
|
}, |
|
{ |
|
"epoch": 2.51, |
|
"learning_rate": 5.154923850988498e-06, |
|
"loss": 6.4775, |
|
"step": 153000 |
|
}, |
|
{ |
|
"epoch": 2.52, |
|
"learning_rate": 5.137983025292654e-06, |
|
"loss": 6.4793, |
|
"step": 153500 |
|
}, |
|
{ |
|
"epoch": 2.52, |
|
"learning_rate": 5.1210421995968085e-06, |
|
"loss": 6.479, |
|
"step": 154000 |
|
}, |
|
{ |
|
"epoch": 2.53, |
|
"learning_rate": 5.104101373900965e-06, |
|
"loss": 6.4783, |
|
"step": 154500 |
|
}, |
|
{ |
|
"epoch": 2.54, |
|
"learning_rate": 5.08716054820512e-06, |
|
"loss": 6.479, |
|
"step": 155000 |
|
}, |
|
{ |
|
"epoch": 2.55, |
|
"learning_rate": 5.0702197225092755e-06, |
|
"loss": 6.4804, |
|
"step": 155500 |
|
}, |
|
{ |
|
"epoch": 2.56, |
|
"learning_rate": 5.053278896813432e-06, |
|
"loss": 6.473, |
|
"step": 156000 |
|
}, |
|
{ |
|
"epoch": 2.56, |
|
"learning_rate": 5.036338071117587e-06, |
|
"loss": 6.4769, |
|
"step": 156500 |
|
}, |
|
{ |
|
"epoch": 2.57, |
|
"learning_rate": 5.019397245421742e-06, |
|
"loss": 6.4778, |
|
"step": 157000 |
|
}, |
|
{ |
|
"epoch": 2.58, |
|
"learning_rate": 5.0024564197258974e-06, |
|
"loss": 6.4764, |
|
"step": 157500 |
|
}, |
|
{ |
|
"epoch": 2.59, |
|
"learning_rate": 4.985515594030054e-06, |
|
"loss": 6.47, |
|
"step": 158000 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"learning_rate": 4.968574768334209e-06, |
|
"loss": 6.4716, |
|
"step": 158500 |
|
}, |
|
{ |
|
"epoch": 2.61, |
|
"learning_rate": 4.9516339426383645e-06, |
|
"loss": 6.4754, |
|
"step": 159000 |
|
}, |
|
{ |
|
"epoch": 2.61, |
|
"learning_rate": 4.93469311694252e-06, |
|
"loss": 6.4741, |
|
"step": 159500 |
|
}, |
|
{ |
|
"epoch": 2.62, |
|
"learning_rate": 4.917752291246676e-06, |
|
"loss": 6.4706, |
|
"step": 160000 |
|
}, |
|
{ |
|
"epoch": 2.62, |
|
"eval_accuracy": 0.12115731725892939, |
|
"eval_loss": 6.471449851989746, |
|
"eval_runtime": 601.7368, |
|
"eval_samples_per_second": 512.443, |
|
"eval_steps_per_second": 5.34, |
|
"step": 160000 |
|
}, |
|
{ |
|
"epoch": 2.63, |
|
"learning_rate": 4.900811465550832e-06, |
|
"loss": 6.4739, |
|
"step": 160500 |
|
}, |
|
{ |
|
"epoch": 2.64, |
|
"learning_rate": 4.8838706398549864e-06, |
|
"loss": 6.4717, |
|
"step": 161000 |
|
}, |
|
{ |
|
"epoch": 2.65, |
|
"learning_rate": 4.866929814159143e-06, |
|
"loss": 6.468, |
|
"step": 161500 |
|
}, |
|
{ |
|
"epoch": 2.65, |
|
"learning_rate": 4.849988988463299e-06, |
|
"loss": 6.4758, |
|
"step": 162000 |
|
}, |
|
{ |
|
"epoch": 2.66, |
|
"learning_rate": 4.8330481627674535e-06, |
|
"loss": 6.4698, |
|
"step": 162500 |
|
}, |
|
{ |
|
"epoch": 2.67, |
|
"learning_rate": 4.816107337071609e-06, |
|
"loss": 6.4681, |
|
"step": 163000 |
|
}, |
|
{ |
|
"epoch": 2.68, |
|
"learning_rate": 4.799166511375765e-06, |
|
"loss": 6.47, |
|
"step": 163500 |
|
}, |
|
{ |
|
"epoch": 2.69, |
|
"learning_rate": 4.782225685679921e-06, |
|
"loss": 6.4711, |
|
"step": 164000 |
|
}, |
|
{ |
|
"epoch": 2.7, |
|
"learning_rate": 4.765284859984076e-06, |
|
"loss": 6.4707, |
|
"step": 164500 |
|
}, |
|
{ |
|
"epoch": 2.7, |
|
"learning_rate": 4.748344034288231e-06, |
|
"loss": 6.4717, |
|
"step": 165000 |
|
}, |
|
{ |
|
"epoch": 2.71, |
|
"learning_rate": 4.731403208592387e-06, |
|
"loss": 6.4677, |
|
"step": 165500 |
|
}, |
|
{ |
|
"epoch": 2.72, |
|
"learning_rate": 4.714462382896543e-06, |
|
"loss": 6.4677, |
|
"step": 166000 |
|
}, |
|
{ |
|
"epoch": 2.73, |
|
"learning_rate": 4.697521557200698e-06, |
|
"loss": 6.4642, |
|
"step": 166500 |
|
}, |
|
{ |
|
"epoch": 2.74, |
|
"learning_rate": 4.680580731504854e-06, |
|
"loss": 6.4587, |
|
"step": 167000 |
|
}, |
|
{ |
|
"epoch": 2.74, |
|
"learning_rate": 4.6636399058090096e-06, |
|
"loss": 6.4666, |
|
"step": 167500 |
|
}, |
|
{ |
|
"epoch": 2.75, |
|
"learning_rate": 4.646699080113165e-06, |
|
"loss": 6.4691, |
|
"step": 168000 |
|
}, |
|
{ |
|
"epoch": 2.76, |
|
"learning_rate": 4.629758254417321e-06, |
|
"loss": 6.4628, |
|
"step": 168500 |
|
}, |
|
{ |
|
"epoch": 2.77, |
|
"learning_rate": 4.612817428721476e-06, |
|
"loss": 6.4632, |
|
"step": 169000 |
|
}, |
|
{ |
|
"epoch": 2.78, |
|
"learning_rate": 4.5958766030256315e-06, |
|
"loss": 6.4586, |
|
"step": 169500 |
|
}, |
|
{ |
|
"epoch": 2.79, |
|
"learning_rate": 4.578935777329788e-06, |
|
"loss": 6.4612, |
|
"step": 170000 |
|
}, |
|
{ |
|
"epoch": 2.79, |
|
"eval_accuracy": 0.12253875669316174, |
|
"eval_loss": 6.461043357849121, |
|
"eval_runtime": 602.3343, |
|
"eval_samples_per_second": 511.935, |
|
"eval_steps_per_second": 5.334, |
|
"step": 170000 |
|
}, |
|
{ |
|
"epoch": 2.79, |
|
"learning_rate": 4.561994951633943e-06, |
|
"loss": 6.4673, |
|
"step": 170500 |
|
}, |
|
{ |
|
"epoch": 2.8, |
|
"learning_rate": 4.5450541259380986e-06, |
|
"loss": 6.4579, |
|
"step": 171000 |
|
}, |
|
{ |
|
"epoch": 2.81, |
|
"learning_rate": 4.528113300242254e-06, |
|
"loss": 6.4594, |
|
"step": 171500 |
|
}, |
|
{ |
|
"epoch": 2.82, |
|
"learning_rate": 4.51117247454641e-06, |
|
"loss": 6.4634, |
|
"step": 172000 |
|
}, |
|
{ |
|
"epoch": 2.83, |
|
"learning_rate": 4.494231648850566e-06, |
|
"loss": 6.4586, |
|
"step": 172500 |
|
}, |
|
{ |
|
"epoch": 2.83, |
|
"learning_rate": 4.4772908231547205e-06, |
|
"loss": 6.4587, |
|
"step": 173000 |
|
}, |
|
{ |
|
"epoch": 2.84, |
|
"learning_rate": 4.460349997458876e-06, |
|
"loss": 6.4599, |
|
"step": 173500 |
|
}, |
|
{ |
|
"epoch": 2.85, |
|
"learning_rate": 4.443409171763033e-06, |
|
"loss": 6.4596, |
|
"step": 174000 |
|
}, |
|
{ |
|
"epoch": 2.86, |
|
"learning_rate": 4.4264683460671875e-06, |
|
"loss": 6.4547, |
|
"step": 174500 |
|
}, |
|
{ |
|
"epoch": 2.87, |
|
"learning_rate": 4.409527520371343e-06, |
|
"loss": 6.4575, |
|
"step": 175000 |
|
}, |
|
{ |
|
"epoch": 2.88, |
|
"learning_rate": 4.392586694675499e-06, |
|
"loss": 6.4556, |
|
"step": 175500 |
|
}, |
|
{ |
|
"epoch": 2.88, |
|
"learning_rate": 4.375645868979655e-06, |
|
"loss": 6.4581, |
|
"step": 176000 |
|
}, |
|
{ |
|
"epoch": 2.89, |
|
"learning_rate": 4.35870504328381e-06, |
|
"loss": 6.4535, |
|
"step": 176500 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"learning_rate": 4.341764217587965e-06, |
|
"loss": 6.4617, |
|
"step": 177000 |
|
}, |
|
{ |
|
"epoch": 2.91, |
|
"learning_rate": 4.324823391892121e-06, |
|
"loss": 6.4535, |
|
"step": 177500 |
|
}, |
|
{ |
|
"epoch": 2.92, |
|
"learning_rate": 4.307882566196277e-06, |
|
"loss": 6.4543, |
|
"step": 178000 |
|
}, |
|
{ |
|
"epoch": 2.92, |
|
"learning_rate": 4.290941740500432e-06, |
|
"loss": 6.4535, |
|
"step": 178500 |
|
}, |
|
{ |
|
"epoch": 2.93, |
|
"learning_rate": 4.274000914804588e-06, |
|
"loss": 6.4523, |
|
"step": 179000 |
|
}, |
|
{ |
|
"epoch": 2.94, |
|
"learning_rate": 4.257060089108744e-06, |
|
"loss": 6.4542, |
|
"step": 179500 |
|
}, |
|
{ |
|
"epoch": 2.95, |
|
"learning_rate": 4.240119263412899e-06, |
|
"loss": 6.4485, |
|
"step": 180000 |
|
}, |
|
{ |
|
"epoch": 2.95, |
|
"eval_accuracy": 0.1232825828604018, |
|
"eval_loss": 6.4530487060546875, |
|
"eval_runtime": 601.0649, |
|
"eval_samples_per_second": 513.016, |
|
"eval_steps_per_second": 5.346, |
|
"step": 180000 |
|
}, |
|
{ |
|
"epoch": 2.96, |
|
"learning_rate": 4.223178437717055e-06, |
|
"loss": 6.455, |
|
"step": 180500 |
|
}, |
|
{ |
|
"epoch": 2.97, |
|
"learning_rate": 4.20623761202121e-06, |
|
"loss": 6.4519, |
|
"step": 181000 |
|
}, |
|
{ |
|
"epoch": 2.97, |
|
"learning_rate": 4.1892967863253655e-06, |
|
"loss": 6.455, |
|
"step": 181500 |
|
}, |
|
{ |
|
"epoch": 2.98, |
|
"learning_rate": 4.172355960629522e-06, |
|
"loss": 6.4511, |
|
"step": 182000 |
|
}, |
|
{ |
|
"epoch": 2.99, |
|
"learning_rate": 4.155415134933677e-06, |
|
"loss": 6.4468, |
|
"step": 182500 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 4.138474309237833e-06, |
|
"loss": 6.4473, |
|
"step": 183000 |
|
}, |
|
{ |
|
"epoch": 3.01, |
|
"learning_rate": 4.121533483541988e-06, |
|
"loss": 6.4471, |
|
"step": 183500 |
|
}, |
|
{ |
|
"epoch": 3.01, |
|
"learning_rate": 4.104592657846144e-06, |
|
"loss": 6.4467, |
|
"step": 184000 |
|
}, |
|
{ |
|
"epoch": 3.02, |
|
"learning_rate": 4.0876518321503e-06, |
|
"loss": 6.4515, |
|
"step": 184500 |
|
}, |
|
{ |
|
"epoch": 3.03, |
|
"learning_rate": 4.0707110064544545e-06, |
|
"loss": 6.448, |
|
"step": 185000 |
|
}, |
|
{ |
|
"epoch": 3.04, |
|
"learning_rate": 4.05377018075861e-06, |
|
"loss": 6.4491, |
|
"step": 185500 |
|
}, |
|
{ |
|
"epoch": 3.05, |
|
"learning_rate": 4.036829355062766e-06, |
|
"loss": 6.4505, |
|
"step": 186000 |
|
}, |
|
{ |
|
"epoch": 3.06, |
|
"learning_rate": 4.019888529366922e-06, |
|
"loss": 6.4471, |
|
"step": 186500 |
|
}, |
|
{ |
|
"epoch": 3.06, |
|
"learning_rate": 4.002947703671077e-06, |
|
"loss": 6.4435, |
|
"step": 187000 |
|
}, |
|
{ |
|
"epoch": 3.07, |
|
"learning_rate": 3.986006877975233e-06, |
|
"loss": 6.4472, |
|
"step": 187500 |
|
}, |
|
{ |
|
"epoch": 3.08, |
|
"learning_rate": 3.969066052279389e-06, |
|
"loss": 6.4453, |
|
"step": 188000 |
|
}, |
|
{ |
|
"epoch": 3.09, |
|
"learning_rate": 3.952125226583544e-06, |
|
"loss": 6.449, |
|
"step": 188500 |
|
}, |
|
{ |
|
"epoch": 3.1, |
|
"learning_rate": 3.935184400887699e-06, |
|
"loss": 6.4475, |
|
"step": 189000 |
|
}, |
|
{ |
|
"epoch": 3.11, |
|
"learning_rate": 3.918243575191855e-06, |
|
"loss": 6.4411, |
|
"step": 189500 |
|
}, |
|
{ |
|
"epoch": 3.11, |
|
"learning_rate": 3.9013027494960106e-06, |
|
"loss": 6.4477, |
|
"step": 190000 |
|
}, |
|
{ |
|
"epoch": 3.11, |
|
"eval_accuracy": 0.1243327551641345, |
|
"eval_loss": 6.444092273712158, |
|
"eval_runtime": 605.0337, |
|
"eval_samples_per_second": 509.651, |
|
"eval_steps_per_second": 5.31, |
|
"step": 190000 |
|
}, |
|
{ |
|
"epoch": 3.12, |
|
"learning_rate": 3.884361923800166e-06, |
|
"loss": 6.4434, |
|
"step": 190500 |
|
}, |
|
{ |
|
"epoch": 3.13, |
|
"learning_rate": 3.867421098104322e-06, |
|
"loss": 6.4469, |
|
"step": 191000 |
|
}, |
|
{ |
|
"epoch": 3.14, |
|
"learning_rate": 3.850480272408478e-06, |
|
"loss": 6.4424, |
|
"step": 191500 |
|
}, |
|
{ |
|
"epoch": 3.15, |
|
"learning_rate": 3.833539446712633e-06, |
|
"loss": 6.4427, |
|
"step": 192000 |
|
}, |
|
{ |
|
"epoch": 3.15, |
|
"learning_rate": 3.816598621016789e-06, |
|
"loss": 6.4407, |
|
"step": 192500 |
|
}, |
|
{ |
|
"epoch": 3.16, |
|
"learning_rate": 3.799657795320944e-06, |
|
"loss": 6.4434, |
|
"step": 193000 |
|
}, |
|
{ |
|
"epoch": 3.17, |
|
"learning_rate": 3.7827169696251e-06, |
|
"loss": 6.442, |
|
"step": 193500 |
|
}, |
|
{ |
|
"epoch": 3.18, |
|
"learning_rate": 3.7657761439292557e-06, |
|
"loss": 6.438, |
|
"step": 194000 |
|
}, |
|
{ |
|
"epoch": 3.19, |
|
"learning_rate": 3.748835318233411e-06, |
|
"loss": 6.4435, |
|
"step": 194500 |
|
}, |
|
{ |
|
"epoch": 3.2, |
|
"learning_rate": 3.7318944925375666e-06, |
|
"loss": 6.4418, |
|
"step": 195000 |
|
}, |
|
{ |
|
"epoch": 3.2, |
|
"learning_rate": 3.7149536668417223e-06, |
|
"loss": 6.4368, |
|
"step": 195500 |
|
}, |
|
{ |
|
"epoch": 3.21, |
|
"learning_rate": 3.6980128411458776e-06, |
|
"loss": 6.4424, |
|
"step": 196000 |
|
}, |
|
{ |
|
"epoch": 3.22, |
|
"learning_rate": 3.6810720154500333e-06, |
|
"loss": 6.4395, |
|
"step": 196500 |
|
}, |
|
{ |
|
"epoch": 3.23, |
|
"learning_rate": 3.6641311897541886e-06, |
|
"loss": 6.4424, |
|
"step": 197000 |
|
}, |
|
{ |
|
"epoch": 3.24, |
|
"learning_rate": 3.6471903640583447e-06, |
|
"loss": 6.4355, |
|
"step": 197500 |
|
}, |
|
{ |
|
"epoch": 3.24, |
|
"learning_rate": 3.6302495383625004e-06, |
|
"loss": 6.4401, |
|
"step": 198000 |
|
}, |
|
{ |
|
"epoch": 3.25, |
|
"learning_rate": 3.6133087126666556e-06, |
|
"loss": 6.4408, |
|
"step": 198500 |
|
}, |
|
{ |
|
"epoch": 3.26, |
|
"learning_rate": 3.5963678869708113e-06, |
|
"loss": 6.4426, |
|
"step": 199000 |
|
}, |
|
{ |
|
"epoch": 3.27, |
|
"learning_rate": 3.579427061274967e-06, |
|
"loss": 6.4364, |
|
"step": 199500 |
|
}, |
|
{ |
|
"epoch": 3.28, |
|
"learning_rate": 3.5624862355791223e-06, |
|
"loss": 6.4373, |
|
"step": 200000 |
|
}, |
|
{ |
|
"epoch": 3.28, |
|
"eval_accuracy": 0.12513166304842002, |
|
"eval_loss": 6.439530849456787, |
|
"eval_runtime": 605.3063, |
|
"eval_samples_per_second": 509.421, |
|
"eval_steps_per_second": 5.308, |
|
"step": 200000 |
|
}, |
|
{ |
|
"epoch": 3.29, |
|
"learning_rate": 3.545545409883278e-06, |
|
"loss": 6.4369, |
|
"step": 200500 |
|
}, |
|
{ |
|
"epoch": 3.29, |
|
"learning_rate": 3.5286045841874332e-06, |
|
"loss": 6.4347, |
|
"step": 201000 |
|
}, |
|
{ |
|
"epoch": 3.3, |
|
"learning_rate": 3.511663758491589e-06, |
|
"loss": 6.435, |
|
"step": 201500 |
|
}, |
|
{ |
|
"epoch": 3.31, |
|
"learning_rate": 3.494722932795745e-06, |
|
"loss": 6.4335, |
|
"step": 202000 |
|
}, |
|
{ |
|
"epoch": 3.32, |
|
"learning_rate": 3.4777821070999003e-06, |
|
"loss": 6.4391, |
|
"step": 202500 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"learning_rate": 3.460841281404056e-06, |
|
"loss": 6.4361, |
|
"step": 203000 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"learning_rate": 3.4439004557082117e-06, |
|
"loss": 6.4367, |
|
"step": 203500 |
|
}, |
|
{ |
|
"epoch": 3.34, |
|
"learning_rate": 3.426959630012367e-06, |
|
"loss": 6.4366, |
|
"step": 204000 |
|
}, |
|
{ |
|
"epoch": 3.35, |
|
"learning_rate": 3.4100188043165226e-06, |
|
"loss": 6.4339, |
|
"step": 204500 |
|
}, |
|
{ |
|
"epoch": 3.36, |
|
"learning_rate": 3.393077978620678e-06, |
|
"loss": 6.4402, |
|
"step": 205000 |
|
}, |
|
{ |
|
"epoch": 3.37, |
|
"learning_rate": 3.3761371529248336e-06, |
|
"loss": 6.433, |
|
"step": 205500 |
|
}, |
|
{ |
|
"epoch": 3.38, |
|
"learning_rate": 3.3591963272289897e-06, |
|
"loss": 6.4381, |
|
"step": 206000 |
|
}, |
|
{ |
|
"epoch": 3.38, |
|
"learning_rate": 3.342255501533145e-06, |
|
"loss": 6.4369, |
|
"step": 206500 |
|
}, |
|
{ |
|
"epoch": 3.39, |
|
"learning_rate": 3.3253146758373007e-06, |
|
"loss": 6.4371, |
|
"step": 207000 |
|
}, |
|
{ |
|
"epoch": 3.4, |
|
"learning_rate": 3.3083738501414564e-06, |
|
"loss": 6.4339, |
|
"step": 207500 |
|
}, |
|
{ |
|
"epoch": 3.41, |
|
"learning_rate": 3.2914330244456116e-06, |
|
"loss": 6.434, |
|
"step": 208000 |
|
}, |
|
{ |
|
"epoch": 3.42, |
|
"learning_rate": 3.2744921987497673e-06, |
|
"loss": 6.4282, |
|
"step": 208500 |
|
}, |
|
{ |
|
"epoch": 3.42, |
|
"learning_rate": 3.2575513730539226e-06, |
|
"loss": 6.4306, |
|
"step": 209000 |
|
}, |
|
{ |
|
"epoch": 3.43, |
|
"learning_rate": 3.2406105473580783e-06, |
|
"loss": 6.4309, |
|
"step": 209500 |
|
}, |
|
{ |
|
"epoch": 3.44, |
|
"learning_rate": 3.2236697216622344e-06, |
|
"loss": 6.4351, |
|
"step": 210000 |
|
}, |
|
{ |
|
"epoch": 3.44, |
|
"eval_accuracy": 0.12585777151942737, |
|
"eval_loss": 6.432172775268555, |
|
"eval_runtime": 603.5717, |
|
"eval_samples_per_second": 510.885, |
|
"eval_steps_per_second": 5.323, |
|
"step": 210000 |
|
}, |
|
{ |
|
"epoch": 3.45, |
|
"learning_rate": 3.2067288959663897e-06, |
|
"loss": 6.4332, |
|
"step": 210500 |
|
}, |
|
{ |
|
"epoch": 3.46, |
|
"learning_rate": 3.1897880702705454e-06, |
|
"loss": 6.435, |
|
"step": 211000 |
|
}, |
|
{ |
|
"epoch": 3.47, |
|
"learning_rate": 3.172847244574701e-06, |
|
"loss": 6.4315, |
|
"step": 211500 |
|
}, |
|
{ |
|
"epoch": 3.47, |
|
"learning_rate": 3.1559064188788563e-06, |
|
"loss": 6.4349, |
|
"step": 212000 |
|
}, |
|
{ |
|
"epoch": 3.48, |
|
"learning_rate": 3.138965593183012e-06, |
|
"loss": 6.4264, |
|
"step": 212500 |
|
}, |
|
{ |
|
"epoch": 3.49, |
|
"learning_rate": 3.1220247674871673e-06, |
|
"loss": 6.4345, |
|
"step": 213000 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 3.105083941791323e-06, |
|
"loss": 6.4287, |
|
"step": 213500 |
|
}, |
|
{ |
|
"epoch": 3.51, |
|
"learning_rate": 3.088143116095479e-06, |
|
"loss": 6.433, |
|
"step": 214000 |
|
}, |
|
{ |
|
"epoch": 3.51, |
|
"learning_rate": 3.071202290399634e-06, |
|
"loss": 6.4296, |
|
"step": 214500 |
|
}, |
|
{ |
|
"epoch": 3.52, |
|
"learning_rate": 3.05426146470379e-06, |
|
"loss": 6.4261, |
|
"step": 215000 |
|
}, |
|
{ |
|
"epoch": 3.53, |
|
"learning_rate": 3.0373206390079457e-06, |
|
"loss": 6.4337, |
|
"step": 215500 |
|
}, |
|
{ |
|
"epoch": 3.54, |
|
"learning_rate": 3.020379813312101e-06, |
|
"loss": 6.4286, |
|
"step": 216000 |
|
}, |
|
{ |
|
"epoch": 3.55, |
|
"learning_rate": 3.0034389876162567e-06, |
|
"loss": 6.4267, |
|
"step": 216500 |
|
}, |
|
{ |
|
"epoch": 3.56, |
|
"learning_rate": 2.986498161920412e-06, |
|
"loss": 6.431, |
|
"step": 217000 |
|
}, |
|
{ |
|
"epoch": 3.56, |
|
"learning_rate": 2.9695573362245676e-06, |
|
"loss": 6.4325, |
|
"step": 217500 |
|
}, |
|
{ |
|
"epoch": 3.57, |
|
"learning_rate": 2.9526165105287238e-06, |
|
"loss": 6.4253, |
|
"step": 218000 |
|
}, |
|
{ |
|
"epoch": 3.58, |
|
"learning_rate": 2.9356756848328786e-06, |
|
"loss": 6.426, |
|
"step": 218500 |
|
}, |
|
{ |
|
"epoch": 3.59, |
|
"learning_rate": 2.9187348591370347e-06, |
|
"loss": 6.4327, |
|
"step": 219000 |
|
}, |
|
{ |
|
"epoch": 3.6, |
|
"learning_rate": 2.9017940334411904e-06, |
|
"loss": 6.4291, |
|
"step": 219500 |
|
}, |
|
{ |
|
"epoch": 3.6, |
|
"learning_rate": 2.8848532077453457e-06, |
|
"loss": 6.4273, |
|
"step": 220000 |
|
}, |
|
{ |
|
"epoch": 3.6, |
|
"eval_accuracy": 0.1262452488408161, |
|
"eval_loss": 6.426393508911133, |
|
"eval_runtime": 606.2126, |
|
"eval_samples_per_second": 508.66, |
|
"eval_steps_per_second": 5.3, |
|
"step": 220000 |
|
}, |
|
{ |
|
"epoch": 3.61, |
|
"learning_rate": 2.8679123820495014e-06, |
|
"loss": 6.4263, |
|
"step": 220500 |
|
}, |
|
{ |
|
"epoch": 3.62, |
|
"learning_rate": 2.8509715563536566e-06, |
|
"loss": 6.4267, |
|
"step": 221000 |
|
}, |
|
{ |
|
"epoch": 3.63, |
|
"learning_rate": 2.8340307306578123e-06, |
|
"loss": 6.425, |
|
"step": 221500 |
|
}, |
|
{ |
|
"epoch": 3.64, |
|
"learning_rate": 2.817089904961968e-06, |
|
"loss": 6.4224, |
|
"step": 222000 |
|
}, |
|
{ |
|
"epoch": 3.65, |
|
"learning_rate": 2.8001490792661233e-06, |
|
"loss": 6.4217, |
|
"step": 222500 |
|
}, |
|
{ |
|
"epoch": 3.65, |
|
"learning_rate": 2.7832082535702794e-06, |
|
"loss": 6.4258, |
|
"step": 223000 |
|
}, |
|
{ |
|
"epoch": 3.66, |
|
"learning_rate": 2.766267427874435e-06, |
|
"loss": 6.4253, |
|
"step": 223500 |
|
}, |
|
{ |
|
"epoch": 3.67, |
|
"learning_rate": 2.7493266021785904e-06, |
|
"loss": 6.4266, |
|
"step": 224000 |
|
}, |
|
{ |
|
"epoch": 3.68, |
|
"learning_rate": 2.732385776482746e-06, |
|
"loss": 6.4271, |
|
"step": 224500 |
|
}, |
|
{ |
|
"epoch": 3.69, |
|
"learning_rate": 2.7154449507869017e-06, |
|
"loss": 6.4294, |
|
"step": 225000 |
|
}, |
|
{ |
|
"epoch": 3.69, |
|
"learning_rate": 2.698504125091057e-06, |
|
"loss": 6.4243, |
|
"step": 225500 |
|
}, |
|
{ |
|
"epoch": 3.7, |
|
"learning_rate": 2.6815632993952127e-06, |
|
"loss": 6.4242, |
|
"step": 226000 |
|
}, |
|
{ |
|
"epoch": 3.71, |
|
"learning_rate": 2.664622473699368e-06, |
|
"loss": 6.4242, |
|
"step": 226500 |
|
}, |
|
{ |
|
"epoch": 3.72, |
|
"learning_rate": 2.647681648003524e-06, |
|
"loss": 6.4262, |
|
"step": 227000 |
|
}, |
|
{ |
|
"epoch": 3.73, |
|
"learning_rate": 2.6307408223076798e-06, |
|
"loss": 6.4197, |
|
"step": 227500 |
|
}, |
|
{ |
|
"epoch": 3.74, |
|
"learning_rate": 2.613799996611835e-06, |
|
"loss": 6.4246, |
|
"step": 228000 |
|
}, |
|
{ |
|
"epoch": 3.74, |
|
"learning_rate": 2.5968591709159907e-06, |
|
"loss": 6.4237, |
|
"step": 228500 |
|
}, |
|
{ |
|
"epoch": 3.75, |
|
"learning_rate": 2.5799183452201464e-06, |
|
"loss": 6.421, |
|
"step": 229000 |
|
}, |
|
{ |
|
"epoch": 3.76, |
|
"learning_rate": 2.5629775195243017e-06, |
|
"loss": 6.4207, |
|
"step": 229500 |
|
}, |
|
{ |
|
"epoch": 3.77, |
|
"learning_rate": 2.5460366938284574e-06, |
|
"loss": 6.4153, |
|
"step": 230000 |
|
}, |
|
{ |
|
"epoch": 3.77, |
|
"eval_accuracy": 0.12687429199787553, |
|
"eval_loss": 6.421925067901611, |
|
"eval_runtime": 603.0076, |
|
"eval_samples_per_second": 511.363, |
|
"eval_steps_per_second": 5.328, |
|
"step": 230000 |
|
}, |
|
{ |
|
"epoch": 3.78, |
|
"learning_rate": 2.5290958681326126e-06, |
|
"loss": 6.4211, |
|
"step": 230500 |
|
}, |
|
{ |
|
"epoch": 3.79, |
|
"learning_rate": 2.5121550424367688e-06, |
|
"loss": 6.4257, |
|
"step": 231000 |
|
}, |
|
{ |
|
"epoch": 3.79, |
|
"learning_rate": 2.495214216740924e-06, |
|
"loss": 6.4207, |
|
"step": 231500 |
|
}, |
|
{ |
|
"epoch": 3.8, |
|
"learning_rate": 2.4782733910450797e-06, |
|
"loss": 6.4207, |
|
"step": 232000 |
|
}, |
|
{ |
|
"epoch": 3.81, |
|
"learning_rate": 2.4613325653492354e-06, |
|
"loss": 6.4252, |
|
"step": 232500 |
|
}, |
|
{ |
|
"epoch": 3.82, |
|
"learning_rate": 2.4443917396533907e-06, |
|
"loss": 6.4226, |
|
"step": 233000 |
|
}, |
|
{ |
|
"epoch": 3.83, |
|
"learning_rate": 2.4274509139575464e-06, |
|
"loss": 6.4271, |
|
"step": 233500 |
|
}, |
|
{ |
|
"epoch": 3.83, |
|
"learning_rate": 2.410510088261702e-06, |
|
"loss": 6.4204, |
|
"step": 234000 |
|
}, |
|
{ |
|
"epoch": 3.84, |
|
"learning_rate": 2.3935692625658577e-06, |
|
"loss": 6.423, |
|
"step": 234500 |
|
}, |
|
{ |
|
"epoch": 3.85, |
|
"learning_rate": 2.376628436870013e-06, |
|
"loss": 6.4211, |
|
"step": 235000 |
|
}, |
|
{ |
|
"epoch": 3.86, |
|
"learning_rate": 2.3596876111741687e-06, |
|
"loss": 6.4206, |
|
"step": 235500 |
|
}, |
|
{ |
|
"epoch": 3.87, |
|
"learning_rate": 2.3427467854783244e-06, |
|
"loss": 6.4194, |
|
"step": 236000 |
|
}, |
|
{ |
|
"epoch": 3.88, |
|
"learning_rate": 2.32580595978248e-06, |
|
"loss": 6.4166, |
|
"step": 236500 |
|
}, |
|
{ |
|
"epoch": 3.88, |
|
"learning_rate": 2.3088651340866354e-06, |
|
"loss": 6.4259, |
|
"step": 237000 |
|
}, |
|
{ |
|
"epoch": 3.89, |
|
"learning_rate": 2.291924308390791e-06, |
|
"loss": 6.4142, |
|
"step": 237500 |
|
}, |
|
{ |
|
"epoch": 3.9, |
|
"learning_rate": 2.2749834826949467e-06, |
|
"loss": 6.4215, |
|
"step": 238000 |
|
}, |
|
{ |
|
"epoch": 3.91, |
|
"learning_rate": 2.2580426569991024e-06, |
|
"loss": 6.4135, |
|
"step": 238500 |
|
}, |
|
{ |
|
"epoch": 3.92, |
|
"learning_rate": 2.2411018313032577e-06, |
|
"loss": 6.4174, |
|
"step": 239000 |
|
}, |
|
{ |
|
"epoch": 3.92, |
|
"learning_rate": 2.2241610056074134e-06, |
|
"loss": 6.4231, |
|
"step": 239500 |
|
}, |
|
{ |
|
"epoch": 3.93, |
|
"learning_rate": 2.207220179911569e-06, |
|
"loss": 6.4188, |
|
"step": 240000 |
|
}, |
|
{ |
|
"epoch": 3.93, |
|
"eval_accuracy": 0.12740316889553732, |
|
"eval_loss": 6.418206691741943, |
|
"eval_runtime": 604.087, |
|
"eval_samples_per_second": 510.45, |
|
"eval_steps_per_second": 5.319, |
|
"step": 240000 |
|
}, |
|
{ |
|
"epoch": 3.94, |
|
"learning_rate": 2.1902793542157248e-06, |
|
"loss": 6.4167, |
|
"step": 240500 |
|
}, |
|
{ |
|
"epoch": 3.95, |
|
"learning_rate": 2.17333852851988e-06, |
|
"loss": 6.4203, |
|
"step": 241000 |
|
}, |
|
{ |
|
"epoch": 3.96, |
|
"learning_rate": 2.1563977028240357e-06, |
|
"loss": 6.417, |
|
"step": 241500 |
|
}, |
|
{ |
|
"epoch": 3.97, |
|
"learning_rate": 2.1394568771281914e-06, |
|
"loss": 6.4171, |
|
"step": 242000 |
|
}, |
|
{ |
|
"epoch": 3.97, |
|
"learning_rate": 2.122516051432347e-06, |
|
"loss": 6.4182, |
|
"step": 242500 |
|
}, |
|
{ |
|
"epoch": 3.98, |
|
"learning_rate": 2.1055752257365024e-06, |
|
"loss": 6.4181, |
|
"step": 243000 |
|
}, |
|
{ |
|
"epoch": 3.99, |
|
"learning_rate": 2.088634400040658e-06, |
|
"loss": 6.4159, |
|
"step": 243500 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 2.0716935743448138e-06, |
|
"loss": 6.4162, |
|
"step": 244000 |
|
}, |
|
{ |
|
"epoch": 4.01, |
|
"learning_rate": 2.0547527486489694e-06, |
|
"loss": 6.4137, |
|
"step": 244500 |
|
}, |
|
{ |
|
"epoch": 4.01, |
|
"learning_rate": 2.0378119229531247e-06, |
|
"loss": 6.4163, |
|
"step": 245000 |
|
}, |
|
{ |
|
"epoch": 4.02, |
|
"learning_rate": 2.0208710972572804e-06, |
|
"loss": 6.4211, |
|
"step": 245500 |
|
}, |
|
{ |
|
"epoch": 4.03, |
|
"learning_rate": 2.003930271561436e-06, |
|
"loss": 6.4166, |
|
"step": 246000 |
|
}, |
|
{ |
|
"epoch": 4.04, |
|
"learning_rate": 1.986989445865592e-06, |
|
"loss": 6.416, |
|
"step": 246500 |
|
}, |
|
{ |
|
"epoch": 4.05, |
|
"learning_rate": 1.970048620169747e-06, |
|
"loss": 6.4162, |
|
"step": 247000 |
|
}, |
|
{ |
|
"epoch": 4.06, |
|
"learning_rate": 1.9531077944739027e-06, |
|
"loss": 6.4138, |
|
"step": 247500 |
|
}, |
|
{ |
|
"epoch": 4.06, |
|
"learning_rate": 1.9361669687780584e-06, |
|
"loss": 6.411, |
|
"step": 248000 |
|
}, |
|
{ |
|
"epoch": 4.07, |
|
"learning_rate": 1.919226143082214e-06, |
|
"loss": 6.4124, |
|
"step": 248500 |
|
}, |
|
{ |
|
"epoch": 4.08, |
|
"learning_rate": 1.9022853173863696e-06, |
|
"loss": 6.4154, |
|
"step": 249000 |
|
}, |
|
{ |
|
"epoch": 4.09, |
|
"learning_rate": 1.885344491690525e-06, |
|
"loss": 6.4149, |
|
"step": 249500 |
|
}, |
|
{ |
|
"epoch": 4.1, |
|
"learning_rate": 1.8684036659946808e-06, |
|
"loss": 6.4128, |
|
"step": 250000 |
|
}, |
|
{ |
|
"epoch": 4.1, |
|
"eval_accuracy": 0.12784806575663238, |
|
"eval_loss": 6.414950370788574, |
|
"eval_runtime": 607.4915, |
|
"eval_samples_per_second": 507.589, |
|
"eval_steps_per_second": 5.289, |
|
"step": 250000 |
|
}, |
|
{ |
|
"epoch": 4.1, |
|
"learning_rate": 1.8514628402988363e-06, |
|
"loss": 6.414, |
|
"step": 250500 |
|
}, |
|
{ |
|
"epoch": 4.11, |
|
"learning_rate": 1.8345220146029917e-06, |
|
"loss": 6.41, |
|
"step": 251000 |
|
}, |
|
{ |
|
"epoch": 4.12, |
|
"learning_rate": 1.8175811889071474e-06, |
|
"loss": 6.4131, |
|
"step": 251500 |
|
}, |
|
{ |
|
"epoch": 4.13, |
|
"learning_rate": 1.8006403632113031e-06, |
|
"loss": 6.4146, |
|
"step": 252000 |
|
}, |
|
{ |
|
"epoch": 4.14, |
|
"learning_rate": 1.7836995375154586e-06, |
|
"loss": 6.4126, |
|
"step": 252500 |
|
}, |
|
{ |
|
"epoch": 4.15, |
|
"learning_rate": 1.766758711819614e-06, |
|
"loss": 6.4164, |
|
"step": 253000 |
|
}, |
|
{ |
|
"epoch": 4.15, |
|
"learning_rate": 1.7498178861237698e-06, |
|
"loss": 6.4147, |
|
"step": 253500 |
|
}, |
|
{ |
|
"epoch": 4.16, |
|
"learning_rate": 1.7328770604279255e-06, |
|
"loss": 6.4068, |
|
"step": 254000 |
|
}, |
|
{ |
|
"epoch": 4.17, |
|
"learning_rate": 1.715936234732081e-06, |
|
"loss": 6.4117, |
|
"step": 254500 |
|
}, |
|
{ |
|
"epoch": 4.18, |
|
"learning_rate": 1.6989954090362364e-06, |
|
"loss": 6.415, |
|
"step": 255000 |
|
}, |
|
{ |
|
"epoch": 4.19, |
|
"learning_rate": 1.6820545833403921e-06, |
|
"loss": 6.4131, |
|
"step": 255500 |
|
}, |
|
{ |
|
"epoch": 4.19, |
|
"learning_rate": 1.6651137576445478e-06, |
|
"loss": 6.4125, |
|
"step": 256000 |
|
}, |
|
{ |
|
"epoch": 4.2, |
|
"learning_rate": 1.6481729319487033e-06, |
|
"loss": 6.4145, |
|
"step": 256500 |
|
}, |
|
{ |
|
"epoch": 4.21, |
|
"learning_rate": 1.6312321062528588e-06, |
|
"loss": 6.4155, |
|
"step": 257000 |
|
}, |
|
{ |
|
"epoch": 4.22, |
|
"learning_rate": 1.6142912805570147e-06, |
|
"loss": 6.4066, |
|
"step": 257500 |
|
}, |
|
{ |
|
"epoch": 4.23, |
|
"learning_rate": 1.5973504548611701e-06, |
|
"loss": 6.4129, |
|
"step": 258000 |
|
}, |
|
{ |
|
"epoch": 4.24, |
|
"learning_rate": 1.5804096291653256e-06, |
|
"loss": 6.4079, |
|
"step": 258500 |
|
}, |
|
{ |
|
"epoch": 4.24, |
|
"learning_rate": 1.563468803469481e-06, |
|
"loss": 6.4124, |
|
"step": 259000 |
|
}, |
|
{ |
|
"epoch": 4.25, |
|
"learning_rate": 1.546527977773637e-06, |
|
"loss": 6.4124, |
|
"step": 259500 |
|
}, |
|
{ |
|
"epoch": 4.26, |
|
"learning_rate": 1.5295871520777925e-06, |
|
"loss": 6.4189, |
|
"step": 260000 |
|
}, |
|
{ |
|
"epoch": 4.26, |
|
"eval_accuracy": 0.12798065025917943, |
|
"eval_loss": 6.412052154541016, |
|
"eval_runtime": 604.5339, |
|
"eval_samples_per_second": 510.072, |
|
"eval_steps_per_second": 5.315, |
|
"step": 260000 |
|
}, |
|
{ |
|
"epoch": 4.27, |
|
"learning_rate": 1.512646326381948e-06, |
|
"loss": 6.4115, |
|
"step": 260500 |
|
}, |
|
{ |
|
"epoch": 4.28, |
|
"learning_rate": 1.4957055006861034e-06, |
|
"loss": 6.4086, |
|
"step": 261000 |
|
}, |
|
{ |
|
"epoch": 4.28, |
|
"learning_rate": 1.4787646749902593e-06, |
|
"loss": 6.4153, |
|
"step": 261500 |
|
}, |
|
{ |
|
"epoch": 4.29, |
|
"learning_rate": 1.4618238492944148e-06, |
|
"loss": 6.4042, |
|
"step": 262000 |
|
}, |
|
{ |
|
"epoch": 4.3, |
|
"learning_rate": 1.4448830235985703e-06, |
|
"loss": 6.4188, |
|
"step": 262500 |
|
}, |
|
{ |
|
"epoch": 4.31, |
|
"learning_rate": 1.4279421979027258e-06, |
|
"loss": 6.4107, |
|
"step": 263000 |
|
}, |
|
{ |
|
"epoch": 4.32, |
|
"learning_rate": 1.4110013722068817e-06, |
|
"loss": 6.4104, |
|
"step": 263500 |
|
}, |
|
{ |
|
"epoch": 4.33, |
|
"learning_rate": 1.3940605465110372e-06, |
|
"loss": 6.4108, |
|
"step": 264000 |
|
}, |
|
{ |
|
"epoch": 4.33, |
|
"learning_rate": 1.3771197208151926e-06, |
|
"loss": 6.4096, |
|
"step": 264500 |
|
}, |
|
{ |
|
"epoch": 4.34, |
|
"learning_rate": 1.3601788951193481e-06, |
|
"loss": 6.4139, |
|
"step": 265000 |
|
}, |
|
{ |
|
"epoch": 4.35, |
|
"learning_rate": 1.343238069423504e-06, |
|
"loss": 6.4142, |
|
"step": 265500 |
|
}, |
|
{ |
|
"epoch": 4.36, |
|
"learning_rate": 1.3262972437276595e-06, |
|
"loss": 6.4186, |
|
"step": 266000 |
|
}, |
|
{ |
|
"epoch": 4.37, |
|
"learning_rate": 1.309356418031815e-06, |
|
"loss": 6.4126, |
|
"step": 266500 |
|
}, |
|
{ |
|
"epoch": 4.37, |
|
"learning_rate": 1.2924155923359705e-06, |
|
"loss": 6.408, |
|
"step": 267000 |
|
}, |
|
{ |
|
"epoch": 4.38, |
|
"learning_rate": 1.2754747666401264e-06, |
|
"loss": 6.4109, |
|
"step": 267500 |
|
}, |
|
{ |
|
"epoch": 4.39, |
|
"learning_rate": 1.2585339409442818e-06, |
|
"loss": 6.4117, |
|
"step": 268000 |
|
}, |
|
{ |
|
"epoch": 4.4, |
|
"learning_rate": 1.2415931152484373e-06, |
|
"loss": 6.4088, |
|
"step": 268500 |
|
}, |
|
{ |
|
"epoch": 4.41, |
|
"learning_rate": 1.224652289552593e-06, |
|
"loss": 6.4152, |
|
"step": 269000 |
|
}, |
|
{ |
|
"epoch": 4.42, |
|
"learning_rate": 1.2077114638567485e-06, |
|
"loss": 6.41, |
|
"step": 269500 |
|
}, |
|
{ |
|
"epoch": 4.42, |
|
"learning_rate": 1.1907706381609042e-06, |
|
"loss": 6.4102, |
|
"step": 270000 |
|
}, |
|
{ |
|
"epoch": 4.42, |
|
"eval_accuracy": 0.12817913105652246, |
|
"eval_loss": 6.411165237426758, |
|
"eval_runtime": 602.0627, |
|
"eval_samples_per_second": 512.166, |
|
"eval_steps_per_second": 5.337, |
|
"step": 270000 |
|
}, |
|
{ |
|
"epoch": 4.43, |
|
"learning_rate": 1.1738298124650597e-06, |
|
"loss": 6.4107, |
|
"step": 270500 |
|
}, |
|
{ |
|
"epoch": 4.44, |
|
"learning_rate": 1.1568889867692153e-06, |
|
"loss": 6.4099, |
|
"step": 271000 |
|
}, |
|
{ |
|
"epoch": 4.45, |
|
"learning_rate": 1.1399481610733708e-06, |
|
"loss": 6.4162, |
|
"step": 271500 |
|
}, |
|
{ |
|
"epoch": 4.46, |
|
"learning_rate": 1.1230073353775265e-06, |
|
"loss": 6.4103, |
|
"step": 272000 |
|
}, |
|
{ |
|
"epoch": 4.47, |
|
"learning_rate": 1.106066509681682e-06, |
|
"loss": 6.4159, |
|
"step": 272500 |
|
}, |
|
{ |
|
"epoch": 4.47, |
|
"learning_rate": 1.0891256839858377e-06, |
|
"loss": 6.4052, |
|
"step": 273000 |
|
}, |
|
{ |
|
"epoch": 4.48, |
|
"learning_rate": 1.0721848582899932e-06, |
|
"loss": 6.4079, |
|
"step": 273500 |
|
}, |
|
{ |
|
"epoch": 4.49, |
|
"learning_rate": 1.0552440325941489e-06, |
|
"loss": 6.4071, |
|
"step": 274000 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"learning_rate": 1.0383032068983043e-06, |
|
"loss": 6.4131, |
|
"step": 274500 |
|
}, |
|
{ |
|
"epoch": 4.51, |
|
"learning_rate": 1.02136238120246e-06, |
|
"loss": 6.4077, |
|
"step": 275000 |
|
}, |
|
{ |
|
"epoch": 4.51, |
|
"learning_rate": 1.0044215555066155e-06, |
|
"loss": 6.4113, |
|
"step": 275500 |
|
}, |
|
{ |
|
"epoch": 4.52, |
|
"learning_rate": 9.87480729810771e-07, |
|
"loss": 6.4141, |
|
"step": 276000 |
|
}, |
|
{ |
|
"epoch": 4.53, |
|
"learning_rate": 9.705399041149267e-07, |
|
"loss": 6.4081, |
|
"step": 276500 |
|
}, |
|
{ |
|
"epoch": 4.54, |
|
"learning_rate": 9.535990784190823e-07, |
|
"loss": 6.4071, |
|
"step": 277000 |
|
}, |
|
{ |
|
"epoch": 4.55, |
|
"learning_rate": 9.366582527232377e-07, |
|
"loss": 6.4078, |
|
"step": 277500 |
|
}, |
|
{ |
|
"epoch": 4.56, |
|
"learning_rate": 9.197174270273934e-07, |
|
"loss": 6.4117, |
|
"step": 278000 |
|
}, |
|
{ |
|
"epoch": 4.56, |
|
"learning_rate": 9.027766013315489e-07, |
|
"loss": 6.4077, |
|
"step": 278500 |
|
}, |
|
{ |
|
"epoch": 4.57, |
|
"learning_rate": 8.858357756357046e-07, |
|
"loss": 6.4106, |
|
"step": 279000 |
|
}, |
|
{ |
|
"epoch": 4.58, |
|
"learning_rate": 8.688949499398601e-07, |
|
"loss": 6.4134, |
|
"step": 279500 |
|
}, |
|
{ |
|
"epoch": 4.59, |
|
"learning_rate": 8.519541242440158e-07, |
|
"loss": 6.4105, |
|
"step": 280000 |
|
}, |
|
{ |
|
"epoch": 4.59, |
|
"eval_accuracy": 0.1284733583672517, |
|
"eval_loss": 6.408677577972412, |
|
"eval_runtime": 603.8755, |
|
"eval_samples_per_second": 510.628, |
|
"eval_steps_per_second": 5.321, |
|
"step": 280000 |
|
}, |
|
{ |
|
"epoch": 4.6, |
|
"learning_rate": 8.350132985481713e-07, |
|
"loss": 6.4046, |
|
"step": 280500 |
|
}, |
|
{ |
|
"epoch": 4.6, |
|
"learning_rate": 8.180724728523269e-07, |
|
"loss": 6.4109, |
|
"step": 281000 |
|
}, |
|
{ |
|
"epoch": 4.61, |
|
"learning_rate": 8.011316471564824e-07, |
|
"loss": 6.4117, |
|
"step": 281500 |
|
}, |
|
{ |
|
"epoch": 4.62, |
|
"learning_rate": 7.841908214606381e-07, |
|
"loss": 6.4107, |
|
"step": 282000 |
|
}, |
|
{ |
|
"epoch": 4.63, |
|
"learning_rate": 7.672499957647936e-07, |
|
"loss": 6.415, |
|
"step": 282500 |
|
}, |
|
{ |
|
"epoch": 4.64, |
|
"learning_rate": 7.503091700689493e-07, |
|
"loss": 6.4066, |
|
"step": 283000 |
|
}, |
|
{ |
|
"epoch": 4.65, |
|
"learning_rate": 7.333683443731048e-07, |
|
"loss": 6.4075, |
|
"step": 283500 |
|
}, |
|
{ |
|
"epoch": 4.65, |
|
"learning_rate": 7.164275186772605e-07, |
|
"loss": 6.4086, |
|
"step": 284000 |
|
}, |
|
{ |
|
"epoch": 4.66, |
|
"learning_rate": 6.994866929814159e-07, |
|
"loss": 6.4054, |
|
"step": 284500 |
|
}, |
|
{ |
|
"epoch": 4.67, |
|
"learning_rate": 6.825458672855715e-07, |
|
"loss": 6.4022, |
|
"step": 285000 |
|
}, |
|
{ |
|
"epoch": 4.68, |
|
"learning_rate": 6.656050415897271e-07, |
|
"loss": 6.406, |
|
"step": 285500 |
|
}, |
|
{ |
|
"epoch": 4.69, |
|
"learning_rate": 6.486642158938827e-07, |
|
"loss": 6.4032, |
|
"step": 286000 |
|
}, |
|
{ |
|
"epoch": 4.69, |
|
"learning_rate": 6.317233901980383e-07, |
|
"loss": 6.4105, |
|
"step": 286500 |
|
}, |
|
{ |
|
"epoch": 4.7, |
|
"learning_rate": 6.147825645021939e-07, |
|
"loss": 6.4073, |
|
"step": 287000 |
|
}, |
|
{ |
|
"epoch": 4.71, |
|
"learning_rate": 5.978417388063494e-07, |
|
"loss": 6.4058, |
|
"step": 287500 |
|
}, |
|
{ |
|
"epoch": 4.72, |
|
"learning_rate": 5.80900913110505e-07, |
|
"loss": 6.4123, |
|
"step": 288000 |
|
}, |
|
{ |
|
"epoch": 4.73, |
|
"learning_rate": 5.639600874146606e-07, |
|
"loss": 6.4132, |
|
"step": 288500 |
|
}, |
|
{ |
|
"epoch": 4.74, |
|
"learning_rate": 5.470192617188162e-07, |
|
"loss": 6.4072, |
|
"step": 289000 |
|
}, |
|
{ |
|
"epoch": 4.74, |
|
"learning_rate": 5.300784360229718e-07, |
|
"loss": 6.4085, |
|
"step": 289500 |
|
}, |
|
{ |
|
"epoch": 4.75, |
|
"learning_rate": 5.131376103271274e-07, |
|
"loss": 6.4065, |
|
"step": 290000 |
|
}, |
|
{ |
|
"epoch": 4.75, |
|
"eval_accuracy": 0.12868705268687342, |
|
"eval_loss": 6.406748294830322, |
|
"eval_runtime": 608.2162, |
|
"eval_samples_per_second": 506.984, |
|
"eval_steps_per_second": 5.283, |
|
"step": 290000 |
|
}, |
|
{ |
|
"epoch": 4.76, |
|
"learning_rate": 4.96196784631283e-07, |
|
"loss": 6.409, |
|
"step": 290500 |
|
}, |
|
{ |
|
"epoch": 4.77, |
|
"learning_rate": 4.792559589354385e-07, |
|
"loss": 6.4061, |
|
"step": 291000 |
|
}, |
|
{ |
|
"epoch": 4.78, |
|
"learning_rate": 4.623151332395941e-07, |
|
"loss": 6.4078, |
|
"step": 291500 |
|
}, |
|
{ |
|
"epoch": 4.78, |
|
"learning_rate": 4.453743075437497e-07, |
|
"loss": 6.4084, |
|
"step": 292000 |
|
}, |
|
{ |
|
"epoch": 4.79, |
|
"learning_rate": 4.284334818479053e-07, |
|
"loss": 6.412, |
|
"step": 292500 |
|
}, |
|
{ |
|
"epoch": 4.8, |
|
"learning_rate": 4.114926561520609e-07, |
|
"loss": 6.4085, |
|
"step": 293000 |
|
}, |
|
{ |
|
"epoch": 4.81, |
|
"learning_rate": 3.9455183045621646e-07, |
|
"loss": 6.4022, |
|
"step": 293500 |
|
}, |
|
{ |
|
"epoch": 4.82, |
|
"learning_rate": 3.7761100476037205e-07, |
|
"loss": 6.4065, |
|
"step": 294000 |
|
}, |
|
{ |
|
"epoch": 4.83, |
|
"learning_rate": 3.6067017906452763e-07, |
|
"loss": 6.4114, |
|
"step": 294500 |
|
}, |
|
{ |
|
"epoch": 4.83, |
|
"learning_rate": 3.437293533686832e-07, |
|
"loss": 6.4085, |
|
"step": 295000 |
|
}, |
|
{ |
|
"epoch": 4.84, |
|
"learning_rate": 3.267885276728388e-07, |
|
"loss": 6.4117, |
|
"step": 295500 |
|
}, |
|
{ |
|
"epoch": 4.85, |
|
"learning_rate": 3.098477019769944e-07, |
|
"loss": 6.4066, |
|
"step": 296000 |
|
}, |
|
{ |
|
"epoch": 4.86, |
|
"learning_rate": 2.9290687628114997e-07, |
|
"loss": 6.4074, |
|
"step": 296500 |
|
}, |
|
{ |
|
"epoch": 4.87, |
|
"learning_rate": 2.7596605058530556e-07, |
|
"loss": 6.4059, |
|
"step": 297000 |
|
}, |
|
{ |
|
"epoch": 4.87, |
|
"learning_rate": 2.5902522488946114e-07, |
|
"loss": 6.4027, |
|
"step": 297500 |
|
}, |
|
{ |
|
"epoch": 4.88, |
|
"learning_rate": 2.4208439919361673e-07, |
|
"loss": 6.4034, |
|
"step": 298000 |
|
}, |
|
{ |
|
"epoch": 4.89, |
|
"learning_rate": 2.2514357349777229e-07, |
|
"loss": 6.4053, |
|
"step": 298500 |
|
}, |
|
{ |
|
"epoch": 4.9, |
|
"learning_rate": 2.0820274780192787e-07, |
|
"loss": 6.4066, |
|
"step": 299000 |
|
}, |
|
{ |
|
"epoch": 4.91, |
|
"learning_rate": 1.9126192210608346e-07, |
|
"loss": 6.4098, |
|
"step": 299500 |
|
}, |
|
{ |
|
"epoch": 4.92, |
|
"learning_rate": 1.7432109641023904e-07, |
|
"loss": 6.4082, |
|
"step": 300000 |
|
}, |
|
{ |
|
"epoch": 4.92, |
|
"eval_accuracy": 0.12851127491212921, |
|
"eval_loss": 6.407046794891357, |
|
"eval_runtime": 604.7864, |
|
"eval_samples_per_second": 509.859, |
|
"eval_steps_per_second": 5.313, |
|
"step": 300000 |
|
}, |
|
{ |
|
"epoch": 4.92, |
|
"learning_rate": 1.5738027071439465e-07, |
|
"loss": 6.4019, |
|
"step": 300500 |
|
}, |
|
{ |
|
"epoch": 4.93, |
|
"learning_rate": 1.404394450185502e-07, |
|
"loss": 6.4073, |
|
"step": 301000 |
|
}, |
|
{ |
|
"epoch": 4.94, |
|
"learning_rate": 1.234986193227058e-07, |
|
"loss": 6.403, |
|
"step": 301500 |
|
}, |
|
{ |
|
"epoch": 4.95, |
|
"learning_rate": 1.0655779362686138e-07, |
|
"loss": 6.4098, |
|
"step": 302000 |
|
}, |
|
{ |
|
"epoch": 4.96, |
|
"learning_rate": 8.961696793101697e-08, |
|
"loss": 6.4055, |
|
"step": 302500 |
|
}, |
|
{ |
|
"epoch": 4.96, |
|
"learning_rate": 7.267614223517255e-08, |
|
"loss": 6.409, |
|
"step": 303000 |
|
}, |
|
{ |
|
"epoch": 4.97, |
|
"learning_rate": 5.5735316539328136e-08, |
|
"loss": 6.4094, |
|
"step": 303500 |
|
}, |
|
{ |
|
"epoch": 4.98, |
|
"learning_rate": 3.8794490843483714e-08, |
|
"loss": 6.4038, |
|
"step": 304000 |
|
}, |
|
{ |
|
"epoch": 4.99, |
|
"learning_rate": 2.18536651476393e-08, |
|
"loss": 6.4017, |
|
"step": 304500 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 4.912839451794881e-09, |
|
"loss": 6.4089, |
|
"step": 305000 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"step": 305145, |
|
"total_flos": 6.419869954421555e+16, |
|
"train_loss": 6.680038520005282, |
|
"train_runtime": 123390.5046, |
|
"train_samples_per_second": 237.407, |
|
"train_steps_per_second": 2.473 |
|
} |
|
], |
|
"logging_steps": 500, |
|
"max_steps": 305145, |
|
"num_train_epochs": 5, |
|
"save_steps": 10000, |
|
"total_flos": 6.419869954421555e+16, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|