{
  "best_metric": 0.899671052631579,
  "best_model_checkpoint": "/scratch/mrahma45/pixel/finetuned_models/canine/canine-base-finetuned-pos-ud-Coptic-Scriptorium/checkpoint-7500",
  "epoch": 256.4102564102564,
  "global_step": 10000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 2.56, "learning_rate": 7.92e-05, "loss": 1.66, "step": 100 },
    { "epoch": 5.13, "learning_rate": 7.946845637583894e-05, "loss": 0.6182, "step": 200 },
    { "epoch": 7.69, "learning_rate": 7.893154362416109e-05, "loss": 0.382, "step": 300 },
    { "epoch": 10.26, "learning_rate": 7.839463087248322e-05, "loss": 0.2534, "step": 400 },
    { "epoch": 12.82, "learning_rate": 7.785771812080537e-05, "loss": 0.1725, "step": 500 },
    { "epoch": 12.82, "eval_accuracy": 0.8799342105263158, "eval_loss": 0.4891912639141083, "eval_runtime": 1.5366, "eval_samples_per_second": 247.947, "eval_steps_per_second": 31.237, "step": 500 },
    { "epoch": 15.38, "learning_rate": 7.732080536912752e-05, "loss": 0.1148, "step": 600 },
    { "epoch": 17.95, "learning_rate": 7.678389261744967e-05, "loss": 0.0807, "step": 700 },
    { "epoch": 20.51, "learning_rate": 7.624697986577182e-05, "loss": 0.0562, "step": 800 },
    { "epoch": 23.08, "learning_rate": 7.571006711409396e-05, "loss": 0.0443, "step": 900 },
    { "epoch": 25.64, "learning_rate": 7.517315436241611e-05, "loss": 0.0372, "step": 1000 },
    { "epoch": 25.64, "eval_accuracy": 0.8819444444444444, "eval_loss": 0.6423949599266052, "eval_runtime": 1.5561, "eval_samples_per_second": 244.836, "eval_steps_per_second": 30.845, "step": 1000 },
    { "epoch": 28.21, "learning_rate": 7.463624161073826e-05, "loss": 0.0289, "step": 1100 },
    { "epoch": 30.77, "learning_rate": 7.409932885906041e-05, "loss": 0.0236, "step": 1200 },
    { "epoch": 33.33, "learning_rate": 7.356241610738256e-05, "loss": 0.0185, "step": 1300 },
    { "epoch": 35.9, "learning_rate": 7.30255033557047e-05, "loss": 0.016, "step": 1400 },
    { "epoch": 38.46, "learning_rate": 7.248859060402685e-05, "loss": 0.0161, "step": 1500 },
    { "epoch": 38.46, "eval_accuracy": 0.8866959064327485, "eval_loss": 0.7260786890983582, "eval_runtime": 1.5383, "eval_samples_per_second": 247.68, "eval_steps_per_second": 31.204, "step": 1500 },
    { "epoch": 41.03, "learning_rate": 7.1951677852349e-05, "loss": 0.0142, "step": 1600 },
    { "epoch": 43.59, "learning_rate": 7.141476510067115e-05, "loss": 0.0116, "step": 1700 },
    { "epoch": 46.15, "learning_rate": 7.08778523489933e-05, "loss": 0.0117, "step": 1800 },
    { "epoch": 48.72, "learning_rate": 7.034093959731545e-05, "loss": 0.0109, "step": 1900 },
    { "epoch": 51.28, "learning_rate": 6.98040268456376e-05, "loss": 0.0096, "step": 2000 },
    { "epoch": 51.28, "eval_accuracy": 0.8880665204678363, "eval_loss": 0.8069060444831848, "eval_runtime": 1.5297, "eval_samples_per_second": 249.069, "eval_steps_per_second": 31.379, "step": 2000 },
    { "epoch": 53.85, "learning_rate": 6.926711409395974e-05, "loss": 0.0086, "step": 2100 },
    { "epoch": 56.41, "learning_rate": 6.873020134228189e-05, "loss": 0.0079, "step": 2200 },
    { "epoch": 58.97, "learning_rate": 6.819328859060404e-05, "loss": 0.0069, "step": 2300 },
    { "epoch": 61.54, "learning_rate": 6.765637583892619e-05, "loss": 0.0063, "step": 2400 },
    { "epoch": 64.1, "learning_rate": 6.711946308724834e-05, "loss": 0.0075, "step": 2500 },
    { "epoch": 64.1, "eval_accuracy": 0.8915387426900585, "eval_loss": 0.828658401966095, "eval_runtime": 1.5268, "eval_samples_per_second": 249.541, "eval_steps_per_second": 31.438, "step": 2500 },
    { "epoch": 66.67, "learning_rate": 6.658255033557048e-05, "loss": 0.0069, "step": 2600 },
    { "epoch": 69.23, "learning_rate": 6.604563758389262e-05, "loss": 0.0071, "step": 2700 },
    { "epoch": 71.79, "learning_rate": 6.550872483221477e-05, "loss": 0.0071, "step": 2800 },
    { "epoch": 74.36, "learning_rate": 6.497181208053692e-05, "loss": 0.006, "step": 2900 },
    { "epoch": 76.92, "learning_rate": 6.443489932885906e-05, "loss": 0.0058, "step": 3000 },
    { "epoch": 76.92, "eval_accuracy": 0.8901681286549707, "eval_loss": 0.8421908020973206, "eval_runtime": 1.5301, "eval_samples_per_second": 249.0, "eval_steps_per_second": 31.37, "step": 3000 },
    { "epoch": 79.49, "learning_rate": 6.389798657718121e-05, "loss": 0.0064, "step": 3100 },
    { "epoch": 82.05, "learning_rate": 6.336107382550336e-05, "loss": 0.0058, "step": 3200 },
    { "epoch": 84.62, "learning_rate": 6.282416107382551e-05, "loss": 0.0061, "step": 3300 },
    { "epoch": 87.18, "learning_rate": 6.228724832214766e-05, "loss": 0.0062, "step": 3400 },
    { "epoch": 89.74, "learning_rate": 6.17503355704698e-05, "loss": 0.0052, "step": 3500 },
    { "epoch": 89.74, "eval_accuracy": 0.8907163742690059, "eval_loss": 0.8915664553642273, "eval_runtime": 1.5301, "eval_samples_per_second": 248.996, "eval_steps_per_second": 31.37, "step": 3500 },
    { "epoch": 92.31, "learning_rate": 6.121342281879195e-05, "loss": 0.004, "step": 3600 },
    { "epoch": 94.87, "learning_rate": 6.06765100671141e-05, "loss": 0.0055, "step": 3700 },
    { "epoch": 97.44, "learning_rate": 6.013959731543624e-05, "loss": 0.0043, "step": 3800 },
    { "epoch": 100.0, "learning_rate": 5.960268456375839e-05, "loss": 0.004, "step": 3900 },
    { "epoch": 102.56, "learning_rate": 5.906577181208054e-05, "loss": 0.0036, "step": 4000 },
    { "epoch": 102.56, "eval_accuracy": 0.8918128654970761, "eval_loss": 0.8925005793571472, "eval_runtime": 1.5336, "eval_samples_per_second": 248.438, "eval_steps_per_second": 31.299, "step": 4000 },
    { "epoch": 105.13, "learning_rate": 5.852885906040269e-05, "loss": 0.0044, "step": 4100 },
    { "epoch": 107.69, "learning_rate": 5.7991946308724836e-05, "loss": 0.003, "step": 4200 },
    { "epoch": 110.26, "learning_rate": 5.7455033557046984e-05, "loss": 0.0038, "step": 4300 },
    { "epoch": 112.82, "learning_rate": 5.691812080536913e-05, "loss": 0.0034, "step": 4400 },
    { "epoch": 115.38, "learning_rate": 5.638120805369128e-05, "loss": 0.0034, "step": 4500 },
    { "epoch": 115.38, "eval_accuracy": 0.8918128654970761, "eval_loss": 0.9117233753204346, "eval_runtime": 1.5348, "eval_samples_per_second": 248.239, "eval_steps_per_second": 31.274, "step": 4500 },
    { "epoch": 117.95, "learning_rate": 5.584429530201343e-05, "loss": 0.0039, "step": 4600 },
    { "epoch": 120.51, "learning_rate": 5.530738255033558e-05, "loss": 0.0037, "step": 4700 },
    { "epoch": 123.08, "learning_rate": 5.4770469798657725e-05, "loss": 0.0031, "step": 4800 },
    { "epoch": 125.64, "learning_rate": 5.4233557046979874e-05, "loss": 0.0038, "step": 4900 },
    { "epoch": 128.21, "learning_rate": 5.3696644295302015e-05, "loss": 0.004, "step": 5000 },
    { "epoch": 128.21, "eval_accuracy": 0.8946454678362573, "eval_loss": 0.8941249847412109, "eval_runtime": 1.5416, "eval_samples_per_second": 247.142, "eval_steps_per_second": 31.136, "step": 5000 },
    { "epoch": 130.77, "learning_rate": 5.315973154362416e-05, "loss": 0.0025, "step": 5100 },
    { "epoch": 133.33, "learning_rate": 5.262281879194631e-05, "loss": 0.0025, "step": 5200 },
    { "epoch": 135.9, "learning_rate": 5.208590604026846e-05, "loss": 0.0035, "step": 5300 },
    { "epoch": 138.46, "learning_rate": 5.154899328859061e-05, "loss": 0.0028, "step": 5400 },
    { "epoch": 141.03, "learning_rate": 5.1012080536912756e-05, "loss": 0.0033, "step": 5500 },
    { "epoch": 141.03, "eval_accuracy": 0.8901681286549707, "eval_loss": 0.9474687576293945, "eval_runtime": 1.5483, "eval_samples_per_second": 246.083, "eval_steps_per_second": 31.003, "step": 5500 },
    { "epoch": 143.59, "learning_rate": 5.0475167785234905e-05, "loss": 0.0026, "step": 5600 },
    { "epoch": 146.15, "learning_rate": 4.993825503355705e-05, "loss": 0.0021, "step": 5700 },
    { "epoch": 148.72, "learning_rate": 4.94013422818792e-05, "loss": 0.0025, "step": 5800 },
    { "epoch": 151.28, "learning_rate": 4.886442953020135e-05, "loss": 0.002, "step": 5900 },
    { "epoch": 153.85, "learning_rate": 4.83275167785235e-05, "loss": 0.0028, "step": 6000 },
    { "epoch": 153.85, "eval_accuracy": 0.8967470760233918, "eval_loss": 0.9054576754570007, "eval_runtime": 1.5492, "eval_samples_per_second": 245.926, "eval_steps_per_second": 30.983, "step": 6000 },
    { "epoch": 156.41, "learning_rate": 4.779060402684564e-05, "loss": 0.0026, "step": 6100 },
    { "epoch": 158.97, "learning_rate": 4.725369127516779e-05, "loss": 0.0022, "step": 6200 },
    { "epoch": 161.54, "learning_rate": 4.6716778523489936e-05, "loss": 0.0026, "step": 6300 },
    { "epoch": 164.1, "learning_rate": 4.6179865771812084e-05, "loss": 0.0025, "step": 6400 },
    { "epoch": 166.67, "learning_rate": 4.564295302013423e-05, "loss": 0.0014, "step": 6500 },
    { "epoch": 166.67, "eval_accuracy": 0.8951937134502924, "eval_loss": 0.9585675597190857, "eval_runtime": 1.544, "eval_samples_per_second": 246.764, "eval_steps_per_second": 31.088, "step": 6500 },
    { "epoch": 169.23, "learning_rate": 4.510604026845638e-05, "loss": 0.0027, "step": 6600 },
    { "epoch": 171.79, "learning_rate": 4.456912751677853e-05, "loss": 0.0016, "step": 6700 },
    { "epoch": 174.36, "learning_rate": 4.403221476510068e-05, "loss": 0.0024, "step": 6800 },
    { "epoch": 176.92, "learning_rate": 4.3495302013422825e-05, "loss": 0.0017, "step": 6900 },
    { "epoch": 179.49, "learning_rate": 4.295838926174497e-05, "loss": 0.0018, "step": 7000 },
    { "epoch": 179.49, "eval_accuracy": 0.89546783625731, "eval_loss": 0.9691464900970459, "eval_runtime": 1.518, "eval_samples_per_second": 250.996, "eval_steps_per_second": 31.622, "step": 7000 },
    { "epoch": 182.05, "learning_rate": 4.242147651006712e-05, "loss": 0.0023, "step": 7100 },
    { "epoch": 184.62, "learning_rate": 4.188456375838927e-05, "loss": 0.0027, "step": 7200 },
    { "epoch": 187.18, "learning_rate": 4.134765100671141e-05, "loss": 0.0021, "step": 7300 },
    { "epoch": 189.74, "learning_rate": 4.081073825503356e-05, "loss": 0.002, "step": 7400 },
    { "epoch": 192.31, "learning_rate": 4.027382550335571e-05, "loss": 0.0017, "step": 7500 },
    { "epoch": 192.31, "eval_accuracy": 0.899671052631579, "eval_loss": 0.92512446641922, "eval_runtime": 1.5211, "eval_samples_per_second": 250.471, "eval_steps_per_second": 31.555, "step": 7500 },
    { "epoch": 194.87, "learning_rate": 3.9736912751677856e-05, "loss": 0.0017, "step": 7600 },
    { "epoch": 197.44, "learning_rate": 3.9200000000000004e-05, "loss": 0.0019, "step": 7700 },
    { "epoch": 200.0, "learning_rate": 3.866308724832215e-05, "loss": 0.0022, "step": 7800 },
    { "epoch": 202.56, "learning_rate": 3.81261744966443e-05, "loss": 0.0012, "step": 7900 },
    { "epoch": 205.13, "learning_rate": 3.758926174496645e-05, "loss": 0.0016, "step": 8000 },
    { "epoch": 205.13, "eval_accuracy": 0.8972953216374269, "eval_loss": 0.9501896500587463, "eval_runtime": 1.5504, "eval_samples_per_second": 245.737, "eval_steps_per_second": 30.959, "step": 8000 },
    { "epoch": 207.69, "learning_rate": 3.705234899328859e-05, "loss": 0.0013, "step": 8100 },
    { "epoch": 210.26, "learning_rate": 3.651543624161074e-05, "loss": 0.0011, "step": 8200 },
    { "epoch": 212.82, "learning_rate": 3.597852348993289e-05, "loss": 0.0015, "step": 8300 },
    { "epoch": 215.38, "learning_rate": 3.5441610738255035e-05, "loss": 0.0009, "step": 8400 },
    { "epoch": 217.95, "learning_rate": 3.490469798657718e-05, "loss": 0.0015, "step": 8500 },
    { "epoch": 217.95, "eval_accuracy": 0.8966557017543859, "eval_loss": 0.9619430303573608, "eval_runtime": 1.5407, "eval_samples_per_second": 247.295, "eval_steps_per_second": 31.155, "step": 8500 },
    { "epoch": 220.51, "learning_rate": 3.436778523489933e-05, "loss": 0.0011, "step": 8600 },
    { "epoch": 223.08, "learning_rate": 3.383087248322148e-05, "loss": 0.002, "step": 8700 },
    { "epoch": 225.64, "learning_rate": 3.329395973154363e-05, "loss": 0.0012, "step": 8800 },
    { "epoch": 228.21, "learning_rate": 3.2757046979865776e-05, "loss": 0.0011, "step": 8900 },
    { "epoch": 230.77, "learning_rate": 3.2220134228187925e-05, "loss": 0.001, "step": 9000 },
    { "epoch": 230.77, "eval_accuracy": 0.899671052631579, "eval_loss": 0.9450499415397644, "eval_runtime": 1.5395, "eval_samples_per_second": 247.489, "eval_steps_per_second": 31.18, "step": 9000 },
    { "epoch": 233.33, "learning_rate": 3.168322147651007e-05, "loss": 0.0009, "step": 9100 },
    { "epoch": 235.9, "learning_rate": 3.114630872483222e-05, "loss": 0.0009, "step": 9200 },
    { "epoch": 238.46, "learning_rate": 3.060939597315436e-05, "loss": 0.0011, "step": 9300 },
    { "epoch": 241.03, "learning_rate": 3.007248322147651e-05, "loss": 0.0006, "step": 9400 },
    { "epoch": 243.59, "learning_rate": 2.953557046979866e-05, "loss": 0.0011, "step": 9500 },
    { "epoch": 243.59, "eval_accuracy": 0.8972953216374269, "eval_loss": 0.9794738292694092, "eval_runtime": 1.5217, "eval_samples_per_second": 250.373, "eval_steps_per_second": 31.543, "step": 9500 },
    { "epoch": 246.15, "learning_rate": 2.8998657718120807e-05, "loss": 0.0009, "step": 9600 },
    { "epoch": 248.72, "learning_rate": 2.8461744966442955e-05, "loss": 0.0009, "step": 9700 },
    { "epoch": 251.28, "learning_rate": 2.79248322147651e-05, "loss": 0.0008, "step": 9800 },
    { "epoch": 253.85, "learning_rate": 2.738791946308725e-05, "loss": 0.0004, "step": 9900 },
    { "epoch": 256.41, "learning_rate": 2.6851006711409397e-05, "loss": 0.0011, "step": 10000 },
    { "epoch": 256.41, "eval_accuracy": 0.8970211988304093, "eval_loss": 1.0004897117614746, "eval_runtime": 1.5228, "eval_samples_per_second": 250.203, "eval_steps_per_second": 31.522, "step": 10000 },
    { "epoch": 256.41, "step": 10000, "total_flos": 5.166941021601792e+16, "train_loss": 0.03826559160500765, "train_runtime": 1428.3699, "train_samples_per_second": 336.047, "train_steps_per_second": 10.501 }
  ],
  "max_steps": 15000,
  "num_train_epochs": 385,
  "total_flos": 5.166941021601792e+16,
  "trial_name": null,
  "trial_params": null
}