{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.2723658051689861,
"eval_steps": 500,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 0.0198,
"loss": 0.8181,
"step": 1
},
{
"epoch": 0.03,
"learning_rate": 0.0196,
"loss": 0.787,
"step": 2
},
{
"epoch": 0.04,
"learning_rate": 0.0194,
"loss": 1.0047,
"step": 3
},
{
"epoch": 0.05,
"learning_rate": 0.0192,
"loss": 0.8688,
"step": 4
},
{
"epoch": 0.06,
"learning_rate": 0.019,
"loss": 0.7173,
"step": 5
},
{
"epoch": 0.08,
"learning_rate": 0.0188,
"loss": 0.5175,
"step": 6
},
{
"epoch": 0.09,
"learning_rate": 0.018600000000000002,
"loss": 0.7559,
"step": 7
},
{
"epoch": 0.1,
"learning_rate": 0.0184,
"loss": 0.9278,
"step": 8
},
{
"epoch": 0.11,
"learning_rate": 0.0182,
"loss": 0.6011,
"step": 9
},
{
"epoch": 0.13,
"learning_rate": 0.018000000000000002,
"loss": 0.8014,
"step": 10
},
{
"epoch": 0.14,
"learning_rate": 0.0178,
"loss": 1.2581,
"step": 11
},
{
"epoch": 0.15,
"learning_rate": 0.0176,
"loss": 0.9886,
"step": 12
},
{
"epoch": 0.17,
"learning_rate": 0.0174,
"loss": 0.7866,
"step": 13
},
{
"epoch": 0.18,
"learning_rate": 0.0172,
"loss": 0.936,
"step": 14
},
{
"epoch": 0.19,
"learning_rate": 0.017,
"loss": 1.0503,
"step": 15
},
{
"epoch": 0.2,
"learning_rate": 0.0168,
"loss": 0.5689,
"step": 16
},
{
"epoch": 0.22,
"learning_rate": 0.0166,
"loss": 0.8576,
"step": 17
},
{
"epoch": 0.23,
"learning_rate": 0.016399999999999998,
"loss": 1.0946,
"step": 18
},
{
"epoch": 0.24,
"learning_rate": 0.016200000000000003,
"loss": 0.9075,
"step": 19
},
{
"epoch": 0.25,
"learning_rate": 0.016,
"loss": 1.1441,
"step": 20
},
{
"epoch": 0.27,
"learning_rate": 0.0158,
"loss": 0.7794,
"step": 21
},
{
"epoch": 0.28,
"learning_rate": 0.015600000000000001,
"loss": 0.9574,
"step": 22
},
{
"epoch": 0.29,
"learning_rate": 0.0154,
"loss": 0.8937,
"step": 23
},
{
"epoch": 0.31,
"learning_rate": 0.0152,
"loss": 0.709,
"step": 24
},
{
"epoch": 0.32,
"learning_rate": 0.015,
"loss": 0.8731,
"step": 25
},
{
"epoch": 0.33,
"learning_rate": 0.0148,
"loss": 0.719,
"step": 26
},
{
"epoch": 0.34,
"learning_rate": 0.0146,
"loss": 0.7419,
"step": 27
},
{
"epoch": 0.36,
"learning_rate": 0.0144,
"loss": 0.9224,
"step": 28
},
{
"epoch": 0.37,
"learning_rate": 0.014199999999999999,
"loss": 1.0802,
"step": 29
},
{
"epoch": 0.38,
"learning_rate": 0.013999999999999999,
"loss": 0.8187,
"step": 30
},
{
"epoch": 0.39,
"learning_rate": 0.0138,
"loss": 0.615,
"step": 31
},
{
"epoch": 0.41,
"learning_rate": 0.013600000000000001,
"loss": 0.5214,
"step": 32
},
{
"epoch": 0.42,
"learning_rate": 0.0134,
"loss": 0.649,
"step": 33
},
{
"epoch": 0.43,
"learning_rate": 0.013200000000000002,
"loss": 0.6523,
"step": 34
},
{
"epoch": 0.45,
"learning_rate": 0.013000000000000001,
"loss": 0.7002,
"step": 35
},
{
"epoch": 0.46,
"learning_rate": 0.0128,
"loss": 0.6161,
"step": 36
},
{
"epoch": 0.47,
"learning_rate": 0.0126,
"loss": 1.0374,
"step": 37
},
{
"epoch": 0.48,
"learning_rate": 0.0124,
"loss": 1.0328,
"step": 38
},
{
"epoch": 0.5,
"learning_rate": 0.0122,
"loss": 0.7637,
"step": 39
},
{
"epoch": 0.51,
"learning_rate": 0.012,
"loss": 0.6332,
"step": 40
},
{
"epoch": 0.52,
"learning_rate": 0.0118,
"loss": 0.74,
"step": 41
},
{
"epoch": 0.53,
"learning_rate": 0.0116,
"loss": 0.7284,
"step": 42
},
{
"epoch": 0.55,
"learning_rate": 0.011399999999999999,
"loss": 0.9198,
"step": 43
},
{
"epoch": 0.56,
"learning_rate": 0.011200000000000002,
"loss": 0.626,
"step": 44
},
{
"epoch": 0.57,
"learning_rate": 0.011000000000000001,
"loss": 0.628,
"step": 45
},
{
"epoch": 0.59,
"learning_rate": 0.0108,
"loss": 0.5322,
"step": 46
},
{
"epoch": 0.6,
"learning_rate": 0.0106,
"loss": 0.7844,
"step": 47
},
{
"epoch": 0.61,
"learning_rate": 0.010400000000000001,
"loss": 0.5957,
"step": 48
},
{
"epoch": 0.62,
"learning_rate": 0.0102,
"loss": 0.6681,
"step": 49
},
{
"epoch": 0.64,
"learning_rate": 0.01,
"loss": 0.8281,
"step": 50
},
{
"epoch": 0.65,
"learning_rate": 0.0098,
"loss": 0.5284,
"step": 51
},
{
"epoch": 0.66,
"learning_rate": 0.0096,
"loss": 0.8251,
"step": 52
},
{
"epoch": 0.67,
"learning_rate": 0.0094,
"loss": 0.9845,
"step": 53
},
{
"epoch": 0.69,
"learning_rate": 0.0092,
"loss": 0.9525,
"step": 54
},
{
"epoch": 0.7,
"learning_rate": 0.009000000000000001,
"loss": 0.9454,
"step": 55
},
{
"epoch": 0.71,
"learning_rate": 0.0088,
"loss": 0.4058,
"step": 56
},
{
"epoch": 0.73,
"learning_rate": 0.0086,
"loss": 0.5435,
"step": 57
},
{
"epoch": 0.74,
"learning_rate": 0.0084,
"loss": 0.6892,
"step": 58
},
{
"epoch": 0.75,
"learning_rate": 0.008199999999999999,
"loss": 0.6426,
"step": 59
},
{
"epoch": 0.76,
"learning_rate": 0.008,
"loss": 0.9414,
"step": 60
},
{
"epoch": 0.78,
"learning_rate": 0.0078000000000000005,
"loss": 0.7945,
"step": 61
},
{
"epoch": 0.79,
"learning_rate": 0.0076,
"loss": 0.6295,
"step": 62
},
{
"epoch": 0.8,
"learning_rate": 0.0074,
"loss": 0.7888,
"step": 63
},
{
"epoch": 0.81,
"learning_rate": 0.0072,
"loss": 0.5454,
"step": 64
},
{
"epoch": 0.83,
"learning_rate": 0.006999999999999999,
"loss": 0.711,
"step": 65
},
{
"epoch": 0.84,
"learning_rate": 0.0068000000000000005,
"loss": 0.713,
"step": 66
},
{
"epoch": 0.85,
"learning_rate": 0.006600000000000001,
"loss": 0.6058,
"step": 67
},
{
"epoch": 0.87,
"learning_rate": 0.0064,
"loss": 0.8203,
"step": 68
},
{
"epoch": 0.88,
"learning_rate": 0.0062,
"loss": 0.8275,
"step": 69
},
{
"epoch": 0.89,
"learning_rate": 0.006,
"loss": 0.4923,
"step": 70
},
{
"epoch": 0.9,
"learning_rate": 0.0058,
"loss": 0.5219,
"step": 71
},
{
"epoch": 0.92,
"learning_rate": 0.005600000000000001,
"loss": 0.9954,
"step": 72
},
{
"epoch": 0.93,
"learning_rate": 0.0054,
"loss": 0.6206,
"step": 73
},
{
"epoch": 0.94,
"learning_rate": 0.005200000000000001,
"loss": 0.6064,
"step": 74
},
{
"epoch": 0.95,
"learning_rate": 0.005,
"loss": 0.6584,
"step": 75
},
{
"epoch": 0.97,
"learning_rate": 0.0048,
"loss": 0.8461,
"step": 76
},
{
"epoch": 0.98,
"learning_rate": 0.0046,
"loss": 0.9615,
"step": 77
},
{
"epoch": 0.99,
"learning_rate": 0.0044,
"loss": 0.6508,
"step": 78
},
{
"epoch": 1.01,
"learning_rate": 0.0042,
"loss": 1.0089,
"step": 79
},
{
"epoch": 1.02,
"learning_rate": 0.004,
"loss": 0.7515,
"step": 80
},
{
"epoch": 1.03,
"learning_rate": 0.0038,
"loss": 0.4172,
"step": 81
},
{
"epoch": 1.04,
"learning_rate": 0.0036,
"loss": 0.7634,
"step": 82
},
{
"epoch": 1.06,
"learning_rate": 0.0034000000000000002,
"loss": 0.585,
"step": 83
},
{
"epoch": 1.07,
"learning_rate": 0.0032,
"loss": 0.7668,
"step": 84
},
{
"epoch": 1.08,
"learning_rate": 0.003,
"loss": 0.5403,
"step": 85
},
{
"epoch": 1.09,
"learning_rate": 0.0028000000000000004,
"loss": 0.5995,
"step": 86
},
{
"epoch": 1.11,
"learning_rate": 0.0026000000000000003,
"loss": 0.4515,
"step": 87
},
{
"epoch": 1.12,
"learning_rate": 0.0024,
"loss": 0.6288,
"step": 88
},
{
"epoch": 1.13,
"learning_rate": 0.0022,
"loss": 0.7387,
"step": 89
},
{
"epoch": 1.15,
"learning_rate": 0.002,
"loss": 0.6517,
"step": 90
},
{
"epoch": 1.16,
"learning_rate": 0.0018,
"loss": 0.5389,
"step": 91
},
{
"epoch": 1.17,
"learning_rate": 0.0016,
"loss": 0.4433,
"step": 92
},
{
"epoch": 1.18,
"learning_rate": 0.0014000000000000002,
"loss": 0.6643,
"step": 93
},
{
"epoch": 1.2,
"learning_rate": 0.0012,
"loss": 0.5825,
"step": 94
},
{
"epoch": 1.21,
"learning_rate": 0.001,
"loss": 0.7709,
"step": 95
},
{
"epoch": 1.22,
"learning_rate": 0.0008,
"loss": 0.562,
"step": 96
},
{
"epoch": 1.23,
"learning_rate": 0.0006,
"loss": 0.5581,
"step": 97
},
{
"epoch": 1.25,
"learning_rate": 0.0004,
"loss": 0.4679,
"step": 98
},
{
"epoch": 1.26,
"learning_rate": 0.0002,
"loss": 0.5063,
"step": 99
},
{
"epoch": 1.27,
"learning_rate": 0.0,
"loss": 0.5527,
"step": 100
},
{
"epoch": 1.27,
"step": 100,
"total_flos": 1.323218757484544e+17,
"train_loss": 0.7395605874061585,
"train_runtime": 1218.4689,
"train_samples_per_second": 2.626,
"train_steps_per_second": 0.082
}
],
"logging_steps": 1.0,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"total_flos": 1.323218757484544e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}