{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.860775588048315,
"eval_steps": 500,
"global_step": 9000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.16,
"grad_norm": 18.022613525390625,
"learning_rate": 4.735113371477008e-05,
"loss": 2.3719,
"step": 500
},
{
"epoch": 0.32,
"grad_norm": 21.23277473449707,
"learning_rate": 4.4702267429540156e-05,
"loss": 2.0069,
"step": 1000
},
{
"epoch": 0.48,
"grad_norm": 16.839088439941406,
"learning_rate": 4.205340114431024e-05,
"loss": 1.8155,
"step": 1500
},
{
"epoch": 0.64,
"grad_norm": 15.446898460388184,
"learning_rate": 3.9404534859080315e-05,
"loss": 1.7476,
"step": 2000
},
{
"epoch": 0.79,
"grad_norm": 24.382474899291992,
"learning_rate": 3.675566857385039e-05,
"loss": 1.6636,
"step": 2500
},
{
"epoch": 0.95,
"grad_norm": 21.278900146484375,
"learning_rate": 3.410680228862047e-05,
"loss": 1.624,
"step": 3000
},
{
"epoch": 1.11,
"grad_norm": 14.305024147033691,
"learning_rate": 3.1457936003390545e-05,
"loss": 1.2214,
"step": 3500
},
{
"epoch": 1.27,
"grad_norm": 13.627291679382324,
"learning_rate": 2.8809069718160632e-05,
"loss": 1.0202,
"step": 4000
},
{
"epoch": 1.43,
"grad_norm": 11.309160232543945,
"learning_rate": 2.616020343293071e-05,
"loss": 1.0545,
"step": 4500
},
{
"epoch": 1.59,
"grad_norm": 12.876075744628906,
"learning_rate": 2.3511337147700785e-05,
"loss": 1.0358,
"step": 5000
},
{
"epoch": 1.75,
"grad_norm": 11.487010955810547,
"learning_rate": 2.0862470862470865e-05,
"loss": 1.0278,
"step": 5500
},
{
"epoch": 1.91,
"grad_norm": 12.434341430664062,
"learning_rate": 1.821360457724094e-05,
"loss": 1.0069,
"step": 6000
},
{
"epoch": 2.07,
"grad_norm": 11.067166328430176,
"learning_rate": 1.5564738292011018e-05,
"loss": 0.8587,
"step": 6500
},
{
"epoch": 2.23,
"grad_norm": 9.134782791137695,
"learning_rate": 1.29158720067811e-05,
"loss": 0.6709,
"step": 7000
},
{
"epoch": 2.38,
"grad_norm": 12.168549537658691,
"learning_rate": 1.0267005721551176e-05,
"loss": 0.6534,
"step": 7500
},
{
"epoch": 2.54,
"grad_norm": 8.282147407531738,
"learning_rate": 7.6181394363212545e-06,
"loss": 0.6821,
"step": 8000
},
{
"epoch": 2.7,
"grad_norm": 22.135112762451172,
"learning_rate": 4.9692731510913336e-06,
"loss": 0.671,
"step": 8500
},
{
"epoch": 2.86,
"grad_norm": 7.933387756347656,
"learning_rate": 2.3204068658614114e-06,
"loss": 0.669,
"step": 9000
}
],
"logging_steps": 500,
"max_steps": 9438,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"total_flos": 184716162957312.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}