{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 38.46153846153846,
  "eval_steps": 500,
  "global_step": 2500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.5384615384615383,
      "grad_norm": 134.7136688232422,
      "learning_rate": 9.981600000000001e-06,
      "loss": 5.6198,
      "step": 100
    },
    {
      "epoch": 3.076923076923077,
      "grad_norm": 115.98611450195312,
      "learning_rate": 9.961600000000001e-06,
      "loss": 3.5959,
      "step": 200
    },
    {
      "epoch": 4.615384615384615,
      "grad_norm": 114.23514556884766,
      "learning_rate": 9.941600000000002e-06,
      "loss": 3.3845,
      "step": 300
    },
    {
      "epoch": 6.153846153846154,
      "grad_norm": 140.5648193359375,
      "learning_rate": 9.921600000000002e-06,
      "loss": 3.1494,
      "step": 400
    },
    {
      "epoch": 7.6923076923076925,
      "grad_norm": 117.4485855102539,
      "learning_rate": 9.901600000000002e-06,
      "loss": 3.0773,
      "step": 500
    },
    {
      "epoch": 9.23076923076923,
      "grad_norm": 389.71490478515625,
      "learning_rate": 9.8816e-06,
      "loss": 3.2116,
      "step": 600
    },
    {
      "epoch": 10.76923076923077,
      "grad_norm": 107.11251831054688,
      "learning_rate": 9.8616e-06,
      "loss": 3.0471,
      "step": 700
    },
    {
      "epoch": 12.307692307692308,
      "grad_norm": 85.49571228027344,
      "learning_rate": 9.8416e-06,
      "loss": 3.0201,
      "step": 800
    },
    {
      "epoch": 13.846153846153847,
      "grad_norm": 121.62274932861328,
      "learning_rate": 9.821600000000001e-06,
      "loss": 2.9355,
      "step": 900
    },
    {
      "epoch": 15.384615384615385,
      "grad_norm": 64.66451263427734,
      "learning_rate": 9.801600000000001e-06,
      "loss": 2.9637,
      "step": 1000
    },
    {
      "epoch": 16.923076923076923,
      "grad_norm": 182.4657440185547,
      "learning_rate": 9.781600000000001e-06,
      "loss": 2.9819,
      "step": 1100
    },
    {
      "epoch": 18.46153846153846,
      "grad_norm": 97.80529022216797,
      "learning_rate": 9.761600000000002e-06,
      "loss": 2.9486,
      "step": 1200
    },
    {
      "epoch": 20.0,
      "grad_norm": 220.0562744140625,
      "learning_rate": 9.741600000000002e-06,
      "loss": 2.8608,
      "step": 1300
    },
    {
      "epoch": 21.53846153846154,
      "grad_norm": 95.53397369384766,
      "learning_rate": 9.7216e-06,
      "loss": 2.8322,
      "step": 1400
    },
    {
      "epoch": 23.076923076923077,
      "grad_norm": 67.54853057861328,
      "learning_rate": 9.7016e-06,
      "loss": 2.9429,
      "step": 1500
    },
    {
      "epoch": 24.615384615384617,
      "grad_norm": 214.53131103515625,
      "learning_rate": 9.6816e-06,
      "loss": 2.7927,
      "step": 1600
    },
    {
      "epoch": 26.153846153846153,
      "grad_norm": 293.3318786621094,
      "learning_rate": 9.6616e-06,
      "loss": 2.7665,
      "step": 1700
    },
    {
      "epoch": 27.692307692307693,
      "grad_norm": 216.3682861328125,
      "learning_rate": 9.641600000000001e-06,
      "loss": 2.8309,
      "step": 1800
    },
    {
      "epoch": 29.23076923076923,
      "grad_norm": 168.0605010986328,
      "learning_rate": 9.621600000000001e-06,
      "loss": 2.8433,
      "step": 1900
    },
    {
      "epoch": 30.76923076923077,
      "grad_norm": 103.49143981933594,
      "learning_rate": 9.601600000000001e-06,
      "loss": 2.7606,
      "step": 2000
    },
    {
      "epoch": 32.30769230769231,
      "grad_norm": 116.6761474609375,
      "learning_rate": 9.581600000000002e-06,
      "loss": 2.6595,
      "step": 2100
    },
    {
      "epoch": 33.84615384615385,
      "grad_norm": 176.0087432861328,
      "learning_rate": 9.5616e-06,
      "loss": 2.632,
      "step": 2200
    },
    {
      "epoch": 35.38461538461539,
      "grad_norm": 120.39679718017578,
      "learning_rate": 9.5416e-06,
      "loss": 2.5941,
      "step": 2300
    },
    {
      "epoch": 36.92307692307692,
      "grad_norm": 100.69256591796875,
      "learning_rate": 9.5216e-06,
      "loss": 2.6007,
      "step": 2400
    },
    {
      "epoch": 38.46153846153846,
      "grad_norm": 254.54440307617188,
      "learning_rate": 9.5016e-06,
      "loss": 2.5548,
      "step": 2500
    }
  ],
  "logging_steps": 100,
  "max_steps": 50000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 770,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 9.5560791552e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}