{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 1266,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2,
      "learning_rate": 1.9606299212598425e-05,
      "loss": 4.3298,
      "step": 84
    },
    {
      "epoch": 0.4,
      "learning_rate": 2.8946444249341527e-05,
      "loss": 4.1173,
      "step": 168
    },
    {
      "epoch": 0.6,
      "learning_rate": 2.6733977172958736e-05,
      "loss": 3.8942,
      "step": 252
    },
    {
      "epoch": 0.8,
      "learning_rate": 2.4521510096575942e-05,
      "loss": 3.8492,
      "step": 336
    },
    {
      "epoch": 1.0,
      "learning_rate": 2.2309043020193154e-05,
      "loss": 3.7421,
      "step": 420
    },
    {
      "epoch": 1.19,
      "learning_rate": 2.009657594381036e-05,
      "loss": 3.71,
      "step": 504
    },
    {
      "epoch": 1.39,
      "learning_rate": 1.788410886742757e-05,
      "loss": 3.639,
      "step": 588
    },
    {
      "epoch": 1.59,
      "learning_rate": 1.5671641791044774e-05,
      "loss": 3.6233,
      "step": 672
    },
    {
      "epoch": 1.79,
      "learning_rate": 1.3459174714661984e-05,
      "loss": 3.6277,
      "step": 756
    },
    {
      "epoch": 1.99,
      "learning_rate": 1.1246707638279193e-05,
      "loss": 3.5984,
      "step": 840
    },
    {
      "epoch": 2.19,
      "learning_rate": 9.0342405618964e-06,
      "loss": 3.5838,
      "step": 924
    },
    {
      "epoch": 2.39,
      "learning_rate": 6.8217734855136085e-06,
      "loss": 3.5981,
      "step": 1008
    },
    {
      "epoch": 2.59,
      "learning_rate": 4.6093064091308165e-06,
      "loss": 3.5963,
      "step": 1092
    },
    {
      "epoch": 2.79,
      "learning_rate": 2.396839332748025e-06,
      "loss": 3.5397,
      "step": 1176
    },
    {
      "epoch": 2.99,
      "learning_rate": 1.8437225636523267e-07,
      "loss": 3.5674,
      "step": 1260
    }
  ],
  "logging_steps": 84,
  "max_steps": 1266,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 4721807550578688.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}