{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9976019184652278,
  "eval_steps": 500,
  "global_step": 104,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 4.5454545454545457e-07,
      "logits/chosen": 0.133200004696846,
      "logits/rejected": 0.3101254105567932,
      "logps/chosen": -439.1437072753906,
      "logps/rejected": -369.0645751953125,
      "loss": 0.3882,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.5454545454545455e-06,
      "logits/chosen": 0.13730913400650024,
      "logits/rejected": 0.17233937978744507,
      "logps/chosen": -361.9382629394531,
      "logps/rejected": -331.0599670410156,
      "loss": 0.3676,
      "rewards/accuracies": 0.3611111044883728,
      "rewards/chosen": -3.202176230843179e-05,
      "rewards/margins": -2.2185584384715185e-05,
      "rewards/rejected": -9.836176104727201e-06,
      "step": 10
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.8853481410001225e-06,
      "logits/chosen": 0.14704950153827667,
      "logits/rejected": 0.24530892074108124,
      "logps/chosen": -333.5304260253906,
      "logps/rejected": -330.6236877441406,
      "loss": 0.3766,
      "rewards/accuracies": 0.606249988079071,
      "rewards/chosen": 0.00022825358610134572,
      "rewards/margins": 0.0006691188318654895,
      "rewards/rejected": -0.0004408652021083981,
      "step": 20
    },
    {
      "epoch": 0.29,
      "learning_rate": 4.502502736173462e-06,
      "logits/chosen": 0.08989942073822021,
      "logits/rejected": 0.20580899715423584,
      "logps/chosen": -343.5199890136719,
      "logps/rejected": -330.2860412597656,
      "loss": 0.3623,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": 0.000688703847117722,
      "rewards/margins": 0.0018687937408685684,
      "rewards/rejected": -0.00118009012658149,
      "step": 30
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.893311157806091e-06,
      "logits/chosen": 0.09253476560115814,
      "logits/rejected": 0.13635697960853577,
      "logps/chosen": -357.9090576171875,
      "logps/rejected": -349.3745422363281,
      "loss": 0.3657,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.0009900133591145277,
      "rewards/margins": 0.003044883953407407,
      "rewards/rejected": -0.0040348973125219345,
      "step": 40
    },
    {
      "epoch": 0.48,
      "learning_rate": 3.1266313306468018e-06,
      "logits/chosen": 0.12737445533275604,
      "logits/rejected": 0.24501362442970276,
      "logps/chosen": -348.5634765625,
      "logps/rejected": -323.2393493652344,
      "loss": 0.3646,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.001117801875807345,
      "rewards/margins": 0.005832134746015072,
      "rewards/rejected": -0.006949936505407095,
      "step": 50
    },
    {
      "epoch": 0.58,
      "learning_rate": 2.2891223348923888e-06,
      "logits/chosen": 0.08873095363378525,
      "logits/rejected": 0.14634013175964355,
      "logps/chosen": -342.6759033203125,
      "logps/rejected": -323.6924133300781,
      "loss": 0.3595,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.0012518768198788166,
      "rewards/margins": 0.009539250284433365,
      "rewards/rejected": -0.010791127569973469,
      "step": 60
    },
    {
      "epoch": 0.67,
      "learning_rate": 1.475449188008532e-06,
      "logits/chosen": 0.0587996244430542,
      "logits/rejected": 0.11694981157779694,
      "logps/chosen": -353.9031066894531,
      "logps/rejected": -343.61822509765625,
      "loss": 0.3519,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.003407146781682968,
      "rewards/margins": 0.016061358153820038,
      "rewards/rejected": -0.019468504935503006,
      "step": 70
    },
    {
      "epoch": 0.77,
      "learning_rate": 7.775827023107835e-07,
      "logits/chosen": 0.10978138446807861,
      "logits/rejected": 0.16459235548973083,
      "logps/chosen": -333.46038818359375,
      "logps/rejected": -321.7098083496094,
      "loss": 0.3659,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": -0.008328983560204506,
      "rewards/margins": 0.014517639763653278,
      "rewards/rejected": -0.02284662239253521,
      "step": 80
    },
    {
      "epoch": 0.86,
      "learning_rate": 2.7440387297912126e-07,
      "logits/chosen": 0.06614189594984055,
      "logits/rejected": 0.20078620314598083,
      "logps/chosen": -375.4840087890625,
      "logps/rejected": -375.40948486328125,
      "loss": 0.3559,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -0.010338048450648785,
      "rewards/margins": 0.018196452409029007,
      "rewards/rejected": -0.028534505516290665,
      "step": 90
    },
    {
      "epoch": 0.96,
      "learning_rate": 2.27878296044029e-08,
      "logits/chosen": 0.10308869183063507,
      "logits/rejected": 0.15276041626930237,
      "logps/chosen": -368.84906005859375,
      "logps/rejected": -368.1903381347656,
      "loss": 0.3517,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -0.01480852346867323,
      "rewards/margins": 0.012228922918438911,
      "rewards/rejected": -0.027037447318434715,
      "step": 100
    },
    {
      "epoch": 1.0,
      "step": 104,
      "total_flos": 0.0,
      "train_loss": 0.3612480771083098,
      "train_runtime": 1444.6651,
      "train_samples_per_second": 3.461,
      "train_steps_per_second": 0.072
    }
  ],
  "logging_steps": 10,
  "max_steps": 104,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}