{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9874476987447699,
  "eval_steps": 500,
  "global_step": 59,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 8.333333333333333e-08,
      "logits/chosen": -2.6420538425445557,
      "logits/rejected": -2.573397636413574,
      "logps/chosen": -246.4191131591797,
      "logps/pi_response": -236.30819702148438,
      "logps/ref_response": -236.30819702148438,
      "logps/rejected": -276.9232177734375,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.17,
      "learning_rate": 4.930057285201027e-07,
      "logits/chosen": -2.5625665187835693,
      "logits/rejected": -2.543930768966675,
      "logps/chosen": -266.3973083496094,
      "logps/pi_response": -240.02105712890625,
      "logps/ref_response": -236.90733337402344,
      "logps/rejected": -259.9803466796875,
      "loss": 0.6891,
      "rewards/accuracies": 0.5208333134651184,
      "rewards/chosen": -0.011344672180712223,
      "rewards/margins": 0.01600966602563858,
      "rewards/rejected": -0.02735433727502823,
      "step": 10
    },
    {
      "epoch": 0.33,
      "learning_rate": 4.187457503795526e-07,
      "logits/chosen": -2.483142137527466,
      "logits/rejected": -2.4834489822387695,
      "logps/chosen": -278.94293212890625,
      "logps/pi_response": -261.69927978515625,
      "logps/ref_response": -229.9475555419922,
      "logps/rejected": -274.2855224609375,
      "loss": 0.6631,
      "rewards/accuracies": 0.578125,
      "rewards/chosen": -0.1398383378982544,
      "rewards/margins": 0.09113852679729462,
      "rewards/rejected": -0.230976864695549,
      "step": 20
    },
    {
      "epoch": 0.5,
      "learning_rate": 2.8691164100062034e-07,
      "logits/chosen": -2.442124128341675,
      "logits/rejected": -2.4576478004455566,
      "logps/chosen": -307.03875732421875,
      "logps/pi_response": -289.63275146484375,
      "logps/ref_response": -234.99520874023438,
      "logps/rejected": -298.96575927734375,
      "loss": 0.6596,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -0.31300970911979675,
      "rewards/margins": 0.1431526243686676,
      "rewards/rejected": -0.45616236329078674,
      "step": 30
    },
    {
      "epoch": 0.67,
      "learning_rate": 1.4248369943086995e-07,
      "logits/chosen": -2.412471294403076,
      "logits/rejected": -2.4030799865722656,
      "logps/chosen": -290.8214416503906,
      "logps/pi_response": -291.937744140625,
      "logps/ref_response": -231.7711944580078,
      "logps/rejected": -297.19244384765625,
      "loss": 0.6416,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.3306281864643097,
      "rewards/margins": 0.18531939387321472,
      "rewards/rejected": -0.5159475803375244,
      "step": 40
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.473909705816111e-08,
      "logits/chosen": -2.4195713996887207,
      "logits/rejected": -2.424147129058838,
      "logps/chosen": -299.3436584472656,
      "logps/pi_response": -297.82391357421875,
      "logps/ref_response": -234.6703643798828,
      "logps/rejected": -301.0117492675781,
      "loss": 0.6343,
      "rewards/accuracies": 0.671875,
      "rewards/chosen": -0.33631107211112976,
      "rewards/margins": 0.2048371136188507,
      "rewards/rejected": -0.5411481857299805,
      "step": 50
    },
    {
      "epoch": 0.99,
      "step": 59,
      "total_flos": 0.0,
      "train_loss": 0.6547156754186598,
      "train_runtime": 3352.7467,
      "train_samples_per_second": 4.558,
      "train_steps_per_second": 0.018
    }
  ],
  "logging_steps": 10,
  "max_steps": 59,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}