{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9874476987447699,
  "eval_steps": 500,
  "global_step": 59,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 8.333333333333333e-08,
      "logits/chosen": -2.828038215637207,
      "logits/rejected": -2.728235960006714,
      "logps/chosen": -198.4669189453125,
      "logps/pi_response": -116.54592895507812,
      "logps/ref_response": -116.54592895507812,
      "logps/rejected": -205.3916015625,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.17,
      "learning_rate": 4.930057285201027e-07,
      "logits/chosen": -2.744903087615967,
      "logits/rejected": -2.707003593444824,
      "logps/chosen": -220.6236114501953,
      "logps/pi_response": -125.67618560791016,
      "logps/ref_response": -125.32001495361328,
      "logps/rejected": -271.0813293457031,
      "loss": 0.685,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.02556823194026947,
      "rewards/margins": 0.025313667953014374,
      "rewards/rejected": -0.05088190361857414,
      "step": 10
    },
    {
      "epoch": 0.33,
      "learning_rate": 4.187457503795526e-07,
      "logits/chosen": -2.7646822929382324,
      "logits/rejected": -2.7211151123046875,
      "logps/chosen": -259.53033447265625,
      "logps/pi_response": -120.56251525878906,
      "logps/ref_response": -122.2673110961914,
      "logps/rejected": -314.5663146972656,
      "loss": 0.6285,
      "rewards/accuracies": 0.640625,
      "rewards/chosen": -0.23617057502269745,
      "rewards/margins": 0.2836803197860718,
      "rewards/rejected": -0.5198509097099304,
      "step": 20
    },
    {
      "epoch": 0.5,
      "learning_rate": 2.8691164100062034e-07,
      "logits/chosen": -2.744175672531128,
      "logits/rejected": -2.706383466720581,
      "logps/chosen": -286.24603271484375,
      "logps/pi_response": -146.5166015625,
      "logps/ref_response": -124.40946197509766,
      "logps/rejected": -356.7808532714844,
      "loss": 0.5884,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.49397367238998413,
      "rewards/margins": 0.4614318311214447,
      "rewards/rejected": -0.9554054141044617,
      "step": 30
    },
    {
      "epoch": 0.67,
      "learning_rate": 1.4248369943086995e-07,
      "logits/chosen": -2.663506031036377,
      "logits/rejected": -2.6273674964904785,
      "logps/chosen": -286.8318786621094,
      "logps/pi_response": -154.93138122558594,
      "logps/ref_response": -118.28387451171875,
      "logps/rejected": -361.5838928222656,
      "loss": 0.5653,
      "rewards/accuracies": 0.6656249761581421,
      "rewards/chosen": -0.5869585275650024,
      "rewards/margins": 0.4853137135505676,
      "rewards/rejected": -1.0722721815109253,
      "step": 40
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.473909705816111e-08,
      "logits/chosen": -2.6390087604522705,
      "logits/rejected": -2.5878398418426514,
      "logps/chosen": -286.9554138183594,
      "logps/pi_response": -164.14089965820312,
      "logps/ref_response": -121.82298278808594,
      "logps/rejected": -359.47662353515625,
      "loss": 0.5618,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -0.5756198763847351,
      "rewards/margins": 0.5100170373916626,
      "rewards/rejected": -1.085636854171753,
      "step": 50
    },
    {
      "epoch": 0.99,
      "step": 59,
      "total_flos": 0.0,
      "train_loss": 0.5968249611935373,
      "train_runtime": 3576.717,
      "train_samples_per_second": 4.273,
      "train_steps_per_second": 0.016
    }
  ],
  "logging_steps": 10,
  "max_steps": 59,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}