File size: 3,818 Bytes
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9874476987447699,
  "eval_steps": 500,
  "global_step": 59,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.016736401673640166,
      "grad_norm": 7.7724267324991665,
      "learning_rate": 8.333333333333333e-08,
      "logits/chosen": -2.807276487350464,
      "logits/rejected": -2.7759768962860107,
      "logps/chosen": -315.42626953125,
      "logps/rejected": -227.5915985107422,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.16736401673640167,
      "grad_norm": 7.45652629723192,
      "learning_rate": 4.930057285201027e-07,
      "logits/chosen": -2.7555408477783203,
      "logits/rejected": -2.746152400970459,
      "logps/chosen": -271.9476013183594,
      "logps/rejected": -260.6995849609375,
      "loss": 0.6914,
      "rewards/accuracies": 0.5104166865348816,
      "rewards/chosen": 0.0052236998453736305,
      "rewards/margins": 0.003610293846577406,
      "rewards/rejected": 0.0016134059987962246,
      "step": 10
    },
    {
      "epoch": 0.33472803347280333,
      "grad_norm": 6.711189239787838,
      "learning_rate": 4.187457503795526e-07,
      "logits/chosen": -2.786916971206665,
      "logits/rejected": -2.768092632293701,
      "logps/chosen": -261.447265625,
      "logps/rejected": -250.86181640625,
      "loss": 0.6746,
      "rewards/accuracies": 0.7093750238418579,
      "rewards/chosen": 0.032344214618206024,
      "rewards/margins": 0.04053739458322525,
      "rewards/rejected": -0.008193179033696651,
      "step": 20
    },
    {
      "epoch": 0.502092050209205,
      "grad_norm": 7.388933346143141,
      "learning_rate": 2.8691164100062034e-07,
      "logits/chosen": -2.787904739379883,
      "logits/rejected": -2.768831729888916,
      "logps/chosen": -293.8032531738281,
      "logps/rejected": -254.18838500976562,
      "loss": 0.6486,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.02347174473106861,
      "rewards/margins": 0.12683136761188507,
      "rewards/rejected": -0.15030309557914734,
      "step": 30
    },
    {
      "epoch": 0.6694560669456067,
      "grad_norm": 8.184184168957682,
      "learning_rate": 1.4248369943086995e-07,
      "logits/chosen": -2.7605953216552734,
      "logits/rejected": -2.738043785095215,
      "logps/chosen": -264.13555908203125,
      "logps/rejected": -255.6201629638672,
      "loss": 0.6353,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.11216118186712265,
      "rewards/margins": 0.16093404591083527,
      "rewards/rejected": -0.2730952203273773,
      "step": 40
    },
    {
      "epoch": 0.8368200836820083,
      "grad_norm": 8.554193147799957,
      "learning_rate": 3.473909705816111e-08,
      "logits/chosen": -2.7702176570892334,
      "logits/rejected": -2.7502920627593994,
      "logps/chosen": -279.1240234375,
      "logps/rejected": -287.3242492675781,
      "loss": 0.6201,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.22115378081798553,
      "rewards/margins": 0.1369638293981552,
      "rewards/rejected": -0.35811761021614075,
      "step": 50
    },
    {
      "epoch": 0.9874476987447699,
      "step": 59,
      "total_flos": 0.0,
      "train_loss": 0.649040699005127,
      "train_runtime": 1650.5799,
      "train_samples_per_second": 9.259,
      "train_steps_per_second": 0.036
    }
  ],
  "logging_steps": 10,
  "max_steps": 59,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
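
The log_history array above holds one entry per logging step (every 10 steps here) plus a final run-summary entry. A minimal sketch of how this file could be inspected offline, assuming it is saved locally as trainer_state.json and that matplotlib is available (neither is part of the training output itself):

import json
import matplotlib.pyplot as plt

# Load the trainer state (path is an assumption; point it at the downloaded file).
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only per-step log entries; the last entry carries run-level summary stats
# (train_loss, train_runtime, ...) and has no "loss" key.
logs = [entry for entry in state["log_history"] if "loss" in entry]

steps = [entry["step"] for entry in logs]
loss = [entry["loss"] for entry in logs]
margins = [entry["rewards/margins"] for entry in logs]

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
ax1.plot(steps, loss)
ax1.set_xlabel("step")
ax1.set_ylabel("loss")
ax2.plot(steps, margins)
ax2.set_xlabel("step")
ax2.set_ylabel("rewards/margins")
fig.tight_layout()
plt.show()

In this run the loss falls from 0.6931 (chance level for DPO) to about 0.62 while rewards/margins grows, which is the expected pattern over a single epoch of preference optimization.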