lole25 committed on
Commit
37f2f6b
1 Parent(s): 74c72fc

Model save

Browse files
README.md ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: mit
3
+ library_name: peft
4
+ tags:
5
+ - trl
6
+ - dpo
7
+ - generated_from_trainer
8
+ base_model: microsoft/phi-2
9
+ model-index:
10
+ - name: phi-2-dpo-ultrafeedback-lora
11
+ results: []
12
+ ---
13
+
14
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
15
+ should probably proofread and complete it, then remove this comment. -->
16
+
17
+ # phi-2-dpo-ultrafeedback-lora
18
+
19
+ This model is a fine-tuned version of [microsoft/phi-2](https://huggingface.co/microsoft/phi-2) on an unspecified dataset.
20
+ It achieves the following results on the evaluation set:
21
+ - Loss: 0.6537
22
+ - Rewards/chosen: -0.2570
23
+ - Rewards/rejected: -0.3767
24
+ - Rewards/accuracies: 0.6580
25
+ - Rewards/margins: 0.1196
26
+ - Logps/rejected: -269.1014
27
+ - Logps/chosen: -285.9487
28
+ - Logits/rejected: 0.7335
29
+ - Logits/chosen: 0.6309
30
+
31
+ ## Model description
32
+
33
+ More information needed
34
+
35
+ ## Intended uses & limitations
36
+
37
+ More information needed
38
+
39
+ ## Training and evaluation data
40
+
41
+ More information needed
42
+
43
+ ## Training procedure
44
+
45
+ ### Training hyperparameters
46
+
47
+ The following hyperparameters were used during training:
48
+ - learning_rate: 5e-06
49
+ - train_batch_size: 4
50
+ - eval_batch_size: 4
51
+ - seed: 42
52
+ - distributed_type: multi-GPU
53
+ - num_devices: 4
54
+ - gradient_accumulation_steps: 4
55
+ - total_train_batch_size: 64
56
+ - total_eval_batch_size: 16
57
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
58
+ - lr_scheduler_type: cosine
59
+ - lr_scheduler_warmup_ratio: 0.1
60
+ - num_epochs: 2
61
+
62
+ ### Training results
63
+
64
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
65
+ |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
66
+ | 0.6929 | 0.21 | 100 | 0.6928 | 0.0002 | -0.0010 | 0.5320 | 0.0012 | -231.5360 | -260.2240 | 0.9168 | 0.8145 |
67
+ | 0.6893 | 0.42 | 200 | 0.6891 | -0.0038 | -0.0134 | 0.6500 | 0.0096 | -232.7742 | -260.6225 | 0.9234 | 0.8205 |
68
+ | 0.6809 | 0.63 | 300 | 0.6810 | -0.0312 | -0.0611 | 0.6680 | 0.0299 | -237.5431 | -263.3647 | 0.9151 | 0.8092 |
69
+ | 0.6671 | 0.84 | 400 | 0.6723 | -0.0854 | -0.1408 | 0.6640 | 0.0553 | -245.5124 | -268.7867 | 0.8790 | 0.7713 |
70
+ | 0.6627 | 1.05 | 500 | 0.6645 | -0.1494 | -0.2293 | 0.6680 | 0.0799 | -254.3704 | -275.1849 | 0.8294 | 0.7217 |
71
+ | 0.6476 | 1.26 | 600 | 0.6591 | -0.1979 | -0.2968 | 0.6640 | 0.0989 | -261.1124 | -280.0337 | 0.7883 | 0.6828 |
72
+ | 0.6488 | 1.47 | 700 | 0.6559 | -0.2310 | -0.3414 | 0.6620 | 0.1104 | -265.5783 | -283.3440 | 0.7549 | 0.6511 |
73
+ | 0.6449 | 1.67 | 800 | 0.6542 | -0.2518 | -0.3695 | 0.6560 | 0.1177 | -268.3814 | -285.4226 | 0.7372 | 0.6347 |
74
+ | 0.6487 | 1.88 | 900 | 0.6539 | -0.2571 | -0.3764 | 0.6560 | 0.1193 | -269.0724 | -285.9532 | 0.7320 | 0.6299 |
75
+
76
+
77
+ ### Framework versions
78
+
79
+ - PEFT 0.7.1
80
+ - Transformers 4.36.2
81
+ - Pytorch 2.1.2+cu118
82
+ - Datasets 2.14.6
83
+ - Tokenizers 0.15.2
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:5a4e48aad3e92b5a777e7e6886fb7c822510b4e4e73c83a0e72989dcff01c24a
3
  size 41977616
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:18e8c7debd8dff51d1c2995b62bb81a4cff15719362ed2577f580e107afb4a6a
3
  size 41977616
all_results.json ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 2.0,
3
+ "eval_logits/chosen": 0.6308508515357971,
4
+ "eval_logits/rejected": 0.7334519028663635,
5
+ "eval_logps/chosen": -285.9486999511719,
6
+ "eval_logps/rejected": -269.1014404296875,
7
+ "eval_loss": 0.6537346243858337,
8
+ "eval_rewards/accuracies": 0.6579999923706055,
9
+ "eval_rewards/chosen": -0.2570453882217407,
10
+ "eval_rewards/margins": 0.11960798501968384,
11
+ "eval_rewards/rejected": -0.37665337324142456,
12
+ "eval_runtime": 325.7825,
13
+ "eval_samples": 2000,
14
+ "eval_samples_per_second": 6.139,
15
+ "eval_steps_per_second": 0.384,
16
+ "train_loss": 0.6680307045922589,
17
+ "train_runtime": 18174.3674,
18
+ "train_samples": 30567,
19
+ "train_samples_per_second": 3.364,
20
+ "train_steps_per_second": 0.052
21
+ }
eval_results.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 2.0,
3
+ "eval_logits/chosen": 0.6308508515357971,
4
+ "eval_logits/rejected": 0.7334519028663635,
5
+ "eval_logps/chosen": -285.9486999511719,
6
+ "eval_logps/rejected": -269.1014404296875,
7
+ "eval_loss": 0.6537346243858337,
8
+ "eval_rewards/accuracies": 0.6579999923706055,
9
+ "eval_rewards/chosen": -0.2570453882217407,
10
+ "eval_rewards/margins": 0.11960798501968384,
11
+ "eval_rewards/rejected": -0.37665337324142456,
12
+ "eval_runtime": 325.7825,
13
+ "eval_samples": 2000,
14
+ "eval_samples_per_second": 6.139,
15
+ "eval_steps_per_second": 0.384
16
+ }
runs/Mar04_11-43-39_gpu4-119-4/events.out.tfevents.1709513183.gpu4-119-4.2293727.0 CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:cde15c1b3726b60de049bb0ab81759efeaf07e3168cd4176c4f0b095cd485d3c
3
- size 69044
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9f31e217d6d987f66ffee394f6fd2e669ed3c0b75e199acfdfd9eafcf6276480
3
+ size 72568
runs/Mar04_11-43-39_gpu4-119-4/events.out.tfevents.1709531683.gpu4-119-4.2293727.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9c7ce6d34e46b08c034c6db625bfab99f376909a1884b93d2f5a80033bbd3e95
3
+ size 828
train_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 2.0,
3
+ "train_loss": 0.6680307045922589,
4
+ "train_runtime": 18174.3674,
5
+ "train_samples": 30567,
6
+ "train_samples_per_second": 3.364,
7
+ "train_steps_per_second": 0.052
8
+ }
trainer_state.json ADDED
@@ -0,0 +1,1518 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 1.9968602825745683,
5
+ "eval_steps": 100,
6
+ "global_step": 954,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.0,
13
+ "learning_rate": 5.208333333333333e-08,
14
+ "logits/chosen": 0.952304482460022,
15
+ "logits/rejected": 0.5888463854789734,
16
+ "logps/chosen": -223.79486083984375,
17
+ "logps/rejected": -209.482666015625,
18
+ "loss": 0.6931,
19
+ "rewards/accuracies": 0.0,
20
+ "rewards/chosen": 0.0,
21
+ "rewards/margins": 0.0,
22
+ "rewards/rejected": 0.0,
23
+ "step": 1
24
+ },
25
+ {
26
+ "epoch": 0.02,
27
+ "learning_rate": 5.208333333333334e-07,
28
+ "logits/chosen": 0.836148738861084,
29
+ "logits/rejected": 0.8545415997505188,
30
+ "logps/chosen": -236.217529296875,
31
+ "logps/rejected": -221.92974853515625,
32
+ "loss": 0.6932,
33
+ "rewards/accuracies": 0.4444444477558136,
34
+ "rewards/chosen": 0.00045002170372754335,
35
+ "rewards/margins": 0.0005102179129607975,
36
+ "rewards/rejected": -6.019628563080914e-05,
37
+ "step": 10
38
+ },
39
+ {
40
+ "epoch": 0.04,
41
+ "learning_rate": 1.0416666666666667e-06,
42
+ "logits/chosen": 0.8339964151382446,
43
+ "logits/rejected": 0.9286314249038696,
44
+ "logps/chosen": -254.7573699951172,
45
+ "logps/rejected": -247.9052734375,
46
+ "loss": 0.6931,
47
+ "rewards/accuracies": 0.5562499761581421,
48
+ "rewards/chosen": -0.0004581348621286452,
49
+ "rewards/margins": 0.00032393785659223795,
50
+ "rewards/rejected": -0.0007820727187208831,
51
+ "step": 20
52
+ },
53
+ {
54
+ "epoch": 0.06,
55
+ "learning_rate": 1.5625e-06,
56
+ "logits/chosen": 0.8594380617141724,
57
+ "logits/rejected": 0.916830837726593,
58
+ "logps/chosen": -260.4742736816406,
59
+ "logps/rejected": -232.0736846923828,
60
+ "loss": 0.6929,
61
+ "rewards/accuracies": 0.53125,
62
+ "rewards/chosen": 0.0009176501189358532,
63
+ "rewards/margins": 0.0013712994987145066,
64
+ "rewards/rejected": -0.00045364940888248384,
65
+ "step": 30
66
+ },
67
+ {
68
+ "epoch": 0.08,
69
+ "learning_rate": 2.0833333333333334e-06,
70
+ "logits/chosen": 0.8113815188407898,
71
+ "logits/rejected": 0.9032772183418274,
72
+ "logps/chosen": -280.20867919921875,
73
+ "logps/rejected": -228.60598754882812,
74
+ "loss": 0.693,
75
+ "rewards/accuracies": 0.4937500059604645,
76
+ "rewards/chosen": 3.258117794757709e-05,
77
+ "rewards/margins": 0.0006167444516904652,
78
+ "rewards/rejected": -0.0005841633537784219,
79
+ "step": 40
80
+ },
81
+ {
82
+ "epoch": 0.1,
83
+ "learning_rate": 2.604166666666667e-06,
84
+ "logits/chosen": 0.8574679493904114,
85
+ "logits/rejected": 0.9363411068916321,
86
+ "logps/chosen": -257.6195373535156,
87
+ "logps/rejected": -219.19448852539062,
88
+ "loss": 0.6928,
89
+ "rewards/accuracies": 0.59375,
90
+ "rewards/chosen": 0.0007853487622924149,
91
+ "rewards/margins": 0.001927538076415658,
92
+ "rewards/rejected": -0.0011421891395002604,
93
+ "step": 50
94
+ },
95
+ {
96
+ "epoch": 0.13,
97
+ "learning_rate": 3.125e-06,
98
+ "logits/chosen": 0.8745417594909668,
99
+ "logits/rejected": 0.901624858379364,
100
+ "logps/chosen": -237.000244140625,
101
+ "logps/rejected": -237.52395629882812,
102
+ "loss": 0.6929,
103
+ "rewards/accuracies": 0.48750001192092896,
104
+ "rewards/chosen": 1.0722555089159869e-05,
105
+ "rewards/margins": 0.0009478582069277763,
106
+ "rewards/rejected": -0.0009371357737109065,
107
+ "step": 60
108
+ },
109
+ {
110
+ "epoch": 0.15,
111
+ "learning_rate": 3.6458333333333333e-06,
112
+ "logits/chosen": 0.8507300615310669,
113
+ "logits/rejected": 0.8796240091323853,
114
+ "logps/chosen": -260.81085205078125,
115
+ "logps/rejected": -227.607421875,
116
+ "loss": 0.6931,
117
+ "rewards/accuracies": 0.48750001192092896,
118
+ "rewards/chosen": -0.0008873953483998775,
119
+ "rewards/margins": -0.0005922773270867765,
120
+ "rewards/rejected": -0.00029511802131310105,
121
+ "step": 70
122
+ },
123
+ {
124
+ "epoch": 0.17,
125
+ "learning_rate": 4.166666666666667e-06,
126
+ "logits/chosen": 0.8868101239204407,
127
+ "logits/rejected": 0.9193674921989441,
128
+ "logps/chosen": -251.10498046875,
129
+ "logps/rejected": -231.2660675048828,
130
+ "loss": 0.6929,
131
+ "rewards/accuracies": 0.518750011920929,
132
+ "rewards/chosen": -0.00020074064377695322,
133
+ "rewards/margins": 0.0003954143321607262,
134
+ "rewards/rejected": -0.0005961551214568317,
135
+ "step": 80
136
+ },
137
+ {
138
+ "epoch": 0.19,
139
+ "learning_rate": 4.6875000000000004e-06,
140
+ "logits/chosen": 0.8633348345756531,
141
+ "logits/rejected": 0.9175950884819031,
142
+ "logps/chosen": -225.3305206298828,
143
+ "logps/rejected": -241.3758087158203,
144
+ "loss": 0.6931,
145
+ "rewards/accuracies": 0.5249999761581421,
146
+ "rewards/chosen": -0.0006292141624726355,
147
+ "rewards/margins": 0.0004391434194985777,
148
+ "rewards/rejected": -0.0010683576110750437,
149
+ "step": 90
150
+ },
151
+ {
152
+ "epoch": 0.21,
153
+ "learning_rate": 4.999731868769027e-06,
154
+ "logits/chosen": 0.9241905212402344,
155
+ "logits/rejected": 0.9234986305236816,
156
+ "logps/chosen": -242.17343139648438,
157
+ "logps/rejected": -221.4640350341797,
158
+ "loss": 0.6929,
159
+ "rewards/accuracies": 0.5375000238418579,
160
+ "rewards/chosen": -0.00037607658305205405,
161
+ "rewards/margins": 0.0006910150987096131,
162
+ "rewards/rejected": -0.0010670917108654976,
163
+ "step": 100
164
+ },
165
+ {
166
+ "epoch": 0.21,
167
+ "eval_logits/chosen": 0.8145024180412292,
168
+ "eval_logits/rejected": 0.9167963862419128,
169
+ "eval_logps/chosen": -260.2240295410156,
170
+ "eval_logps/rejected": -231.53602600097656,
171
+ "eval_loss": 0.6927996873855591,
172
+ "eval_rewards/accuracies": 0.5320000052452087,
173
+ "eval_rewards/chosen": 0.00020143474102951586,
174
+ "eval_rewards/margins": 0.0012005382450297475,
175
+ "eval_rewards/rejected": -0.000999103649519384,
176
+ "eval_runtime": 326.2051,
177
+ "eval_samples_per_second": 6.131,
178
+ "eval_steps_per_second": 0.383,
179
+ "step": 100
180
+ },
181
+ {
182
+ "epoch": 0.23,
183
+ "learning_rate": 4.996716052911017e-06,
184
+ "logits/chosen": 0.8340839147567749,
185
+ "logits/rejected": 0.8756756782531738,
186
+ "logps/chosen": -264.01495361328125,
187
+ "logps/rejected": -219.5539093017578,
188
+ "loss": 0.6927,
189
+ "rewards/accuracies": 0.543749988079071,
190
+ "rewards/chosen": 7.205537986010313e-05,
191
+ "rewards/margins": 0.0008332778816111386,
192
+ "rewards/rejected": -0.0007612224435433745,
193
+ "step": 110
194
+ },
195
+ {
196
+ "epoch": 0.25,
197
+ "learning_rate": 4.9903533134293035e-06,
198
+ "logits/chosen": 0.8605300784111023,
199
+ "logits/rejected": 0.9697392582893372,
200
+ "logps/chosen": -254.9059295654297,
201
+ "logps/rejected": -219.1450958251953,
202
+ "loss": 0.6924,
203
+ "rewards/accuracies": 0.5562499761581421,
204
+ "rewards/chosen": 0.0002610751544125378,
205
+ "rewards/margins": 0.002276130486279726,
206
+ "rewards/rejected": -0.002015055390074849,
207
+ "step": 120
208
+ },
209
+ {
210
+ "epoch": 0.27,
211
+ "learning_rate": 4.9806521797692184e-06,
212
+ "logits/chosen": 0.877278208732605,
213
+ "logits/rejected": 0.8787814974784851,
214
+ "logps/chosen": -264.6486511230469,
215
+ "logps/rejected": -247.06497192382812,
216
+ "loss": 0.6921,
217
+ "rewards/accuracies": 0.5687500238418579,
218
+ "rewards/chosen": -0.00013167767610866576,
219
+ "rewards/margins": 0.002138237003237009,
220
+ "rewards/rejected": -0.0022699148394167423,
221
+ "step": 130
222
+ },
223
+ {
224
+ "epoch": 0.29,
225
+ "learning_rate": 4.967625656594782e-06,
226
+ "logits/chosen": 0.8538626432418823,
227
+ "logits/rejected": 0.9312636256217957,
228
+ "logps/chosen": -222.04147338867188,
229
+ "logps/rejected": -232.23892211914062,
230
+ "loss": 0.6921,
231
+ "rewards/accuracies": 0.5375000238418579,
232
+ "rewards/chosen": -0.0011232274118810892,
233
+ "rewards/margins": 0.0009854320669546723,
234
+ "rewards/rejected": -0.0021086593624204397,
235
+ "step": 140
236
+ },
237
+ {
238
+ "epoch": 0.31,
239
+ "learning_rate": 4.95129120635556e-06,
240
+ "logits/chosen": 0.8747261166572571,
241
+ "logits/rejected": 0.9058718681335449,
242
+ "logps/chosen": -258.2791442871094,
243
+ "logps/rejected": -215.7833251953125,
244
+ "loss": 0.6919,
245
+ "rewards/accuracies": 0.5625,
246
+ "rewards/chosen": -0.0004979403456673026,
247
+ "rewards/margins": 0.002242505783215165,
248
+ "rewards/rejected": -0.0027404462452977896,
249
+ "step": 150
250
+ },
251
+ {
252
+ "epoch": 0.33,
253
+ "learning_rate": 4.93167072587771e-06,
254
+ "logits/chosen": 0.7788752317428589,
255
+ "logits/rejected": 0.8470233082771301,
256
+ "logps/chosen": -257.824951171875,
257
+ "logps/rejected": -250.79281616210938,
258
+ "loss": 0.6914,
259
+ "rewards/accuracies": 0.606249988079071,
260
+ "rewards/chosen": -0.0017024253029376268,
261
+ "rewards/margins": 0.0027075002435594797,
262
+ "rewards/rejected": -0.0044099255464971066,
263
+ "step": 160
264
+ },
265
+ {
266
+ "epoch": 0.36,
267
+ "learning_rate": 4.908790517010637e-06,
268
+ "logits/chosen": 0.9306305050849915,
269
+ "logits/rejected": 0.9719738960266113,
270
+ "logps/chosen": -239.07101440429688,
271
+ "logps/rejected": -252.9778289794922,
272
+ "loss": 0.6911,
273
+ "rewards/accuracies": 0.550000011920929,
274
+ "rewards/chosen": -0.002183904405683279,
275
+ "rewards/margins": 0.002954904455691576,
276
+ "rewards/rejected": -0.005138809327036142,
277
+ "step": 170
278
+ },
279
+ {
280
+ "epoch": 0.38,
281
+ "learning_rate": 4.882681251368549e-06,
282
+ "logits/chosen": 0.853600800037384,
283
+ "logits/rejected": 0.8488330841064453,
284
+ "logps/chosen": -270.89105224609375,
285
+ "logps/rejected": -257.0163269042969,
286
+ "loss": 0.6906,
287
+ "rewards/accuracies": 0.5874999761581421,
288
+ "rewards/chosen": -0.0012318979715928435,
289
+ "rewards/margins": 0.004222923889756203,
290
+ "rewards/rejected": -0.0054548210464417934,
291
+ "step": 180
292
+ },
293
+ {
294
+ "epoch": 0.4,
295
+ "learning_rate": 4.853377929214243e-06,
296
+ "logits/chosen": 0.7871135473251343,
297
+ "logits/rejected": 0.8603521585464478,
298
+ "logps/chosen": -252.1376190185547,
299
+ "logps/rejected": -245.2152862548828,
300
+ "loss": 0.6898,
301
+ "rewards/accuracies": 0.625,
302
+ "rewards/chosen": -0.0007233443320728838,
303
+ "rewards/margins": 0.007929561659693718,
304
+ "rewards/rejected": -0.008652905933558941,
305
+ "step": 190
306
+ },
307
+ {
308
+ "epoch": 0.42,
309
+ "learning_rate": 4.8209198325401815e-06,
310
+ "logits/chosen": 0.8782480955123901,
311
+ "logits/rejected": 0.8951404690742493,
312
+ "logps/chosen": -237.61087036132812,
313
+ "logps/rejected": -233.7824249267578,
314
+ "loss": 0.6893,
315
+ "rewards/accuracies": 0.6625000238418579,
316
+ "rewards/chosen": -0.002221657894551754,
317
+ "rewards/margins": 0.00844704918563366,
318
+ "rewards/rejected": -0.010668707080185413,
319
+ "step": 200
320
+ },
321
+ {
322
+ "epoch": 0.42,
323
+ "eval_logits/chosen": 0.8204554915428162,
324
+ "eval_logits/rejected": 0.9234155416488647,
325
+ "eval_logps/chosen": -260.62249755859375,
326
+ "eval_logps/rejected": -232.77418518066406,
327
+ "eval_loss": 0.6890601515769958,
328
+ "eval_rewards/accuracies": 0.6499999761581421,
329
+ "eval_rewards/chosen": -0.0037833875976502895,
330
+ "eval_rewards/margins": 0.009597329422831535,
331
+ "eval_rewards/rejected": -0.013380718417465687,
332
+ "eval_runtime": 326.0716,
333
+ "eval_samples_per_second": 6.134,
334
+ "eval_steps_per_second": 0.383,
335
+ "step": 200
336
+ },
337
+ {
338
+ "epoch": 0.44,
339
+ "learning_rate": 4.785350472409792e-06,
340
+ "logits/chosen": 0.906767725944519,
341
+ "logits/rejected": 0.9123601913452148,
342
+ "logps/chosen": -233.3804168701172,
343
+ "logps/rejected": -228.94284057617188,
344
+ "loss": 0.689,
345
+ "rewards/accuracies": 0.606249988079071,
346
+ "rewards/chosen": -0.00779892411082983,
347
+ "rewards/margins": 0.007471003569662571,
348
+ "rewards/rejected": -0.015269925817847252,
349
+ "step": 210
350
+ },
351
+ {
352
+ "epoch": 0.46,
353
+ "learning_rate": 4.746717530629565e-06,
354
+ "logits/chosen": 0.8570274114608765,
355
+ "logits/rejected": 0.8925272226333618,
356
+ "logps/chosen": -259.993896484375,
357
+ "logps/rejected": -239.8516082763672,
358
+ "loss": 0.6887,
359
+ "rewards/accuracies": 0.6499999761581421,
360
+ "rewards/chosen": -0.006393183022737503,
361
+ "rewards/margins": 0.01187940128147602,
362
+ "rewards/rejected": -0.018272582441568375,
363
+ "step": 220
364
+ },
365
+ {
366
+ "epoch": 0.48,
367
+ "learning_rate": 4.7050727958301505e-06,
368
+ "logits/chosen": 0.8952150344848633,
369
+ "logits/rejected": 0.8783475756645203,
370
+ "logps/chosen": -244.82919311523438,
371
+ "logps/rejected": -230.5172882080078,
372
+ "loss": 0.687,
373
+ "rewards/accuracies": 0.6312500238418579,
374
+ "rewards/chosen": -0.008937228471040726,
375
+ "rewards/margins": 0.014644329436123371,
376
+ "rewards/rejected": -0.023581556975841522,
377
+ "step": 230
378
+ },
379
+ {
380
+ "epoch": 0.5,
381
+ "learning_rate": 4.660472094042121e-06,
382
+ "logits/chosen": 0.857439398765564,
383
+ "logits/rejected": 0.8656377792358398,
384
+ "logps/chosen": -282.5680847167969,
385
+ "logps/rejected": -236.55111694335938,
386
+ "loss": 0.6853,
387
+ "rewards/accuracies": 0.7250000238418579,
388
+ "rewards/chosen": -0.012710051611065865,
389
+ "rewards/margins": 0.017861289903521538,
390
+ "rewards/rejected": -0.030571341514587402,
391
+ "step": 240
392
+ },
393
+ {
394
+ "epoch": 0.52,
395
+ "learning_rate": 4.612975213859487e-06,
396
+ "logits/chosen": 0.8455727696418762,
397
+ "logits/rejected": 0.9185819625854492,
398
+ "logps/chosen": -268.9493713378906,
399
+ "logps/rejected": -243.8383331298828,
400
+ "loss": 0.6852,
401
+ "rewards/accuracies": 0.643750011920929,
402
+ "rewards/chosen": -0.014121539890766144,
403
+ "rewards/margins": 0.015637289732694626,
404
+ "rewards/rejected": -0.02975882962346077,
405
+ "step": 250
406
+ },
407
+ {
408
+ "epoch": 0.54,
409
+ "learning_rate": 4.5626458262912745e-06,
410
+ "logits/chosen": 0.858254075050354,
411
+ "logits/rejected": 0.8988651037216187,
412
+ "logps/chosen": -274.9979553222656,
413
+ "logps/rejected": -262.4846496582031,
414
+ "loss": 0.6842,
415
+ "rewards/accuracies": 0.675000011920929,
416
+ "rewards/chosen": -0.008313321508467197,
417
+ "rewards/margins": 0.024480264633893967,
418
+ "rewards/rejected": -0.03279358521103859,
419
+ "step": 260
420
+ },
421
+ {
422
+ "epoch": 0.57,
423
+ "learning_rate": 4.509551399408598e-06,
424
+ "logits/chosen": 0.9360445737838745,
425
+ "logits/rejected": 0.9437822103500366,
426
+ "logps/chosen": -253.78689575195312,
427
+ "logps/rejected": -211.69509887695312,
428
+ "loss": 0.683,
429
+ "rewards/accuracies": 0.699999988079071,
430
+ "rewards/chosen": -0.015140311792492867,
431
+ "rewards/margins": 0.022892246022820473,
432
+ "rewards/rejected": -0.03803255409002304,
433
+ "step": 270
434
+ },
435
+ {
436
+ "epoch": 0.59,
437
+ "learning_rate": 4.453763107901676e-06,
438
+ "logits/chosen": 0.9350343942642212,
439
+ "logits/rejected": 0.8994883298873901,
440
+ "logps/chosen": -247.9213104248047,
441
+ "logps/rejected": -256.08056640625,
442
+ "loss": 0.6842,
443
+ "rewards/accuracies": 0.612500011920929,
444
+ "rewards/chosen": -0.025496864691376686,
445
+ "rewards/margins": 0.012935856357216835,
446
+ "rewards/rejected": -0.03843272104859352,
447
+ "step": 280
448
+ },
449
+ {
450
+ "epoch": 0.61,
451
+ "learning_rate": 4.3953557376679856e-06,
452
+ "logits/chosen": 0.8583111763000488,
453
+ "logits/rejected": 0.8643442988395691,
454
+ "logps/chosen": -262.2920837402344,
455
+ "logps/rejected": -258.9486083984375,
456
+ "loss": 0.6827,
457
+ "rewards/accuracies": 0.59375,
458
+ "rewards/chosen": -0.027247220277786255,
459
+ "rewards/margins": 0.015835126861929893,
460
+ "rewards/rejected": -0.0430823490023613,
461
+ "step": 290
462
+ },
463
+ {
464
+ "epoch": 0.63,
465
+ "learning_rate": 4.33440758555951e-06,
466
+ "logits/chosen": 0.8355463743209839,
467
+ "logits/rejected": 0.9161020517349243,
468
+ "logps/chosen": -250.9317626953125,
469
+ "logps/rejected": -247.6198272705078,
470
+ "loss": 0.6809,
471
+ "rewards/accuracies": 0.6812499761581421,
472
+ "rewards/chosen": -0.018615344539284706,
473
+ "rewards/margins": 0.033209316432476044,
474
+ "rewards/rejected": -0.0518246591091156,
475
+ "step": 300
476
+ },
477
+ {
478
+ "epoch": 0.63,
479
+ "eval_logits/chosen": 0.8092363476753235,
480
+ "eval_logits/rejected": 0.9150914549827576,
481
+ "eval_logps/chosen": -263.3646545410156,
482
+ "eval_logps/rejected": -237.54312133789062,
483
+ "eval_loss": 0.6809768080711365,
484
+ "eval_rewards/accuracies": 0.6679999828338623,
485
+ "eval_rewards/chosen": -0.03120502457022667,
486
+ "eval_rewards/margins": 0.029865048825740814,
487
+ "eval_rewards/rejected": -0.061070073395967484,
488
+ "eval_runtime": 325.9788,
489
+ "eval_samples_per_second": 6.135,
490
+ "eval_steps_per_second": 0.383,
491
+ "step": 300
492
+ },
493
+ {
494
+ "epoch": 0.65,
495
+ "learning_rate": 4.2710003544234255e-06,
496
+ "logits/chosen": 0.855351448059082,
497
+ "logits/rejected": 0.8703336715698242,
498
+ "logps/chosen": -238.54061889648438,
499
+ "logps/rejected": -230.7489776611328,
500
+ "loss": 0.6787,
501
+ "rewards/accuracies": 0.6187499761581421,
502
+ "rewards/chosen": -0.036315977573394775,
503
+ "rewards/margins": 0.025386247783899307,
504
+ "rewards/rejected": -0.061702221632003784,
505
+ "step": 310
506
+ },
507
+ {
508
+ "epoch": 0.67,
509
+ "learning_rate": 4.205219043576955e-06,
510
+ "logits/chosen": 0.8615506887435913,
511
+ "logits/rejected": 0.8854458928108215,
512
+ "logps/chosen": -227.1722869873047,
513
+ "logps/rejected": -220.97412109375,
514
+ "loss": 0.6785,
515
+ "rewards/accuracies": 0.6812499761581421,
516
+ "rewards/chosen": -0.03975047916173935,
517
+ "rewards/margins": 0.028606727719306946,
518
+ "rewards/rejected": -0.06835721433162689,
519
+ "step": 320
520
+ },
521
+ {
522
+ "epoch": 0.69,
523
+ "learning_rate": 4.137151834863213e-06,
524
+ "logits/chosen": 0.8440972566604614,
525
+ "logits/rejected": 0.8271803855895996,
526
+ "logps/chosen": -256.4277038574219,
527
+ "logps/rejected": -264.7651062011719,
528
+ "loss": 0.6787,
529
+ "rewards/accuracies": 0.637499988079071,
530
+ "rewards/chosen": -0.04135146737098694,
531
+ "rewards/margins": 0.039213813841342926,
532
+ "rewards/rejected": -0.08056528866291046,
533
+ "step": 330
534
+ },
535
+ {
536
+ "epoch": 0.71,
537
+ "learning_rate": 4.066889974440757e-06,
538
+ "logits/chosen": 0.8700094223022461,
539
+ "logits/rejected": 0.8766329884529114,
540
+ "logps/chosen": -269.4642333984375,
541
+ "logps/rejected": -255.2469024658203,
542
+ "loss": 0.6737,
543
+ "rewards/accuracies": 0.65625,
544
+ "rewards/chosen": -0.03877495974302292,
545
+ "rewards/margins": 0.04113053157925606,
546
+ "rewards/rejected": -0.07990548759698868,
547
+ "step": 340
548
+ },
549
+ {
550
+ "epoch": 0.73,
551
+ "learning_rate": 3.994527650465352e-06,
552
+ "logits/chosen": 0.8648789525032043,
553
+ "logits/rejected": 0.8878329396247864,
554
+ "logps/chosen": -227.1282958984375,
555
+ "logps/rejected": -212.8804931640625,
556
+ "loss": 0.6755,
557
+ "rewards/accuracies": 0.6875,
558
+ "rewards/chosen": -0.05504993349313736,
559
+ "rewards/margins": 0.040687672793865204,
560
+ "rewards/rejected": -0.09573759883642197,
561
+ "step": 350
562
+ },
563
+ {
564
+ "epoch": 0.75,
565
+ "learning_rate": 3.92016186682789e-06,
566
+ "logits/chosen": 0.7903240919113159,
567
+ "logits/rejected": 0.8026289939880371,
568
+ "logps/chosen": -214.0880889892578,
569
+ "logps/rejected": -244.24618530273438,
570
+ "loss": 0.6752,
571
+ "rewards/accuracies": 0.6812499761581421,
572
+ "rewards/chosen": -0.06658105552196503,
573
+ "rewards/margins": 0.03603743016719818,
574
+ "rewards/rejected": -0.1026184931397438,
575
+ "step": 360
576
+ },
577
+ {
578
+ "epoch": 0.77,
579
+ "learning_rate": 3.843892313117724e-06,
580
+ "logits/chosen": 0.8764356374740601,
581
+ "logits/rejected": 0.9107359051704407,
582
+ "logps/chosen": -273.88873291015625,
583
+ "logps/rejected": -251.10812377929688,
584
+ "loss": 0.6758,
585
+ "rewards/accuracies": 0.6625000238418579,
586
+ "rewards/chosen": -0.06244384124875069,
587
+ "rewards/margins": 0.04542430490255356,
588
+ "rewards/rejected": -0.10786814987659454,
589
+ "step": 370
590
+ },
591
+ {
592
+ "epoch": 0.8,
593
+ "learning_rate": 3.7658212309857576e-06,
594
+ "logits/chosen": 0.8514490127563477,
595
+ "logits/rejected": 0.8745658993721008,
596
+ "logps/chosen": -244.4405059814453,
597
+ "logps/rejected": -224.3061981201172,
598
+ "loss": 0.6708,
599
+ "rewards/accuracies": 0.6812499761581421,
600
+ "rewards/chosen": -0.06743437796831131,
601
+ "rewards/margins": 0.048164453357458115,
602
+ "rewards/rejected": -0.11559884250164032,
603
+ "step": 380
604
+ },
605
+ {
606
+ "epoch": 0.82,
607
+ "learning_rate": 3.686053277086401e-06,
608
+ "logits/chosen": 0.8000567555427551,
609
+ "logits/rejected": 0.8884351849555969,
610
+ "logps/chosen": -268.4157409667969,
611
+ "logps/rejected": -247.18115234375,
612
+ "loss": 0.6696,
613
+ "rewards/accuracies": 0.675000011920929,
614
+ "rewards/chosen": -0.07314668595790863,
615
+ "rewards/margins": 0.049585141241550446,
616
+ "rewards/rejected": -0.12273182719945908,
617
+ "step": 390
618
+ },
619
+ {
620
+ "epoch": 0.84,
621
+ "learning_rate": 3.604695382782159e-06,
622
+ "logits/chosen": 0.8107595443725586,
623
+ "logits/rejected": 0.8327080607414246,
624
+ "logps/chosen": -284.17218017578125,
625
+ "logps/rejected": -263.6735534667969,
626
+ "loss": 0.6671,
627
+ "rewards/accuracies": 0.643750011920929,
628
+ "rewards/chosen": -0.0770660936832428,
629
+ "rewards/margins": 0.0486028790473938,
630
+ "rewards/rejected": -0.1256689727306366,
631
+ "step": 400
632
+ },
633
+ {
634
+ "epoch": 0.84,
635
+ "eval_logits/chosen": 0.7712985873222351,
636
+ "eval_logits/rejected": 0.8790242671966553,
637
+ "eval_logps/chosen": -268.7867431640625,
638
+ "eval_logps/rejected": -245.51242065429688,
639
+ "eval_loss": 0.6722846627235413,
640
+ "eval_rewards/accuracies": 0.6639999747276306,
641
+ "eval_rewards/chosen": -0.0854262262582779,
642
+ "eval_rewards/margins": 0.055336710065603256,
643
+ "eval_rewards/rejected": -0.14076292514801025,
644
+ "eval_runtime": 325.7205,
645
+ "eval_samples_per_second": 6.14,
646
+ "eval_steps_per_second": 0.384,
647
+ "step": 400
648
+ },
649
+ {
650
+ "epoch": 0.86,
651
+ "learning_rate": 3.5218566107988872e-06,
652
+ "logits/chosen": 0.8288625478744507,
653
+ "logits/rejected": 0.8551315069198608,
654
+ "logps/chosen": -276.74462890625,
655
+ "logps/rejected": -247.576416015625,
656
+ "loss": 0.6718,
657
+ "rewards/accuracies": 0.6812499761581421,
658
+ "rewards/chosen": -0.08322989195585251,
659
+ "rewards/margins": 0.06066075712442398,
660
+ "rewards/rejected": -0.1438906490802765,
661
+ "step": 410
662
+ },
663
+ {
664
+ "epoch": 0.88,
665
+ "learning_rate": 3.437648009023905e-06,
666
+ "logits/chosen": 0.7773796916007996,
667
+ "logits/rejected": 0.8443312644958496,
668
+ "logps/chosen": -222.3345184326172,
669
+ "logps/rejected": -218.86825561523438,
670
+ "loss": 0.6696,
671
+ "rewards/accuracies": 0.6312500238418579,
672
+ "rewards/chosen": -0.10068164020776749,
673
+ "rewards/margins": 0.04352349415421486,
674
+ "rewards/rejected": -0.14420512318611145,
675
+ "step": 420
676
+ },
677
+ {
678
+ "epoch": 0.9,
679
+ "learning_rate": 3.352182461642929e-06,
680
+ "logits/chosen": 0.8025181889533997,
681
+ "logits/rejected": 0.8646955490112305,
682
+ "logps/chosen": -243.1133270263672,
683
+ "logps/rejected": -232.02047729492188,
684
+ "loss": 0.6638,
685
+ "rewards/accuracies": 0.6875,
686
+ "rewards/chosen": -0.08811922371387482,
687
+ "rewards/margins": 0.06207479164004326,
688
+ "rewards/rejected": -0.15019401907920837,
689
+ "step": 430
690
+ },
691
+ {
692
+ "epoch": 0.92,
693
+ "learning_rate": 3.265574537815398e-06,
694
+ "logits/chosen": 0.8092749714851379,
695
+ "logits/rejected": 0.8513230085372925,
696
+ "logps/chosen": -288.71636962890625,
697
+ "logps/rejected": -256.467529296875,
698
+ "loss": 0.6676,
699
+ "rewards/accuracies": 0.6875,
700
+ "rewards/chosen": -0.0855972021818161,
701
+ "rewards/margins": 0.07402561604976654,
702
+ "rewards/rejected": -0.15962281823158264,
703
+ "step": 440
704
+ },
705
+ {
706
+ "epoch": 0.94,
707
+ "learning_rate": 3.177940338091043e-06,
708
+ "logits/chosen": 0.8230969309806824,
709
+ "logits/rejected": 0.8771616816520691,
710
+ "logps/chosen": -262.885009765625,
711
+ "logps/rejected": -236.41366577148438,
712
+ "loss": 0.6661,
713
+ "rewards/accuracies": 0.668749988079071,
714
+ "rewards/chosen": -0.10893376171588898,
715
+ "rewards/margins": 0.05591960996389389,
716
+ "rewards/rejected": -0.16485336422920227,
717
+ "step": 450
718
+ },
719
+ {
720
+ "epoch": 0.96,
721
+ "learning_rate": 3.089397338773569e-06,
722
+ "logits/chosen": 0.8003429174423218,
723
+ "logits/rejected": 0.8373273015022278,
724
+ "logps/chosen": -269.59130859375,
725
+ "logps/rejected": -243.3413543701172,
726
+ "loss": 0.6635,
727
+ "rewards/accuracies": 0.706250011920929,
728
+ "rewards/chosen": -0.12066192924976349,
729
+ "rewards/margins": 0.07056786119937897,
730
+ "rewards/rejected": -0.19122979044914246,
731
+ "step": 460
732
+ },
733
+ {
734
+ "epoch": 0.98,
735
+ "learning_rate": 3.0000642344401115e-06,
736
+ "logits/chosen": 0.7690973877906799,
737
+ "logits/rejected": 0.8160909414291382,
738
+ "logps/chosen": -250.65017700195312,
739
+ "logps/rejected": -241.54763793945312,
740
+ "loss": 0.6624,
741
+ "rewards/accuracies": 0.637499988079071,
742
+ "rewards/chosen": -0.12458320707082748,
743
+ "rewards/margins": 0.06558409333229065,
744
+ "rewards/rejected": -0.19016727805137634,
745
+ "step": 470
746
+ },
747
+ {
748
+ "epoch": 1.0,
749
+ "learning_rate": 2.9100607788275547e-06,
750
+ "logits/chosen": 0.8020121455192566,
751
+ "logits/rejected": 0.8617356419563293,
752
+ "logps/chosen": -256.21771240234375,
753
+ "logps/rejected": -253.4419708251953,
754
+ "loss": 0.6658,
755
+ "rewards/accuracies": 0.668749988079071,
756
+ "rewards/chosen": -0.1322631537914276,
757
+ "rewards/margins": 0.06032481789588928,
758
+ "rewards/rejected": -0.1925879716873169,
759
+ "step": 480
760
+ },
761
+ {
762
+ "epoch": 1.03,
763
+ "learning_rate": 2.8195076242990124e-06,
764
+ "logits/chosen": 0.7743502259254456,
765
+ "logits/rejected": 0.8271788358688354,
766
+ "logps/chosen": -252.33804321289062,
767
+ "logps/rejected": -235.423828125,
768
+ "loss": 0.6624,
769
+ "rewards/accuracies": 0.643750011920929,
770
+ "rewards/chosen": -0.14343151450157166,
771
+ "rewards/margins": 0.07069016247987747,
772
+ "rewards/rejected": -0.21412166953086853,
773
+ "step": 490
774
+ },
775
+ {
776
+ "epoch": 1.05,
777
+ "learning_rate": 2.72852616010567e-06,
778
+ "logits/chosen": 0.7551236748695374,
779
+ "logits/rejected": 0.7969120144844055,
780
+ "logps/chosen": -266.84375,
781
+ "logps/rejected": -254.37460327148438,
782
+ "loss": 0.6627,
783
+ "rewards/accuracies": 0.71875,
784
+ "rewards/chosen": -0.1373104304075241,
785
+ "rewards/margins": 0.07557874917984009,
786
+ "rewards/rejected": -0.212889164686203,
787
+ "step": 500
788
+ },
789
+ {
790
+ "epoch": 1.05,
791
+ "eval_logits/chosen": 0.7216536998748779,
792
+ "eval_logits/rejected": 0.8293780088424683,
793
+ "eval_logps/chosen": -275.1849365234375,
794
+ "eval_logps/rejected": -254.370361328125,
795
+ "eval_loss": 0.664509654045105,
796
+ "eval_rewards/accuracies": 0.6679999828338623,
797
+ "eval_rewards/chosen": -0.14940780401229858,
798
+ "eval_rewards/margins": 0.07993472367525101,
799
+ "eval_rewards/rejected": -0.2293425351381302,
800
+ "eval_runtime": 325.7214,
801
+ "eval_samples_per_second": 6.14,
802
+ "eval_steps_per_second": 0.384,
803
+ "step": 500
804
+ },
805
+ {
806
+ "epoch": 1.07,
807
+ "learning_rate": 2.637238349660819e-06,
808
+ "logits/chosen": 0.7647446393966675,
809
+ "logits/rejected": 0.8582034111022949,
810
+ "logps/chosen": -251.79904174804688,
811
+ "logps/rejected": -218.86587524414062,
812
+ "loss": 0.665,
813
+ "rewards/accuracies": 0.6875,
814
+ "rewards/chosen": -0.16131970286369324,
815
+ "rewards/margins": 0.06838177144527435,
816
+ "rewards/rejected": -0.2297014743089676,
817
+ "step": 510
818
+ },
819
+ {
820
+ "epoch": 1.09,
821
+ "learning_rate": 2.5457665670441937e-06,
822
+ "logits/chosen": 0.8116461038589478,
823
+ "logits/rejected": 0.8353972434997559,
824
+ "logps/chosen": -263.5601501464844,
825
+ "logps/rejected": -247.66909790039062,
826
+ "loss": 0.6591,
827
+ "rewards/accuracies": 0.6937500238418579,
828
+ "rewards/chosen": -0.14197978377342224,
829
+ "rewards/margins": 0.09031648933887482,
830
+ "rewards/rejected": -0.23229627311229706,
831
+ "step": 520
832
+ },
833
+ {
834
+ "epoch": 1.11,
835
+ "learning_rate": 2.4542334329558075e-06,
836
+ "logits/chosen": 0.7308142781257629,
837
+ "logits/rejected": 0.7632499933242798,
838
+ "logps/chosen": -257.2229919433594,
839
+ "logps/rejected": -250.84469604492188,
840
+ "loss": 0.6607,
841
+ "rewards/accuracies": 0.7437499761581421,
842
+ "rewards/chosen": -0.15741421282291412,
843
+ "rewards/margins": 0.07882841676473618,
844
+ "rewards/rejected": -0.2362426221370697,
845
+ "step": 530
846
+ },
847
+ {
848
+ "epoch": 1.13,
849
+ "learning_rate": 2.3627616503391813e-06,
850
+ "logits/chosen": 0.7235521674156189,
851
+ "logits/rejected": 0.7589094042778015,
852
+ "logps/chosen": -274.9698791503906,
853
+ "logps/rejected": -238.1727752685547,
854
+ "loss": 0.6614,
855
+ "rewards/accuracies": 0.6499999761581421,
856
+ "rewards/chosen": -0.16767871379852295,
857
+ "rewards/margins": 0.07072968035936356,
858
+ "rewards/rejected": -0.2384084016084671,
859
+ "step": 540
860
+ },
861
+ {
862
+ "epoch": 1.15,
863
+ "learning_rate": 2.271473839894331e-06,
864
+ "logits/chosen": 0.7224727869033813,
865
+ "logits/rejected": 0.7499375939369202,
866
+ "logps/chosen": -283.6424560546875,
867
+ "logps/rejected": -272.5013122558594,
868
+ "loss": 0.6593,
869
+ "rewards/accuracies": 0.6499999761581421,
870
+ "rewards/chosen": -0.17033258080482483,
871
+ "rewards/margins": 0.07838577777147293,
872
+ "rewards/rejected": -0.24871835112571716,
873
+ "step": 550
874
+ },
875
+ {
876
+ "epoch": 1.17,
877
+ "learning_rate": 2.1804923757009885e-06,
878
+ "logits/chosen": 0.6960411667823792,
879
+ "logits/rejected": 0.7274879813194275,
880
+ "logps/chosen": -269.6304016113281,
881
+ "logps/rejected": -250.3975830078125,
882
+ "loss": 0.6614,
883
+ "rewards/accuracies": 0.6875,
884
+ "rewards/chosen": -0.18492145836353302,
885
+ "rewards/margins": 0.07485240697860718,
886
+ "rewards/rejected": -0.259773850440979,
887
+ "step": 560
888
+ },
889
+ {
890
+ "epoch": 1.19,
891
+ "learning_rate": 2.089939221172446e-06,
892
+ "logits/chosen": 0.7146167755126953,
893
+ "logits/rejected": 0.7337481379508972,
894
+ "logps/chosen": -288.8470153808594,
895
+ "logps/rejected": -258.131591796875,
896
+ "loss": 0.664,
897
+ "rewards/accuracies": 0.675000011920929,
898
+ "rewards/chosen": -0.19552810490131378,
899
+ "rewards/margins": 0.09874384850263596,
900
+ "rewards/rejected": -0.29427194595336914,
901
+ "step": 570
902
+ },
903
+ {
904
+ "epoch": 1.21,
905
+ "learning_rate": 1.9999357655598894e-06,
906
+ "logits/chosen": 0.7304507493972778,
907
+ "logits/rejected": 0.7039176225662231,
908
+ "logps/chosen": -259.52008056640625,
909
+ "logps/rejected": -250.97146606445312,
910
+ "loss": 0.6591,
911
+ "rewards/accuracies": 0.668749988079071,
912
+ "rewards/chosen": -0.19664093852043152,
913
+ "rewards/margins": 0.058397077023983,
914
+ "rewards/rejected": -0.2550380527973175,
915
+ "step": 580
916
+ },
917
+ {
918
+ "epoch": 1.23,
919
+ "learning_rate": 1.9106026612264316e-06,
920
+ "logits/chosen": 0.7533366084098816,
921
+ "logits/rejected": 0.8193215131759644,
922
+ "logps/chosen": -236.71243286132812,
923
+ "logps/rejected": -238.93679809570312,
924
+ "loss": 0.653,
925
+ "rewards/accuracies": 0.6000000238418579,
926
+ "rewards/chosen": -0.1914629340171814,
927
+ "rewards/margins": 0.07271396368741989,
928
+ "rewards/rejected": -0.2641769051551819,
929
+ "step": 590
930
+ },
931
+ {
932
+ "epoch": 1.26,
933
+ "learning_rate": 1.8220596619089576e-06,
934
+ "logits/chosen": 0.7307643890380859,
935
+ "logits/rejected": 0.7107076048851013,
936
+ "logps/chosen": -281.09710693359375,
937
+ "logps/rejected": -258.6228942871094,
938
+ "loss": 0.6476,
939
+ "rewards/accuracies": 0.6937500238418579,
940
+ "rewards/chosen": -0.19486048817634583,
941
+ "rewards/margins": 0.10412123054265976,
942
+ "rewards/rejected": -0.2989817261695862,
943
+ "step": 600
944
+ },
945
+ {
946
+ "epoch": 1.26,
947
+ "eval_logits/chosen": 0.6828243136405945,
948
+ "eval_logits/rejected": 0.7883425354957581,
949
+ "eval_logps/chosen": -280.03369140625,
950
+ "eval_logps/rejected": -261.11236572265625,
951
+ "eval_loss": 0.659087061882019,
952
+ "eval_rewards/accuracies": 0.6639999747276306,
953
+ "eval_rewards/chosen": -0.1978950798511505,
954
+ "eval_rewards/margins": 0.09886746108531952,
955
+ "eval_rewards/rejected": -0.2967625558376312,
956
+ "eval_runtime": 325.9412,
957
+ "eval_samples_per_second": 6.136,
958
+ "eval_steps_per_second": 0.384,
959
+ "step": 600
960
+ },
961
+ {
962
+ "epoch": 1.28,
963
+ "learning_rate": 1.7344254621846018e-06,
964
+ "logits/chosen": 0.700169026851654,
965
+ "logits/rejected": 0.7478510141372681,
966
+ "logps/chosen": -283.26641845703125,
967
+ "logps/rejected": -278.6813049316406,
968
+ "loss": 0.6514,
969
+ "rewards/accuracies": 0.6625000238418579,
970
+ "rewards/chosen": -0.191552072763443,
971
+ "rewards/margins": 0.09749529510736465,
972
+ "rewards/rejected": -0.28904733061790466,
973
+ "step": 610
974
+ },
975
+ {
976
+ "epoch": 1.3,
977
+ "learning_rate": 1.647817538357072e-06,
978
+ "logits/chosen": 0.7241575121879578,
979
+ "logits/rejected": 0.7943159341812134,
980
+ "logps/chosen": -267.4305725097656,
981
+ "logps/rejected": -256.3984069824219,
982
+ "loss": 0.6591,
983
+ "rewards/accuracies": 0.612500011920929,
984
+ "rewards/chosen": -0.2104833871126175,
985
+ "rewards/margins": 0.07614756375551224,
986
+ "rewards/rejected": -0.2866309583187103,
987
+ "step": 620
988
+ },
989
+ {
990
+ "epoch": 1.32,
991
+ "learning_rate": 1.5623519909760953e-06,
992
+ "logits/chosen": 0.6998089551925659,
993
+ "logits/rejected": 0.7587723731994629,
994
+ "logps/chosen": -264.82110595703125,
995
+ "logps/rejected": -267.1747741699219,
996
+ "loss": 0.6551,
997
+ "rewards/accuracies": 0.637499988079071,
998
+ "rewards/chosen": -0.22911302745342255,
999
+ "rewards/margins": 0.07119645923376083,
1000
+ "rewards/rejected": -0.30030950903892517,
1001
+ "step": 630
1002
+ },
1003
+ {
1004
+ "epoch": 1.34,
1005
+ "learning_rate": 1.4781433892011132e-06,
1006
+ "logits/chosen": 0.7701424360275269,
1007
+ "logits/rejected": 0.7888699173927307,
1008
+ "logps/chosen": -262.2229309082031,
1009
+ "logps/rejected": -286.0958557128906,
1010
+ "loss": 0.658,
1011
+ "rewards/accuracies": 0.668749988079071,
1012
+ "rewards/chosen": -0.2087526023387909,
1013
+ "rewards/margins": 0.07823307812213898,
1014
+ "rewards/rejected": -0.28698569536209106,
1015
+ "step": 640
1016
+ },
1017
+ {
1018
+ "epoch": 1.36,
1019
+ "learning_rate": 1.3953046172178413e-06,
1020
+ "logits/chosen": 0.7079032063484192,
1021
+ "logits/rejected": 0.7274273037910461,
1022
+ "logps/chosen": -276.0498352050781,
1023
+ "logps/rejected": -277.2353820800781,
1024
+ "loss": 0.6536,
1025
+ "rewards/accuracies": 0.6499999761581421,
1026
+ "rewards/chosen": -0.23646607995033264,
1027
+ "rewards/margins": 0.09612749516963959,
1028
+ "rewards/rejected": -0.33259356021881104,
1029
+ "step": 650
1030
+ },
1031
+ {
1032
+ "epoch": 1.38,
1033
+ "learning_rate": 1.3139467229135999e-06,
1034
+ "logits/chosen": 0.6826360821723938,
1035
+ "logits/rejected": 0.751067578792572,
1036
+ "logps/chosen": -241.80044555664062,
1037
+ "logps/rejected": -275.70989990234375,
1038
+ "loss": 0.6556,
1039
+ "rewards/accuracies": 0.637499988079071,
1040
+ "rewards/chosen": -0.22094163298606873,
1041
+ "rewards/margins": 0.08276458084583282,
1042
+ "rewards/rejected": -0.30370622873306274,
1043
+ "step": 660
1044
+ },
1045
+ {
1046
+ "epoch": 1.4,
1047
+ "learning_rate": 1.2341787690142436e-06,
1048
+ "logits/chosen": 0.6527940630912781,
1049
+ "logits/rejected": 0.7365330457687378,
1050
+ "logps/chosen": -320.76385498046875,
1051
+ "logps/rejected": -271.78790283203125,
1052
+ "loss": 0.6552,
1053
+ "rewards/accuracies": 0.6937500238418579,
1054
+ "rewards/chosen": -0.20969745516777039,
1055
+ "rewards/margins": 0.11965203285217285,
1056
+ "rewards/rejected": -0.32934948801994324,
1057
+ "step": 670
1058
+ },
1059
+ {
1060
+ "epoch": 1.42,
1061
+ "learning_rate": 1.1561076868822756e-06,
1062
+ "logits/chosen": 0.6766701936721802,
1063
+ "logits/rejected": 0.7141178250312805,
1064
+ "logps/chosen": -265.7345886230469,
1065
+ "logps/rejected": -245.7202606201172,
1066
+ "loss": 0.6518,
1067
+ "rewards/accuracies": 0.6312500238418579,
1068
+ "rewards/chosen": -0.22580020129680634,
1069
+ "rewards/margins": 0.0855962485074997,
1070
+ "rewards/rejected": -0.31139642000198364,
1071
+ "step": 680
1072
+ },
1073
+ {
1074
+ "epoch": 1.44,
1075
+ "learning_rate": 1.079838133172111e-06,
1076
+ "logits/chosen": 0.7425110936164856,
1077
+ "logits/rejected": 0.7384303212165833,
1078
+ "logps/chosen": -278.8888854980469,
1079
+ "logps/rejected": -266.8258972167969,
1080
+ "loss": 0.6598,
1081
+ "rewards/accuracies": 0.6812499761581421,
1082
+ "rewards/chosen": -0.21307018399238586,
1083
+ "rewards/margins": 0.08619161695241928,
1084
+ "rewards/rejected": -0.29926180839538574,
1085
+ "step": 690
1086
+ },
1087
+ {
1088
+ "epoch": 1.47,
1089
+ "learning_rate": 1.0054723495346484e-06,
1090
+ "logits/chosen": 0.6221153736114502,
1091
+ "logits/rejected": 0.6998614072799683,
1092
+ "logps/chosen": -280.8089599609375,
1093
+ "logps/rejected": -257.3545837402344,
1094
+ "loss": 0.6488,
1095
+ "rewards/accuracies": 0.612500011920929,
1096
+ "rewards/chosen": -0.22230124473571777,
1097
+ "rewards/margins": 0.09477819502353668,
1098
+ "rewards/rejected": -0.31707945466041565,
1099
+ "step": 700
1100
+ },
1101
+ {
1102
+ "epoch": 1.47,
1103
+ "eval_logits/chosen": 0.651095986366272,
1104
+ "eval_logits/rejected": 0.7549068927764893,
1105
+ "eval_logps/chosen": -283.3440246582031,
1106
+ "eval_logps/rejected": -265.57830810546875,
1107
+ "eval_loss": 0.6559058427810669,
1108
+ "eval_rewards/accuracies": 0.6620000004768372,
1109
+ "eval_rewards/chosen": -0.23099879920482635,
1110
+ "eval_rewards/margins": 0.11042327433824539,
1111
+ "eval_rewards/rejected": -0.34142205119132996,
1112
+ "eval_runtime": 325.7526,
1113
+ "eval_samples_per_second": 6.14,
1114
+ "eval_steps_per_second": 0.384,
1115
+ "step": 700
1116
+ },
1117
+ {
1118
+ "epoch": 1.49,
1119
+ "learning_rate": 9.331100255592437e-07,
1120
+ "logits/chosen": 0.7064228653907776,
1121
+ "logits/rejected": 0.7716813683509827,
1122
+ "logps/chosen": -300.17694091796875,
1123
+ "logps/rejected": -261.4855651855469,
1124
+ "loss": 0.6475,
1125
+ "rewards/accuracies": 0.6812499761581421,
1126
+ "rewards/chosen": -0.2144012451171875,
1127
+ "rewards/margins": 0.12820136547088623,
1128
+ "rewards/rejected": -0.34260261058807373,
1129
+ "step": 710
1130
+ },
1131
+ {
1132
+ "epoch": 1.51,
1133
+ "learning_rate": 8.628481651367876e-07,
1134
+ "logits/chosen": 0.6913928985595703,
1135
+ "logits/rejected": 0.7301486730575562,
1136
+ "logps/chosen": -286.8462829589844,
1137
+ "logps/rejected": -276.6960754394531,
1138
+ "loss": 0.6457,
1139
+ "rewards/accuracies": 0.6312500238418579,
1140
+ "rewards/chosen": -0.23222024738788605,
1141
+ "rewards/margins": 0.10797496885061264,
1142
+ "rewards/rejected": -0.3401952087879181,
1143
+ "step": 720
1144
+ },
1145
+ {
1146
+ "epoch": 1.53,
1147
+ "learning_rate": 7.947809564230446e-07,
1148
+ "logits/chosen": 0.7156798839569092,
1149
+ "logits/rejected": 0.7468422055244446,
1150
+ "logps/chosen": -288.5824890136719,
1151
+ "logps/rejected": -286.32293701171875,
1152
+ "loss": 0.6439,
1153
+ "rewards/accuracies": 0.7250000238418579,
1154
+ "rewards/chosen": -0.23393268883228302,
1155
+ "rewards/margins": 0.12296883016824722,
1156
+ "rewards/rejected": -0.35690149664878845,
1157
+ "step": 730
1158
+ },
1159
+ {
1160
+ "epoch": 1.55,
1161
+ "learning_rate": 7.289996455765749e-07,
1162
+ "logits/chosen": 0.668700098991394,
1163
+ "logits/rejected": 0.6845619678497314,
1164
+ "logps/chosen": -299.6327819824219,
1165
+ "logps/rejected": -283.09295654296875,
1166
+ "loss": 0.6584,
1167
+ "rewards/accuracies": 0.637499988079071,
1168
+ "rewards/chosen": -0.2437756061553955,
1169
+ "rewards/margins": 0.07863186299800873,
1170
+ "rewards/rejected": -0.32240745425224304,
1171
+ "step": 740
1172
+ },
1173
+ {
1174
+ "epoch": 1.57,
1175
+ "learning_rate": 6.655924144404907e-07,
1176
+ "logits/chosen": 0.6234613656997681,
1177
+ "logits/rejected": 0.6867505311965942,
1178
+ "logps/chosen": -281.64007568359375,
1179
+ "logps/rejected": -268.5241394042969,
1180
+ "loss": 0.6515,
1181
+ "rewards/accuracies": 0.6875,
1182
+ "rewards/chosen": -0.21235065162181854,
1183
+ "rewards/margins": 0.12807723879814148,
1184
+ "rewards/rejected": -0.34042784571647644,
1185
+ "step": 750
1186
+ },
1187
+ {
1188
+ "epoch": 1.59,
1189
+ "learning_rate": 6.046442623320145e-07,
1190
+ "logits/chosen": 0.7112501859664917,
1191
+ "logits/rejected": 0.7498377561569214,
1192
+ "logps/chosen": -281.40875244140625,
1193
+ "logps/rejected": -251.820068359375,
1194
+ "loss": 0.6496,
1195
+ "rewards/accuracies": 0.668749988079071,
1196
+ "rewards/chosen": -0.26181793212890625,
1197
+ "rewards/margins": 0.08466657996177673,
1198
+ "rewards/rejected": -0.3464844822883606,
1199
+ "step": 760
1200
+ },
1201
+ {
1202
+ "epoch": 1.61,
1203
+ "learning_rate": 5.462368920983249e-07,
1204
+ "logits/chosen": 0.6798941493034363,
1205
+ "logits/rejected": 0.7346147298812866,
1206
+ "logps/chosen": -266.92315673828125,
1207
+ "logps/rejected": -247.5948028564453,
1208
+ "loss": 0.6566,
1209
+ "rewards/accuracies": 0.625,
1210
+ "rewards/chosen": -0.25860121846199036,
1211
+ "rewards/margins": 0.05472899600863457,
1212
+ "rewards/rejected": -0.31333020329475403,
1213
+ "step": 770
1214
+ },
1215
+ {
1216
+ "epoch": 1.63,
1217
+ "learning_rate": 4.904486005914027e-07,
1218
+ "logits/chosen": 0.6729373931884766,
1219
+ "logits/rejected": 0.7290818095207214,
1220
+ "logps/chosen": -307.30926513671875,
1221
+ "logps/rejected": -292.6742248535156,
1222
+ "loss": 0.6463,
1223
+ "rewards/accuracies": 0.6937500238418579,
1224
+ "rewards/chosen": -0.2319561243057251,
1225
+ "rewards/margins": 0.1268167346715927,
1226
+ "rewards/rejected": -0.3587728440761566,
1227
+ "step": 780
1228
+ },
1229
+ {
1230
+ "epoch": 1.65,
1231
+ "learning_rate": 4.373541737087264e-07,
1232
+ "logits/chosen": 0.6597692966461182,
1233
+ "logits/rejected": 0.7631260752677917,
1234
+ "logps/chosen": -288.7844543457031,
1235
+ "logps/rejected": -258.93450927734375,
1236
+ "loss": 0.6505,
1237
+ "rewards/accuracies": 0.6312500238418579,
1238
+ "rewards/chosen": -0.2365461140871048,
1239
+ "rewards/margins": 0.12159979343414307,
1240
+ "rewards/rejected": -0.35814589262008667,
1241
+ "step": 790
1242
+ },
1243
+ {
1244
+ "epoch": 1.67,
1245
+ "learning_rate": 3.8702478614051353e-07,
1246
+ "logits/chosen": 0.6355623006820679,
1247
+ "logits/rejected": 0.64263516664505,
1248
+ "logps/chosen": -261.3099670410156,
1249
+ "logps/rejected": -265.4583435058594,
1250
+ "loss": 0.6449,
1251
+ "rewards/accuracies": 0.6812499761581421,
1252
+ "rewards/chosen": -0.23573680222034454,
1253
+ "rewards/margins": 0.13916215300559998,
1254
+ "rewards/rejected": -0.3748989403247833,
1255
+ "step": 800
1256
+ },
1257
+ {
1258
+ "epoch": 1.67,
1259
+ "eval_logits/chosen": 0.6347466111183167,
1260
+ "eval_logits/rejected": 0.7372007966041565,
1261
+ "eval_logps/chosen": -285.4225769042969,
1262
+ "eval_logps/rejected": -268.3813781738281,
1263
+ "eval_loss": 0.6541773676872253,
1264
+ "eval_rewards/accuracies": 0.656000018119812,
1265
+ "eval_rewards/chosen": -0.2517840266227722,
1266
+ "eval_rewards/margins": 0.11766859143972397,
1267
+ "eval_rewards/rejected": -0.3694525957107544,
1268
+ "eval_runtime": 325.7549,
1269
+ "eval_samples_per_second": 6.14,
1270
+ "eval_steps_per_second": 0.384,
1271
+ "step": 800
1272
+ },
1273
+ {
1274
+ "epoch": 1.7,
1275
+ "learning_rate": 3.3952790595787986e-07,
1276
+ "logits/chosen": 0.7447446584701538,
1277
+ "logits/rejected": 0.7175893783569336,
1278
+ "logps/chosen": -275.09906005859375,
1279
+ "logps/rejected": -265.5423889160156,
1280
+ "loss": 0.6508,
1281
+ "rewards/accuracies": 0.581250011920929,
1282
+ "rewards/chosen": -0.25975173711776733,
1283
+ "rewards/margins": 0.08070627599954605,
1284
+ "rewards/rejected": -0.3404580354690552,
1285
+ "step": 810
1286
+ },
1287
+ {
1288
+ "epoch": 1.72,
1289
+ "learning_rate": 2.9492720416985004e-07,
1290
+ "logits/chosen": 0.5959675908088684,
1291
+ "logits/rejected": 0.6474281549453735,
1292
+ "logps/chosen": -294.9203186035156,
1293
+ "logps/rejected": -284.7016906738281,
1294
+ "loss": 0.6455,
1295
+ "rewards/accuracies": 0.71875,
1296
+ "rewards/chosen": -0.2046935260295868,
1297
+ "rewards/margins": 0.13686785101890564,
1298
+ "rewards/rejected": -0.34156137704849243,
1299
+ "step": 820
1300
+ },
1301
+ {
1302
+ "epoch": 1.74,
1303
+ "learning_rate": 2.5328246937043526e-07,
1304
+ "logits/chosen": 0.6916497349739075,
1305
+ "logits/rejected": 0.7139278650283813,
1306
+ "logps/chosen": -264.2368469238281,
1307
+ "logps/rejected": -253.6294403076172,
1308
+ "loss": 0.6506,
1309
+ "rewards/accuracies": 0.668749988079071,
1310
+ "rewards/chosen": -0.28065750002861023,
1311
+ "rewards/margins": 0.10091479122638702,
1312
+ "rewards/rejected": -0.38157230615615845,
1313
+ "step": 830
1314
+ },
1315
+ {
1316
+ "epoch": 1.76,
1317
+ "learning_rate": 2.1464952759020857e-07,
1318
+ "logits/chosen": 0.6801538467407227,
1319
+ "logits/rejected": 0.7134484648704529,
1320
+ "logps/chosen": -274.87481689453125,
1321
+ "logps/rejected": -282.79766845703125,
1322
+ "loss": 0.6522,
1323
+ "rewards/accuracies": 0.668749988079071,
1324
+ "rewards/chosen": -0.2523757517337799,
1325
+ "rewards/margins": 0.11816386878490448,
1326
+ "rewards/rejected": -0.3705395758152008,
1327
+ "step": 840
1328
+ },
1329
+ {
1330
+ "epoch": 1.78,
1331
+ "learning_rate": 1.790801674598186e-07,
1332
+ "logits/chosen": 0.6462175846099854,
1333
+ "logits/rejected": 0.6658031940460205,
1334
+ "logps/chosen": -292.3714599609375,
1335
+ "logps/rejected": -269.7221984863281,
1336
+ "loss": 0.6522,
1337
+ "rewards/accuracies": 0.7250000238418579,
1338
+ "rewards/chosen": -0.24048607051372528,
1339
+ "rewards/margins": 0.10906896740198135,
1340
+ "rewards/rejected": -0.34955504536628723,
1341
+ "step": 850
1342
+ },
1343
+ {
1344
+ "epoch": 1.8,
1345
+ "learning_rate": 1.4662207078575685e-07,
1346
+ "logits/chosen": 0.6442946791648865,
1347
+ "logits/rejected": 0.7183449864387512,
1348
+ "logps/chosen": -256.558837890625,
1349
+ "logps/rejected": -274.9573669433594,
1350
+ "loss": 0.6476,
1351
+ "rewards/accuracies": 0.643750011920929,
1352
+ "rewards/chosen": -0.2482720911502838,
1353
+ "rewards/margins": 0.1144469827413559,
1354
+ "rewards/rejected": -0.3627190887928009,
1355
+ "step": 860
1356
+ },
1357
+ {
1358
+ "epoch": 1.82,
1359
+ "learning_rate": 1.1731874863145143e-07,
1360
+ "logits/chosen": 0.68730229139328,
1361
+ "logits/rejected": 0.7417377829551697,
1362
+ "logps/chosen": -285.8447570800781,
1363
+ "logps/rejected": -258.12408447265625,
1364
+ "loss": 0.6483,
1365
+ "rewards/accuracies": 0.7124999761581421,
1366
+ "rewards/chosen": -0.24029162526130676,
1367
+ "rewards/margins": 0.10951662063598633,
1368
+ "rewards/rejected": -0.3498082160949707,
1369
+ "step": 870
1370
+ },
1371
+ {
1372
+ "epoch": 1.84,
1373
+ "learning_rate": 9.120948298936422e-08,
1374
+ "logits/chosen": 0.7039914131164551,
1375
+ "logits/rejected": 0.720660388469696,
1376
+ "logps/chosen": -275.22564697265625,
1377
+ "logps/rejected": -259.50921630859375,
1378
+ "loss": 0.6391,
1379
+ "rewards/accuracies": 0.6499999761581421,
1380
+ "rewards/chosen": -0.2585623860359192,
1381
+ "rewards/margins": 0.10217638313770294,
1382
+ "rewards/rejected": -0.36073875427246094,
1383
+ "step": 880
1384
+ },
1385
+ {
1386
+ "epoch": 1.86,
1387
+ "learning_rate": 6.832927412229017e-08,
1388
+ "logits/chosen": 0.634792685508728,
1389
+ "logits/rejected": 0.7409111857414246,
1390
+ "logps/chosen": -285.78076171875,
1391
+ "logps/rejected": -263.0660095214844,
1392
+ "loss": 0.6441,
1393
+ "rewards/accuracies": 0.668749988079071,
1394
+ "rewards/chosen": -0.24097540974617004,
1395
+ "rewards/margins": 0.12255308777093887,
1396
+ "rewards/rejected": -0.3635285198688507,
1397
+ "step": 890
1398
+ },
1399
+ {
1400
+ "epoch": 1.88,
1401
+ "learning_rate": 4.870879364444109e-08,
1402
+ "logits/chosen": 0.6492586731910706,
1403
+ "logits/rejected": 0.7190669775009155,
1404
+ "logps/chosen": -275.6029357910156,
1405
+ "logps/rejected": -243.29281616210938,
1406
+ "loss": 0.6487,
1407
+ "rewards/accuracies": 0.706250011920929,
1408
+ "rewards/chosen": -0.24271516501903534,
1409
+ "rewards/margins": 0.14071747660636902,
1410
+ "rewards/rejected": -0.38343262672424316,
1411
+ "step": 900
1412
+ },
1413
+ {
1414
+ "epoch": 1.88,
1415
+ "eval_logits/chosen": 0.6298695802688599,
1416
+ "eval_logits/rejected": 0.7320161461830139,
1417
+ "eval_logps/chosen": -285.9532165527344,
1418
+ "eval_logps/rejected": -269.07244873046875,
1419
+ "eval_loss": 0.6538792848587036,
1420
+ "eval_rewards/accuracies": 0.656000018119812,
1421
+ "eval_rewards/chosen": -0.25709065794944763,
1422
+ "eval_rewards/margins": 0.11927253752946854,
1423
+ "eval_rewards/rejected": -0.37636318802833557,
1424
+ "eval_runtime": 326.0646,
1425
+ "eval_samples_per_second": 6.134,
1426
+ "eval_steps_per_second": 0.383,
1427
+ "step": 900
1428
+ },
1429
+ {
1430
+ "epoch": 1.9,
1431
+ "learning_rate": 3.237434340521789e-08,
1432
+ "logits/chosen": 0.6427167654037476,
1433
+ "logits/rejected": 0.7207670211791992,
1434
+ "logps/chosen": -286.7334289550781,
1435
+ "logps/rejected": -282.4278259277344,
1436
+ "loss": 0.6455,
1437
+ "rewards/accuracies": 0.65625,
1438
+ "rewards/chosen": -0.24073800444602966,
1439
+ "rewards/margins": 0.1223975196480751,
1440
+ "rewards/rejected": -0.36313554644584656,
1441
+ "step": 910
1442
+ },
1443
+ {
1444
+ "epoch": 1.93,
1445
+ "learning_rate": 1.93478202307823e-08,
1446
+ "logits/chosen": 0.6422888040542603,
1447
+ "logits/rejected": 0.6643663644790649,
1448
+ "logps/chosen": -267.14398193359375,
1449
+ "logps/rejected": -278.8194274902344,
1450
+ "loss": 0.6473,
1451
+ "rewards/accuracies": 0.6499999761581421,
1452
+ "rewards/chosen": -0.257813036441803,
1453
+ "rewards/margins": 0.0857832059264183,
1454
+ "rewards/rejected": -0.34359627962112427,
1455
+ "step": 920
1456
+ },
1457
+ {
1458
+ "epoch": 1.95,
1459
+ "learning_rate": 9.646686570697062e-09,
1460
+ "logits/chosen": 0.6971439123153687,
1461
+ "logits/rejected": 0.722652792930603,
1462
+ "logps/chosen": -283.13067626953125,
1463
+ "logps/rejected": -285.1507568359375,
1464
+ "loss": 0.6572,
1465
+ "rewards/accuracies": 0.6499999761581421,
1466
+ "rewards/chosen": -0.2655975818634033,
1467
+ "rewards/margins": 0.10348521173000336,
1468
+ "rewards/rejected": -0.36908283829689026,
1469
+ "step": 930
1470
+ },
1471
+ {
1472
+ "epoch": 1.97,
1473
+ "learning_rate": 3.283947088983663e-09,
1474
+ "logits/chosen": 0.6804937720298767,
1475
+ "logits/rejected": 0.6877144575119019,
1476
+ "logps/chosen": -265.1544494628906,
1477
+ "logps/rejected": -278.4422302246094,
1478
+ "loss": 0.6544,
1479
+ "rewards/accuracies": 0.5874999761581421,
1480
+ "rewards/chosen": -0.27914106845855713,
1481
+ "rewards/margins": 0.08945213258266449,
1482
+ "rewards/rejected": -0.3685932457447052,
1483
+ "step": 940
1484
+ },
1485
+ {
1486
+ "epoch": 1.99,
1487
+ "learning_rate": 2.681312309735229e-10,
1488
+ "logits/chosen": 0.6534776091575623,
1489
+ "logits/rejected": 0.7444522976875305,
1490
+ "logps/chosen": -257.7799377441406,
1491
+ "logps/rejected": -263.00030517578125,
1492
+ "loss": 0.654,
1493
+ "rewards/accuracies": 0.612500011920929,
1494
+ "rewards/chosen": -0.2684522867202759,
1495
+ "rewards/margins": 0.08305440098047256,
1496
+ "rewards/rejected": -0.35150665044784546,
1497
+ "step": 950
1498
+ },
1499
+ {
1500
+ "epoch": 2.0,
1501
+ "step": 954,
1502
+ "total_flos": 0.0,
1503
+ "train_loss": 0.6680307045922589,
1504
+ "train_runtime": 18174.3674,
1505
+ "train_samples_per_second": 3.364,
1506
+ "train_steps_per_second": 0.052
1507
+ }
1508
+ ],
1509
+ "logging_steps": 10,
1510
+ "max_steps": 954,
1511
+ "num_input_tokens_seen": 0,
1512
+ "num_train_epochs": 2,
1513
+ "save_steps": 100,
1514
+ "total_flos": 0.0,
1515
+ "train_batch_size": 4,
1516
+ "trial_name": null,
1517
+ "trial_params": null
1518
+ }