lole25 committed on
Commit b6cf0f6
1 Parent(s): 37bf073

Model save

README.md ADDED
@@ -0,0 +1,83 @@
---
license: mit
library_name: peft
tags:
- trl
- dpo
- generated_from_trainer
base_model: microsoft/phi-2
model-index:
- name: phi-2-gpo-ultrafeedback-lora
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# phi-2-gpo-ultrafeedback-lora

This model is a fine-tuned version of [microsoft/phi-2](https://huggingface.co/microsoft/phi-2) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0021
- Rewards/chosen: -0.0083
- Rewards/rejected: -0.0184
- Rewards/accuracies: 0.6920
- Rewards/margins: 0.0101
- Logps/rejected: -233.2711
- Logps/chosen: -261.0694
- Logits/rejected: 0.8833
- Logits/chosen: 0.7809

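The `Rewards/*` values follow the TRL DPO-style convention of an *implicit* reward: beta times the gap between the policy's and the reference model's log-probability of a response. The sketch below shows that bookkeeping under two assumptions: `beta=0.1` (TRL's default; the value actually used is not recorded in this card), and that the reward definition is independent of the particular preference loss variant, as it is in TRL's `DPOTrainer`.

```python
import torch

def implicit_reward(policy_logps: torch.Tensor, ref_logps: torch.Tensor, beta: float = 0.1) -> torch.Tensor:
    """DPO-style implicit reward: beta * (log pi_theta(y|x) - log pi_ref(y|x))."""
    return beta * (policy_logps - ref_logps)

def margin_and_accuracy(chosen_rewards: torch.Tensor, rejected_rewards: torch.Tensor):
    """rewards/margins is chosen minus rejected; rewards/accuracies is the
    fraction of pairs where the chosen response out-scores the rejected one."""
    margins = chosen_rewards - rejected_rewards
    accuracy = (margins > 0).float().mean()
    return margins, accuracy
```
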
## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-06
- train_batch_size: 4
- eval_batch_size: 4
- seed: 42
- distributed_type: multi-GPU
- num_devices: 4
- gradient_accumulation_steps: 4
- total_train_batch_size: 64
- total_eval_batch_size: 16
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 2

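The tags (`trl`, `dpo`, PEFT) suggest the adapter was trained with TRL's `DPOTrainer` on top of a LoRA-wrapped phi-2. Below is a minimal sketch of how the hyperparameters above would typically be wired up with the TRL generation contemporary to the pinned framework versions (newer TRL releases move `beta` and length limits into `DPOConfig`). The dataset path, LoRA settings, `beta`, and sequence lengths are illustrative assumptions, not values read from this repository; the actual adapter settings live in `adapter_config.json`.

```python
from datasets import load_dataset
from peft import LoraConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from trl import DPOTrainer

model_id = "microsoft/phi-2"
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)

# Illustrative LoRA settings (assumed, not read from adapter_config.json).
peft_config = LoraConfig(r=16, lora_alpha=32, lora_dropout=0.05, task_type="CAUSAL_LM")

# Hyperparameters mirrored from the list above: per-device batch size 4,
# gradient accumulation 4, cosine schedule with 10% warmup, 2 epochs.
training_args = TrainingArguments(
    output_dir="phi-2-gpo-ultrafeedback-lora",
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    gradient_accumulation_steps=4,
    learning_rate=5e-6,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    num_train_epochs=2,
    seed=42,
)

# Placeholder dataset: any pairwise-preference set with prompt/chosen/rejected text columns.
dataset = load_dataset("path/to/pairwise-preferences")

trainer = DPOTrainer(
    model,
    ref_model=None,          # with a PEFT adapter, the frozen base model serves as the reference
    args=training_args,
    beta=0.1,                # assumed; the beta / loss variant used here are not recorded
    train_dataset=dataset["train"],
    eval_dataset=dataset["test"],
    tokenizer=tokenizer,
    peft_config=peft_config,
)
trainer.train()
```
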
### Training results

| Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
|:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
| 0.0026 | 0.21 | 100 | 0.0025 | 0.0001 | -0.0005 | 0.5080 | 0.0006 | -231.4896 | -260.2373 | 0.9175 | 0.8151 |
| 0.0023 | 0.42 | 200 | 0.0023 | -0.0015 | -0.0068 | 0.6560 | 0.0053 | -232.1152 | -260.3932 | 0.9120 | 0.8092 |
| 0.0022 | 0.63 | 300 | 0.0022 | -0.0067 | -0.0141 | 0.6700 | 0.0073 | -232.8447 | -260.9179 | 0.9022 | 0.7992 |
| 0.0021 | 0.84 | 400 | 0.0022 | -0.0092 | -0.0178 | 0.6640 | 0.0086 | -233.2157 | -261.1620 | 0.8914 | 0.7884 |
| 0.0022 | 1.05 | 500 | 0.0021 | -0.0094 | -0.0193 | 0.7100 | 0.0098 | -233.3614 | -261.1852 | 0.8853 | 0.7821 |
| 0.002 | 1.26 | 600 | 0.0021 | -0.0088 | -0.0185 | 0.6940 | 0.0097 | -233.2843 | -261.1207 | 0.8840 | 0.7815 |
| 0.0021 | 1.47 | 700 | 0.0021 | -0.0083 | -0.0182 | 0.7000 | 0.0099 | -233.2560 | -261.0788 | 0.8816 | 0.7790 |
| 0.0021 | 1.67 | 800 | 0.0021 | -0.0082 | -0.0184 | 0.6940 | 0.0102 | -233.2740 | -261.0643 | 0.8811 | 0.7781 |
| 0.0021 | 1.88 | 900 | 0.0021 | -0.0085 | -0.0178 | 0.6900 | 0.0093 | -233.2118 | -261.0922 | 0.8833 | 0.7806 |

### Framework versions

- PEFT 0.7.1
- Transformers 4.36.2
- Pytorch 2.1.2+cu118
- Datasets 2.14.6
- Tokenizers 0.15.2
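
## How to use

A minimal loading sketch, assuming the adapter is published as `lole25/phi-2-gpo-ultrafeedback-lora` (repo id inferred from the commit author and model name, not stated above). `trust_remote_code=True` is only needed on Transformers versions that predate native phi-2 support.

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained(
    "microsoft/phi-2", torch_dtype=torch.float16, trust_remote_code=True
)
tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2", trust_remote_code=True)

# Attach the LoRA adapter from this repository (repo id assumed).
model = PeftModel.from_pretrained(base, "lole25/phi-2-gpo-ultrafeedback-lora")

prompt = "Explain LoRA fine-tuning in one sentence."
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```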
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:abf5fe404d26ba3f030c1f64b98a9e64307abb0f22973d1c24304df93b6a304b
+ oid sha256:d0dea53272310862afda3712da656b50c1d9a7ad7a46f0642635168a85f6d5a0
  size 41977616
all_results.json ADDED
@@ -0,0 +1,21 @@
{
    "epoch": 2.0,
    "eval_logits/chosen": 0.7808946371078491,
    "eval_logits/rejected": 0.8833128213882446,
    "eval_logps/chosen": -261.0694274902344,
    "eval_logps/rejected": -233.27114868164062,
    "eval_loss": 0.0021080097649246454,
    "eval_rewards/accuracies": 0.6919999718666077,
    "eval_rewards/chosen": -0.008252721279859543,
    "eval_rewards/margins": 0.01009758934378624,
    "eval_rewards/rejected": -0.018350308761000633,
    "eval_runtime": 325.1898,
    "eval_samples": 2000,
    "eval_samples_per_second": 6.15,
    "eval_steps_per_second": 0.384,
    "train_loss": 0.0021909422920118682,
    "train_runtime": 18127.9992,
    "train_samples": 30567,
    "train_samples_per_second": 3.372,
    "train_steps_per_second": 0.053
}
eval_results.json ADDED
@@ -0,0 +1,16 @@
{
    "epoch": 2.0,
    "eval_logits/chosen": 0.7808946371078491,
    "eval_logits/rejected": 0.8833128213882446,
    "eval_logps/chosen": -261.0694274902344,
    "eval_logps/rejected": -233.27114868164062,
    "eval_loss": 0.0021080097649246454,
    "eval_rewards/accuracies": 0.6919999718666077,
    "eval_rewards/chosen": -0.008252721279859543,
    "eval_rewards/margins": 0.01009758934378624,
    "eval_rewards/rejected": -0.018350308761000633,
    "eval_runtime": 325.1898,
    "eval_samples": 2000,
    "eval_samples_per_second": 6.15,
    "eval_steps_per_second": 0.384
}
runs/Mar04_23-03-52_gpu4-119-4/events.out.tfevents.1709553990.gpu4-119-4.2645530.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c43fd2ea2ec14cb6725d2a202bd2f6e331e2345930a4efb1b1d8d3f7d92fefc7
- size 69040
+ oid sha256:86e7d65dd1ecab0062868e9eb0fa64b5d449a74a3b81d0fccc5cbbfc683b08ee
+ size 72564
runs/Mar04_23-03-52_gpu4-119-4/events.out.tfevents.1709572444.gpu4-119-4.2645530.1 ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8536b7640e441d92bc78ca3e370ef6cd1c50bece09183ec2c87d1de81f9f887e
size 828
train_results.json ADDED
@@ -0,0 +1,8 @@
{
    "epoch": 2.0,
    "train_loss": 0.0021909422920118682,
    "train_runtime": 18127.9992,
    "train_samples": 30567,
    "train_samples_per_second": 3.372,
    "train_steps_per_second": 0.053
}
trainer_state.json ADDED
@@ -0,0 +1,1518 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 1.9968602825745683,
5
+ "eval_steps": 100,
6
+ "global_step": 954,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.0,
13
+ "learning_rate": 5.208333333333333e-08,
14
+ "logits/chosen": 0.952304482460022,
15
+ "logits/rejected": 0.5888463854789734,
16
+ "logps/chosen": -223.79486083984375,
17
+ "logps/rejected": -209.482666015625,
18
+ "loss": 0.0025,
19
+ "rewards/accuracies": 0.0,
20
+ "rewards/chosen": 0.0,
21
+ "rewards/margins": 0.0,
22
+ "rewards/rejected": 0.0,
23
+ "step": 1
24
+ },
25
+ {
26
+ "epoch": 0.02,
27
+ "learning_rate": 5.208333333333334e-07,
28
+ "logits/chosen": 0.8366600871086121,
29
+ "logits/rejected": 0.8544472455978394,
30
+ "logps/chosen": -236.2534942626953,
31
+ "logps/rejected": -221.8985137939453,
32
+ "loss": 0.0026,
33
+ "rewards/accuracies": 0.4583333432674408,
34
+ "rewards/chosen": 9.055635746335611e-05,
35
+ "rewards/margins": -0.00016146278358064592,
36
+ "rewards/rejected": 0.00025201926473528147,
37
+ "step": 10
38
+ },
39
+ {
40
+ "epoch": 0.04,
41
+ "learning_rate": 1.0416666666666667e-06,
42
+ "logits/chosen": 0.8331616520881653,
43
+ "logits/rejected": 0.9283801317214966,
44
+ "logps/chosen": -254.7387237548828,
45
+ "logps/rejected": -247.924560546875,
46
+ "loss": 0.0025,
47
+ "rewards/accuracies": 0.581250011920929,
48
+ "rewards/chosen": -0.0002719077165238559,
49
+ "rewards/margins": 0.000702966412063688,
50
+ "rewards/rejected": -0.0009748738375492394,
51
+ "step": 20
52
+ },
53
+ {
54
+ "epoch": 0.06,
55
+ "learning_rate": 1.5625e-06,
56
+ "logits/chosen": 0.8597829937934875,
57
+ "logits/rejected": 0.9174444079399109,
58
+ "logps/chosen": -260.46356201171875,
59
+ "logps/rejected": -232.0428924560547,
60
+ "loss": 0.0025,
61
+ "rewards/accuracies": 0.518750011920929,
62
+ "rewards/chosen": 0.0010247982572764158,
63
+ "rewards/margins": 0.0011705085635185242,
64
+ "rewards/rejected": -0.0001457103790016845,
65
+ "step": 30
66
+ },
67
+ {
68
+ "epoch": 0.08,
69
+ "learning_rate": 2.0833333333333334e-06,
70
+ "logits/chosen": 0.8120288848876953,
71
+ "logits/rejected": 0.9034429788589478,
72
+ "logps/chosen": -280.2599792480469,
73
+ "logps/rejected": -228.59304809570312,
74
+ "loss": 0.0025,
75
+ "rewards/accuracies": 0.4625000059604645,
76
+ "rewards/chosen": -0.0004804997588507831,
77
+ "rewards/margins": -2.5537923647789285e-05,
78
+ "rewards/rejected": -0.00045496178790926933,
79
+ "step": 40
80
+ },
81
+ {
82
+ "epoch": 0.1,
83
+ "learning_rate": 2.604166666666667e-06,
84
+ "logits/chosen": 0.856873631477356,
85
+ "logits/rejected": 0.936148464679718,
86
+ "logps/chosen": -257.70074462890625,
87
+ "logps/rejected": -219.18563842773438,
88
+ "loss": 0.0025,
89
+ "rewards/accuracies": 0.574999988079071,
90
+ "rewards/chosen": -2.7128717192681506e-05,
91
+ "rewards/margins": 0.0010264910524711013,
92
+ "rewards/rejected": -0.0010536197805777192,
93
+ "step": 50
94
+ },
95
+ {
96
+ "epoch": 0.13,
97
+ "learning_rate": 3.125e-06,
98
+ "logits/chosen": 0.8756526708602905,
99
+ "logits/rejected": 0.9026013612747192,
100
+ "logps/chosen": -237.04653930664062,
101
+ "logps/rejected": -237.41769409179688,
102
+ "loss": 0.0025,
103
+ "rewards/accuracies": 0.4749999940395355,
104
+ "rewards/chosen": -0.00045221406617201865,
105
+ "rewards/margins": -0.0005772784352302551,
106
+ "rewards/rejected": 0.0001250644854735583,
107
+ "step": 60
108
+ },
109
+ {
110
+ "epoch": 0.15,
111
+ "learning_rate": 3.6458333333333333e-06,
112
+ "logits/chosen": 0.8502365946769714,
113
+ "logits/rejected": 0.878851592540741,
114
+ "logps/chosen": -260.7863464355469,
115
+ "logps/rejected": -227.58700561523438,
116
+ "loss": 0.0025,
117
+ "rewards/accuracies": 0.4937500059604645,
118
+ "rewards/chosen": -0.000642338243778795,
119
+ "rewards/margins": -0.0005514883669093251,
120
+ "rewards/rejected": -9.084997873287648e-05,
121
+ "step": 70
122
+ },
123
+ {
124
+ "epoch": 0.17,
125
+ "learning_rate": 4.166666666666667e-06,
126
+ "logits/chosen": 0.8864189982414246,
127
+ "logits/rejected": 0.9194203615188599,
128
+ "logps/chosen": -251.07644653320312,
129
+ "logps/rejected": -231.24453735351562,
130
+ "loss": 0.0025,
131
+ "rewards/accuracies": 0.53125,
132
+ "rewards/chosen": 8.48618583404459e-05,
133
+ "rewards/margins": 0.00046581291826441884,
134
+ "rewards/rejected": -0.00038095106719993055,
135
+ "step": 80
136
+ },
137
+ {
138
+ "epoch": 0.19,
139
+ "learning_rate": 4.6875000000000004e-06,
140
+ "logits/chosen": 0.8617275953292847,
141
+ "logits/rejected": 0.9164407849311829,
142
+ "logps/chosen": -225.30056762695312,
143
+ "logps/rejected": -241.3493194580078,
144
+ "loss": 0.0025,
145
+ "rewards/accuracies": 0.543749988079071,
146
+ "rewards/chosen": -0.00032968426239676774,
147
+ "rewards/margins": 0.0004738254356198013,
148
+ "rewards/rejected": -0.0008035098435357213,
149
+ "step": 90
150
+ },
151
+ {
152
+ "epoch": 0.21,
153
+ "learning_rate": 4.999731868769027e-06,
154
+ "logits/chosen": 0.9239044189453125,
155
+ "logits/rejected": 0.9236629605293274,
156
+ "logps/chosen": -242.187744140625,
157
+ "logps/rejected": -221.4308624267578,
158
+ "loss": 0.0026,
159
+ "rewards/accuracies": 0.518750011920929,
160
+ "rewards/chosen": -0.0005193214165046811,
161
+ "rewards/margins": 0.00021600276522804052,
162
+ "rewards/rejected": -0.0007353241671808064,
163
+ "step": 100
164
+ },
165
+ {
166
+ "epoch": 0.21,
167
+ "eval_logits/chosen": 0.8150850534439087,
168
+ "eval_logits/rejected": 0.917453408241272,
169
+ "eval_logps/chosen": -260.2373046875,
170
+ "eval_logps/rejected": -231.48963928222656,
171
+ "eval_loss": 0.00248239329084754,
172
+ "eval_rewards/accuracies": 0.5080000162124634,
173
+ "eval_rewards/chosen": 6.857867265352979e-05,
174
+ "eval_rewards/margins": 0.0006035350379534066,
175
+ "eval_rewards/rejected": -0.0005349563434720039,
176
+ "eval_runtime": 327.6723,
177
+ "eval_samples_per_second": 6.104,
178
+ "eval_steps_per_second": 0.381,
179
+ "step": 100
180
+ },
181
+ {
182
+ "epoch": 0.23,
183
+ "learning_rate": 4.996716052911017e-06,
184
+ "logits/chosen": 0.8340710401535034,
185
+ "logits/rejected": 0.8745189905166626,
186
+ "logps/chosen": -264.0061950683594,
187
+ "logps/rejected": -219.55416870117188,
188
+ "loss": 0.0025,
189
+ "rewards/accuracies": 0.574999988079071,
190
+ "rewards/chosen": 0.00015970889944583178,
191
+ "rewards/margins": 0.0009235168108716607,
192
+ "rewards/rejected": -0.000763807853218168,
193
+ "step": 110
194
+ },
195
+ {
196
+ "epoch": 0.25,
197
+ "learning_rate": 4.9903533134293035e-06,
198
+ "logits/chosen": 0.8607433438301086,
199
+ "logits/rejected": 0.9696208238601685,
200
+ "logps/chosen": -254.9775390625,
201
+ "logps/rejected": -219.08029174804688,
202
+ "loss": 0.0025,
203
+ "rewards/accuracies": 0.512499988079071,
204
+ "rewards/chosen": -0.00045476845116354525,
205
+ "rewards/margins": 0.0009122647461481392,
206
+ "rewards/rejected": -0.001367033226415515,
207
+ "step": 120
208
+ },
209
+ {
210
+ "epoch": 0.27,
211
+ "learning_rate": 4.9806521797692184e-06,
212
+ "logits/chosen": 0.8772906064987183,
213
+ "logits/rejected": 0.8781763911247253,
214
+ "logps/chosen": -264.6737365722656,
215
+ "logps/rejected": -246.98770141601562,
216
+ "loss": 0.0025,
217
+ "rewards/accuracies": 0.5062500238418579,
218
+ "rewards/chosen": -0.00038236891850829124,
219
+ "rewards/margins": 0.0011148005723953247,
220
+ "rewards/rejected": -0.0014971692580729723,
221
+ "step": 130
222
+ },
223
+ {
224
+ "epoch": 0.29,
225
+ "learning_rate": 4.967625656594782e-06,
226
+ "logits/chosen": 0.8523051142692566,
227
+ "logits/rejected": 0.9305570721626282,
228
+ "logps/chosen": -222.01806640625,
229
+ "logps/rejected": -232.17919921875,
230
+ "loss": 0.0024,
231
+ "rewards/accuracies": 0.4937500059604645,
232
+ "rewards/chosen": -0.0008891393663361669,
233
+ "rewards/margins": 0.0006222378578968346,
234
+ "rewards/rejected": -0.0015113770496100187,
235
+ "step": 140
236
+ },
237
+ {
238
+ "epoch": 0.31,
239
+ "learning_rate": 4.95129120635556e-06,
240
+ "logits/chosen": 0.8730077743530273,
241
+ "logits/rejected": 0.9034315943717957,
242
+ "logps/chosen": -258.2915954589844,
243
+ "logps/rejected": -215.7738037109375,
244
+ "loss": 0.0024,
245
+ "rewards/accuracies": 0.5375000238418579,
246
+ "rewards/chosen": -0.0006225308170542121,
247
+ "rewards/margins": 0.002022756729274988,
248
+ "rewards/rejected": -0.0026452874299138784,
249
+ "step": 150
250
+ },
251
+ {
252
+ "epoch": 0.33,
253
+ "learning_rate": 4.93167072587771e-06,
254
+ "logits/chosen": 0.7767001390457153,
255
+ "logits/rejected": 0.845874011516571,
256
+ "logps/chosen": -257.7080383300781,
257
+ "logps/rejected": -250.625732421875,
258
+ "loss": 0.0024,
259
+ "rewards/accuracies": 0.574999988079071,
260
+ "rewards/chosen": -0.0005334648885764182,
261
+ "rewards/margins": 0.0022061350755393505,
262
+ "rewards/rejected": -0.0027396001387387514,
263
+ "step": 160
264
+ },
265
+ {
266
+ "epoch": 0.36,
267
+ "learning_rate": 4.908790517010637e-06,
268
+ "logits/chosen": 0.9272924661636353,
269
+ "logits/rejected": 0.9679857492446899,
270
+ "logps/chosen": -239.0647735595703,
271
+ "logps/rejected": -252.8076934814453,
272
+ "loss": 0.0024,
273
+ "rewards/accuracies": 0.5562499761581421,
274
+ "rewards/chosen": -0.002121446654200554,
275
+ "rewards/margins": 0.001315777888521552,
276
+ "rewards/rejected": -0.0034372243098914623,
277
+ "step": 170
278
+ },
279
+ {
280
+ "epoch": 0.38,
281
+ "learning_rate": 4.882681251368549e-06,
282
+ "logits/chosen": 0.8513296246528625,
283
+ "logits/rejected": 0.8445970416069031,
284
+ "logps/chosen": -270.7970886230469,
285
+ "logps/rejected": -256.8065185546875,
286
+ "loss": 0.0023,
287
+ "rewards/accuracies": 0.606249988079071,
288
+ "rewards/chosen": -0.00029190973145887256,
289
+ "rewards/margins": 0.0030645509250462055,
290
+ "rewards/rejected": -0.0033564604818820953,
291
+ "step": 180
292
+ },
293
+ {
294
+ "epoch": 0.4,
295
+ "learning_rate": 4.853377929214243e-06,
296
+ "logits/chosen": 0.7836719751358032,
297
+ "logits/rejected": 0.8565704226493835,
298
+ "logps/chosen": -252.17233276367188,
299
+ "logps/rejected": -244.7781219482422,
300
+ "loss": 0.0023,
301
+ "rewards/accuracies": 0.606249988079071,
302
+ "rewards/chosen": -0.001070442027412355,
303
+ "rewards/margins": 0.0032107688020914793,
304
+ "rewards/rejected": -0.004281210713088512,
305
+ "step": 190
306
+ },
307
+ {
308
+ "epoch": 0.42,
309
+ "learning_rate": 4.8209198325401815e-06,
310
+ "logits/chosen": 0.872645378112793,
311
+ "logits/rejected": 0.8895421028137207,
312
+ "logps/chosen": -237.6038055419922,
313
+ "logps/rejected": -233.2046661376953,
314
+ "loss": 0.0023,
315
+ "rewards/accuracies": 0.581250011920929,
316
+ "rewards/chosen": -0.0021511532831937075,
317
+ "rewards/margins": 0.002740217139944434,
318
+ "rewards/rejected": -0.004891370423138142,
319
+ "step": 200
320
+ },
321
+ {
322
+ "epoch": 0.42,
323
+ "eval_logits/chosen": 0.8092045783996582,
324
+ "eval_logits/rejected": 0.9120355844497681,
325
+ "eval_logps/chosen": -260.39324951171875,
326
+ "eval_logps/rejected": -232.11520385742188,
327
+ "eval_loss": 0.0022759963758289814,
328
+ "eval_rewards/accuracies": 0.656000018119812,
329
+ "eval_rewards/chosen": -0.0014907378936186433,
330
+ "eval_rewards/margins": 0.005300111137330532,
331
+ "eval_rewards/rejected": -0.006790849845856428,
332
+ "eval_runtime": 327.2093,
333
+ "eval_samples_per_second": 6.112,
334
+ "eval_steps_per_second": 0.382,
335
+ "step": 200
336
+ },
337
+ {
338
+ "epoch": 0.44,
339
+ "learning_rate": 4.785350472409792e-06,
340
+ "logits/chosen": 0.9010735750198364,
341
+ "logits/rejected": 0.9061653017997742,
342
+ "logps/chosen": -232.94741821289062,
343
+ "logps/rejected": -228.1055145263672,
344
+ "loss": 0.0023,
345
+ "rewards/accuracies": 0.6875,
346
+ "rewards/chosen": -0.003469156799837947,
347
+ "rewards/margins": 0.003427647752687335,
348
+ "rewards/rejected": -0.006896805018186569,
349
+ "step": 210
350
+ },
351
+ {
352
+ "epoch": 0.46,
353
+ "learning_rate": 4.746717530629565e-06,
354
+ "logits/chosen": 0.8498672246932983,
355
+ "logits/rejected": 0.8839853405952454,
356
+ "logps/chosen": -259.6465148925781,
357
+ "logps/rejected": -238.8597869873047,
358
+ "loss": 0.0023,
359
+ "rewards/accuracies": 0.6499999761581421,
360
+ "rewards/chosen": -0.002919531427323818,
361
+ "rewards/margins": 0.00543471472337842,
362
+ "rewards/rejected": -0.00835424568504095,
363
+ "step": 220
364
+ },
365
+ {
366
+ "epoch": 0.48,
367
+ "learning_rate": 4.7050727958301505e-06,
368
+ "logits/chosen": 0.8881880044937134,
369
+ "logits/rejected": 0.8712642788887024,
370
+ "logps/chosen": -244.35507202148438,
371
+ "logps/rejected": -229.05136108398438,
372
+ "loss": 0.0022,
373
+ "rewards/accuracies": 0.637499988079071,
374
+ "rewards/chosen": -0.004195825196802616,
375
+ "rewards/margins": 0.004726833663880825,
376
+ "rewards/rejected": -0.008922659792006016,
377
+ "step": 230
378
+ },
379
+ {
380
+ "epoch": 0.5,
381
+ "learning_rate": 4.660472094042121e-06,
382
+ "logits/chosen": 0.8501306772232056,
383
+ "logits/rejected": 0.8583809733390808,
384
+ "logps/chosen": -281.69854736328125,
385
+ "logps/rejected": -234.6123504638672,
386
+ "loss": 0.0022,
387
+ "rewards/accuracies": 0.675000011920929,
388
+ "rewards/chosen": -0.004014792386442423,
389
+ "rewards/margins": 0.0071690999902784824,
390
+ "rewards/rejected": -0.011183892376720905,
391
+ "step": 240
392
+ },
393
+ {
394
+ "epoch": 0.52,
395
+ "learning_rate": 4.612975213859487e-06,
396
+ "logits/chosen": 0.8360323905944824,
397
+ "logits/rejected": 0.9075163006782532,
398
+ "logps/chosen": -268.0133361816406,
399
+ "logps/rejected": -241.88339233398438,
400
+ "loss": 0.0023,
401
+ "rewards/accuracies": 0.643750011920929,
402
+ "rewards/chosen": -0.0047610728070139885,
403
+ "rewards/margins": 0.0054484582506120205,
404
+ "rewards/rejected": -0.010209531523287296,
405
+ "step": 250
406
+ },
407
+ {
408
+ "epoch": 0.54,
409
+ "learning_rate": 4.5626458262912745e-06,
410
+ "logits/chosen": 0.8462463617324829,
411
+ "logits/rejected": 0.8862007260322571,
412
+ "logps/chosen": -274.3870544433594,
413
+ "logps/rejected": -260.1866149902344,
414
+ "loss": 0.0022,
415
+ "rewards/accuracies": 0.699999988079071,
416
+ "rewards/chosen": -0.002204451011493802,
417
+ "rewards/margins": 0.007608965039253235,
418
+ "rewards/rejected": -0.009813414886593819,
419
+ "step": 260
420
+ },
421
+ {
422
+ "epoch": 0.57,
423
+ "learning_rate": 4.509551399408598e-06,
424
+ "logits/chosen": 0.9279536008834839,
425
+ "logits/rejected": 0.9327741861343384,
426
+ "logps/chosen": -252.70175170898438,
427
+ "logps/rejected": -209.066650390625,
428
+ "loss": 0.0022,
429
+ "rewards/accuracies": 0.637499988079071,
430
+ "rewards/chosen": -0.004289106000214815,
431
+ "rewards/margins": 0.007459082640707493,
432
+ "rewards/rejected": -0.01174818817526102,
433
+ "step": 270
434
+ },
435
+ {
436
+ "epoch": 0.59,
437
+ "learning_rate": 4.453763107901676e-06,
438
+ "logits/chosen": 0.9238850474357605,
439
+ "logits/rejected": 0.8847238421440125,
440
+ "logps/chosen": -246.0789031982422,
441
+ "logps/rejected": -253.26327514648438,
442
+ "loss": 0.0023,
443
+ "rewards/accuracies": 0.5375000238418579,
444
+ "rewards/chosen": -0.007073036395013332,
445
+ "rewards/margins": 0.0031865164637565613,
446
+ "rewards/rejected": -0.010259552858769894,
447
+ "step": 280
448
+ },
449
+ {
450
+ "epoch": 0.61,
451
+ "learning_rate": 4.3953557376679856e-06,
452
+ "logits/chosen": 0.8482117652893066,
453
+ "logits/rejected": 0.8521090745925903,
454
+ "logps/chosen": -260.2223815917969,
455
+ "logps/rejected": -255.7244873046875,
456
+ "loss": 0.0022,
457
+ "rewards/accuracies": 0.5874999761581421,
458
+ "rewards/chosen": -0.006550622172653675,
459
+ "rewards/margins": 0.004290700424462557,
460
+ "rewards/rejected": -0.01084132306277752,
461
+ "step": 290
462
+ },
463
+ {
464
+ "epoch": 0.63,
465
+ "learning_rate": 4.33440758555951e-06,
466
+ "logits/chosen": 0.8257355690002441,
467
+ "logits/rejected": 0.9032806158065796,
468
+ "logps/chosen": -249.4975128173828,
469
+ "logps/rejected": -243.69088745117188,
470
+ "loss": 0.0022,
471
+ "rewards/accuracies": 0.6875,
472
+ "rewards/chosen": -0.004273009952157736,
473
+ "rewards/margins": 0.008262387476861477,
474
+ "rewards/rejected": -0.0125353978946805,
475
+ "step": 300
476
+ },
477
+ {
478
+ "epoch": 0.63,
479
+ "eval_logits/chosen": 0.7992061972618103,
480
+ "eval_logits/rejected": 0.9022300243377686,
481
+ "eval_logps/chosen": -260.91790771484375,
482
+ "eval_logps/rejected": -232.84466552734375,
483
+ "eval_loss": 0.002206910401582718,
484
+ "eval_rewards/accuracies": 0.6700000166893005,
485
+ "eval_rewards/chosen": -0.006737456191331148,
486
+ "eval_rewards/margins": 0.007348266430199146,
487
+ "eval_rewards/rejected": -0.014085723087191582,
488
+ "eval_runtime": 327.3645,
489
+ "eval_samples_per_second": 6.109,
490
+ "eval_steps_per_second": 0.382,
491
+ "step": 300
492
+ },
493
+ {
494
+ "epoch": 0.65,
495
+ "learning_rate": 4.2710003544234255e-06,
496
+ "logits/chosen": 0.851446270942688,
497
+ "logits/rejected": 0.8612054586410522,
498
+ "logps/chosen": -235.674072265625,
499
+ "logps/rejected": -225.989990234375,
500
+ "loss": 0.0022,
501
+ "rewards/accuracies": 0.637499988079071,
502
+ "rewards/chosen": -0.007650519721210003,
503
+ "rewards/margins": 0.006462027784436941,
504
+ "rewards/rejected": -0.014112548902630806,
505
+ "step": 310
506
+ },
507
+ {
508
+ "epoch": 0.67,
509
+ "learning_rate": 4.205219043576955e-06,
510
+ "logits/chosen": 0.859449565410614,
511
+ "logits/rejected": 0.8806228637695312,
512
+ "logps/chosen": -224.012451171875,
513
+ "logps/rejected": -215.5794677734375,
514
+ "loss": 0.0021,
515
+ "rewards/accuracies": 0.65625,
516
+ "rewards/chosen": -0.008152343332767487,
517
+ "rewards/margins": 0.006258256733417511,
518
+ "rewards/rejected": -0.014410600066184998,
519
+ "step": 320
520
+ },
521
+ {
522
+ "epoch": 0.69,
523
+ "learning_rate": 4.137151834863213e-06,
524
+ "logits/chosen": 0.8416824340820312,
525
+ "logits/rejected": 0.8180407285690308,
526
+ "logps/chosen": -253.04629516601562,
527
+ "logps/rejected": -258.2658386230469,
528
+ "loss": 0.0022,
529
+ "rewards/accuracies": 0.625,
530
+ "rewards/chosen": -0.007537019904702902,
531
+ "rewards/margins": 0.008035494945943356,
532
+ "rewards/rejected": -0.015572515316307545,
533
+ "step": 330
534
+ },
535
+ {
536
+ "epoch": 0.71,
537
+ "learning_rate": 4.066889974440757e-06,
538
+ "logits/chosen": 0.8714286088943481,
539
+ "logits/rejected": 0.870419979095459,
540
+ "logps/chosen": -266.2498474121094,
541
+ "logps/rejected": -248.62814331054688,
542
+ "loss": 0.0021,
543
+ "rewards/accuracies": 0.625,
544
+ "rewards/chosen": -0.006631535477936268,
545
+ "rewards/margins": 0.007086685858666897,
546
+ "rewards/rejected": -0.01371822226792574,
547
+ "step": 340
548
+ },
549
+ {
550
+ "epoch": 0.73,
551
+ "learning_rate": 3.994527650465352e-06,
552
+ "logits/chosen": 0.8660160899162292,
553
+ "logits/rejected": 0.8818486928939819,
554
+ "logps/chosen": -222.5798797607422,
555
+ "logps/rejected": -204.8966522216797,
556
+ "loss": 0.0022,
557
+ "rewards/accuracies": 0.625,
558
+ "rewards/chosen": -0.009565680287778378,
559
+ "rewards/margins": 0.006333778612315655,
560
+ "rewards/rejected": -0.015899458900094032,
561
+ "step": 350
562
+ },
563
+ {
564
+ "epoch": 0.75,
565
+ "learning_rate": 3.92016186682789e-06,
566
+ "logits/chosen": 0.7974532842636108,
567
+ "logits/rejected": 0.802233874797821,
568
+ "logps/chosen": -208.30282592773438,
569
+ "logps/rejected": -235.695556640625,
570
+ "loss": 0.0021,
571
+ "rewards/accuracies": 0.706250011920929,
572
+ "rewards/chosen": -0.008728450164198875,
573
+ "rewards/margins": 0.008383492939174175,
574
+ "rewards/rejected": -0.017111944034695625,
575
+ "step": 360
576
+ },
577
+ {
578
+ "epoch": 0.77,
579
+ "learning_rate": 3.843892313117724e-06,
580
+ "logits/chosen": 0.8825100064277649,
581
+ "logits/rejected": 0.9040519595146179,
582
+ "logps/chosen": -268.5409851074219,
583
+ "logps/rejected": -241.93862915039062,
584
+ "loss": 0.0022,
585
+ "rewards/accuracies": 0.6312500238418579,
586
+ "rewards/chosen": -0.008966553956270218,
587
+ "rewards/margins": 0.00720653822645545,
588
+ "rewards/rejected": -0.016173092648386955,
589
+ "step": 370
590
+ },
591
+ {
592
+ "epoch": 0.8,
593
+ "learning_rate": 3.7658212309857576e-06,
594
+ "logits/chosen": 0.8589727282524109,
595
+ "logits/rejected": 0.8763955235481262,
596
+ "logps/chosen": -238.5310516357422,
597
+ "logps/rejected": -214.51651000976562,
598
+ "loss": 0.0021,
599
+ "rewards/accuracies": 0.668749988079071,
600
+ "rewards/chosen": -0.008340245112776756,
601
+ "rewards/margins": 0.009361723437905312,
602
+ "rewards/rejected": -0.017701968550682068,
603
+ "step": 380
604
+ },
605
+ {
606
+ "epoch": 0.82,
607
+ "learning_rate": 3.686053277086401e-06,
608
+ "logits/chosen": 0.812663197517395,
609
+ "logits/rejected": 0.8946850895881653,
610
+ "logps/chosen": -261.9408874511719,
611
+ "logps/rejected": -236.6110382080078,
612
+ "loss": 0.0021,
613
+ "rewards/accuracies": 0.668749988079071,
614
+ "rewards/chosen": -0.008398517966270447,
615
+ "rewards/margins": 0.008632157929241657,
616
+ "rewards/rejected": -0.01703067496418953,
617
+ "step": 390
618
+ },
619
+ {
620
+ "epoch": 0.84,
621
+ "learning_rate": 3.604695382782159e-06,
622
+ "logits/chosen": 0.8278132677078247,
623
+ "logits/rejected": 0.8401390314102173,
624
+ "logps/chosen": -277.306396484375,
625
+ "logps/rejected": -252.70474243164062,
626
+ "loss": 0.0021,
627
+ "rewards/accuracies": 0.5874999761581421,
628
+ "rewards/chosen": -0.008408455178141594,
629
+ "rewards/margins": 0.007572343107312918,
630
+ "rewards/rejected": -0.0159807987511158,
631
+ "step": 400
632
+ },
633
+ {
634
+ "epoch": 0.84,
635
+ "eval_logits/chosen": 0.7884067893028259,
636
+ "eval_logits/rejected": 0.8913614153862,
637
+ "eval_logps/chosen": -261.16204833984375,
638
+ "eval_logps/rejected": -233.21571350097656,
639
+ "eval_loss": 0.0021709667053073645,
640
+ "eval_rewards/accuracies": 0.6639999747276306,
641
+ "eval_rewards/chosen": -0.009178930893540382,
642
+ "eval_rewards/margins": 0.008617207407951355,
643
+ "eval_rewards/rejected": -0.017796138301491737,
644
+ "eval_runtime": 327.7654,
645
+ "eval_samples_per_second": 6.102,
646
+ "eval_steps_per_second": 0.381,
647
+ "step": 400
648
+ },
649
+ {
650
+ "epoch": 0.86,
651
+ "learning_rate": 3.5218566107988872e-06,
652
+ "logits/chosen": 0.8511005640029907,
653
+ "logits/rejected": 0.8689740300178528,
654
+ "logps/chosen": -269.23675537109375,
655
+ "logps/rejected": -234.9521484375,
656
+ "loss": 0.0022,
657
+ "rewards/accuracies": 0.6875,
658
+ "rewards/chosen": -0.008150833658874035,
659
+ "rewards/margins": 0.009497146122157574,
660
+ "rewards/rejected": -0.01764797978103161,
661
+ "step": 410
662
+ },
663
+ {
664
+ "epoch": 0.88,
665
+ "learning_rate": 3.437648009023905e-06,
666
+ "logits/chosen": 0.796908974647522,
667
+ "logits/rejected": 0.8615992665290833,
668
+ "logps/chosen": -213.23800659179688,
669
+ "logps/rejected": -206.04647827148438,
670
+ "loss": 0.0022,
671
+ "rewards/accuracies": 0.59375,
672
+ "rewards/chosen": -0.009716427884995937,
673
+ "rewards/margins": 0.006270779762417078,
674
+ "rewards/rejected": -0.015987208113074303,
675
+ "step": 420
676
+ },
677
+ {
678
+ "epoch": 0.9,
679
+ "learning_rate": 3.352182461642929e-06,
680
+ "logits/chosen": 0.8233186602592468,
681
+ "logits/rejected": 0.8778685331344604,
682
+ "logps/chosen": -235.12954711914062,
683
+ "logps/rejected": -218.67416381835938,
684
+ "loss": 0.0021,
685
+ "rewards/accuracies": 0.737500011920929,
686
+ "rewards/chosen": -0.008281409740447998,
687
+ "rewards/margins": 0.008449633605778217,
688
+ "rewards/rejected": -0.01673104241490364,
689
+ "step": 430
690
+ },
691
+ {
692
+ "epoch": 0.92,
693
+ "learning_rate": 3.265574537815398e-06,
694
+ "logits/chosen": 0.8454925417900085,
695
+ "logits/rejected": 0.8766401410102844,
696
+ "logps/chosen": -280.82562255859375,
697
+ "logps/rejected": -242.23635864257812,
698
+ "loss": 0.0021,
699
+ "rewards/accuracies": 0.71875,
700
+ "rewards/chosen": -0.006689480505883694,
701
+ "rewards/margins": 0.010621527209877968,
702
+ "rewards/rejected": -0.017311008647084236,
703
+ "step": 440
704
+ },
705
+ {
706
+ "epoch": 0.94,
707
+ "learning_rate": 3.177940338091043e-06,
708
+ "logits/chosen": 0.8534622192382812,
709
+ "logits/rejected": 0.9055653810501099,
710
+ "logps/chosen": -252.76089477539062,
711
+ "logps/rejected": -221.62551879882812,
712
+ "loss": 0.0021,
713
+ "rewards/accuracies": 0.699999988079071,
714
+ "rewards/chosen": -0.007692619226872921,
715
+ "rewards/margins": 0.009279204532504082,
716
+ "rewards/rejected": -0.016971822828054428,
717
+ "step": 450
718
+ },
719
+ {
720
+ "epoch": 0.96,
721
+ "learning_rate": 3.089397338773569e-06,
722
+ "logits/chosen": 0.8498457074165344,
723
+ "logits/rejected": 0.8722270131111145,
724
+ "logps/chosen": -258.4722595214844,
725
+ "logps/rejected": -225.99447631835938,
726
+ "loss": 0.0021,
727
+ "rewards/accuracies": 0.668749988079071,
728
+ "rewards/chosen": -0.009471247904002666,
729
+ "rewards/margins": 0.008289327844977379,
730
+ "rewards/rejected": -0.01776057854294777,
731
+ "step": 460
732
+ },
733
+ {
734
+ "epoch": 0.98,
735
+ "learning_rate": 3.0000642344401115e-06,
736
+ "logits/chosen": 0.8206149935722351,
737
+ "logits/rejected": 0.8616384267807007,
738
+ "logps/chosen": -239.18264770507812,
739
+ "logps/rejected": -224.2777862548828,
740
+ "loss": 0.0021,
741
+ "rewards/accuracies": 0.581250011920929,
742
+ "rewards/chosen": -0.009907958097755909,
743
+ "rewards/margins": 0.007560922298580408,
744
+ "rewards/rejected": -0.017468880861997604,
745
+ "step": 470
746
+ },
747
+ {
748
+ "epoch": 1.0,
749
+ "learning_rate": 2.9100607788275547e-06,
750
+ "logits/chosen": 0.8556537628173828,
751
+ "logits/rejected": 0.9059454202651978,
752
+ "logps/chosen": -243.97933959960938,
753
+ "logps/rejected": -235.90817260742188,
754
+ "loss": 0.0022,
755
+ "rewards/accuracies": 0.6499999761581421,
756
+ "rewards/chosen": -0.009879620745778084,
757
+ "rewards/margins": 0.007370662875473499,
758
+ "rewards/rejected": -0.01725028082728386,
759
+ "step": 480
760
+ },
761
+ {
762
+ "epoch": 1.03,
763
+ "learning_rate": 2.8195076242990124e-06,
764
+ "logits/chosen": 0.837064266204834,
765
+ "logits/rejected": 0.8774217367172241,
766
+ "logps/chosen": -239.0427703857422,
767
+ "logps/rejected": -215.9508819580078,
768
+ "loss": 0.0021,
769
+ "rewards/accuracies": 0.675000011920929,
770
+ "rewards/chosen": -0.010479142889380455,
771
+ "rewards/margins": 0.008913186378777027,
772
+ "rewards/rejected": -0.019392330199480057,
773
+ "step": 490
774
+ },
775
+ {
776
+ "epoch": 1.05,
777
+ "learning_rate": 2.72852616010567e-06,
778
+ "logits/chosen": 0.8183882832527161,
779
+ "logits/rejected": 0.8531386256217957,
780
+ "logps/chosen": -253.97683715820312,
781
+ "logps/rejected": -234.94265747070312,
782
+ "loss": 0.0022,
783
+ "rewards/accuracies": 0.6937500238418579,
784
+ "rewards/chosen": -0.008641371503472328,
785
+ "rewards/margins": 0.009928500279784203,
786
+ "rewards/rejected": -0.01856987178325653,
787
+ "step": 500
788
+ },
789
+ {
790
+ "epoch": 1.05,
791
+ "eval_logits/chosen": 0.7821087837219238,
792
+ "eval_logits/rejected": 0.8852795362472534,
793
+ "eval_logps/chosen": -261.1851501464844,
794
+ "eval_logps/rejected": -233.3613739013672,
795
+ "eval_loss": 0.0021211737766861916,
796
+ "eval_rewards/accuracies": 0.7099999785423279,
797
+ "eval_rewards/chosen": -0.009409956634044647,
798
+ "eval_rewards/margins": 0.00984267145395279,
799
+ "eval_rewards/rejected": -0.019252628087997437,
800
+ "eval_runtime": 325.4617,
801
+ "eval_samples_per_second": 6.145,
802
+ "eval_steps_per_second": 0.384,
803
+ "step": 500
804
+ },
805
+ {
806
+ "epoch": 1.07,
807
+ "learning_rate": 2.637238349660819e-06,
808
+ "logits/chosen": 0.8250406384468079,
809
+ "logits/rejected": 0.9090532064437866,
810
+ "logps/chosen": -236.7153778076172,
811
+ "logps/rejected": -197.7652587890625,
812
+ "loss": 0.0021,
813
+ "rewards/accuracies": 0.637499988079071,
814
+ "rewards/chosen": -0.010482882149517536,
815
+ "rewards/margins": 0.00821294542402029,
816
+ "rewards/rejected": -0.018695827573537827,
817
+ "step": 510
818
+ },
819
+ {
820
+ "epoch": 1.09,
821
+ "learning_rate": 2.5457665670441937e-06,
822
+ "logits/chosen": 0.8841145634651184,
823
+ "logits/rejected": 0.891588568687439,
824
+ "logps/chosen": -250.2413330078125,
825
+ "logps/rejected": -226.20846557617188,
826
+ "loss": 0.0022,
827
+ "rewards/accuracies": 0.6499999761581421,
828
+ "rewards/chosen": -0.008791481144726276,
829
+ "rewards/margins": 0.008898518979549408,
830
+ "rewards/rejected": -0.01768999919295311,
831
+ "step": 520
832
+ },
833
+ {
834
+ "epoch": 1.11,
835
+ "learning_rate": 2.4542334329558075e-06,
836
+ "logits/chosen": 0.8093138933181763,
837
+ "logits/rejected": 0.8301302790641785,
838
+ "logps/chosen": -242.32766723632812,
839
+ "logps/rejected": -228.98446655273438,
840
+ "loss": 0.0021,
841
+ "rewards/accuracies": 0.7124999761581421,
842
+ "rewards/chosen": -0.008460971526801586,
843
+ "rewards/margins": 0.009179492481052876,
844
+ "rewards/rejected": -0.017640462145209312,
845
+ "step": 530
846
+ },
847
+ {
848
+ "epoch": 1.13,
849
+ "learning_rate": 2.3627616503391813e-06,
850
+ "logits/chosen": 0.8050084114074707,
851
+ "logits/rejected": 0.8223572969436646,
852
+ "logps/chosen": -259.1438903808594,
853
+ "logps/rejected": -216.0526885986328,
854
+ "loss": 0.0021,
855
+ "rewards/accuracies": 0.6312500238418579,
856
+ "rewards/chosen": -0.009418713860213757,
857
+ "rewards/margins": 0.0077890073880553246,
858
+ "rewards/rejected": -0.01720772124826908,
859
+ "step": 540
860
+ },
861
+ {
862
+ "epoch": 1.15,
863
+ "learning_rate": 2.271473839894331e-06,
864
+ "logits/chosen": 0.8079057931900024,
865
+ "logits/rejected": 0.8310044407844543,
866
+ "logps/chosen": -267.4736328125,
867
+ "logps/rejected": -249.3809356689453,
868
+ "loss": 0.0021,
869
+ "rewards/accuracies": 0.6499999761581421,
870
+ "rewards/chosen": -0.008644058369100094,
871
+ "rewards/margins": 0.008870486170053482,
872
+ "rewards/rejected": -0.01751454547047615,
873
+ "step": 550
874
+ },
875
+ {
876
+ "epoch": 1.17,
877
+ "learning_rate": 2.1804923757009885e-06,
878
+ "logits/chosen": 0.7828265428543091,
879
+ "logits/rejected": 0.810738205909729,
880
+ "logps/chosen": -251.98782348632812,
881
+ "logps/rejected": -226.2450408935547,
882
+ "loss": 0.0021,
883
+ "rewards/accuracies": 0.6937500238418579,
884
+ "rewards/chosen": -0.008495638146996498,
885
+ "rewards/margins": 0.009752650745213032,
886
+ "rewards/rejected": -0.018248289823532104,
887
+ "step": 560
888
+ },
889
+ {
890
+ "epoch": 1.19,
891
+ "learning_rate": 2.089939221172446e-06,
892
+ "logits/chosen": 0.8058856129646301,
893
+ "logits/rejected": 0.8096176385879517,
894
+ "logps/chosen": -270.29681396484375,
895
+ "logps/rejected": -230.7064666748047,
896
+ "loss": 0.0021,
897
+ "rewards/accuracies": 0.6937500238418579,
898
+ "rewards/chosen": -0.010026035830378532,
899
+ "rewards/margins": 0.009994433261454105,
900
+ "rewards/rejected": -0.020020468160510063,
901
+ "step": 570
902
+ },
903
+ {
904
+ "epoch": 1.21,
905
+ "learning_rate": 1.9999357655598894e-06,
906
+ "logits/chosen": 0.8202878832817078,
907
+ "logits/rejected": 0.7905790209770203,
908
+ "logps/chosen": -240.6394500732422,
909
+ "logps/rejected": -227.01150512695312,
910
+ "loss": 0.0022,
911
+ "rewards/accuracies": 0.6499999761581421,
912
+ "rewards/chosen": -0.007834648713469505,
913
+ "rewards/margins": 0.007603611797094345,
914
+ "rewards/rejected": -0.01543826051056385,
915
+ "step": 580
916
+ },
917
+ {
918
+ "epoch": 1.23,
919
+ "learning_rate": 1.9106026612264316e-06,
920
+ "logits/chosen": 0.8630008697509766,
921
+ "logits/rejected": 0.9225195050239563,
922
+ "logps/chosen": -218.4501953125,
923
+ "logps/rejected": -214.31399536132812,
924
+ "loss": 0.0021,
925
+ "rewards/accuracies": 0.637499988079071,
926
+ "rewards/chosen": -0.008840398862957954,
927
+ "rewards/margins": 0.009108386933803558,
928
+ "rewards/rejected": -0.017948785796761513,
929
+ "step": 590
930
+ },
931
+ {
932
+ "epoch": 1.26,
933
+ "learning_rate": 1.8220596619089576e-06,
934
+ "logits/chosen": 0.8304165601730347,
935
+ "logits/rejected": 0.7959300875663757,
936
+ "logps/chosen": -262.4768981933594,
937
+ "logps/rejected": -230.4768524169922,
938
+ "loss": 0.002,
939
+ "rewards/accuracies": 0.6937500238418579,
940
+ "rewards/chosen": -0.008658383972942829,
941
+ "rewards/margins": 0.008863124065101147,
942
+ "rewards/rejected": -0.017521508038043976,
943
+ "step": 600
944
+ },
945
+ {
946
+ "epoch": 1.26,
947
+ "eval_logits/chosen": 0.7814888954162598,
948
+ "eval_logits/rejected": 0.8839987516403198,
949
+ "eval_logps/chosen": -261.1206970214844,
950
+ "eval_logps/rejected": -233.28433227539062,
951
+ "eval_loss": 0.002118554199114442,
952
+ "eval_rewards/accuracies": 0.6940000057220459,
953
+ "eval_rewards/chosen": -0.008765296079218388,
954
+ "eval_rewards/margins": 0.009716734290122986,
955
+ "eval_rewards/rejected": -0.0184820294380188,
956
+ "eval_runtime": 325.1283,
957
+ "eval_samples_per_second": 6.151,
958
+ "eval_steps_per_second": 0.384,
959
+ "step": 600
960
+ },
961
+ {
962
+ "epoch": 1.28,
963
+ "learning_rate": 1.7344254621846018e-06,
964
+ "logits/chosen": 0.7952737212181091,
965
+ "logits/rejected": 0.8380396962165833,
966
+ "logps/chosen": -264.8700256347656,
967
+ "logps/rejected": -251.73214721679688,
968
+ "loss": 0.0019,
969
+ "rewards/accuracies": 0.75,
970
+ "rewards/chosen": -0.007588304579257965,
971
+ "rewards/margins": 0.011967618018388748,
972
+ "rewards/rejected": -0.019555922597646713,
973
+ "step": 610
974
+ },
975
+ {
976
+ "epoch": 1.3,
977
+ "learning_rate": 1.647817538357072e-06,
978
+ "logits/chosen": 0.8249796032905579,
979
+ "logits/rejected": 0.8937106132507324,
980
+ "logps/chosen": -247.3903350830078,
981
+ "logps/rejected": -229.36093139648438,
982
+ "loss": 0.0022,
983
+ "rewards/accuracies": 0.612500011920929,
984
+ "rewards/chosen": -0.010081231594085693,
985
+ "rewards/margins": 0.006175318732857704,
986
+ "rewards/rejected": -0.016256550326943398,
987
+ "step": 620
988
+ },
989
+ {
990
+ "epoch": 1.32,
991
+ "learning_rate": 1.5623519909760953e-06,
992
+ "logits/chosen": 0.8032970428466797,
993
+ "logits/rejected": 0.8601589202880859,
994
+ "logps/chosen": -242.910888671875,
995
+ "logps/rejected": -238.8945770263672,
996
+ "loss": 0.0021,
997
+ "rewards/accuracies": 0.668749988079071,
998
+ "rewards/chosen": -0.010011060163378716,
999
+ "rewards/margins": 0.007496376521885395,
1000
+ "rewards/rejected": -0.017507437616586685,
1001
+ "step": 630
1002
+ },
1003
+ {
1004
+ "epoch": 1.34,
1005
+ "learning_rate": 1.4781433892011132e-06,
1006
+ "logits/chosen": 0.8834539651870728,
1007
+ "logits/rejected": 0.8997892141342163,
1008
+ "logps/chosen": -242.17578125,
1009
+ "logps/rejected": -259.06671142578125,
1010
+ "loss": 0.0021,
1011
+ "rewards/accuracies": 0.668749988079071,
1012
+ "rewards/chosen": -0.008281581103801727,
1013
+ "rewards/margins": 0.008412448689341545,
1014
+ "rewards/rejected": -0.016694029793143272,
1015
+ "step": 640
1016
+ },
1017
+ {
1018
+ "epoch": 1.36,
1019
+ "learning_rate": 1.3953046172178413e-06,
1020
+ "logits/chosen": 0.8225077390670776,
1021
+ "logits/rejected": 0.8323785662651062,
1022
+ "logps/chosen": -253.43905639648438,
1023
+ "logps/rejected": -245.92721557617188,
1024
+ "loss": 0.0021,
1025
+ "rewards/accuracies": 0.6875,
1026
+ "rewards/chosen": -0.010358546860516071,
1027
+ "rewards/margins": 0.009153308346867561,
1028
+ "rewards/rejected": -0.019511854276061058,
1029
+ "step": 650
1030
+ },
1031
+ {
1032
+ "epoch": 1.38,
1033
+ "learning_rate": 1.3139467229135999e-06,
1034
+ "logits/chosen": 0.7964144945144653,
1035
+ "logits/rejected": 0.8561304807662964,
1036
+ "logps/chosen": -220.56430053710938,
1037
+ "logps/rejected": -246.96731567382812,
1038
+ "loss": 0.0022,
1039
+ "rewards/accuracies": 0.675000011920929,
1040
+ "rewards/chosen": -0.008580346591770649,
1041
+ "rewards/margins": 0.00769965723156929,
1042
+ "rewards/rejected": -0.016280004754662514,
1043
+ "step": 660
1044
+ },
1045
+ {
1046
+ "epoch": 1.4,
1047
+ "learning_rate": 1.2341787690142436e-06,
1048
+ "logits/chosen": 0.7782607078552246,
1049
+ "logits/rejected": 0.8575235605239868,
1050
+ "logps/chosen": -300.5128173828125,
1051
+ "logps/rejected": -240.5801239013672,
1052
+ "loss": 0.0021,
1053
+ "rewards/accuracies": 0.737500011920929,
1054
+ "rewards/chosen": -0.007187344133853912,
1055
+ "rewards/margins": 0.01008409820497036,
1056
+ "rewards/rejected": -0.017271442338824272,
1057
+ "step": 670
1058
+ },
1059
+ {
1060
+ "epoch": 1.42,
1061
+ "learning_rate": 1.1561076868822756e-06,
1062
+ "logits/chosen": 0.8010842204093933,
1063
+ "logits/rejected": 0.8400290608406067,
1064
+ "logps/chosen": -243.8752899169922,
1065
+ "logps/rejected": -216.2671661376953,
1066
+ "loss": 0.002,
1067
+ "rewards/accuracies": 0.6875,
1068
+ "rewards/chosen": -0.007206754293292761,
1069
+ "rewards/margins": 0.009658637456595898,
1070
+ "rewards/rejected": -0.01686539314687252,
1071
+ "step": 680
1072
+ },
1073
+ {
1074
+ "epoch": 1.44,
1075
+ "learning_rate": 1.079838133172111e-06,
1076
+ "logits/chosen": 0.8610042333602905,
1077
+ "logits/rejected": 0.8555776476860046,
1078
+ "logps/chosen": -258.38946533203125,
1079
+ "logps/rejected": -238.4031982421875,
1080
+ "loss": 0.0021,
1081
+ "rewards/accuracies": 0.668749988079071,
1082
+ "rewards/chosen": -0.008076001890003681,
1083
+ "rewards/margins": 0.0069586001336574554,
1084
+ "rewards/rejected": -0.015034601092338562,
1085
+ "step": 690
1086
+ },
1087
+ {
1088
+ "epoch": 1.47,
1089
+ "learning_rate": 1.0054723495346484e-06,
1090
+ "logits/chosen": 0.7388730049133301,
1091
+ "logits/rejected": 0.8165189027786255,
1092
+ "logps/chosen": -259.45965576171875,
1093
+ "logps/rejected": -227.28750610351562,
1094
+ "loss": 0.0021,
1095
+ "rewards/accuracies": 0.668749988079071,
1096
+ "rewards/chosen": -0.00880814902484417,
1097
+ "rewards/margins": 0.007600386627018452,
1098
+ "rewards/rejected": -0.016408536583185196,
1099
+ "step": 700
1100
+ },
1101
+ {
1102
+ "epoch": 1.47,
1103
+ "eval_logits/chosen": 0.7790006995201111,
1104
+ "eval_logits/rejected": 0.8815683722496033,
1105
+ "eval_logps/chosen": -261.0787658691406,
1106
+ "eval_logps/rejected": -233.25596618652344,
1107
+ "eval_loss": 0.0021041170693933964,
1108
+ "eval_rewards/accuracies": 0.699999988079071,
1109
+ "eval_rewards/chosen": -0.00834597460925579,
1110
+ "eval_rewards/margins": 0.009852716699242592,
1111
+ "eval_rewards/rejected": -0.018198693171143532,
1112
+ "eval_runtime": 325.1201,
1113
+ "eval_samples_per_second": 6.152,
1114
+ "eval_steps_per_second": 0.384,
1115
+ "step": 700
1116
+ },
1117
+ {
1118
+ "epoch": 1.49,
1119
+ "learning_rate": 9.331100255592437e-07,
1120
+ "logits/chosen": 0.8419380187988281,
1121
+ "logits/rejected": 0.8946343660354614,
1122
+ "logps/chosen": -279.4914245605469,
1123
+ "logps/rejected": -229.0105438232422,
1124
+ "loss": 0.0021,
1125
+ "rewards/accuracies": 0.6812499761581421,
1126
+ "rewards/chosen": -0.0075461543165147305,
1127
+ "rewards/margins": 0.010306203737854958,
1128
+ "rewards/rejected": -0.017852356657385826,
1129
+ "step": 710
1130
+ },
1131
+ {
1132
+ "epoch": 1.51,
1133
+ "learning_rate": 8.628481651367876e-07,
1134
+ "logits/chosen": 0.8257007598876953,
1135
+ "logits/rejected": 0.8564590215682983,
1136
+ "logps/chosen": -264.4073486328125,
1137
+ "logps/rejected": -244.4358673095703,
1138
+ "loss": 0.002,
1139
+ "rewards/accuracies": 0.65625,
1140
+ "rewards/chosen": -0.007830760441720486,
1141
+ "rewards/margins": 0.009762524627149105,
1142
+ "rewards/rejected": -0.01759328506886959,
1143
+ "step": 720
1144
+ },
1145
+ {
1146
+ "epoch": 1.53,
1147
+ "learning_rate": 7.947809564230446e-07,
1148
+ "logits/chosen": 0.8498528599739075,
1149
+ "logits/rejected": 0.8689123392105103,
1150
+ "logps/chosen": -265.91986083984375,
1151
+ "logps/rejected": -252.3328094482422,
1152
+ "loss": 0.002,
1153
+ "rewards/accuracies": 0.7749999761581421,
1154
+ "rewards/chosen": -0.007306605577468872,
1155
+ "rewards/margins": 0.00969378836452961,
1156
+ "rewards/rejected": -0.01700039580464363,
1157
+ "step": 730
1158
+ },
1159
+ {
1160
+ "epoch": 1.55,
1161
+ "learning_rate": 7.289996455765749e-07,
1162
+ "logits/chosen": 0.8058244585990906,
1163
+ "logits/rejected": 0.8203352093696594,
1164
+ "logps/chosen": -276.05224609375,
1165
+ "logps/rejected": -252.39712524414062,
1166
+ "loss": 0.0021,
1167
+ "rewards/accuracies": 0.6499999761581421,
1168
+ "rewards/chosen": -0.00796983577311039,
1169
+ "rewards/margins": 0.007479208521544933,
1170
+ "rewards/rejected": -0.015449045225977898,
1171
+ "step": 740
1172
+ },
1173
+ {
1174
+ "epoch": 1.57,
1175
+ "learning_rate": 6.655924144404907e-07,
1176
+ "logits/chosen": 0.7636196613311768,
1177
+ "logits/rejected": 0.8203707933425903,
1178
+ "logps/chosen": -260.9599304199219,
1179
+ "logps/rejected": -236.13442993164062,
1180
+ "loss": 0.0021,
1181
+ "rewards/accuracies": 0.7250000238418579,
1182
+ "rewards/chosen": -0.0055490517988801,
1183
+ "rewards/margins": 0.010982049629092216,
1184
+ "rewards/rejected": -0.01653110235929489,
1185
+ "step": 750
1186
+ },
1187
+ {
1188
+ "epoch": 1.59,
1189
+ "learning_rate": 6.046442623320145e-07,
1190
+ "logits/chosen": 0.8590337634086609,
1191
+ "logits/rejected": 0.889240562915802,
1192
+ "logps/chosen": -256.20843505859375,
1193
+ "logps/rejected": -218.8590087890625,
1194
+ "loss": 0.0021,
1195
+ "rewards/accuracies": 0.612500011920929,
1196
+ "rewards/chosen": -0.009814934805035591,
1197
+ "rewards/margins": 0.007059067487716675,
1198
+ "rewards/rejected": -0.016874000430107117,
1199
+ "step": 760
1200
+ },
1201
+ {
1202
+ "epoch": 1.61,
1203
+ "learning_rate": 5.462368920983249e-07,
1204
+ "logits/chosen": 0.8207941055297852,
1205
+ "logits/rejected": 0.8683902025222778,
1206
+ "logps/chosen": -241.982666015625,
1207
+ "logps/rejected": -217.79141235351562,
1208
+ "loss": 0.0022,
1209
+ "rewards/accuracies": 0.6499999761581421,
1210
+ "rewards/chosen": -0.009196323342621326,
1211
+ "rewards/margins": 0.006100159604102373,
1212
+ "rewards/rejected": -0.015296483412384987,
1213
+ "step": 770
1214
+ },
1215
+ {
1216
+ "epoch": 1.63,
1217
+ "learning_rate": 4.904486005914027e-07,
1218
+ "logits/chosen": 0.8281243443489075,
1219
+ "logits/rejected": 0.8692743182182312,
1220
+ "logps/chosen": -284.8575744628906,
1221
+ "logps/rejected": -258.57464599609375,
1222
+ "loss": 0.002,
1223
+ "rewards/accuracies": 0.675000011920929,
1224
+ "rewards/chosen": -0.0074394033290445805,
1225
+ "rewards/margins": 0.010337688960134983,
1226
+ "rewards/rejected": -0.01777709275484085,
1227
+ "step": 780
1228
+ },
1229
+ {
1230
+ "epoch": 1.65,
1231
+ "learning_rate": 4.373541737087264e-07,
1232
+ "logits/chosen": 0.8097678422927856,
1233
+ "logits/rejected": 0.9018150568008423,
1234
+ "logps/chosen": -265.8307189941406,
1235
+ "logps/rejected": -224.81103515625,
1236
+ "loss": 0.0022,
1237
+ "rewards/accuracies": 0.668749988079071,
1238
+ "rewards/chosen": -0.007008875720202923,
1239
+ "rewards/margins": 0.009901894256472588,
1240
+ "rewards/rejected": -0.016910770907998085,
1241
+ "step": 790
1242
+ },
1243
+ {
1244
+ "epoch": 1.67,
1245
+ "learning_rate": 3.8702478614051353e-07,
1246
+ "logits/chosen": 0.7832424640655518,
1247
+ "logits/rejected": 0.7852426767349243,
1248
+ "logps/chosen": -238.4669952392578,
1249
+ "logps/rejected": -229.850830078125,
1250
+ "loss": 0.0021,
1251
+ "rewards/accuracies": 0.737500011920929,
1252
+ "rewards/chosen": -0.007307013962417841,
1253
+ "rewards/margins": 0.011516671627759933,
1254
+ "rewards/rejected": -0.018823683261871338,
1255
+ "step": 800
1256
+ },
1257
+ {
1258
+ "epoch": 1.67,
1259
+ "eval_logits/chosen": 0.7780749797821045,
1260
+ "eval_logits/rejected": 0.8810694217681885,
1261
+ "eval_logps/chosen": -261.0643310546875,
1262
+ "eval_logps/rejected": -233.27398681640625,
1263
+ "eval_loss": 0.002105255611240864,
1264
+ "eval_rewards/accuracies": 0.6940000057220459,
1265
+ "eval_rewards/chosen": -0.008201568387448788,
1266
+ "eval_rewards/margins": 0.010177312418818474,
1267
+ "eval_rewards/rejected": -0.018378881737589836,
1268
+ "eval_runtime": 324.9688,
1269
+ "eval_samples_per_second": 6.154,
1270
+ "eval_steps_per_second": 0.385,
1271
+ "step": 800
1272
+ },
1273
+ {
1274
+ "epoch": 1.7,
1275
+ "learning_rate": 3.3952790595787986e-07,
1276
+ "logits/chosen": 0.9047285914421082,
1277
+ "logits/rejected": 0.8632117509841919,
1278
+ "logps/chosen": -249.9727020263672,
1279
+ "logps/rejected": -233.0714569091797,
1280
+ "loss": 0.0021,
1281
+ "rewards/accuracies": 0.606249988079071,
1282
+ "rewards/chosen": -0.008487801998853683,
1283
+ "rewards/margins": 0.007260690443217754,
1284
+ "rewards/rejected": -0.015748491510748863,
1285
+ "step": 810
1286
+ },
1287
+ {
1288
+ "epoch": 1.72,
1289
+ "learning_rate": 2.9492720416985004e-07,
1290
+ "logits/chosen": 0.745966374874115,
1291
+ "logits/rejected": 0.7877084016799927,
1292
+ "logps/chosen": -275.1504211425781,
1293
+ "logps/rejected": -252.06570434570312,
1294
+ "loss": 0.0021,
1295
+ "rewards/accuracies": 0.65625,
1296
+ "rewards/chosen": -0.006994744297116995,
1297
+ "rewards/margins": 0.008207054808735847,
1298
+ "rewards/rejected": -0.01520179957151413,
1299
+ "step": 820
1300
+ },
1301
+ {
1302
+ "epoch": 1.74,
1303
+ "learning_rate": 2.5328246937043526e-07,
1304
+ "logits/chosen": 0.8438940048217773,
1305
+ "logits/rejected": 0.8585928082466125,
1306
+ "logps/chosen": -237.1392059326172,
1307
+ "logps/rejected": -217.2599334716797,
1308
+ "loss": 0.0021,
1309
+ "rewards/accuracies": 0.668749988079071,
1310
+ "rewards/chosen": -0.009680895134806633,
1311
+ "rewards/margins": 0.008195875212550163,
1312
+ "rewards/rejected": -0.017876770347356796,
1313
+ "step": 830
1314
+ },
1315
+ {
1316
+ "epoch": 1.76,
1317
+ "learning_rate": 2.1464952759020857e-07,
1318
+ "logits/chosen": 0.835778534412384,
1319
+ "logits/rejected": 0.8605103492736816,
1320
+ "logps/chosen": -250.32455444335938,
1321
+ "logps/rejected": -247.2939453125,
1322
+ "loss": 0.0021,
1323
+ "rewards/accuracies": 0.6875,
1324
+ "rewards/chosen": -0.006872941739857197,
1325
+ "rewards/margins": 0.00862927082926035,
1326
+ "rewards/rejected": -0.015502211637794971,
1327
+ "step": 840
1328
+ },
1329
+ {
1330
+ "epoch": 1.78,
1331
+ "learning_rate": 1.790801674598186e-07,
1332
+ "logits/chosen": 0.8059646487236023,
1333
+ "logits/rejected": 0.8204299807548523,
1334
+ "logps/chosen": -269.0479736328125,
1335
+ "logps/rejected": -236.54345703125,
1336
+ "loss": 0.0021,
1337
+ "rewards/accuracies": 0.75,
1338
+ "rewards/chosen": -0.007251231465488672,
1339
+ "rewards/margins": 0.010516216047108173,
1340
+ "rewards/rejected": -0.017767447978258133,
1341
+ "step": 850
1342
+ },
1343
+ {
1344
+ "epoch": 1.8,
1345
+ "learning_rate": 1.4662207078575685e-07,
1346
+ "logits/chosen": 0.8005737066268921,
1347
+ "logits/rejected": 0.865730881690979,
1348
+ "logps/chosen": -232.56271362304688,
1349
+ "logps/rejected": -240.54428100585938,
1350
+ "loss": 0.0021,
1351
+ "rewards/accuracies": 0.71875,
1352
+ "rewards/chosen": -0.008310760371387005,
1353
+ "rewards/margins": 0.010277243331074715,
1354
+ "rewards/rejected": -0.018588004633784294,
1355
+ "step": 860
1356
+ },
1357
+ {
1358
+ "epoch": 1.82,
1359
+ "learning_rate": 1.1731874863145143e-07,
1360
+ "logits/chosen": 0.8402239680290222,
1361
+ "logits/rejected": 0.8874310255050659,
1362
+ "logps/chosen": -262.51519775390625,
1363
+ "logps/rejected": -224.810302734375,
1364
+ "loss": 0.002,
1365
+ "rewards/accuracies": 0.7124999761581421,
1366
+ "rewards/chosen": -0.006995867937803268,
1367
+ "rewards/margins": 0.009674609638750553,
1368
+ "rewards/rejected": -0.016670476645231247,
1369
+ "step": 870
1370
+ },
1371
+ {
1372
+ "epoch": 1.84,
1373
+ "learning_rate": 9.120948298936422e-08,
1374
+ "logits/chosen": 0.8549894094467163,
1375
+ "logits/rejected": 0.8683481216430664,
1376
+ "logps/chosen": -250.1100311279297,
1377
+ "logps/rejected": -225.083740234375,
1378
+ "loss": 0.0019,
1379
+ "rewards/accuracies": 0.6499999761581421,
1380
+ "rewards/chosen": -0.007406042423099279,
1381
+ "rewards/margins": 0.009078353643417358,
1382
+ "rewards/rejected": -0.016484394669532776,
1383
+ "step": 880
1384
+ },
1385
+ {
1386
+ "epoch": 1.86,
1387
+ "learning_rate": 6.832927412229017e-08,
1388
+ "logits/chosen": 0.785990834236145,
1389
+ "logits/rejected": 0.9018427729606628,
1390
+ "logps/chosen": -262.3821716308594,
1391
+ "logps/rejected": -228.41357421875,
1392
+ "loss": 0.002,
1393
+ "rewards/accuracies": 0.699999988079071,
1394
+ "rewards/chosen": -0.006989480461925268,
1395
+ "rewards/margins": 0.010014806874096394,
1396
+ "rewards/rejected": -0.017004288733005524,
1397
+ "step": 890
1398
+ },
1399
+ {
1400
+ "epoch": 1.88,
1401
+ "learning_rate": 4.870879364444109e-08,
1402
+ "logits/chosen": 0.8013142347335815,
1403
+ "logits/rejected": 0.8696510195732117,
1404
+ "logps/chosen": -252.10507202148438,
1405
+ "logps/rejected": -206.7936553955078,
1406
+ "loss": 0.0021,
1407
+ "rewards/accuracies": 0.6499999761581421,
1408
+ "rewards/chosen": -0.007737092673778534,
1409
+ "rewards/margins": 0.01070366334170103,
1410
+ "rewards/rejected": -0.01844075694680214,
1411
+ "step": 900
1412
+ },
1413
+ {
1414
+ "epoch": 1.88,
1415
+ "eval_logits/chosen": 0.7806341648101807,
1416
+ "eval_logits/rejected": 0.8832870721817017,
1417
+ "eval_logps/chosen": -261.0921936035156,
1418
+ "eval_logps/rejected": -233.21180725097656,
1419
+ "eval_loss": 0.002123194048181176,
1420
+ "eval_rewards/accuracies": 0.6899999976158142,
1421
+ "eval_rewards/chosen": -0.008480267599225044,
1422
+ "eval_rewards/margins": 0.009276580065488815,
1423
+ "eval_rewards/rejected": -0.01775684952735901,
1424
+ "eval_runtime": 325.0149,
1425
+ "eval_samples_per_second": 6.154,
1426
+ "eval_steps_per_second": 0.385,
1427
+ "step": 900
1428
+ },
1429
+ {
1430
+ "epoch": 1.9,
1431
+ "learning_rate": 3.237434340521789e-08,
1432
+ "logits/chosen": 0.7978643774986267,
1433
+ "logits/rejected": 0.8687127828598022,
1434
+ "logps/chosen": -263.38275146484375,
1435
+ "logps/rejected": -247.8026123046875,
1436
+ "loss": 0.0021,
1437
+ "rewards/accuracies": 0.668749988079071,
1438
+ "rewards/chosen": -0.007231117691844702,
1439
+ "rewards/margins": 0.009652243927121162,
1440
+ "rewards/rejected": -0.016883360221982002,
1441
+ "step": 910
1442
+ },
1443
+ {
1444
+ "epoch": 1.93,
1445
+ "learning_rate": 1.93478202307823e-08,
1446
+ "logits/chosen": 0.7963850498199463,
1447
+ "logits/rejected": 0.8160678148269653,
1448
+ "logps/chosen": -242.1365966796875,
1449
+ "logps/rejected": -246.0305938720703,
1450
+ "loss": 0.0021,
1451
+ "rewards/accuracies": 0.643750011920929,
1452
+ "rewards/chosen": -0.0077395932748913765,
1453
+ "rewards/margins": 0.007968437857925892,
1454
+ "rewards/rejected": -0.01570803113281727,
1455
+ "step": 920
1456
+ },
1457
+ {
1458
+ "epoch": 1.95,
1459
+ "learning_rate": 9.646686570697062e-09,
1460
+ "logits/chosen": 0.862303614616394,
1461
+ "logits/rejected": 0.8678015470504761,
1462
+ "logps/chosen": -257.33099365234375,
1463
+ "logps/rejected": -249.9061737060547,
1464
+ "loss": 0.0021,
1465
+ "rewards/accuracies": 0.7124999761581421,
1466
+ "rewards/chosen": -0.007601047400385141,
1467
+ "rewards/margins": 0.009036187082529068,
1468
+ "rewards/rejected": -0.01663723587989807,
1469
+ "step": 930
1470
+ },
1471
+ {
1472
+ "epoch": 1.97,
1473
+ "learning_rate": 3.283947088983663e-09,
1474
+ "logits/chosen": 0.8371657133102417,
1475
+ "logits/rejected": 0.8322643041610718,
1476
+ "logps/chosen": -238.14657592773438,
1477
+ "logps/rejected": -243.3525390625,
1478
+ "loss": 0.0021,
1479
+ "rewards/accuracies": 0.65625,
1480
+ "rewards/chosen": -0.009062298573553562,
1481
+ "rewards/margins": 0.008633644320070744,
1482
+ "rewards/rejected": -0.017695942893624306,
1483
+ "step": 940
1484
+ },
1485
+ {
1486
+ "epoch": 1.99,
1487
+ "learning_rate": 2.681312309735229e-10,
1488
+ "logits/chosen": 0.8020931482315063,
1489
+ "logits/rejected": 0.9026565551757812,
1490
+ "logps/chosen": -231.6744842529297,
1491
+ "logps/rejected": -229.53726196289062,
1492
+ "loss": 0.0021,
1493
+ "rewards/accuracies": 0.612500011920929,
1494
+ "rewards/chosen": -0.007398143410682678,
1495
+ "rewards/margins": 0.009477959014475346,
1496
+ "rewards/rejected": -0.0168761033564806,
1497
+ "step": 950
1498
+ },
1499
+ {
1500
+ "epoch": 2.0,
1501
+ "step": 954,
1502
+ "total_flos": 0.0,
1503
+ "train_loss": 0.0021909422920118682,
1504
+ "train_runtime": 18127.9992,
1505
+ "train_samples_per_second": 3.372,
1506
+ "train_steps_per_second": 0.053
1507
+ }
1508
+ ],
1509
+ "logging_steps": 10,
1510
+ "max_steps": 954,
1511
+ "num_input_tokens_seen": 0,
1512
+ "num_train_epochs": 2,
1513
+ "save_steps": 100,
1514
+ "total_flos": 0.0,
1515
+ "train_batch_size": 4,
1516
+ "trial_name": null,
1517
+ "trial_params": null
1518
+ }