lole25 committed
Commit 407a137
Parent: c55ea35

Model save
README.md ADDED
@@ -0,0 +1,82 @@
+ ---
+ license: apache-2.0
+ library_name: peft
+ tags:
+ - trl
+ - dpo
+ - generated_from_trainer
+ base_model: mistralai/Mistral-7B-v0.1
+ model-index:
+ - name: zephyr-7b-gpo-iter0
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # zephyr-7b-gpo-iter0
+
+ This model is a fine-tuned version of [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) on the None dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.0258
+ - Rewards/chosen: -0.0580
+ - Rewards/rejected: -0.0061
+ - Rewards/accuracies: 0.3380
+ - Rewards/margins: -0.0519
+ - Logps/rejected: -249.4468
+ - Logps/chosen: -274.3866
+ - Logits/rejected: -2.2108
+ - Logits/chosen: -2.4070
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-06
+ - train_batch_size: 1
+ - eval_batch_size: 2
+ - seed: 42
+ - distributed_type: multi-GPU
+ - gradient_accumulation_steps: 2
+ - total_train_batch_size: 2
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 2
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
+ |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
+ | 0.0008        | 0.2   | 100  | 0.0019          | -0.0111        | -0.0138          | 0.5300             | 0.0027          | -250.2170      | -269.6990    | -2.2026         | -2.4007       |
+ | 0.0006        | 0.4   | 200  | 0.0029          | -0.0237        | -0.0230          | 0.4910             | -0.0007         | -251.1392      | -270.9541    | -2.2051         | -2.4034       |
+ | 0.001         | 0.6   | 300  | 0.0019          | -0.0120        | -0.0142          | 0.5310             | 0.0022          | -250.2602      | -269.7912    | -2.2008         | -2.3984       |
+ | 0.0011        | 0.8   | 400  | 0.0023          | -0.0201        | -0.0211          | 0.5010             | 0.0011          | -250.9541      | -270.5950    | -2.1993         | -2.3968       |
+ | 0.0008        | 1.0   | 500  | 0.0021          | -0.0170        | -0.0189          | 0.5065             | 0.0019          | -250.7260      | -270.2850    | -2.1982         | -2.3960       |
+ | 0.044         | 1.2   | 600  | 0.0091          | -0.0053        | 0.0198           | 0.3600             | -0.0252         | -246.8548      | -269.1194    | -2.1940         | -2.3899       |
+ | 0.0682        | 1.4   | 700  | 0.0191          | -0.0345        | 0.0086           | 0.3450             | -0.0431         | -247.9818      | -272.0423    | -2.2035         | -2.3992       |
+ | 0.0505        | 1.6   | 800  | 0.0237          | -0.0497        | -0.0001          | 0.3405             | -0.0496         | -248.8542      | -273.5587    | -2.2094         | -2.4056       |
+ | 0.0243        | 1.8   | 900  | 0.0259          | -0.0581        | -0.0062          | 0.3340             | -0.0519         | -249.4570      | -274.3967    | -2.2117         | -2.4081       |
+ | 0.0697        | 2.0   | 1000 | 0.0258          | -0.0580        | -0.0061          | 0.3380             | -0.0519         | -249.4468      | -274.3866    | -2.2108         | -2.4070       |
+
+
+ ### Framework versions
+
+ - PEFT 0.7.1
+ - Transformers 4.36.2
+ - Pytorch 2.1.2+cu118
+ - Datasets 2.14.6
+ - Tokenizers 0.15.2
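
For reference, the hyperparameters listed in the card map onto a fairly standard TRL preference-tuning setup. The sketch below only illustrates how such a configuration might look with `transformers.TrainingArguments` and TRL's standard `DPOTrainer` (the exact GPO training script is not included in this commit); the toy dataset, `beta`, and LoRA settings are placeholders, not values reported in the card.

```python
# Hedged sketch only: re-creates the reported hyperparameters with transformers/TRL.
# The toy dataset, beta, and LoRA settings are placeholders, not values from this commit.
from datasets import Dataset
from peft import LoraConfig
from transformers import AutoTokenizer, TrainingArguments
from trl import DPOTrainer

toy_prefs = Dataset.from_dict({          # placeholder preference data in DPO format
    "prompt": ["What is the capital of France?"],
    "chosen": ["The capital of France is Paris."],
    "rejected": ["I am not sure."],
})

tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
tokenizer.pad_token = tokenizer.eos_token

args = TrainingArguments(
    output_dir="zephyr-7b-gpo-iter0",
    learning_rate=5e-6,
    per_device_train_batch_size=1,
    per_device_eval_batch_size=2,
    gradient_accumulation_steps=2,       # total train batch size 2
    num_train_epochs=2,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    seed=42,
    evaluation_strategy="steps",
    eval_steps=100,                      # matches the 100-step eval cadence in the table
    logging_steps=10,
    # Adam betas/epsilon left at the defaults (0.9, 0.999, 1e-8), matching the card.
)

trainer = DPOTrainer(
    model="mistralai/Mistral-7B-v0.1",   # TRL loads the base model from a string id
    args=args,
    beta=0.1,                            # placeholder; the card does not report beta
    train_dataset=toy_prefs,
    eval_dataset=toy_prefs,
    tokenizer=tokenizer,
    peft_config=LoraConfig(task_type="CAUSAL_LM", r=16, lora_alpha=32),  # adapter shape not shown in this commit
)
trainer.train()
```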
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d62e9aa2c8e98056d8063360fc5b8f7d2dbab7db13276c8eaa780ed05df0fb43
+ oid sha256:bf14c9e09a2e0c9af1b622451b19e1881cb4891019a3c54abfb5de9de084f54d
  size 671150064
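
The adapter_model.safetensors entry above is a Git LFS pointer to the updated LoRA adapter weights (about 671 MB). A minimal sketch of loading them on top of the base model, assuming the repository id is `lole25/zephyr-7b-gpo-iter0` (inferred from the committer and card name, not stated in the diff):

```python
# Hedged sketch: loads the PEFT adapter on top of mistralai/Mistral-7B-v0.1.
# The repo id is an assumption inferred from the model card, not confirmed by this commit.
import torch
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

repo_id = "lole25/zephyr-7b-gpo-iter0"   # assumed repository id

model = AutoPeftModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype=torch.bfloat16,          # half precision to keep memory manageable (a choice here, not from the commit)
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")

inputs = tokenizer("The capital of France is", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=20)[0]))
```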
all_results.json ADDED
@@ -0,0 +1,21 @@
+ {
+     "epoch": 2.0,
+     "eval_logits/chosen": -2.4070470333099365,
+     "eval_logits/rejected": -2.210773468017578,
+     "eval_logps/chosen": -274.3865966796875,
+     "eval_logps/rejected": -249.44677734375,
+     "eval_loss": 0.0258334930986166,
+     "eval_rewards/accuracies": 0.33799999952316284,
+     "eval_rewards/chosen": -0.05798804759979248,
+     "eval_rewards/margins": -0.05191566422581673,
+     "eval_rewards/rejected": -0.006072388496249914,
+     "eval_runtime": 1420.6371,
+     "eval_samples": 2000,
+     "eval_samples_per_second": 1.408,
+     "eval_steps_per_second": 0.704,
+     "train_loss": 0.019690872263745406,
+     "train_runtime": 18916.4989,
+     "train_samples": 61135,
+     "train_samples_per_second": 0.106,
+     "train_steps_per_second": 0.053
+ }
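
The figures in all_results.json are internally consistent with the usual DPO-style reward bookkeeping: the reward margin is the chosen reward minus the rejected reward, and the throughput fields are samples (or steps) divided by runtime. A quick check using only numbers from the file above:

```python
# Sanity checks on the values reported in all_results.json (no new data introduced).
eval_chosen = -0.05798804759979248
eval_rejected = -0.006072388496249914
eval_margin = -0.05191566422581673
assert abs((eval_chosen - eval_rejected) - eval_margin) < 1e-6   # margins = chosen - rejected

eval_samples, eval_runtime = 2000, 1420.6371
print(eval_samples / eval_runtime)        # ~1.408, matches eval_samples_per_second

train_runtime, steps_per_second = 18916.4989, 0.053
print(train_runtime * steps_per_second)   # ~1003, consistent with the 1000 optimization steps logged below
```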
eval_results.json ADDED
@@ -0,0 +1,16 @@
+ {
+     "epoch": 2.0,
+     "eval_logits/chosen": -2.4070470333099365,
+     "eval_logits/rejected": -2.210773468017578,
+     "eval_logps/chosen": -274.3865966796875,
+     "eval_logps/rejected": -249.44677734375,
+     "eval_loss": 0.0258334930986166,
+     "eval_rewards/accuracies": 0.33799999952316284,
+     "eval_rewards/chosen": -0.05798804759979248,
+     "eval_rewards/margins": -0.05191566422581673,
+     "eval_rewards/rejected": -0.006072388496249914,
+     "eval_runtime": 1420.6371,
+     "eval_samples": 2000,
+     "eval_samples_per_second": 1.408,
+     "eval_steps_per_second": 0.704
+ }
runs/Mar26_14-44-48_gpu4-119-4/events.out.tfevents.1711424947.gpu4-119-4.803138.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:04489168647bc6186b6a0502343e73ed185707fa750fe7006d25395ec574f725
- size 68763
+ oid sha256:1061ee19f4cc392c0238618372bdde5251cb8ce7cf6d3912ea305f9cb85fb4ad
+ size 76197
runs/Mar26_14-44-48_gpu4-119-4/events.out.tfevents.1711445328.gpu4-119-4.803138.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0fdbc205643a8074faab447db30e4dace56b9dc9ced5521f0bad7811bcace628
+ size 828
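
The two events.out.tfevents pointers above are the TensorBoard logs for this run (the first file was extended, the second added at the end of training). A minimal sketch for inspecting them after downloading the runs/ directory; the scalar tag name is an assumption, since it depends on how the trainer named its logs:

```python
# Hedged sketch: reads scalars from the downloaded TensorBoard event files.
# The tag name below is an assumption, not taken from this commit.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("runs/Mar26_14-44-48_gpu4-119-4")
ea.Reload()

print(ea.Tags()["scalars"])               # list the logged scalar tags first
for event in ea.Scalars("train/loss"):    # assumed tag; pick one printed above
    print(event.step, event.value)
```

Alternatively, `tensorboard --logdir runs/` serves the same data interactively.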
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 2.0,
+     "train_loss": 0.019690872263745406,
+     "train_runtime": 18916.4989,
+     "train_samples": 61135,
+     "train_samples_per_second": 0.106,
+     "train_steps_per_second": 0.053
+ }
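
train_results.json keeps only the end-of-run summary; the step-by-step record lands in trainer_state.json (next file), whose log_history list interleaves training entries (every 10 steps) and evaluation entries (every 100 steps). A sketch for pulling that history into a plot, assuming the file has been downloaded locally:

```python
# Hedged sketch: plots the reward margin over training from a local copy of trainer_state.json.
# The keys follow the log_history entries shown in the section below.
import json
import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

train_log = [e for e in state["log_history"] if "loss" in e]        # 10-step training entries
eval_log = [e for e in state["log_history"] if "eval_loss" in e]    # 100-step evaluation entries

plt.plot([e["step"] for e in train_log], [e["rewards/margins"] for e in train_log], label="train margin")
plt.plot([e["step"] for e in eval_log], [e["eval_rewards/margins"] for e in eval_log], label="eval margin")
plt.xlabel("step")
plt.ylabel("rewards/margins")
plt.legend()
plt.show()
```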
trainer_state.json ADDED
@@ -0,0 +1,1604 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 2.0,
5
+ "eval_steps": 100,
6
+ "global_step": 1000,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.0,
13
+ "learning_rate": 5.0000000000000004e-08,
14
+ "logits/chosen": -3.1812663078308105,
15
+ "logits/rejected": -2.810722827911377,
16
+ "logps/chosen": -118.07083129882812,
17
+ "logps/rejected": -100.80718994140625,
18
+ "loss": 0.0011,
19
+ "rewards/accuracies": 0.0,
20
+ "rewards/chosen": 0.0,
21
+ "rewards/margins": 0.0,
22
+ "rewards/rejected": 0.0,
23
+ "step": 1
24
+ },
25
+ {
26
+ "epoch": 0.02,
27
+ "learning_rate": 5.000000000000001e-07,
28
+ "logits/chosen": -2.9193410873413086,
29
+ "logits/rejected": -2.7373409271240234,
30
+ "logps/chosen": -116.89999389648438,
31
+ "logps/rejected": -116.49290466308594,
32
+ "loss": 0.0011,
33
+ "rewards/accuracies": 0.3888888955116272,
34
+ "rewards/chosen": -0.00038699465221725404,
35
+ "rewards/margins": -0.0005435158964246511,
36
+ "rewards/rejected": 0.00015652130241505802,
37
+ "step": 10
38
+ },
39
+ {
40
+ "epoch": 0.04,
41
+ "learning_rate": 1.0000000000000002e-06,
42
+ "logits/chosen": -2.981767416000366,
43
+ "logits/rejected": -2.6092238426208496,
44
+ "logps/chosen": -164.24819946289062,
45
+ "logps/rejected": -164.7591552734375,
46
+ "loss": 0.0011,
47
+ "rewards/accuracies": 0.4000000059604645,
48
+ "rewards/chosen": -6.044005203875713e-05,
49
+ "rewards/margins": -0.0003916244604624808,
50
+ "rewards/rejected": 0.0003311844193376601,
51
+ "step": 20
52
+ },
53
+ {
54
+ "epoch": 0.06,
55
+ "learning_rate": 1.5e-06,
56
+ "logits/chosen": -2.913992404937744,
57
+ "logits/rejected": -2.6422486305236816,
58
+ "logps/chosen": -153.99203491210938,
59
+ "logps/rejected": -141.27601623535156,
60
+ "loss": 0.0011,
61
+ "rewards/accuracies": 0.44999998807907104,
62
+ "rewards/chosen": -0.00013318247511051595,
63
+ "rewards/margins": 0.0008146190084517002,
64
+ "rewards/rejected": -0.0009478016872890294,
65
+ "step": 30
66
+ },
67
+ {
68
+ "epoch": 0.08,
69
+ "learning_rate": 2.0000000000000003e-06,
70
+ "logits/chosen": -2.9979870319366455,
71
+ "logits/rejected": -2.5024120807647705,
72
+ "logps/chosen": -180.6337890625,
73
+ "logps/rejected": -168.78204345703125,
74
+ "loss": 0.001,
75
+ "rewards/accuracies": 0.699999988079071,
76
+ "rewards/chosen": 0.002415223978459835,
77
+ "rewards/margins": 0.002099759876728058,
78
+ "rewards/rejected": 0.0003154640144202858,
79
+ "step": 40
80
+ },
81
+ {
82
+ "epoch": 0.1,
83
+ "learning_rate": 2.5e-06,
84
+ "logits/chosen": -2.7826578617095947,
85
+ "logits/rejected": -2.437479019165039,
86
+ "logps/chosen": -167.60964965820312,
87
+ "logps/rejected": -154.45205688476562,
88
+ "loss": 0.001,
89
+ "rewards/accuracies": 0.550000011920929,
90
+ "rewards/chosen": 0.0028098905459046364,
91
+ "rewards/margins": 0.0014571474166586995,
92
+ "rewards/rejected": 0.0013527432456612587,
93
+ "step": 50
94
+ },
95
+ {
96
+ "epoch": 0.12,
97
+ "learning_rate": 3e-06,
98
+ "logits/chosen": -2.715552806854248,
99
+ "logits/rejected": -2.5350234508514404,
100
+ "logps/chosen": -147.0255126953125,
101
+ "logps/rejected": -136.99874877929688,
102
+ "loss": 0.0011,
103
+ "rewards/accuracies": 0.6499999761581421,
104
+ "rewards/chosen": 0.001564659527502954,
105
+ "rewards/margins": 0.0012098618317395449,
106
+ "rewards/rejected": 0.0003547971718944609,
107
+ "step": 60
108
+ },
109
+ {
110
+ "epoch": 0.14,
111
+ "learning_rate": 3.5e-06,
112
+ "logits/chosen": -3.064145088195801,
113
+ "logits/rejected": -2.715378522872925,
114
+ "logps/chosen": -163.35397338867188,
115
+ "logps/rejected": -152.8401336669922,
116
+ "loss": 0.001,
117
+ "rewards/accuracies": 0.6000000238418579,
118
+ "rewards/chosen": 0.011319306679069996,
119
+ "rewards/margins": 0.002820510882884264,
120
+ "rewards/rejected": 0.00849879626184702,
121
+ "step": 70
122
+ },
123
+ {
124
+ "epoch": 0.16,
125
+ "learning_rate": 4.000000000000001e-06,
126
+ "logits/chosen": -3.031562089920044,
127
+ "logits/rejected": -2.762894868850708,
128
+ "logps/chosen": -154.64096069335938,
129
+ "logps/rejected": -142.45066833496094,
130
+ "loss": 0.001,
131
+ "rewards/accuracies": 0.699999988079071,
132
+ "rewards/chosen": 0.015446094796061516,
133
+ "rewards/margins": 0.0030269301496446133,
134
+ "rewards/rejected": 0.012419164180755615,
135
+ "step": 80
136
+ },
137
+ {
138
+ "epoch": 0.18,
139
+ "learning_rate": 4.5e-06,
140
+ "logits/chosen": -3.0730316638946533,
141
+ "logits/rejected": -2.5636696815490723,
142
+ "logps/chosen": -126.04837799072266,
143
+ "logps/rejected": -110.87501525878906,
144
+ "loss": 0.0009,
145
+ "rewards/accuracies": 0.6499999761581421,
146
+ "rewards/chosen": 0.018308930099010468,
147
+ "rewards/margins": 0.008363587781786919,
148
+ "rewards/rejected": 0.009945342317223549,
149
+ "step": 90
150
+ },
151
+ {
152
+ "epoch": 0.2,
153
+ "learning_rate": 5e-06,
154
+ "logits/chosen": -2.9546635150909424,
155
+ "logits/rejected": -2.5928895473480225,
156
+ "logps/chosen": -133.506103515625,
157
+ "logps/rejected": -123.97562408447266,
158
+ "loss": 0.0008,
159
+ "rewards/accuracies": 0.75,
160
+ "rewards/chosen": 0.014314673840999603,
161
+ "rewards/margins": 0.005265203304588795,
162
+ "rewards/rejected": 0.009049469605088234,
163
+ "step": 100
164
+ },
165
+ {
166
+ "epoch": 0.2,
167
+ "eval_logits/chosen": -2.4007108211517334,
168
+ "eval_logits/rejected": -2.2025678157806396,
169
+ "eval_logps/chosen": -269.69903564453125,
170
+ "eval_logps/rejected": -250.2169952392578,
171
+ "eval_loss": 0.0019292256329208612,
172
+ "eval_rewards/accuracies": 0.5299999713897705,
173
+ "eval_rewards/chosen": -0.01111243013292551,
174
+ "eval_rewards/margins": 0.002662122482433915,
175
+ "eval_rewards/rejected": -0.013774552382528782,
176
+ "eval_runtime": 1423.4882,
177
+ "eval_samples_per_second": 1.405,
178
+ "eval_steps_per_second": 0.702,
179
+ "step": 100
180
+ },
181
+ {
182
+ "epoch": 0.22,
183
+ "learning_rate": 4.99847706754774e-06,
184
+ "logits/chosen": -3.0832509994506836,
185
+ "logits/rejected": -2.6540307998657227,
186
+ "logps/chosen": -158.02037048339844,
187
+ "logps/rejected": -128.09632873535156,
188
+ "loss": 0.0012,
189
+ "rewards/accuracies": 0.800000011920929,
190
+ "rewards/chosen": 0.03201522305607796,
191
+ "rewards/margins": 0.014926651492714882,
192
+ "rewards/rejected": 0.017088569700717926,
193
+ "step": 110
194
+ },
195
+ {
196
+ "epoch": 0.24,
197
+ "learning_rate": 4.993910125649561e-06,
198
+ "logits/chosen": -3.000650644302368,
199
+ "logits/rejected": -2.5093066692352295,
200
+ "logps/chosen": -111.5718765258789,
201
+ "logps/rejected": -91.67008209228516,
202
+ "loss": 0.0007,
203
+ "rewards/accuracies": 0.8500000238418579,
204
+ "rewards/chosen": 0.017279163002967834,
205
+ "rewards/margins": 0.016767729073762894,
206
+ "rewards/rejected": 0.0005114328814670444,
207
+ "step": 120
208
+ },
209
+ {
210
+ "epoch": 0.26,
211
+ "learning_rate": 4.986304738420684e-06,
212
+ "logits/chosen": -2.8899765014648438,
213
+ "logits/rejected": -2.545956611633301,
214
+ "logps/chosen": -157.5283660888672,
215
+ "logps/rejected": -135.06536865234375,
216
+ "loss": 0.001,
217
+ "rewards/accuracies": 0.699999988079071,
218
+ "rewards/chosen": 0.006769341416656971,
219
+ "rewards/margins": 0.013469241559505463,
220
+ "rewards/rejected": -0.006699901074171066,
221
+ "step": 130
222
+ },
223
+ {
224
+ "epoch": 0.28,
225
+ "learning_rate": 4.975670171853926e-06,
226
+ "logits/chosen": -3.0038022994995117,
227
+ "logits/rejected": -2.8060638904571533,
228
+ "logps/chosen": -172.43551635742188,
229
+ "logps/rejected": -161.0656280517578,
230
+ "loss": 0.001,
231
+ "rewards/accuracies": 0.5,
232
+ "rewards/chosen": -0.006354503333568573,
233
+ "rewards/margins": 0.004105663392692804,
234
+ "rewards/rejected": -0.01046016626060009,
235
+ "step": 140
236
+ },
237
+ {
238
+ "epoch": 0.3,
239
+ "learning_rate": 4.962019382530521e-06,
240
+ "logits/chosen": -2.938009023666382,
241
+ "logits/rejected": -2.3462884426116943,
242
+ "logps/chosen": -113.1685791015625,
243
+ "logps/rejected": -100.8033676147461,
244
+ "loss": 0.001,
245
+ "rewards/accuracies": 0.6499999761581421,
246
+ "rewards/chosen": 0.00343899498693645,
247
+ "rewards/margins": 0.007120263762772083,
248
+ "rewards/rejected": -0.0036812685430049896,
249
+ "step": 150
250
+ },
251
+ {
252
+ "epoch": 0.32,
253
+ "learning_rate": 4.9453690018345144e-06,
254
+ "logits/chosen": -3.1557974815368652,
255
+ "logits/rejected": -2.805929183959961,
256
+ "logps/chosen": -212.8404998779297,
257
+ "logps/rejected": -204.03390502929688,
258
+ "loss": 0.0013,
259
+ "rewards/accuracies": 0.550000011920929,
260
+ "rewards/chosen": -0.006425648927688599,
261
+ "rewards/margins": -0.0002856804057955742,
262
+ "rewards/rejected": -0.0061399685218930244,
263
+ "step": 160
264
+ },
265
+ {
266
+ "epoch": 0.34,
267
+ "learning_rate": 4.925739315689991e-06,
268
+ "logits/chosen": -2.8449888229370117,
269
+ "logits/rejected": -2.6668992042541504,
270
+ "logps/chosen": -205.7440643310547,
271
+ "logps/rejected": -196.88563537597656,
272
+ "loss": 0.0012,
273
+ "rewards/accuracies": 0.5,
274
+ "rewards/chosen": -0.005873252637684345,
275
+ "rewards/margins": 0.0015904292231425643,
276
+ "rewards/rejected": -0.007463681511580944,
277
+ "step": 170
278
+ },
279
+ {
280
+ "epoch": 0.36,
281
+ "learning_rate": 4.903154239845798e-06,
282
+ "logits/chosen": -3.059779167175293,
283
+ "logits/rejected": -2.7152631282806396,
284
+ "logps/chosen": -178.5559539794922,
285
+ "logps/rejected": -174.60049438476562,
286
+ "loss": 0.001,
287
+ "rewards/accuracies": 0.4000000059604645,
288
+ "rewards/chosen": 0.004108003806322813,
289
+ "rewards/margins": 0.0049694306217134,
290
+ "rewards/rejected": -0.0008614275720901787,
291
+ "step": 180
292
+ },
293
+ {
294
+ "epoch": 0.38,
295
+ "learning_rate": 4.8776412907378845e-06,
296
+ "logits/chosen": -2.9436910152435303,
297
+ "logits/rejected": -2.4861669540405273,
298
+ "logps/chosen": -154.61508178710938,
299
+ "logps/rejected": -129.8065643310547,
300
+ "loss": 0.0008,
301
+ "rewards/accuracies": 0.8500000238418579,
302
+ "rewards/chosen": 0.02211841568350792,
303
+ "rewards/margins": 0.012492652982473373,
304
+ "rewards/rejected": 0.00962576363235712,
305
+ "step": 190
306
+ },
307
+ {
308
+ "epoch": 0.4,
309
+ "learning_rate": 4.849231551964771e-06,
310
+ "logits/chosen": -2.934830904006958,
311
+ "logits/rejected": -2.599217653274536,
312
+ "logps/chosen": -148.74435424804688,
313
+ "logps/rejected": -139.92453002929688,
314
+ "loss": 0.0006,
315
+ "rewards/accuracies": 0.8500000238418579,
316
+ "rewards/chosen": 0.02799961529672146,
317
+ "rewards/margins": 0.012480301782488823,
318
+ "rewards/rejected": 0.015519311651587486,
319
+ "step": 200
320
+ },
321
+ {
322
+ "epoch": 0.4,
323
+ "eval_logits/chosen": -2.4034218788146973,
324
+ "eval_logits/rejected": -2.205068349838257,
325
+ "eval_logps/chosen": -270.9541015625,
326
+ "eval_logps/rejected": -251.13922119140625,
327
+ "eval_loss": 0.002935634693130851,
328
+ "eval_rewards/accuracies": 0.4909999966621399,
329
+ "eval_rewards/chosen": -0.0236628670245409,
330
+ "eval_rewards/margins": -0.0006660502986051142,
331
+ "eval_rewards/rejected": -0.022996816784143448,
332
+ "eval_runtime": 1423.7196,
333
+ "eval_samples_per_second": 1.405,
334
+ "eval_steps_per_second": 0.702,
335
+ "step": 200
336
+ },
337
+ {
338
+ "epoch": 0.42,
339
+ "learning_rate": 4.817959636416969e-06,
340
+ "logits/chosen": -2.7869691848754883,
341
+ "logits/rejected": -2.3660902976989746,
342
+ "logps/chosen": -188.44235229492188,
343
+ "logps/rejected": -165.0704345703125,
344
+ "loss": 0.0007,
345
+ "rewards/accuracies": 0.6499999761581421,
346
+ "rewards/chosen": 0.023742977529764175,
347
+ "rewards/margins": 0.013938216492533684,
348
+ "rewards/rejected": 0.009804759174585342,
349
+ "step": 210
350
+ },
351
+ {
352
+ "epoch": 0.44,
353
+ "learning_rate": 4.783863644106502e-06,
354
+ "logits/chosen": -2.7817561626434326,
355
+ "logits/rejected": -2.4513843059539795,
356
+ "logps/chosen": -129.5194854736328,
357
+ "logps/rejected": -119.9756088256836,
358
+ "loss": 0.0011,
359
+ "rewards/accuracies": 0.6499999761581421,
360
+ "rewards/chosen": 0.014000030234456062,
361
+ "rewards/margins": 0.006808738224208355,
362
+ "rewards/rejected": 0.007191292010247707,
363
+ "step": 220
364
+ },
365
+ {
366
+ "epoch": 0.46,
367
+ "learning_rate": 4.746985115747918e-06,
368
+ "logits/chosen": -2.997894525527954,
369
+ "logits/rejected": -2.7682430744171143,
370
+ "logps/chosen": -206.5100860595703,
371
+ "logps/rejected": -196.3065185546875,
372
+ "loss": 0.0007,
373
+ "rewards/accuracies": 0.699999988079071,
374
+ "rewards/chosen": 0.01694752275943756,
375
+ "rewards/margins": 0.008601350709795952,
376
+ "rewards/rejected": 0.00834617204964161,
377
+ "step": 230
378
+ },
379
+ {
380
+ "epoch": 0.48,
381
+ "learning_rate": 4.707368982147318e-06,
382
+ "logits/chosen": -2.9324448108673096,
383
+ "logits/rejected": -2.6252548694610596,
384
+ "logps/chosen": -140.8667449951172,
385
+ "logps/rejected": -124.31671142578125,
386
+ "loss": 0.001,
387
+ "rewards/accuracies": 0.6000000238418579,
388
+ "rewards/chosen": 0.010156641714274883,
389
+ "rewards/margins": 0.011887853033840656,
390
+ "rewards/rejected": -0.0017312124837189913,
391
+ "step": 240
392
+ },
393
+ {
394
+ "epoch": 0.5,
395
+ "learning_rate": 4.665063509461098e-06,
396
+ "logits/chosen": -3.0423521995544434,
397
+ "logits/rejected": -2.578011989593506,
398
+ "logps/chosen": -136.1217498779297,
399
+ "logps/rejected": -116.2592544555664,
400
+ "loss": 0.0006,
401
+ "rewards/accuracies": 0.75,
402
+ "rewards/chosen": 0.022909339517354965,
403
+ "rewards/margins": 0.01127886213362217,
404
+ "rewards/rejected": 0.011630477383732796,
405
+ "step": 250
406
+ },
407
+ {
408
+ "epoch": 0.52,
409
+ "learning_rate": 4.620120240391065e-06,
410
+ "logits/chosen": -2.8443424701690674,
411
+ "logits/rejected": -2.6343538761138916,
412
+ "logps/chosen": -100.57234191894531,
413
+ "logps/rejected": -98.87772369384766,
414
+ "loss": 0.0011,
415
+ "rewards/accuracies": 0.75,
416
+ "rewards/chosen": 0.010612092912197113,
417
+ "rewards/margins": 0.005750484298914671,
418
+ "rewards/rejected": 0.004861608147621155,
419
+ "step": 260
420
+ },
421
+ {
422
+ "epoch": 0.54,
423
+ "learning_rate": 4.572593931387604e-06,
424
+ "logits/chosen": -3.0539159774780273,
425
+ "logits/rejected": -2.753909111022949,
426
+ "logps/chosen": -126.12535095214844,
427
+ "logps/rejected": -113.8405990600586,
428
+ "loss": 0.0008,
429
+ "rewards/accuracies": 0.6499999761581421,
430
+ "rewards/chosen": 0.00659831753000617,
431
+ "rewards/margins": 0.010872049257159233,
432
+ "rewards/rejected": -0.004273730795830488,
433
+ "step": 270
434
+ },
435
+ {
436
+ "epoch": 0.56,
437
+ "learning_rate": 4.522542485937369e-06,
438
+ "logits/chosen": -3.0358662605285645,
439
+ "logits/rejected": -2.4368128776550293,
440
+ "logps/chosen": -150.52206420898438,
441
+ "logps/rejected": -120.9325942993164,
442
+ "loss": 0.0008,
443
+ "rewards/accuracies": 0.699999988079071,
444
+ "rewards/chosen": 0.015789732336997986,
445
+ "rewards/margins": 0.017957117408514023,
446
+ "rewards/rejected": -0.002167386468499899,
447
+ "step": 280
448
+ },
449
+ {
450
+ "epoch": 0.58,
451
+ "learning_rate": 4.470026884016805e-06,
452
+ "logits/chosen": -3.0055549144744873,
453
+ "logits/rejected": -2.710265636444092,
454
+ "logps/chosen": -143.09898376464844,
455
+ "logps/rejected": -144.5852508544922,
456
+ "loss": 0.0014,
457
+ "rewards/accuracies": 0.75,
458
+ "rewards/chosen": 0.013310904614627361,
459
+ "rewards/margins": 0.008920473977923393,
460
+ "rewards/rejected": 0.004390430636703968,
461
+ "step": 290
462
+ },
463
+ {
464
+ "epoch": 0.6,
465
+ "learning_rate": 4.415111107797445e-06,
466
+ "logits/chosen": -2.88038969039917,
467
+ "logits/rejected": -2.524245500564575,
468
+ "logps/chosen": -116.92478942871094,
469
+ "logps/rejected": -106.48399353027344,
470
+ "loss": 0.001,
471
+ "rewards/accuracies": 0.6499999761581421,
472
+ "rewards/chosen": -0.016026504337787628,
473
+ "rewards/margins": 0.011352911591529846,
474
+ "rewards/rejected": -0.027379417791962624,
475
+ "step": 300
476
+ },
477
+ {
478
+ "epoch": 0.6,
479
+ "eval_logits/chosen": -2.3983988761901855,
480
+ "eval_logits/rejected": -2.200815200805664,
481
+ "eval_logps/chosen": -269.79119873046875,
482
+ "eval_logps/rejected": -250.26023864746094,
483
+ "eval_loss": 0.0018744752742350101,
484
+ "eval_rewards/accuracies": 0.531000018119812,
485
+ "eval_rewards/chosen": -0.012033737264573574,
486
+ "eval_rewards/margins": 0.0021730433218181133,
487
+ "eval_rewards/rejected": -0.01420677825808525,
488
+ "eval_runtime": 1423.8246,
489
+ "eval_samples_per_second": 1.405,
490
+ "eval_steps_per_second": 0.702,
491
+ "step": 300
492
+ },
493
+ {
494
+ "epoch": 0.62,
495
+ "learning_rate": 4.357862063693486e-06,
496
+ "logits/chosen": -3.0442090034484863,
497
+ "logits/rejected": -2.7478137016296387,
498
+ "logps/chosen": -200.55148315429688,
499
+ "logps/rejected": -198.62619018554688,
500
+ "loss": 0.001,
501
+ "rewards/accuracies": 0.550000011920929,
502
+ "rewards/chosen": -0.01667696237564087,
503
+ "rewards/margins": 0.007582807447761297,
504
+ "rewards/rejected": -0.024259772151708603,
505
+ "step": 310
506
+ },
507
+ {
508
+ "epoch": 0.64,
509
+ "learning_rate": 4.2983495008466285e-06,
510
+ "logits/chosen": -2.8576464653015137,
511
+ "logits/rejected": -2.5876190662384033,
512
+ "logps/chosen": -129.97561645507812,
513
+ "logps/rejected": -118.2795181274414,
514
+ "loss": 0.0013,
515
+ "rewards/accuracies": 0.6000000238418579,
516
+ "rewards/chosen": -0.01680707558989525,
517
+ "rewards/margins": 0.00610563438385725,
518
+ "rewards/rejected": -0.022912709042429924,
519
+ "step": 320
520
+ },
521
+ {
522
+ "epoch": 0.66,
523
+ "learning_rate": 4.236645926147493e-06,
524
+ "logits/chosen": -3.026426315307617,
525
+ "logits/rejected": -2.7503247261047363,
526
+ "logps/chosen": -146.84666442871094,
527
+ "logps/rejected": -139.24612426757812,
528
+ "loss": 0.001,
529
+ "rewards/accuracies": 0.6499999761581421,
530
+ "rewards/chosen": 0.014562124386429787,
531
+ "rewards/margins": 0.005175650119781494,
532
+ "rewards/rejected": 0.009386474266648293,
533
+ "step": 330
534
+ },
535
+ {
536
+ "epoch": 0.68,
537
+ "learning_rate": 4.172826515897146e-06,
538
+ "logits/chosen": -3.144963502883911,
539
+ "logits/rejected": -2.831359624862671,
540
+ "logps/chosen": -169.22183227539062,
541
+ "logps/rejected": -168.03089904785156,
542
+ "loss": 0.0012,
543
+ "rewards/accuracies": 0.44999998807907104,
544
+ "rewards/chosen": 0.004123937338590622,
545
+ "rewards/margins": 0.0042907195165753365,
546
+ "rewards/rejected": -0.00016678198880981654,
547
+ "step": 340
548
+ },
549
+ {
550
+ "epoch": 0.7,
551
+ "learning_rate": 4.106969024216348e-06,
552
+ "logits/chosen": -3.0851778984069824,
553
+ "logits/rejected": -2.769751787185669,
554
+ "logps/chosen": -130.99282836914062,
555
+ "logps/rejected": -126.33982849121094,
556
+ "loss": 0.0012,
557
+ "rewards/accuracies": 0.699999988079071,
558
+ "rewards/chosen": 0.007414456456899643,
559
+ "rewards/margins": 0.006272236816585064,
560
+ "rewards/rejected": 0.0011422175448387861,
561
+ "step": 350
562
+ },
563
+ {
564
+ "epoch": 0.72,
565
+ "learning_rate": 4.039153688314146e-06,
566
+ "logits/chosen": -3.0202767848968506,
567
+ "logits/rejected": -2.817288637161255,
568
+ "logps/chosen": -201.23435974121094,
569
+ "logps/rejected": -194.98385620117188,
570
+ "loss": 0.0021,
571
+ "rewards/accuracies": 0.4000000059604645,
572
+ "rewards/chosen": 0.04826142266392708,
573
+ "rewards/margins": -0.002473982283845544,
574
+ "rewards/rejected": 0.05073540285229683,
575
+ "step": 360
576
+ },
577
+ {
578
+ "epoch": 0.74,
579
+ "learning_rate": 3.969463130731183e-06,
580
+ "logits/chosen": -2.9788031578063965,
581
+ "logits/rejected": -2.5181713104248047,
582
+ "logps/chosen": -172.3706817626953,
583
+ "logps/rejected": -149.59950256347656,
584
+ "loss": 0.0009,
585
+ "rewards/accuracies": 0.75,
586
+ "rewards/chosen": 0.009602314792573452,
587
+ "rewards/margins": 0.012693755328655243,
588
+ "rewards/rejected": -0.00309143983758986,
589
+ "step": 370
590
+ },
591
+ {
592
+ "epoch": 0.76,
593
+ "learning_rate": 3.897982258676867e-06,
594
+ "logits/chosen": -3.0599074363708496,
595
+ "logits/rejected": -2.749642848968506,
596
+ "logps/chosen": -216.9517822265625,
597
+ "logps/rejected": -202.60659790039062,
598
+ "loss": 0.0012,
599
+ "rewards/accuracies": 0.699999988079071,
600
+ "rewards/chosen": -0.008673595264554024,
601
+ "rewards/margins": 0.00854658242315054,
602
+ "rewards/rejected": -0.01722017675638199,
603
+ "step": 380
604
+ },
605
+ {
606
+ "epoch": 0.78,
607
+ "learning_rate": 3.824798160583012e-06,
608
+ "logits/chosen": -2.6857285499572754,
609
+ "logits/rejected": -2.262728214263916,
610
+ "logps/chosen": -133.64395141601562,
611
+ "logps/rejected": -119.55937194824219,
612
+ "loss": 0.0009,
613
+ "rewards/accuracies": 0.6000000238418579,
614
+ "rewards/chosen": 0.020837822929024696,
615
+ "rewards/margins": 0.01239033229649067,
616
+ "rewards/rejected": 0.008447489701211452,
617
+ "step": 390
618
+ },
619
+ {
620
+ "epoch": 0.8,
621
+ "learning_rate": 3.7500000000000005e-06,
622
+ "logits/chosen": -2.9022040367126465,
623
+ "logits/rejected": -2.7616379261016846,
624
+ "logps/chosen": -185.07240295410156,
625
+ "logps/rejected": -179.7260284423828,
626
+ "loss": 0.0011,
627
+ "rewards/accuracies": 0.44999998807907104,
628
+ "rewards/chosen": 0.003294271184131503,
629
+ "rewards/margins": 0.0035428921692073345,
630
+ "rewards/rejected": -0.0002486211305949837,
631
+ "step": 400
632
+ },
633
+ {
634
+ "epoch": 0.8,
635
+ "eval_logits/chosen": -2.396838426589966,
636
+ "eval_logits/rejected": -2.1993439197540283,
637
+ "eval_logps/chosen": -270.5950012207031,
638
+ "eval_logps/rejected": -250.95413208007812,
639
+ "eval_loss": 0.0023364874068647623,
640
+ "eval_rewards/accuracies": 0.5009999871253967,
641
+ "eval_rewards/chosen": -0.02007202059030533,
642
+ "eval_rewards/margins": 0.0010739399585872889,
643
+ "eval_rewards/rejected": -0.021145964041352272,
644
+ "eval_runtime": 1423.743,
645
+ "eval_samples_per_second": 1.405,
646
+ "eval_steps_per_second": 0.702,
647
+ "step": 400
648
+ },
649
+ {
650
+ "epoch": 0.82,
651
+ "learning_rate": 3.6736789069647273e-06,
652
+ "logits/chosen": -3.0739264488220215,
653
+ "logits/rejected": -2.8628082275390625,
654
+ "logps/chosen": -149.10903930664062,
655
+ "logps/rejected": -134.99038696289062,
656
+ "loss": 0.0011,
657
+ "rewards/accuracies": 0.6499999761581421,
658
+ "rewards/chosen": 0.008919402956962585,
659
+ "rewards/margins": 0.0073748803697526455,
660
+ "rewards/rejected": 0.0015445235185325146,
661
+ "step": 410
662
+ },
663
+ {
664
+ "epoch": 0.84,
665
+ "learning_rate": 3.595927866972694e-06,
666
+ "logits/chosen": -3.0844318866729736,
667
+ "logits/rejected": -2.8257293701171875,
668
+ "logps/chosen": -252.5592498779297,
669
+ "logps/rejected": -248.02474975585938,
670
+ "loss": 0.001,
671
+ "rewards/accuracies": 0.5,
672
+ "rewards/chosen": -0.011812982149422169,
673
+ "rewards/margins": 0.007170848548412323,
674
+ "rewards/rejected": -0.018983829766511917,
675
+ "step": 420
676
+ },
677
+ {
678
+ "epoch": 0.86,
679
+ "learning_rate": 3.516841607689501e-06,
680
+ "logits/chosen": -3.055979013442993,
681
+ "logits/rejected": -2.7850348949432373,
682
+ "logps/chosen": -147.57447814941406,
683
+ "logps/rejected": -138.53738403320312,
684
+ "loss": 0.0009,
685
+ "rewards/accuracies": 0.550000011920929,
686
+ "rewards/chosen": -0.005650115665048361,
687
+ "rewards/margins": 0.012325585819780827,
688
+ "rewards/rejected": -0.0179757010191679,
689
+ "step": 430
690
+ },
691
+ {
692
+ "epoch": 0.88,
693
+ "learning_rate": 3.436516483539781e-06,
694
+ "logits/chosen": -2.7994961738586426,
695
+ "logits/rejected": -2.700761079788208,
696
+ "logps/chosen": -189.2479248046875,
697
+ "logps/rejected": -180.1698760986328,
698
+ "loss": 0.0016,
699
+ "rewards/accuracies": 0.5,
700
+ "rewards/chosen": -0.003194189164787531,
701
+ "rewards/margins": -0.00013344390026759356,
702
+ "rewards/rejected": -0.0030607457738369703,
703
+ "step": 440
704
+ },
705
+ {
706
+ "epoch": 0.9,
707
+ "learning_rate": 3.3550503583141726e-06,
708
+ "logits/chosen": -3.072540521621704,
709
+ "logits/rejected": -2.8204236030578613,
710
+ "logps/chosen": -133.08212280273438,
711
+ "logps/rejected": -119.2376480102539,
712
+ "loss": 0.0009,
713
+ "rewards/accuracies": 0.699999988079071,
714
+ "rewards/chosen": -0.00826738215982914,
715
+ "rewards/margins": 0.011625150218605995,
716
+ "rewards/rejected": -0.019892532378435135,
717
+ "step": 450
718
+ },
719
+ {
720
+ "epoch": 0.92,
721
+ "learning_rate": 3.272542485937369e-06,
722
+ "logits/chosen": -2.9859511852264404,
723
+ "logits/rejected": -2.7228169441223145,
724
+ "logps/chosen": -211.18002319335938,
725
+ "logps/rejected": -202.37867736816406,
726
+ "loss": 0.0009,
727
+ "rewards/accuracies": 0.6000000238418579,
728
+ "rewards/chosen": -0.00889207050204277,
729
+ "rewards/margins": 0.014142923057079315,
730
+ "rewards/rejected": -0.023034993559122086,
731
+ "step": 460
732
+ },
733
+ {
734
+ "epoch": 0.94,
735
+ "learning_rate": 3.189093389542498e-06,
736
+ "logits/chosen": -2.9243226051330566,
737
+ "logits/rejected": -2.708249092102051,
738
+ "logps/chosen": -178.67819213867188,
739
+ "logps/rejected": -162.6269073486328,
740
+ "loss": 0.0013,
741
+ "rewards/accuracies": 0.6000000238418579,
742
+ "rewards/chosen": 0.008272857405245304,
743
+ "rewards/margins": 0.005242692772299051,
744
+ "rewards/rejected": 0.0030301641672849655,
745
+ "step": 470
746
+ },
747
+ {
748
+ "epoch": 0.96,
749
+ "learning_rate": 3.1048047389991693e-06,
750
+ "logits/chosen": -3.1769165992736816,
751
+ "logits/rejected": -2.587887763977051,
752
+ "logps/chosen": -163.45852661132812,
753
+ "logps/rejected": -134.19691467285156,
754
+ "loss": 0.0012,
755
+ "rewards/accuracies": 0.550000011920929,
756
+ "rewards/chosen": -0.0025352207012474537,
757
+ "rewards/margins": 0.014497722499072552,
758
+ "rewards/rejected": -0.017032943665981293,
759
+ "step": 480
760
+ },
761
+ {
762
+ "epoch": 0.98,
763
+ "learning_rate": 3.019779227044398e-06,
764
+ "logits/chosen": -3.0145325660705566,
765
+ "logits/rejected": -2.7123026847839355,
766
+ "logps/chosen": -167.49745178222656,
767
+ "logps/rejected": -152.8248748779297,
768
+ "loss": 0.0012,
769
+ "rewards/accuracies": 0.6000000238418579,
770
+ "rewards/chosen": 0.01072392426431179,
771
+ "rewards/margins": 0.01687874272465706,
772
+ "rewards/rejected": -0.006154821254312992,
773
+ "step": 490
774
+ },
775
+ {
776
+ "epoch": 1.0,
777
+ "learning_rate": 2.9341204441673267e-06,
778
+ "logits/chosen": -3.132664918899536,
779
+ "logits/rejected": -2.911961317062378,
780
+ "logps/chosen": -108.4159927368164,
781
+ "logps/rejected": -94.57585144042969,
782
+ "loss": 0.0008,
783
+ "rewards/accuracies": 0.6499999761581421,
784
+ "rewards/chosen": 0.0020372807048261166,
785
+ "rewards/margins": 0.008723934181034565,
786
+ "rewards/rejected": -0.006686653010547161,
787
+ "step": 500
788
+ },
789
+ {
790
+ "epoch": 1.0,
791
+ "eval_logits/chosen": -2.395993947982788,
792
+ "eval_logits/rejected": -2.1981606483459473,
793
+ "eval_logps/chosen": -270.28497314453125,
794
+ "eval_logps/rejected": -250.72601318359375,
795
+ "eval_loss": 0.002102552680298686,
796
+ "eval_rewards/accuracies": 0.5065000057220459,
797
+ "eval_rewards/chosen": -0.016971532255411148,
798
+ "eval_rewards/margins": 0.0018933486426249146,
799
+ "eval_rewards/rejected": -0.018864883109927177,
800
+ "eval_runtime": 1423.5769,
801
+ "eval_samples_per_second": 1.405,
802
+ "eval_steps_per_second": 0.702,
803
+ "step": 500
804
+ },
805
+ {
806
+ "epoch": 1.02,
807
+ "learning_rate": 2.847932752400164e-06,
808
+ "logits/chosen": -3.008751392364502,
809
+ "logits/rejected": -2.6613450050354004,
810
+ "logps/chosen": -123.0440444946289,
811
+ "logps/rejected": -122.49776458740234,
812
+ "loss": 0.024,
813
+ "rewards/accuracies": 0.8999999761581421,
814
+ "rewards/chosen": 0.06933443248271942,
815
+ "rewards/margins": 0.12685301899909973,
816
+ "rewards/rejected": -0.05751859024167061,
817
+ "step": 510
818
+ },
819
+ {
820
+ "epoch": 1.04,
821
+ "learning_rate": 2.761321158169134e-06,
822
+ "logits/chosen": -2.982330322265625,
823
+ "logits/rejected": -2.6290860176086426,
824
+ "logps/chosen": -207.28955078125,
825
+ "logps/rejected": -207.44711303710938,
826
+ "loss": 0.0169,
827
+ "rewards/accuracies": 0.8500000238418579,
828
+ "rewards/chosen": 0.023494327440857887,
829
+ "rewards/margins": 0.059116560965776443,
830
+ "rewards/rejected": -0.035622235387563705,
831
+ "step": 520
832
+ },
833
+ {
834
+ "epoch": 1.06,
835
+ "learning_rate": 2.6743911843603134e-06,
836
+ "logits/chosen": -3.140193223953247,
837
+ "logits/rejected": -2.7758877277374268,
838
+ "logps/chosen": -161.1981658935547,
839
+ "logps/rejected": -160.78762817382812,
840
+ "loss": 0.0854,
841
+ "rewards/accuracies": 0.8500000238418579,
842
+ "rewards/chosen": 0.08391492813825607,
843
+ "rewards/margins": 0.21972373127937317,
844
+ "rewards/rejected": -0.1358087956905365,
845
+ "step": 530
846
+ },
847
+ {
848
+ "epoch": 1.08,
849
+ "learning_rate": 2.587248741756253e-06,
850
+ "logits/chosen": -2.866288661956787,
851
+ "logits/rejected": -2.557806968688965,
852
+ "logps/chosen": -229.82077026367188,
853
+ "logps/rejected": -227.31967163085938,
854
+ "loss": 0.0241,
855
+ "rewards/accuracies": 0.75,
856
+ "rewards/chosen": -0.0034355625975877047,
857
+ "rewards/margins": 0.05683975666761398,
858
+ "rewards/rejected": -0.06027532368898392,
859
+ "step": 540
860
+ },
861
+ {
862
+ "epoch": 1.1,
863
+ "learning_rate": 2.5e-06,
864
+ "logits/chosen": -2.7409634590148926,
865
+ "logits/rejected": -2.412743330001831,
866
+ "logps/chosen": -110.87519836425781,
867
+ "logps/rejected": -106.71611022949219,
868
+ "loss": 0.0099,
869
+ "rewards/accuracies": 0.75,
870
+ "rewards/chosen": 0.004647939465939999,
871
+ "rewards/margins": 0.07160676270723343,
872
+ "rewards/rejected": -0.06695882230997086,
873
+ "step": 550
874
+ },
875
+ {
876
+ "epoch": 1.12,
877
+ "learning_rate": 2.4127512582437486e-06,
878
+ "logits/chosen": -3.080806255340576,
879
+ "logits/rejected": -2.819638729095459,
880
+ "logps/chosen": -138.67153930664062,
881
+ "logps/rejected": -144.43199157714844,
882
+ "loss": 0.0542,
883
+ "rewards/accuracies": 0.8999999761581421,
884
+ "rewards/chosen": 0.025073865428566933,
885
+ "rewards/margins": 0.14305388927459717,
886
+ "rewards/rejected": -0.11798002570867538,
887
+ "step": 560
888
+ },
889
+ {
890
+ "epoch": 1.14,
891
+ "learning_rate": 2.325608815639687e-06,
892
+ "logits/chosen": -3.0271854400634766,
893
+ "logits/rejected": -2.7436680793762207,
894
+ "logps/chosen": -174.3506317138672,
895
+ "logps/rejected": -171.3226776123047,
896
+ "loss": 0.0444,
897
+ "rewards/accuracies": 0.800000011920929,
898
+ "rewards/chosen": 0.034181032329797745,
899
+ "rewards/margins": 0.1548537313938141,
900
+ "rewards/rejected": -0.12067268788814545,
901
+ "step": 570
902
+ },
903
+ {
904
+ "epoch": 1.16,
905
+ "learning_rate": 2.238678841830867e-06,
906
+ "logits/chosen": -2.6729888916015625,
907
+ "logits/rejected": -2.449924945831299,
908
+ "logps/chosen": -143.39756774902344,
909
+ "logps/rejected": -147.48681640625,
910
+ "loss": 0.0234,
911
+ "rewards/accuracies": 0.800000011920929,
912
+ "rewards/chosen": 0.00795525498688221,
913
+ "rewards/margins": 0.11490567773580551,
914
+ "rewards/rejected": -0.10695041716098785,
915
+ "step": 580
916
+ },
917
+ {
918
+ "epoch": 1.18,
919
+ "learning_rate": 2.1520672475998374e-06,
920
+ "logits/chosen": -2.940767526626587,
921
+ "logits/rejected": -2.584683895111084,
922
+ "logps/chosen": -128.87655639648438,
923
+ "logps/rejected": -120.95220947265625,
924
+ "loss": 0.0663,
925
+ "rewards/accuracies": 0.6000000238418579,
926
+ "rewards/chosen": -0.025388438254594803,
927
+ "rewards/margins": 0.09438282996416092,
928
+ "rewards/rejected": -0.11977125704288483,
929
+ "step": 590
930
+ },
931
+ {
932
+ "epoch": 1.2,
933
+ "learning_rate": 2.0658795558326745e-06,
934
+ "logits/chosen": -2.8861563205718994,
935
+ "logits/rejected": -2.5959839820861816,
936
+ "logps/chosen": -124.5409927368164,
937
+ "logps/rejected": -133.5659637451172,
938
+ "loss": 0.044,
939
+ "rewards/accuracies": 0.75,
940
+ "rewards/chosen": -0.0006943264743313193,
941
+ "rewards/margins": 0.09414149820804596,
942
+ "rewards/rejected": -0.0948358103632927,
943
+ "step": 600
944
+ },
945
+ {
946
+ "epoch": 1.2,
947
+ "eval_logits/chosen": -2.389890670776367,
948
+ "eval_logits/rejected": -2.1939876079559326,
949
+ "eval_logps/chosen": -269.1193542480469,
950
+ "eval_logps/rejected": -246.85476684570312,
951
+ "eval_loss": 0.009110072627663612,
952
+ "eval_rewards/accuracies": 0.36000001430511475,
953
+ "eval_rewards/chosen": -0.005315269809216261,
954
+ "eval_rewards/margins": -0.025163182988762856,
955
+ "eval_rewards/rejected": 0.019847916439175606,
956
+ "eval_runtime": 1422.5682,
957
+ "eval_samples_per_second": 1.406,
958
+ "eval_steps_per_second": 0.703,
959
+ "step": 600
960
+ },
961
+ {
962
+ "epoch": 1.22,
963
+ "learning_rate": 1.9802207729556023e-06,
964
+ "logits/chosen": -2.90655779838562,
965
+ "logits/rejected": -2.578254222869873,
966
+ "logps/chosen": -182.38720703125,
967
+ "logps/rejected": -175.3732147216797,
968
+ "loss": 0.0322,
969
+ "rewards/accuracies": 0.6499999761581421,
970
+ "rewards/chosen": -0.02949387952685356,
971
+ "rewards/margins": 0.04020578786730766,
972
+ "rewards/rejected": -0.06969965994358063,
973
+ "step": 610
974
+ },
975
+ {
976
+ "epoch": 1.24,
977
+ "learning_rate": 1.895195261000831e-06,
978
+ "logits/chosen": -3.0672218799591064,
979
+ "logits/rejected": -2.651693105697632,
980
+ "logps/chosen": -170.82894897460938,
981
+ "logps/rejected": -157.90194702148438,
982
+ "loss": 0.0163,
983
+ "rewards/accuracies": 0.75,
984
+ "rewards/chosen": -0.03491836413741112,
985
+ "rewards/margins": 0.04328788444399834,
986
+ "rewards/rejected": -0.07820625603199005,
987
+ "step": 620
988
+ },
989
+ {
990
+ "epoch": 1.26,
991
+ "learning_rate": 1.8109066104575023e-06,
992
+ "logits/chosen": -3.076021432876587,
993
+ "logits/rejected": -2.759376049041748,
994
+ "logps/chosen": -130.21498107910156,
995
+ "logps/rejected": -134.99252319335938,
996
+ "loss": 0.0842,
997
+ "rewards/accuracies": 0.75,
998
+ "rewards/chosen": 0.011367540806531906,
999
+ "rewards/margins": 0.15639443695545197,
1000
+ "rewards/rejected": -0.14502687752246857,
1001
+ "step": 630
1002
+ },
1003
+ {
1004
+ "epoch": 1.28,
1005
+ "learning_rate": 1.7274575140626318e-06,
1006
+ "logits/chosen": -2.9677953720092773,
1007
+ "logits/rejected": -2.5879857540130615,
1008
+ "logps/chosen": -160.40704345703125,
1009
+ "logps/rejected": -148.29469299316406,
1010
+ "loss": 0.0238,
1011
+ "rewards/accuracies": 0.699999988079071,
1012
+ "rewards/chosen": -0.04639770835638046,
1013
+ "rewards/margins": 0.028317932039499283,
1014
+ "rewards/rejected": -0.07471564412117004,
1015
+ "step": 640
1016
+ },
1017
+ {
1018
+ "epoch": 1.3,
1019
+ "learning_rate": 1.6449496416858285e-06,
1020
+ "logits/chosen": -2.873720169067383,
1021
+ "logits/rejected": -2.657524347305298,
1022
+ "logps/chosen": -211.8774871826172,
1023
+ "logps/rejected": -208.0607452392578,
1024
+ "loss": 0.052,
1025
+ "rewards/accuracies": 0.75,
1026
+ "rewards/chosen": -0.05117432028055191,
1027
+ "rewards/margins": 0.10337792336940765,
1028
+ "rewards/rejected": -0.15455225110054016,
1029
+ "step": 650
1030
+ },
1031
+ {
1032
+ "epoch": 1.32,
1033
+ "learning_rate": 1.56348351646022e-06,
1034
+ "logits/chosen": -3.1277270317077637,
1035
+ "logits/rejected": -2.781031370162964,
1036
+ "logps/chosen": -159.2963104248047,
1037
+ "logps/rejected": -157.16714477539062,
1038
+ "loss": 0.0255,
1039
+ "rewards/accuracies": 0.699999988079071,
1040
+ "rewards/chosen": -0.08234690129756927,
1041
+ "rewards/margins": 0.07028084993362427,
1042
+ "rewards/rejected": -0.15262776613235474,
1043
+ "step": 660
1044
+ },
1045
+ {
1046
+ "epoch": 1.34,
1047
+ "learning_rate": 1.4831583923105e-06,
1048
+ "logits/chosen": -2.9012064933776855,
1049
+ "logits/rejected": -2.5057435035705566,
1050
+ "logps/chosen": -133.7654571533203,
1051
+ "logps/rejected": -123.15315246582031,
1052
+ "loss": 0.0204,
1053
+ "rewards/accuracies": 0.5,
1054
+ "rewards/chosen": -0.06615938246250153,
1055
+ "rewards/margins": -0.006010846234858036,
1056
+ "rewards/rejected": -0.060148537158966064,
1057
+ "step": 670
1058
+ },
1059
+ {
1060
+ "epoch": 1.36,
1061
+ "learning_rate": 1.4040721330273063e-06,
1062
+ "logits/chosen": -3.0350639820098877,
1063
+ "logits/rejected": -2.688361167907715,
1064
+ "logps/chosen": -161.60037231445312,
1065
+ "logps/rejected": -148.4673614501953,
1066
+ "loss": 0.018,
1067
+ "rewards/accuracies": 0.75,
1068
+ "rewards/chosen": -0.012395807541906834,
1069
+ "rewards/margins": 0.07525575160980225,
1070
+ "rewards/rejected": -0.08765155076980591,
1071
+ "step": 680
1072
+ },
1073
+ {
1074
+ "epoch": 1.38,
1075
+ "learning_rate": 1.3263210930352737e-06,
1076
+ "logits/chosen": -2.987074613571167,
1077
+ "logits/rejected": -2.41349458694458,
1078
+ "logps/chosen": -155.8392333984375,
1079
+ "logps/rejected": -120.13862609863281,
1080
+ "loss": 0.1358,
1081
+ "rewards/accuracies": 0.6499999761581421,
1082
+ "rewards/chosen": -0.06855552643537521,
1083
+ "rewards/margins": 0.0165361650288105,
1084
+ "rewards/rejected": -0.08509168773889542,
1085
+ "step": 690
1086
+ },
1087
+ {
1088
+ "epoch": 1.4,
1089
+ "learning_rate": 1.2500000000000007e-06,
1090
+ "logits/chosen": -3.142533302307129,
1091
+ "logits/rejected": -2.7605295181274414,
1092
+ "logps/chosen": -196.124267578125,
1093
+ "logps/rejected": -175.75848388671875,
1094
+ "loss": 0.0682,
1095
+ "rewards/accuracies": 0.550000011920929,
1096
+ "rewards/chosen": -0.08456405252218246,
1097
+ "rewards/margins": 0.07064096629619598,
1098
+ "rewards/rejected": -0.15520504117012024,
1099
+ "step": 700
1100
+ },
1101
+ {
1102
+ "epoch": 1.4,
1103
+ "eval_logits/chosen": -2.399232864379883,
1104
+ "eval_logits/rejected": -2.203479528427124,
1105
+ "eval_logps/chosen": -272.0422668457031,
1106
+ "eval_logps/rejected": -247.98184204101562,
1107
+ "eval_loss": 0.019056277349591255,
1108
+ "eval_rewards/accuracies": 0.3449999988079071,
1109
+ "eval_rewards/chosen": -0.03454471752047539,
1110
+ "eval_rewards/margins": -0.043121837079524994,
1111
+ "eval_rewards/rejected": 0.008577119559049606,
1112
+ "eval_runtime": 1422.244,
1113
+ "eval_samples_per_second": 1.406,
1114
+ "eval_steps_per_second": 0.703,
1115
+ "step": 700
1116
+ },
1117
+ {
1118
+ "epoch": 1.42,
1119
+ "learning_rate": 1.1752018394169882e-06,
1120
+ "logits/chosen": -2.923161029815674,
1121
+ "logits/rejected": -2.7423832416534424,
1122
+ "logps/chosen": -232.46353149414062,
1123
+ "logps/rejected": -236.95712280273438,
1124
+ "loss": 0.1005,
1125
+ "rewards/accuracies": 0.6499999761581421,
1126
+ "rewards/chosen": -0.058026909828186035,
1127
+ "rewards/margins": 0.11247865855693817,
1128
+ "rewards/rejected": -0.170505553483963,
1129
+ "step": 710
1130
+ },
1131
+ {
1132
+ "epoch": 1.44,
1133
+ "learning_rate": 1.1020177413231334e-06,
1134
+ "logits/chosen": -2.9437034130096436,
1135
+ "logits/rejected": -2.7746634483337402,
1136
+ "logps/chosen": -182.21336364746094,
1137
+ "logps/rejected": -170.1109619140625,
1138
+ "loss": 0.0119,
1139
+ "rewards/accuracies": 0.550000011920929,
1140
+ "rewards/chosen": -0.009687254205346107,
1141
+ "rewards/margins": 0.010108504444360733,
1142
+ "rewards/rejected": -0.01979575864970684,
1143
+ "step": 720
1144
+ },
1145
+ {
1146
+ "epoch": 1.46,
1147
+ "learning_rate": 1.0305368692688175e-06,
1148
+ "logits/chosen": -2.91925048828125,
1149
+ "logits/rejected": -2.4575612545013428,
1150
+ "logps/chosen": -141.11546325683594,
1151
+ "logps/rejected": -132.57675170898438,
1152
+ "loss": 0.0074,
1153
+ "rewards/accuracies": 0.550000011920929,
1154
+ "rewards/chosen": -0.058561988174915314,
1155
+ "rewards/margins": 0.03072230890393257,
1156
+ "rewards/rejected": -0.08928428590297699,
1157
+ "step": 730
1158
+ },
1159
+ {
1160
+ "epoch": 1.48,
1161
+ "learning_rate": 9.608463116858544e-07,
1162
+ "logits/chosen": -2.8368327617645264,
1163
+ "logits/rejected": -2.523725986480713,
1164
+ "logps/chosen": -102.6483383178711,
1165
+ "logps/rejected": -94.72925567626953,
1166
+ "loss": 0.0083,
1167
+ "rewards/accuracies": 0.699999988079071,
1168
+ "rewards/chosen": -0.0365123450756073,
1169
+ "rewards/margins": 0.021143099293112755,
1170
+ "rewards/rejected": -0.057655446231365204,
1171
+ "step": 740
1172
+ },
1173
+ {
1174
+ "epoch": 1.5,
1175
+ "learning_rate": 8.930309757836517e-07,
1176
+ "logits/chosen": -2.9483554363250732,
1177
+ "logits/rejected": -2.4691848754882812,
1178
+ "logps/chosen": -170.67236328125,
1179
+ "logps/rejected": -154.7274932861328,
1180
+ "loss": 0.0595,
1181
+ "rewards/accuracies": 0.550000011920929,
1182
+ "rewards/chosen": -0.09041354060173035,
1183
+ "rewards/margins": 0.019873833283782005,
1184
+ "rewards/rejected": -0.1102873831987381,
1185
+ "step": 750
1186
+ },
1187
+ {
1188
+ "epoch": 1.52,
1189
+ "learning_rate": 8.271734841028553e-07,
1190
+ "logits/chosen": -2.998615264892578,
1191
+ "logits/rejected": -2.765198230743408,
1192
+ "logps/chosen": -178.57168579101562,
1193
+ "logps/rejected": -165.0284423828125,
1194
+ "loss": 0.0603,
1195
+ "rewards/accuracies": 0.6499999761581421,
1196
+ "rewards/chosen": -0.07312550395727158,
1197
+ "rewards/margins": 0.03924999386072159,
1198
+ "rewards/rejected": -0.11237549781799316,
1199
+ "step": 760
1200
+ },
1201
+ {
1202
+ "epoch": 1.54,
1203
+ "learning_rate": 7.633540738525066e-07,
1204
+ "logits/chosen": -2.9713294506073,
1205
+ "logits/rejected": -2.581371784210205,
1206
+ "logps/chosen": -188.65562438964844,
1207
+ "logps/rejected": -198.20057678222656,
1208
+ "loss": 0.0376,
1209
+ "rewards/accuracies": 0.800000011920929,
1210
+ "rewards/chosen": -0.08031792938709259,
1211
+ "rewards/margins": 0.0691676214337349,
1212
+ "rewards/rejected": -0.1494855433702469,
1213
+ "step": 770
1214
+ },
1215
+ {
1216
+ "epoch": 1.56,
1217
+ "learning_rate": 7.016504991533727e-07,
1218
+ "logits/chosen": -3.094494342803955,
1219
+ "logits/rejected": -2.8429107666015625,
1220
+ "logps/chosen": -172.08921813964844,
1221
+ "logps/rejected": -168.11550903320312,
1222
+ "loss": 0.0146,
1223
+ "rewards/accuracies": 0.800000011920929,
1224
+ "rewards/chosen": -0.03326564282178879,
1225
+ "rewards/margins": 0.049937326461076736,
1226
+ "rewards/rejected": -0.08320296555757523,
1227
+ "step": 780
1228
+ },
1229
+ {
1230
+ "epoch": 1.58,
1231
+ "learning_rate": 6.421379363065142e-07,
1232
+ "logits/chosen": -2.8603293895721436,
1233
+ "logits/rejected": -2.629847288131714,
1234
+ "logps/chosen": -187.458251953125,
1235
+ "logps/rejected": -181.56585693359375,
1236
+ "loss": 0.0346,
1237
+ "rewards/accuracies": 0.699999988079071,
1238
+ "rewards/chosen": -0.026567768305540085,
1239
+ "rewards/margins": 0.08442084491252899,
1240
+ "rewards/rejected": -0.11098861694335938,
1241
+ "step": 790
1242
+ },
1243
+ {
1244
+ "epoch": 1.6,
1245
+ "learning_rate": 5.848888922025553e-07,
1246
+ "logits/chosen": -3.0436525344848633,
1247
+ "logits/rejected": -2.855353355407715,
1248
+ "logps/chosen": -116.41280364990234,
1249
+ "logps/rejected": -118.14847564697266,
1250
+ "loss": 0.0505,
1251
+ "rewards/accuracies": 0.8500000238418579,
1252
+ "rewards/chosen": 0.013444220647215843,
1253
+ "rewards/margins": 0.11258585751056671,
1254
+ "rewards/rejected": -0.09914163500070572,
1255
+ "step": 800
1256
+ },
1257
+ {
1258
+ "epoch": 1.6,
1259
+ "eval_logits/chosen": -2.4056026935577393,
1260
+ "eval_logits/rejected": -2.209423780441284,
1261
+ "eval_logps/chosen": -273.5587158203125,
1262
+ "eval_logps/rejected": -248.85421752929688,
1263
+ "eval_loss": 0.023677825927734375,
1264
+ "eval_rewards/accuracies": 0.34049999713897705,
1265
+ "eval_rewards/chosen": -0.049709003418684006,
1266
+ "eval_rewards/margins": -0.04956228658556938,
1267
+ "eval_rewards/rejected": -0.0001467149268137291,
1268
+ "eval_runtime": 1422.3272,
1269
+ "eval_samples_per_second": 1.406,
1270
+ "eval_steps_per_second": 0.703,
1271
+ "step": 800
1272
+ },
1273
+ {
1274
+ "epoch": 1.62,
1275
+ "learning_rate": 5.299731159831953e-07,
1276
+ "logits/chosen": -2.867088794708252,
1277
+ "logits/rejected": -2.544645309448242,
1278
+ "logps/chosen": -144.49082946777344,
1279
+ "logps/rejected": -137.3140106201172,
1280
+ "loss": 0.0169,
1281
+ "rewards/accuracies": 0.6499999761581421,
1282
+ "rewards/chosen": -0.05922747775912285,
1283
+ "rewards/margins": 0.03063639998435974,
1284
+ "rewards/rejected": -0.08986388146877289,
1285
+ "step": 810
1286
+ },
1287
+ {
1288
+ "epoch": 1.64,
1289
+ "learning_rate": 4.774575140626317e-07,
1290
+ "logits/chosen": -2.855769634246826,
1291
+ "logits/rejected": -2.6003212928771973,
1292
+ "logps/chosen": -149.01148986816406,
1293
+ "logps/rejected": -150.924072265625,
1294
+ "loss": 0.0045,
1295
+ "rewards/accuracies": 0.44999998807907104,
1296
+ "rewards/chosen": -0.0036942525766789913,
1297
+ "rewards/margins": 0.02296125330030918,
1298
+ "rewards/rejected": -0.02665550634264946,
1299
+ "step": 820
1300
+ },
1301
+ {
1302
+ "epoch": 1.66,
1303
+ "learning_rate": 4.27406068612396e-07,
1304
+ "logits/chosen": -3.0886871814727783,
1305
+ "logits/rejected": -2.7754549980163574,
1306
+ "logps/chosen": -159.57974243164062,
1307
+ "logps/rejected": -160.63307189941406,
1308
+ "loss": 0.0227,
1309
+ "rewards/accuracies": 0.800000011920929,
1310
+ "rewards/chosen": -0.05405544117093086,
1311
+ "rewards/margins": 0.0963703840970993,
1312
+ "rewards/rejected": -0.15042582154273987,
1313
+ "step": 830
1314
+ },
1315
+ {
1316
+ "epoch": 1.68,
1317
+ "learning_rate": 3.798797596089351e-07,
1318
+ "logits/chosen": -3.102863073348999,
1319
+ "logits/rejected": -2.912782669067383,
1320
+ "logps/chosen": -148.99937438964844,
1321
+ "logps/rejected": -143.40872192382812,
1322
+ "loss": 0.0379,
1323
+ "rewards/accuracies": 0.800000011920929,
1324
+ "rewards/chosen": -0.06271068751811981,
1325
+ "rewards/margins": 0.025210902094841003,
1326
+ "rewards/rejected": -0.08792158961296082,
1327
+ "step": 840
1328
+ },
+ {
+ "epoch": 1.7,
+ "learning_rate": 3.3493649053890325e-07,
+ "logits/chosen": -2.9544389247894287,
+ "logits/rejected": -2.6804440021514893,
+ "logps/chosen": -142.18478393554688,
+ "logps/rejected": -136.8746795654297,
+ "loss": 0.0061,
+ "rewards/accuracies": 0.699999988079071,
+ "rewards/chosen": -0.008848746307194233,
+ "rewards/margins": 0.052127204835414886,
+ "rewards/rejected": -0.06097595766186714,
+ "step": 850
+ },
+ {
+ "epoch": 1.72,
+ "learning_rate": 2.9263101785268253e-07,
+ "logits/chosen": -2.970134735107422,
+ "logits/rejected": -2.784672737121582,
+ "logps/chosen": -122.1519775390625,
+ "logps/rejected": -113.82493591308594,
+ "loss": 0.0044,
+ "rewards/accuracies": 0.6000000238418579,
+ "rewards/chosen": -0.058860231190919876,
+ "rewards/margins": 0.03174171224236488,
+ "rewards/rejected": -0.09060193598270416,
+ "step": 860
+ },
+ {
+ "epoch": 1.74,
+ "learning_rate": 2.53014884252083e-07,
+ "logits/chosen": -2.949235200881958,
+ "logits/rejected": -2.5656838417053223,
+ "logps/chosen": -258.15875244140625,
+ "logps/rejected": -249.7416534423828,
+ "loss": 0.091,
+ "rewards/accuracies": 0.5,
+ "rewards/chosen": -0.05386580899357796,
+ "rewards/margins": 0.09710750728845596,
+ "rewards/rejected": -0.15097330510616302,
+ "step": 870
+ },
+ {
+ "epoch": 1.76,
+ "learning_rate": 2.1613635589349756e-07,
+ "logits/chosen": -3.1699745655059814,
+ "logits/rejected": -2.638118028640747,
+ "logps/chosen": -151.49009704589844,
+ "logps/rejected": -145.30239868164062,
+ "loss": 0.0532,
+ "rewards/accuracies": 0.75,
+ "rewards/chosen": -0.06000872328877449,
+ "rewards/margins": 0.11070142686367035,
+ "rewards/rejected": -0.17071016132831573,
+ "step": 880
+ },
+ {
+ "epoch": 1.78,
+ "learning_rate": 1.8204036358303173e-07,
+ "logits/chosen": -2.985215663909912,
+ "logits/rejected": -2.570612668991089,
+ "logps/chosen": -109.09466552734375,
+ "logps/rejected": -98.76487731933594,
+ "loss": 0.0108,
+ "rewards/accuracies": 0.699999988079071,
+ "rewards/chosen": -0.0210666935890913,
+ "rewards/margins": 0.044059909880161285,
+ "rewards/rejected": -0.06512660533189774,
+ "step": 890
+ },
+ {
+ "epoch": 1.8,
+ "learning_rate": 1.507684480352292e-07,
+ "logits/chosen": -3.0888664722442627,
+ "logits/rejected": -2.6895575523376465,
+ "logps/chosen": -163.73812866210938,
+ "logps/rejected": -161.14007568359375,
+ "loss": 0.0243,
+ "rewards/accuracies": 0.5,
+ "rewards/chosen": -0.08492692559957504,
+ "rewards/margins": 0.025812974199652672,
+ "rewards/rejected": -0.11073990166187286,
+ "step": 900
+ },
+ {
+ "epoch": 1.8,
+ "eval_logits/chosen": -2.4080586433410645,
+ "eval_logits/rejected": -2.2116518020629883,
+ "eval_logps/chosen": -274.3966979980469,
+ "eval_logps/rejected": -249.45701599121094,
+ "eval_loss": 0.025878531858325005,
+ "eval_rewards/accuracies": 0.33399999141693115,
+ "eval_rewards/chosen": -0.058088988065719604,
+ "eval_rewards/margins": -0.05191420391201973,
+ "eval_rewards/rejected": -0.00617477810010314,
+ "eval_runtime": 1421.9219,
+ "eval_samples_per_second": 1.407,
+ "eval_steps_per_second": 0.703,
+ "step": 900
+ },
+ {
+ "epoch": 1.82,
+ "learning_rate": 1.223587092621162e-07,
+ "logits/chosen": -3.060760259628296,
+ "logits/rejected": -2.724635124206543,
+ "logps/chosen": -126.0656509399414,
+ "logps/rejected": -112.5301742553711,
+ "loss": 0.0271,
+ "rewards/accuracies": 0.800000011920929,
+ "rewards/chosen": -0.014755621552467346,
+ "rewards/margins": 0.10490630567073822,
+ "rewards/rejected": -0.11966194212436676,
+ "step": 910
+ },
+ {
+ "epoch": 1.84,
+ "learning_rate": 9.684576015420277e-08,
+ "logits/chosen": -2.515842914581299,
+ "logits/rejected": -1.9269781112670898,
+ "logps/chosen": -214.9114532470703,
+ "logps/rejected": -191.04974365234375,
+ "loss": 0.0234,
+ "rewards/accuracies": 0.44999998807907104,
+ "rewards/chosen": -0.11193618923425674,
+ "rewards/margins": 0.01120082102715969,
+ "rewards/rejected": -0.12313701212406158,
+ "step": 920
+ },
+ {
+ "epoch": 1.86,
+ "learning_rate": 7.426068431000883e-08,
+ "logits/chosen": -3.096656322479248,
+ "logits/rejected": -2.6982362270355225,
+ "logps/chosen": -156.7047576904297,
+ "logps/rejected": -140.3162841796875,
+ "loss": 0.0558,
+ "rewards/accuracies": 0.6000000238418579,
+ "rewards/chosen": -0.1117779016494751,
+ "rewards/margins": 0.010117399506270885,
+ "rewards/rejected": -0.12189529836177826,
+ "step": 930
+ },
+ {
+ "epoch": 1.88,
+ "learning_rate": 5.463099816548578e-08,
+ "logits/chosen": -2.985032558441162,
+ "logits/rejected": -2.7575299739837646,
+ "logps/chosen": -217.82192993164062,
+ "logps/rejected": -225.7892303466797,
+ "loss": 0.0701,
+ "rewards/accuracies": 0.6000000238418579,
+ "rewards/chosen": -0.16152246296405792,
+ "rewards/margins": 0.10506312549114227,
+ "rewards/rejected": -0.2665855586528778,
+ "step": 940
+ },
+ {
+ "epoch": 1.9,
+ "learning_rate": 3.798061746947995e-08,
+ "logits/chosen": -2.925285816192627,
+ "logits/rejected": -2.7017900943756104,
+ "logps/chosen": -161.64578247070312,
+ "logps/rejected": -155.53384399414062,
+ "loss": 0.0125,
+ "rewards/accuracies": 0.6499999761581421,
+ "rewards/chosen": -0.04954836145043373,
+ "rewards/margins": 0.03911132365465164,
+ "rewards/rejected": -0.08865968883037567,
+ "step": 950
+ },
+ {
+ "epoch": 1.92,
+ "learning_rate": 2.4329828146074096e-08,
+ "logits/chosen": -3.1424098014831543,
+ "logits/rejected": -2.879060745239258,
+ "logps/chosen": -189.52865600585938,
+ "logps/rejected": -195.4296875,
+ "loss": 0.0599,
+ "rewards/accuracies": 0.800000011920929,
+ "rewards/chosen": -0.06265055388212204,
+ "rewards/margins": 0.1324789822101593,
+ "rewards/rejected": -0.19512954354286194,
+ "step": 960
+ },
+ {
+ "epoch": 1.94,
+ "learning_rate": 1.3695261579316776e-08,
+ "logits/chosen": -3.0380070209503174,
+ "logits/rejected": -2.844536304473877,
+ "logps/chosen": -164.58413696289062,
+ "logps/rejected": -158.5533447265625,
+ "loss": 0.027,
+ "rewards/accuracies": 0.75,
+ "rewards/chosen": -0.0411117784678936,
+ "rewards/margins": 0.07761015743017197,
+ "rewards/rejected": -0.11872193962335587,
+ "step": 970
+ },
+ {
+ "epoch": 1.96,
+ "learning_rate": 6.089874350439507e-09,
+ "logits/chosen": -3.0895726680755615,
+ "logits/rejected": -2.8259506225585938,
+ "logps/chosen": -196.0420684814453,
+ "logps/rejected": -190.8524169921875,
+ "loss": 0.022,
+ "rewards/accuracies": 0.699999988079071,
+ "rewards/chosen": -0.060807038098573685,
+ "rewards/margins": 0.03220720216631889,
+ "rewards/rejected": -0.09301424026489258,
+ "step": 980
+ },
+ {
+ "epoch": 1.98,
+ "learning_rate": 1.5229324522605949e-09,
+ "logits/chosen": -3.0721163749694824,
+ "logits/rejected": -2.770005941390991,
+ "logps/chosen": -117.59437561035156,
+ "logps/rejected": -115.3232421875,
+ "loss": 0.0234,
+ "rewards/accuracies": 0.8500000238418579,
+ "rewards/chosen": -0.033795468509197235,
+ "rewards/margins": 0.1025773137807846,
+ "rewards/rejected": -0.13637278974056244,
+ "step": 990
+ },
+ {
+ "epoch": 2.0,
+ "learning_rate": 0.0,
+ "logits/chosen": -2.9728710651397705,
+ "logits/rejected": -2.4575047492980957,
+ "logps/chosen": -203.40164184570312,
+ "logps/rejected": -187.337890625,
+ "loss": 0.0697,
+ "rewards/accuracies": 0.5,
+ "rewards/chosen": -0.11433009803295135,
+ "rewards/margins": 0.06476924568414688,
+ "rewards/rejected": -0.17909933626651764,
+ "step": 1000
+ },
+ {
+ "epoch": 2.0,
+ "eval_logits/chosen": -2.4070470333099365,
+ "eval_logits/rejected": -2.210773468017578,
+ "eval_logps/chosen": -274.3865966796875,
+ "eval_logps/rejected": -249.44677734375,
+ "eval_loss": 0.0258334930986166,
+ "eval_rewards/accuracies": 0.33799999952316284,
+ "eval_rewards/chosen": -0.05798804759979248,
+ "eval_rewards/margins": -0.05191566422581673,
+ "eval_rewards/rejected": -0.006072388496249914,
+ "eval_runtime": 1422.3182,
+ "eval_samples_per_second": 1.406,
+ "eval_steps_per_second": 0.703,
+ "step": 1000
+ },
+ {
+ "epoch": 2.0,
+ "step": 1000,
+ "total_flos": 0.0,
+ "train_loss": 0.019690872263745406,
+ "train_runtime": 18916.4989,
+ "train_samples_per_second": 0.106,
+ "train_steps_per_second": 0.053
+ }
+ ],
+ "logging_steps": 10,
+ "max_steps": 1000,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 2,
+ "save_steps": 100,
+ "total_flos": 0.0,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+ }
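
The file added above is the Trainer's `trainer_state.json`; its `log_history` list interleaves per-step training records (keyed by `loss`) with evaluation records (keyed by `eval_loss`), and the final eval record matches the results table in the README. A minimal sketch, assuming the file has been downloaded locally as `trainer_state.json`, of pulling the evaluation curve out of it with only the standard library:

```python
import json

# Load the trainer state written by the transformers Trainer during training.
with open("trainer_state.json") as f:
    state = json.load(f)

# Evaluation records carry "eval_loss"; per-step training records carry "loss".
eval_records = [r for r in state["log_history"] if "eval_loss" in r]

for r in eval_records:
    print(f'step {r["step"]:>4}  epoch {r["epoch"]:.1f}  '
          f'eval_loss {r["eval_loss"]:.4f}  '
          f'acc {r["eval_rewards/accuracies"]:.3f}  '
          f'margin {r["eval_rewards/margins"]:.4f}')
```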