imelnyk committed
Commit 61ee4e5
Parent: 5d9cd9d

Model save

README.md ADDED
@@ -0,0 +1,84 @@
+ ---
+ license: apache-2.0
+ base_model: mistralai/Mistral-7B-v0.1
+ tags:
+ - trl
+ - dpo
+ - generated_from_trainer
+ model-index:
+ - name: zephyr-7b-dpo-qlora-fsdp
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # zephyr-7b-dpo-qlora-fsdp
+
+ This model is a fine-tuned version of [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.8742
+ - Rewards/chosen: 0.0082
+ - Rewards/rejected: 0.0003
+ - Rewards/accuracies: 0.6726
+ - Rewards/margins: 0.0079
+ - Logps/rejected: -242.3632
+ - Logps/chosen: -266.8597
+ - Logits/rejected: -2.3743
+ - Logits/chosen: -2.4108
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-06
+ - train_batch_size: 10
+ - eval_batch_size: 8
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 6
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 240
+ - total_eval_batch_size: 48
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 5
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
+ |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
+ | 0.2536 | 0.39 | 100 | 0.2792 | 0.0024 | 0.0006 | 0.6042 | 0.0019 | -242.3385 | -267.4340 | -2.3735 | -2.4122 |
+ | 0.5352 | 0.79 | 200 | 0.5010 | 0.0011 | -0.0019 | 0.5744 | 0.0030 | -242.5832 | -267.5640 | -2.3629 | -2.4014 |
+ | 0.3676 | 1.18 | 300 | 0.8293 | 0.0079 | 0.0027 | 0.5982 | 0.0052 | -242.1211 | -266.8856 | -2.3788 | -2.4168 |
+ | 0.366 | 1.57 | 400 | 0.8239 | 0.0065 | 0.0007 | 0.6399 | 0.0058 | -242.3221 | -267.0256 | -2.3774 | -2.4146 |
+ | 0.292 | 1.96 | 500 | 0.8146 | 0.0050 | -0.0005 | 0.6399 | 0.0055 | -242.4462 | -267.1794 | -2.3978 | -2.4343 |
+ | 0.1355 | 2.36 | 600 | 0.9651 | 0.0047 | -0.0013 | 0.6161 | 0.0060 | -242.5212 | -267.2061 | -2.3796 | -2.4178 |
+ | 0.1327 | 2.75 | 700 | 0.9985 | 0.0046 | -0.0019 | 0.6339 | 0.0065 | -242.5883 | -267.2230 | -2.3690 | -2.4066 |
+ | 0.0389 | 3.14 | 800 | 0.8932 | 0.0080 | 0.0003 | 0.6518 | 0.0078 | -242.3696 | -266.8748 | -2.3563 | -2.3947 |
+ | 0.029 | 3.53 | 900 | 0.9392 | 0.0090 | 0.0008 | 0.6577 | 0.0082 | -242.3114 | -266.7798 | -2.3752 | -2.4118 |
+ | 0.0198 | 3.93 | 1000 | 0.8200 | 0.0087 | 0.0010 | 0.6577 | 0.0077 | -242.2917 | -266.8047 | -2.3780 | -2.4145 |
+ | 0.0059 | 4.32 | 1100 | 0.8904 | 0.0080 | 0.0002 | 0.6577 | 0.0078 | -242.3739 | -266.8760 | -2.3744 | -2.4108 |
+ | 0.0042 | 4.71 | 1200 | 0.8779 | 0.0080 | 0.0001 | 0.6518 | 0.0080 | -242.3892 | -266.8771 | -2.3753 | -2.4119 |
+
+
+ ### Framework versions
+
+ - Transformers 4.38.1
+ - Pytorch 2.2.0+cu118
+ - Datasets 2.17.1
+ - Tokenizers 0.15.2
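
Note: the generated card above does not show how to load the result. Below is a minimal usage sketch, assuming this repo contains the QLoRA (PEFT) adapter plus tokenizer and that 4-bit loading is wanted at inference time; the repo id `imelnyk/zephyr-7b-dpo-qlora-fsdp` and the quantization settings are assumptions, not taken from the card.

```python
# Minimal sketch: load the DPO-trained QLoRA adapter on top of Mistral-7B-v0.1.
# Assumption: the adapter weights and tokenizer live in this repo; adjust the
# BitsAndBytes settings (or drop them) to match your hardware.
import torch
from transformers import AutoTokenizer, BitsAndBytesConfig
from peft import AutoPeftModelForCausalLM

adapter_id = "imelnyk/zephyr-7b-dpo-qlora-fsdp"  # assumed repo id

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained(adapter_id)
model = AutoPeftModelForCausalLM.from_pretrained(
    adapter_id,
    quantization_config=bnb_config,
    device_map="auto",
)

prompt = "Explain Direct Preference Optimization in one sentence."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```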
all_results.json ADDED
@@ -0,0 +1,21 @@
+ {
+     "epoch": 4.99,
+     "eval_logits/chosen": -2.4107861518859863,
+     "eval_logits/rejected": -2.3742854595184326,
+     "eval_logps/chosen": -266.8597106933594,
+     "eval_logps/rejected": -242.3632049560547,
+     "eval_loss": 0.8742221593856812,
+     "eval_rewards/accuracies": 0.6726190447807312,
+     "eval_rewards/chosen": 0.008189404383301735,
+     "eval_rewards/margins": 0.007871923968195915,
+     "eval_rewards/rejected": 0.00031748018227517605,
+     "eval_runtime": 123.0411,
+     "eval_samples": 2000,
+     "eval_samples_per_second": 16.255,
+     "eval_steps_per_second": 0.341,
+     "train_loss": 0.164279118501603,
+     "train_runtime": 43545.4617,
+     "train_samples": 61135,
+     "train_samples_per_second": 7.02,
+     "train_steps_per_second": 0.029
+ }
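
As a quick sanity check on the numbers above, here is a small sketch that reads `all_results.json` and verifies that the reported reward margin is simply `eval_rewards/chosen - eval_rewards/rejected`; the file path assumes you run it from the repo root.

```python
# Sketch: read the aggregate metrics dumped by the Trainer and confirm that the
# reported eval reward margin equals rewards/chosen minus rewards/rejected.
import json

with open("all_results.json") as f:
    metrics = json.load(f)

margin = metrics["eval_rewards/chosen"] - metrics["eval_rewards/rejected"]
print(f"eval loss: {metrics['eval_loss']:.4f}")
print(f"reported margin: {metrics['eval_rewards/margins']:.6f}, recomputed: {margin:.6f}")
```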
eval_results.json ADDED
@@ -0,0 +1,16 @@
+ {
+     "epoch": 4.99,
+     "eval_logits/chosen": -2.4107861518859863,
+     "eval_logits/rejected": -2.3742854595184326,
+     "eval_logps/chosen": -266.8597106933594,
+     "eval_logps/rejected": -242.3632049560547,
+     "eval_loss": 0.8742221593856812,
+     "eval_rewards/accuracies": 0.6726190447807312,
+     "eval_rewards/chosen": 0.008189404383301735,
+     "eval_rewards/margins": 0.007871923968195915,
+     "eval_rewards/rejected": 0.00031748018227517605,
+     "eval_runtime": 123.0411,
+     "eval_samples": 2000,
+     "eval_samples_per_second": 16.255,
+     "eval_steps_per_second": 0.341
+ }
runs/Mar06_00-00-06_cccxc539/events.out.tfevents.1709701229.cccxc539.1019244.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:47a09f0173f91712b0730aa64dc148607b3f81ecdece3bc6a619413ce3170911
- size 96726
+ oid sha256:c428a82cc6e745afb3d9611205f5e9b3a342637d67b1f68fe33b9ce467d3035b
+ size 101896
runs/Mar06_00-00-06_cccxc539/events.out.tfevents.1709744897.cccxc539.1019244.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cf7e4a06846219d38f0f9bf38ebce3503f87ab77336ec0cdf702f91bc67db13f
+ size 828
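
The two `events.out.tfevents.*` files are TensorBoard logs stored via Git LFS pointers. After fetching them, one way to inspect the logged scalars programmatically is the sketch below; the scalar tag names (e.g. `train/loss`) are assumptions and should be checked against what `ea.Tags()` actually returns.

```python
# Sketch: read the TensorBoard event files under runs/ with the tensorboard package.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("runs/Mar06_00-00-06_cccxc539")
ea.Reload()

print(ea.Tags()["scalars"])              # list the scalar tags that were logged
for event in ea.Scalars("train/loss"):   # assumed tag name; pick one from the list above
    print(event.step, event.value)
```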
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 4.99,
+     "train_loss": 0.164279118501603,
+     "train_runtime": 43545.4617,
+     "train_samples": 61135,
+     "train_samples_per_second": 7.02,
+     "train_steps_per_second": 0.029
+ }
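
A brief cross-check of `train_results.json` against the hyperparameters in the card: 43545 s at 7.02 samples/s is roughly 305.7k processed samples, which matches 61,135 training samples over 5 epochs. A throwaway sketch, using only the numbers shown above:

```python
# Sketch: verify the reported throughput is consistent with dataset size and epoch count.
train_samples = 61135               # from train_results.json
num_epochs = 5                      # from the hyperparameter list in the card
train_runtime = 43545.4617          # seconds, from train_results.json
samples_per_second = 7.02           # from train_results.json

total_processed = train_runtime * samples_per_second
print(f"processed ~{total_processed:,.0f} samples vs {train_samples * num_epochs:,} expected")
```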
trainer_state.json ADDED
@@ -0,0 +1,2142 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 4.985279685966634,
5
+ "eval_steps": 100,
6
+ "global_step": 1270,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.0,
13
+ "grad_norm": 2.734375,
14
+ "learning_rate": 3.9370078740157486e-08,
15
+ "logits/chosen": -2.356706142425537,
16
+ "logits/rejected": -2.3367161750793457,
17
+ "logps/chosen": -287.937255859375,
18
+ "logps/rejected": -266.50421142578125,
19
+ "loss": 0.0001,
20
+ "rewards/accuracies": 0.0,
21
+ "rewards/chosen": 0.0,
22
+ "rewards/margins": 0.0,
23
+ "rewards/rejected": 0.0,
24
+ "step": 1
25
+ },
26
+ {
27
+ "epoch": 0.04,
28
+ "grad_norm": 106.5,
29
+ "learning_rate": 3.937007874015748e-07,
30
+ "logits/chosen": -2.3996241092681885,
31
+ "logits/rejected": -2.353182554244995,
32
+ "logps/chosen": -273.30889892578125,
33
+ "logps/rejected": -240.43850708007812,
34
+ "loss": 0.118,
35
+ "rewards/accuracies": 0.46388885378837585,
36
+ "rewards/chosen": 0.00026795046869665384,
37
+ "rewards/margins": 0.00038285815389826894,
38
+ "rewards/rejected": -0.00011490769247757271,
39
+ "step": 10
40
+ },
41
+ {
42
+ "epoch": 0.08,
43
+ "grad_norm": 129.0,
44
+ "learning_rate": 7.874015748031496e-07,
45
+ "logits/chosen": -2.38779878616333,
46
+ "logits/rejected": -2.3378489017486572,
47
+ "logps/chosen": -266.8452453613281,
48
+ "logps/rejected": -258.0478515625,
49
+ "loss": 0.1346,
50
+ "rewards/accuracies": 0.5525000095367432,
51
+ "rewards/chosen": 0.0013033099239692092,
52
+ "rewards/margins": 0.0008336402243003249,
53
+ "rewards/rejected": 0.0004696696996688843,
54
+ "step": 20
55
+ },
56
+ {
57
+ "epoch": 0.12,
58
+ "grad_norm": 108.0,
59
+ "learning_rate": 1.1811023622047246e-06,
60
+ "logits/chosen": -2.43515682220459,
61
+ "logits/rejected": -2.382305145263672,
62
+ "logps/chosen": -299.69366455078125,
63
+ "logps/rejected": -271.0403747558594,
64
+ "loss": 0.1585,
65
+ "rewards/accuracies": 0.4999999403953552,
66
+ "rewards/chosen": 0.002506708027794957,
67
+ "rewards/margins": 0.0005399176734499633,
68
+ "rewards/rejected": 0.0019667900633066893,
69
+ "step": 30
70
+ },
71
+ {
72
+ "epoch": 0.16,
73
+ "grad_norm": 95.0,
74
+ "learning_rate": 1.5748031496062992e-06,
75
+ "logits/chosen": -2.3607640266418457,
76
+ "logits/rejected": -2.313088893890381,
77
+ "logps/chosen": -288.7026672363281,
78
+ "logps/rejected": -253.4452667236328,
79
+ "loss": 0.1404,
80
+ "rewards/accuracies": 0.5774999856948853,
81
+ "rewards/chosen": 0.002137306611984968,
82
+ "rewards/margins": 0.0012014020467177033,
83
+ "rewards/rejected": 0.0009359045652672648,
84
+ "step": 40
85
+ },
86
+ {
87
+ "epoch": 0.2,
88
+ "grad_norm": 109.0,
89
+ "learning_rate": 1.968503937007874e-06,
90
+ "logits/chosen": -2.407731533050537,
91
+ "logits/rejected": -2.3856372833251953,
92
+ "logps/chosen": -267.39801025390625,
93
+ "logps/rejected": -264.8341064453125,
94
+ "loss": 0.1726,
95
+ "rewards/accuracies": 0.6200000047683716,
96
+ "rewards/chosen": 0.0022850066889077425,
97
+ "rewards/margins": 0.0014212832320481539,
98
+ "rewards/rejected": 0.0008637232822366059,
99
+ "step": 50
100
+ },
101
+ {
102
+ "epoch": 0.24,
103
+ "grad_norm": 135.0,
104
+ "learning_rate": 2.362204724409449e-06,
105
+ "logits/chosen": -2.3825833797454834,
106
+ "logits/rejected": -2.3412365913391113,
107
+ "logps/chosen": -272.9526672363281,
108
+ "logps/rejected": -239.8430633544922,
109
+ "loss": 0.1314,
110
+ "rewards/accuracies": 0.6150000095367432,
111
+ "rewards/chosen": 0.002347386907786131,
112
+ "rewards/margins": 0.001956633059307933,
113
+ "rewards/rejected": 0.00039075379027053714,
114
+ "step": 60
115
+ },
116
+ {
117
+ "epoch": 0.27,
118
+ "grad_norm": 89.5,
119
+ "learning_rate": 2.755905511811024e-06,
120
+ "logits/chosen": -2.382422924041748,
121
+ "logits/rejected": -2.3432180881500244,
122
+ "logps/chosen": -273.59881591796875,
123
+ "logps/rejected": -257.91790771484375,
124
+ "loss": 0.1777,
125
+ "rewards/accuracies": 0.6025000214576721,
126
+ "rewards/chosen": 0.002635209821164608,
127
+ "rewards/margins": 0.0014770927373319864,
128
+ "rewards/rejected": 0.0011581169674172997,
129
+ "step": 70
130
+ },
131
+ {
132
+ "epoch": 0.31,
133
+ "grad_norm": 125.0,
134
+ "learning_rate": 3.1496062992125985e-06,
135
+ "logits/chosen": -2.394813060760498,
136
+ "logits/rejected": -2.3611693382263184,
137
+ "logps/chosen": -279.48638916015625,
138
+ "logps/rejected": -260.6809997558594,
139
+ "loss": 0.1944,
140
+ "rewards/accuracies": 0.6100000739097595,
141
+ "rewards/chosen": 0.0013221392873674631,
142
+ "rewards/margins": 0.0018236342584714293,
143
+ "rewards/rejected": -0.0005014949128963053,
144
+ "step": 80
145
+ },
146
+ {
147
+ "epoch": 0.35,
148
+ "grad_norm": 146.0,
149
+ "learning_rate": 3.5433070866141735e-06,
150
+ "logits/chosen": -2.3927130699157715,
151
+ "logits/rejected": -2.3439621925354004,
152
+ "logps/chosen": -264.44195556640625,
153
+ "logps/rejected": -236.5146942138672,
154
+ "loss": 0.2032,
155
+ "rewards/accuracies": 0.5475000143051147,
156
+ "rewards/chosen": 0.001684651942923665,
157
+ "rewards/margins": 0.0011215232079848647,
158
+ "rewards/rejected": 0.0005631285603158176,
159
+ "step": 90
160
+ },
161
+ {
162
+ "epoch": 0.39,
163
+ "grad_norm": 145.0,
164
+ "learning_rate": 3.937007874015748e-06,
165
+ "logits/chosen": -2.4192750453948975,
166
+ "logits/rejected": -2.3637657165527344,
167
+ "logps/chosen": -280.88238525390625,
168
+ "logps/rejected": -253.8561553955078,
169
+ "loss": 0.2536,
170
+ "rewards/accuracies": 0.5900000333786011,
171
+ "rewards/chosen": 0.003346907440572977,
172
+ "rewards/margins": 0.002142687328159809,
173
+ "rewards/rejected": 0.001204220112413168,
174
+ "step": 100
175
+ },
176
+ {
177
+ "epoch": 0.39,
178
+ "eval_logits/chosen": -2.412179470062256,
179
+ "eval_logits/rejected": -2.3735485076904297,
180
+ "eval_logps/chosen": -267.4339904785156,
181
+ "eval_logps/rejected": -242.3385467529297,
182
+ "eval_loss": 0.27917271852493286,
183
+ "eval_rewards/accuracies": 0.6041666865348816,
184
+ "eval_rewards/chosen": 0.002446565078571439,
185
+ "eval_rewards/margins": 0.0018827051389962435,
186
+ "eval_rewards/rejected": 0.0005638597067445517,
187
+ "eval_runtime": 124.6972,
188
+ "eval_samples_per_second": 16.039,
189
+ "eval_steps_per_second": 0.337,
190
+ "step": 100
191
+ },
192
+ {
193
+ "epoch": 0.43,
194
+ "grad_norm": 196.0,
195
+ "learning_rate": 4.330708661417324e-06,
196
+ "logits/chosen": -2.383650064468384,
197
+ "logits/rejected": -2.345914125442505,
198
+ "logps/chosen": -298.79864501953125,
199
+ "logps/rejected": -272.7557373046875,
200
+ "loss": 0.2924,
201
+ "rewards/accuracies": 0.637499988079071,
202
+ "rewards/chosen": 0.004891454242169857,
203
+ "rewards/margins": 0.002777325687929988,
204
+ "rewards/rejected": 0.0021141283214092255,
205
+ "step": 110
206
+ },
207
+ {
208
+ "epoch": 0.47,
209
+ "grad_norm": 138.0,
210
+ "learning_rate": 4.724409448818898e-06,
211
+ "logits/chosen": -2.3592543601989746,
212
+ "logits/rejected": -2.29665470123291,
213
+ "logps/chosen": -285.70538330078125,
214
+ "logps/rejected": -253.1526336669922,
215
+ "loss": 0.3272,
216
+ "rewards/accuracies": 0.6274999380111694,
217
+ "rewards/chosen": 0.005152740981429815,
218
+ "rewards/margins": 0.0025280567351728678,
219
+ "rewards/rejected": 0.002624684479087591,
220
+ "step": 120
221
+ },
222
+ {
223
+ "epoch": 0.51,
224
+ "grad_norm": 227.0,
225
+ "learning_rate": 4.999915012051437e-06,
226
+ "logits/chosen": -2.3975679874420166,
227
+ "logits/rejected": -2.3674397468566895,
228
+ "logps/chosen": -261.0414733886719,
229
+ "logps/rejected": -249.3390350341797,
230
+ "loss": 0.3621,
231
+ "rewards/accuracies": 0.5925000309944153,
232
+ "rewards/chosen": 0.004668754059821367,
233
+ "rewards/margins": 0.0027659868355840445,
234
+ "rewards/rejected": 0.0019027665257453918,
235
+ "step": 130
236
+ },
237
+ {
238
+ "epoch": 0.55,
239
+ "grad_norm": 226.0,
240
+ "learning_rate": 4.9984042759305375e-06,
241
+ "logits/chosen": -2.4002552032470703,
242
+ "logits/rejected": -2.34761381149292,
243
+ "logps/chosen": -273.1309509277344,
244
+ "logps/rejected": -247.2257843017578,
245
+ "loss": 0.387,
246
+ "rewards/accuracies": 0.5449999570846558,
247
+ "rewards/chosen": 0.0018628574907779694,
248
+ "rewards/margins": 0.0016136768972501159,
249
+ "rewards/rejected": 0.00024918062263168395,
250
+ "step": 140
251
+ },
252
+ {
253
+ "epoch": 0.59,
254
+ "grad_norm": 196.0,
255
+ "learning_rate": 4.9950062323425556e-06,
256
+ "logits/chosen": -2.3986167907714844,
257
+ "logits/rejected": -2.358851194381714,
258
+ "logps/chosen": -271.7406921386719,
259
+ "logps/rejected": -249.7233428955078,
260
+ "loss": 0.3955,
261
+ "rewards/accuracies": 0.6075000166893005,
262
+ "rewards/chosen": 0.00026552577037364244,
263
+ "rewards/margins": 0.0028056656010448933,
264
+ "rewards/rejected": -0.002540139714255929,
265
+ "step": 150
266
+ },
267
+ {
268
+ "epoch": 0.63,
269
+ "grad_norm": 190.0,
270
+ "learning_rate": 4.989723448187132e-06,
271
+ "logits/chosen": -2.3923754692077637,
272
+ "logits/rejected": -2.3684566020965576,
273
+ "logps/chosen": -285.33184814453125,
274
+ "logps/rejected": -282.5887756347656,
275
+ "loss": 0.4099,
276
+ "rewards/accuracies": 0.5924999713897705,
277
+ "rewards/chosen": 0.0046120877377688885,
278
+ "rewards/margins": 0.0034965365193784237,
279
+ "rewards/rejected": 0.0011155509855598211,
280
+ "step": 160
281
+ },
282
+ {
283
+ "epoch": 0.67,
284
+ "grad_norm": 222.0,
285
+ "learning_rate": 4.982559914106645e-06,
286
+ "logits/chosen": -2.416792392730713,
287
+ "logits/rejected": -2.3636841773986816,
288
+ "logps/chosen": -297.1885681152344,
289
+ "logps/rejected": -281.99481201171875,
290
+ "loss": 0.5527,
291
+ "rewards/accuracies": 0.6299999952316284,
292
+ "rewards/chosen": 0.004418404307216406,
293
+ "rewards/margins": 0.0038651120848953724,
294
+ "rewards/rejected": 0.0005532926879823208,
295
+ "step": 170
296
+ },
297
+ {
298
+ "epoch": 0.71,
299
+ "grad_norm": 189.0,
300
+ "learning_rate": 4.973521041471662e-06,
301
+ "logits/chosen": -2.443068504333496,
302
+ "logits/rejected": -2.4043824672698975,
303
+ "logps/chosen": -284.85546875,
304
+ "logps/rejected": -246.3272705078125,
305
+ "loss": 0.467,
306
+ "rewards/accuracies": 0.5925000309944153,
307
+ "rewards/chosen": 0.003813292132690549,
308
+ "rewards/margins": 0.0034597956109791994,
309
+ "rewards/rejected": 0.00035349628888070583,
310
+ "step": 180
311
+ },
312
+ {
313
+ "epoch": 0.75,
314
+ "grad_norm": 186.0,
315
+ "learning_rate": 4.962613658293158e-06,
316
+ "logits/chosen": -2.364473581314087,
317
+ "logits/rejected": -2.336862802505493,
318
+ "logps/chosen": -260.23297119140625,
319
+ "logps/rejected": -244.9732666015625,
320
+ "loss": 0.4326,
321
+ "rewards/accuracies": 0.6074999570846558,
322
+ "rewards/chosen": 0.0008181848679669201,
323
+ "rewards/margins": 0.003228846937417984,
324
+ "rewards/rejected": -0.0024106616619974375,
325
+ "step": 190
326
+ },
327
+ {
328
+ "epoch": 0.79,
329
+ "grad_norm": 196.0,
330
+ "learning_rate": 4.949846004064605e-06,
331
+ "logits/chosen": -2.414769411087036,
332
+ "logits/rejected": -2.394395351409912,
333
+ "logps/chosen": -281.43670654296875,
334
+ "logps/rejected": -265.92974853515625,
335
+ "loss": 0.5352,
336
+ "rewards/accuracies": 0.5974999666213989,
337
+ "rewards/chosen": 0.00015548830560874194,
338
+ "rewards/margins": 0.003786542685702443,
339
+ "rewards/rejected": -0.003631054190918803,
340
+ "step": 200
341
+ },
342
+ {
343
+ "epoch": 0.79,
344
+ "eval_logits/chosen": -2.4014458656311035,
345
+ "eval_logits/rejected": -2.362912893295288,
346
+ "eval_logps/chosen": -267.5639953613281,
347
+ "eval_logps/rejected": -242.58323669433594,
348
+ "eval_loss": 0.5010271072387695,
349
+ "eval_rewards/accuracies": 0.574404776096344,
350
+ "eval_rewards/chosen": 0.0011464261915534735,
351
+ "eval_rewards/margins": 0.003029454033821821,
352
+ "eval_rewards/rejected": -0.001883027609437704,
353
+ "eval_runtime": 123.1297,
354
+ "eval_samples_per_second": 16.243,
355
+ "eval_steps_per_second": 0.341,
356
+ "step": 200
357
+ },
358
+ {
359
+ "epoch": 0.82,
360
+ "grad_norm": 254.0,
361
+ "learning_rate": 4.935227723537811e-06,
362
+ "logits/chosen": -2.406309127807617,
363
+ "logits/rejected": -2.360525131225586,
364
+ "logps/chosen": -296.66070556640625,
365
+ "logps/rejected": -266.5440979003906,
366
+ "loss": 0.5154,
367
+ "rewards/accuracies": 0.6399999856948853,
368
+ "rewards/chosen": 0.0017778485780581832,
369
+ "rewards/margins": 0.004943528212606907,
370
+ "rewards/rejected": -0.0031656797509640455,
371
+ "step": 210
372
+ },
373
+ {
374
+ "epoch": 0.86,
375
+ "grad_norm": 288.0,
376
+ "learning_rate": 4.918769859437233e-06,
377
+ "logits/chosen": -2.3826467990875244,
378
+ "logits/rejected": -2.3224892616271973,
379
+ "logps/chosen": -274.14825439453125,
380
+ "logps/rejected": -252.51400756835938,
381
+ "loss": 0.5475,
382
+ "rewards/accuracies": 0.6050000190734863,
383
+ "rewards/chosen": 0.004731293302029371,
384
+ "rewards/margins": 0.0043090214021503925,
385
+ "rewards/rejected": 0.0004222726565785706,
386
+ "step": 220
387
+ },
388
+ {
389
+ "epoch": 0.9,
390
+ "grad_norm": 182.0,
391
+ "learning_rate": 4.900484844118235e-06,
392
+ "logits/chosen": -2.3914456367492676,
393
+ "logits/rejected": -2.3371217250823975,
394
+ "logps/chosen": -280.33282470703125,
395
+ "logps/rejected": -240.6001739501953,
396
+ "loss": 0.561,
397
+ "rewards/accuracies": 0.6175000071525574,
398
+ "rewards/chosen": 0.002294857520610094,
399
+ "rewards/margins": 0.0033697611652314663,
400
+ "rewards/rejected": -0.0010749038774520159,
401
+ "step": 230
402
+ },
403
+ {
404
+ "epoch": 0.94,
405
+ "grad_norm": 196.0,
406
+ "learning_rate": 4.880386490175634e-06,
407
+ "logits/chosen": -2.359574794769287,
408
+ "logits/rejected": -2.327416181564331,
409
+ "logps/chosen": -290.989013671875,
410
+ "logps/rejected": -268.49395751953125,
411
+ "loss": 0.5661,
412
+ "rewards/accuracies": 0.5975000262260437,
413
+ "rewards/chosen": -0.00016075666644610465,
414
+ "rewards/margins": 0.003780897008255124,
415
+ "rewards/rejected": -0.003941653296351433,
416
+ "step": 240
417
+ },
418
+ {
419
+ "epoch": 0.98,
420
+ "grad_norm": 232.0,
421
+ "learning_rate": 4.8584899800095865e-06,
422
+ "logits/chosen": -2.4217326641082764,
423
+ "logits/rejected": -2.3503971099853516,
424
+ "logps/chosen": -288.73858642578125,
425
+ "logps/rejected": -258.19317626953125,
426
+ "loss": 0.5762,
427
+ "rewards/accuracies": 0.6399999856948853,
428
+ "rewards/chosen": 0.00230691721662879,
429
+ "rewards/margins": 0.004124250262975693,
430
+ "rewards/rejected": -0.0018173331627622247,
431
+ "step": 250
432
+ },
433
+ {
434
+ "epoch": 1.02,
435
+ "grad_norm": 162.0,
436
+ "learning_rate": 4.834811854356729e-06,
437
+ "logits/chosen": -2.406905174255371,
438
+ "logits/rejected": -2.3617682456970215,
439
+ "logps/chosen": -266.1622619628906,
440
+ "logps/rejected": -245.754150390625,
441
+ "loss": 0.4313,
442
+ "rewards/accuracies": 0.6574999690055847,
443
+ "rewards/chosen": 0.006862832698971033,
444
+ "rewards/margins": 0.010333456099033356,
445
+ "rewards/rejected": -0.0034706243313848972,
446
+ "step": 260
447
+ },
448
+ {
449
+ "epoch": 1.06,
450
+ "grad_norm": 119.0,
451
+ "learning_rate": 4.809369999795219e-06,
452
+ "logits/chosen": -2.367124080657959,
453
+ "logits/rejected": -2.352210760116577,
454
+ "logps/chosen": -271.3493957519531,
455
+ "logps/rejected": -274.4390563964844,
456
+ "loss": 0.2246,
457
+ "rewards/accuracies": 0.7775000333786011,
458
+ "rewards/chosen": 0.011581487953662872,
459
+ "rewards/margins": 0.02090141549706459,
460
+ "rewards/rejected": -0.009319926612079144,
461
+ "step": 270
462
+ },
463
+ {
464
+ "epoch": 1.1,
465
+ "grad_norm": 152.0,
466
+ "learning_rate": 4.7821836352331235e-06,
467
+ "logits/chosen": -2.4189422130584717,
468
+ "logits/rejected": -2.362922430038452,
469
+ "logps/chosen": -276.532470703125,
470
+ "logps/rejected": -254.9628143310547,
471
+ "loss": 0.3068,
472
+ "rewards/accuracies": 0.7899999618530273,
473
+ "rewards/chosen": 0.010409007780253887,
474
+ "rewards/margins": 0.019158251583576202,
475
+ "rewards/rejected": -0.008749241940677166,
476
+ "step": 280
477
+ },
478
+ {
479
+ "epoch": 1.14,
480
+ "grad_norm": 122.5,
481
+ "learning_rate": 4.7532732973903525e-06,
482
+ "logits/chosen": -2.392087936401367,
483
+ "logits/rejected": -2.3331363201141357,
484
+ "logps/chosen": -281.2344970703125,
485
+ "logps/rejected": -266.2068176269531,
486
+ "loss": 0.2544,
487
+ "rewards/accuracies": 0.7375000715255737,
488
+ "rewards/chosen": 0.01263010036200285,
489
+ "rewards/margins": 0.01806234009563923,
490
+ "rewards/rejected": -0.0054322415962815285,
491
+ "step": 290
492
+ },
493
+ {
494
+ "epoch": 1.18,
495
+ "grad_norm": 177.0,
496
+ "learning_rate": 4.722660825285122e-06,
497
+ "logits/chosen": -2.413367509841919,
498
+ "logits/rejected": -2.3771374225616455,
499
+ "logps/chosen": -278.57904052734375,
500
+ "logps/rejected": -270.5982360839844,
501
+ "loss": 0.3676,
502
+ "rewards/accuracies": 0.762499988079071,
503
+ "rewards/chosen": 0.014171945862472057,
504
+ "rewards/margins": 0.020834611728787422,
505
+ "rewards/rejected": -0.0066626654006540775,
506
+ "step": 300
507
+ },
508
+ {
509
+ "epoch": 1.18,
510
+ "eval_logits/chosen": -2.416761636734009,
511
+ "eval_logits/rejected": -2.3788468837738037,
512
+ "eval_logps/chosen": -266.88555908203125,
513
+ "eval_logps/rejected": -242.12106323242188,
514
+ "eval_loss": 0.8293091654777527,
515
+ "eval_rewards/accuracies": 0.5982142686843872,
516
+ "eval_rewards/chosen": 0.00793052464723587,
517
+ "eval_rewards/margins": 0.005191552918404341,
518
+ "eval_rewards/rejected": 0.0027389726601541042,
519
+ "eval_runtime": 123.1487,
520
+ "eval_samples_per_second": 16.241,
521
+ "eval_steps_per_second": 0.341,
522
+ "step": 300
523
+ },
524
+ {
525
+ "epoch": 1.22,
526
+ "grad_norm": 140.0,
527
+ "learning_rate": 4.690369343736637e-06,
528
+ "logits/chosen": -2.4030745029449463,
529
+ "logits/rejected": -2.368807077407837,
530
+ "logps/chosen": -278.1623229980469,
531
+ "logps/rejected": -264.98541259765625,
532
+ "loss": 0.3085,
533
+ "rewards/accuracies": 0.7649999856948853,
534
+ "rewards/chosen": 0.01418610941618681,
535
+ "rewards/margins": 0.01857883669435978,
536
+ "rewards/rejected": -0.004392724949866533,
537
+ "step": 310
538
+ },
539
+ {
540
+ "epoch": 1.26,
541
+ "grad_norm": 185.0,
542
+ "learning_rate": 4.656423245896494e-06,
543
+ "logits/chosen": -2.4111828804016113,
544
+ "logits/rejected": -2.3645176887512207,
545
+ "logps/chosen": -270.58477783203125,
546
+ "logps/rejected": -256.71099853515625,
547
+ "loss": 0.2759,
548
+ "rewards/accuracies": 0.7575000524520874,
549
+ "rewards/chosen": 0.008453365415334702,
550
+ "rewards/margins": 0.01515539176762104,
551
+ "rewards/rejected": -0.0067020258866250515,
552
+ "step": 320
553
+ },
554
+ {
555
+ "epoch": 1.3,
556
+ "grad_norm": 163.0,
557
+ "learning_rate": 4.6208481748219645e-06,
558
+ "logits/chosen": -2.392019271850586,
559
+ "logits/rejected": -2.367377519607544,
560
+ "logps/chosen": -271.10015869140625,
561
+ "logps/rejected": -256.1971435546875,
562
+ "loss": 0.2666,
563
+ "rewards/accuracies": 0.7675000429153442,
564
+ "rewards/chosen": 0.007386817596852779,
565
+ "rewards/margins": 0.019648974761366844,
566
+ "rewards/rejected": -0.01226215623319149,
567
+ "step": 330
568
+ },
569
+ {
570
+ "epoch": 1.33,
571
+ "grad_norm": 142.0,
572
+ "learning_rate": 4.583671004105096e-06,
573
+ "logits/chosen": -2.3817129135131836,
574
+ "logits/rejected": -2.342694044113159,
575
+ "logps/chosen": -275.9081115722656,
576
+ "logps/rejected": -251.59848022460938,
577
+ "loss": 0.2849,
578
+ "rewards/accuracies": 0.75,
579
+ "rewards/chosen": 0.0086748655885458,
580
+ "rewards/margins": 0.01924619823694229,
581
+ "rewards/rejected": -0.010571330785751343,
582
+ "step": 340
583
+ },
584
+ {
585
+ "epoch": 1.37,
586
+ "grad_norm": 113.0,
587
+ "learning_rate": 4.544919817572262e-06,
588
+ "logits/chosen": -2.3859992027282715,
589
+ "logits/rejected": -2.325930118560791,
590
+ "logps/chosen": -272.119873046875,
591
+ "logps/rejected": -247.908935546875,
592
+ "loss": 0.2871,
593
+ "rewards/accuracies": 0.7649999856948853,
594
+ "rewards/chosen": 0.012000922113656998,
595
+ "rewards/margins": 0.019705070182681084,
596
+ "rewards/rejected": -0.007704148534685373,
597
+ "step": 350
598
+ },
599
+ {
600
+ "epoch": 1.41,
601
+ "grad_norm": 154.0,
602
+ "learning_rate": 4.504623888069497e-06,
603
+ "logits/chosen": -2.397146701812744,
604
+ "logits/rejected": -2.3492813110351562,
605
+ "logps/chosen": -271.32171630859375,
606
+ "logps/rejected": -247.8098907470703,
607
+ "loss": 0.3405,
608
+ "rewards/accuracies": 0.7600000500679016,
609
+ "rewards/chosen": 0.013489668257534504,
610
+ "rewards/margins": 0.017752837389707565,
611
+ "rewards/rejected": -0.0042631677351891994,
612
+ "step": 360
613
+ },
614
+ {
615
+ "epoch": 1.45,
616
+ "grad_norm": 139.0,
617
+ "learning_rate": 4.462813655349637e-06,
618
+ "logits/chosen": -2.372323751449585,
619
+ "logits/rejected": -2.3170628547668457,
620
+ "logps/chosen": -268.582275390625,
621
+ "logps/rejected": -245.34097290039062,
622
+ "loss": 0.3015,
623
+ "rewards/accuracies": 0.7275000214576721,
624
+ "rewards/chosen": 0.013011058792471886,
625
+ "rewards/margins": 0.018547596409916878,
626
+ "rewards/rejected": -0.005536535754799843,
627
+ "step": 370
628
+ },
629
+ {
630
+ "epoch": 1.49,
631
+ "grad_norm": 195.0,
632
+ "learning_rate": 4.419520703077975e-06,
633
+ "logits/chosen": -2.3980116844177246,
634
+ "logits/rejected": -2.3060975074768066,
635
+ "logps/chosen": -284.5008544921875,
636
+ "logps/rejected": -232.28515625,
637
+ "loss": 0.2953,
638
+ "rewards/accuracies": 0.7300000190734863,
639
+ "rewards/chosen": 0.009602969512343407,
640
+ "rewards/margins": 0.01879434660077095,
641
+ "rewards/rejected": -0.00919137429445982,
642
+ "step": 380
643
+ },
644
+ {
645
+ "epoch": 1.53,
646
+ "grad_norm": 184.0,
647
+ "learning_rate": 4.3747777349737905e-06,
648
+ "logits/chosen": -2.394030809402466,
649
+ "logits/rejected": -2.3477187156677246,
650
+ "logps/chosen": -299.9769592285156,
651
+ "logps/rejected": -268.77301025390625,
652
+ "loss": 0.318,
653
+ "rewards/accuracies": 0.7575000524520874,
654
+ "rewards/chosen": 0.011008193716406822,
655
+ "rewards/margins": 0.020197119563817978,
656
+ "rewards/rejected": -0.009188923053443432,
657
+ "step": 390
658
+ },
659
+ {
660
+ "epoch": 1.57,
661
+ "grad_norm": 153.0,
662
+ "learning_rate": 4.328618550105802e-06,
663
+ "logits/chosen": -2.3696258068084717,
664
+ "logits/rejected": -2.341409206390381,
665
+ "logps/chosen": -271.8193664550781,
666
+ "logps/rejected": -264.6459045410156,
667
+ "loss": 0.366,
668
+ "rewards/accuracies": 0.7400000691413879,
669
+ "rewards/chosen": 0.012812617234885693,
670
+ "rewards/margins": 0.018871381878852844,
671
+ "rewards/rejected": -0.006058765109628439,
672
+ "step": 400
673
+ },
674
+ {
675
+ "epoch": 1.57,
676
+ "eval_logits/chosen": -2.4146392345428467,
677
+ "eval_logits/rejected": -2.377370595932007,
678
+ "eval_logps/chosen": -267.025634765625,
679
+ "eval_logps/rejected": -242.3221435546875,
680
+ "eval_loss": 0.8238700032234192,
681
+ "eval_rewards/accuracies": 0.6398809552192688,
682
+ "eval_rewards/chosen": 0.006530104670673609,
683
+ "eval_rewards/margins": 0.005802116356790066,
684
+ "eval_rewards/rejected": 0.0007279877318069339,
685
+ "eval_runtime": 123.0966,
686
+ "eval_samples_per_second": 16.247,
687
+ "eval_steps_per_second": 0.341,
688
+ "step": 400
689
+ },
690
+ {
691
+ "epoch": 1.61,
692
+ "grad_norm": 91.0,
693
+ "learning_rate": 4.2810780173601675e-06,
694
+ "logits/chosen": -2.3998053073883057,
695
+ "logits/rejected": -2.341407060623169,
696
+ "logps/chosen": -285.62054443359375,
697
+ "logps/rejected": -247.3552703857422,
698
+ "loss": 0.3234,
699
+ "rewards/accuracies": 0.762499988079071,
700
+ "rewards/chosen": 0.008955768309533596,
701
+ "rewards/margins": 0.020532304421067238,
702
+ "rewards/rejected": -0.011576534248888493,
703
+ "step": 410
704
+ },
705
+ {
706
+ "epoch": 1.65,
707
+ "grad_norm": 174.0,
708
+ "learning_rate": 4.232192049100351e-06,
709
+ "logits/chosen": -2.411689043045044,
710
+ "logits/rejected": -2.384003162384033,
711
+ "logps/chosen": -242.8949737548828,
712
+ "logps/rejected": -236.754150390625,
713
+ "loss": 0.292,
714
+ "rewards/accuracies": 0.747499942779541,
715
+ "rewards/chosen": 0.005577466916292906,
716
+ "rewards/margins": 0.016019560396671295,
717
+ "rewards/rejected": -0.010442093946039677,
718
+ "step": 420
719
+ },
720
+ {
721
+ "epoch": 1.69,
722
+ "grad_norm": 110.0,
723
+ "learning_rate": 4.1819975740387406e-06,
724
+ "logits/chosen": -2.4044318199157715,
725
+ "logits/rejected": -2.3681979179382324,
726
+ "logps/chosen": -276.5671691894531,
727
+ "logps/rejected": -259.7832336425781,
728
+ "loss": 0.3016,
729
+ "rewards/accuracies": 0.7475000619888306,
730
+ "rewards/chosen": 0.011015561409294605,
731
+ "rewards/margins": 0.02647540345788002,
732
+ "rewards/rejected": -0.01545984111726284,
733
+ "step": 430
734
+ },
735
+ {
736
+ "epoch": 1.73,
737
+ "grad_norm": 158.0,
738
+ "learning_rate": 4.1305325093405045e-06,
739
+ "logits/chosen": -2.4186065196990967,
740
+ "logits/rejected": -2.406249523162842,
741
+ "logps/chosen": -295.4107971191406,
742
+ "logps/rejected": -280.9339294433594,
743
+ "loss": 0.4026,
744
+ "rewards/accuracies": 0.7675000429153442,
745
+ "rewards/chosen": 0.010993788950145245,
746
+ "rewards/margins": 0.020805999636650085,
747
+ "rewards/rejected": -0.00981221068650484,
748
+ "step": 440
749
+ },
750
+ {
751
+ "epoch": 1.77,
752
+ "grad_norm": 175.0,
753
+ "learning_rate": 4.077835731980775e-06,
754
+ "logits/chosen": -2.416654348373413,
755
+ "logits/rejected": -2.368619203567505,
756
+ "logps/chosen": -279.9720764160156,
757
+ "logps/rejected": -245.94345092773438,
758
+ "loss": 0.3414,
759
+ "rewards/accuracies": 0.7575000524520874,
760
+ "rewards/chosen": 0.006698101758956909,
761
+ "rewards/margins": 0.016826082020998,
762
+ "rewards/rejected": -0.010127981193363667,
763
+ "step": 450
764
+ },
765
+ {
766
+ "epoch": 1.81,
767
+ "grad_norm": 175.0,
768
+ "learning_rate": 4.02394704937677e-06,
769
+ "logits/chosen": -2.3919434547424316,
770
+ "logits/rejected": -2.3505940437316895,
771
+ "logps/chosen": -280.6643981933594,
772
+ "logps/rejected": -252.3192901611328,
773
+ "loss": 0.3603,
774
+ "rewards/accuracies": 0.7575000524520874,
775
+ "rewards/chosen": 0.00701780105009675,
776
+ "rewards/margins": 0.01795104146003723,
777
+ "rewards/rejected": -0.010933240875601768,
778
+ "step": 460
779
+ },
780
+ {
781
+ "epoch": 1.84,
782
+ "grad_norm": 176.0,
783
+ "learning_rate": 3.96890716931708e-06,
784
+ "logits/chosen": -2.381404399871826,
785
+ "logits/rejected": -2.369319438934326,
786
+ "logps/chosen": -251.976806640625,
787
+ "logps/rejected": -239.9644775390625,
788
+ "loss": 0.3975,
789
+ "rewards/accuracies": 0.7425000071525574,
790
+ "rewards/chosen": 0.00673043355345726,
791
+ "rewards/margins": 0.01468564011156559,
792
+ "rewards/rejected": -0.00795520469546318,
793
+ "step": 470
794
+ },
795
+ {
796
+ "epoch": 1.88,
797
+ "grad_norm": 129.0,
798
+ "learning_rate": 3.912757669210783e-06,
799
+ "logits/chosen": -2.4172403812408447,
800
+ "logits/rejected": -2.354468584060669,
801
+ "logps/chosen": -258.93780517578125,
802
+ "logps/rejected": -234.1327362060547,
803
+ "loss": 0.354,
804
+ "rewards/accuracies": 0.7425000071525574,
805
+ "rewards/chosen": 0.015040628612041473,
806
+ "rewards/margins": 0.020659491419792175,
807
+ "rewards/rejected": -0.005618864204734564,
808
+ "step": 480
809
+ },
810
+ {
811
+ "epoch": 1.92,
812
+ "grad_norm": 127.5,
813
+ "learning_rate": 3.855540964679658e-06,
814
+ "logits/chosen": -2.3677306175231934,
815
+ "logits/rejected": -2.323366641998291,
816
+ "logps/chosen": -239.04776000976562,
817
+ "logps/rejected": -228.19580078125,
818
+ "loss": 0.2687,
819
+ "rewards/accuracies": 0.7850000262260437,
820
+ "rewards/chosen": 0.0076850662007927895,
821
+ "rewards/margins": 0.01919684186577797,
822
+ "rewards/rejected": -0.01151177566498518,
823
+ "step": 490
824
+ },
825
+ {
826
+ "epoch": 1.96,
827
+ "grad_norm": 100.5,
828
+ "learning_rate": 3.797300277517212e-06,
829
+ "logits/chosen": -2.412917137145996,
830
+ "logits/rejected": -2.38498592376709,
831
+ "logps/chosen": -285.268310546875,
832
+ "logps/rejected": -264.0386962890625,
833
+ "loss": 0.292,
834
+ "rewards/accuracies": 0.7850000262260437,
835
+ "rewards/chosen": 0.010482062585651875,
836
+ "rewards/margins": 0.020365219563245773,
837
+ "rewards/rejected": -0.009883158840239048,
838
+ "step": 500
839
+ },
840
+ {
841
+ "epoch": 1.96,
842
+ "eval_logits/chosen": -2.4342868328094482,
843
+ "eval_logits/rejected": -2.3977575302124023,
844
+ "eval_logps/chosen": -267.17938232421875,
845
+ "eval_logps/rejected": -242.4461669921875,
846
+ "eval_loss": 0.8145859837532043,
847
+ "eval_rewards/accuracies": 0.6398809552192688,
848
+ "eval_rewards/chosen": 0.004992412868887186,
849
+ "eval_rewards/margins": 0.005504657980054617,
850
+ "eval_rewards/rejected": -0.0005122453439980745,
851
+ "eval_runtime": 123.1687,
852
+ "eval_samples_per_second": 16.238,
853
+ "eval_steps_per_second": 0.341,
854
+ "step": 500
855
+ },
856
+ {
857
+ "epoch": 2.0,
858
+ "grad_norm": 150.0,
859
+ "learning_rate": 3.7380796030387035e-06,
860
+ "logits/chosen": -2.4117255210876465,
861
+ "logits/rejected": -2.3580873012542725,
862
+ "logps/chosen": -288.262451171875,
863
+ "logps/rejected": -250.64486694335938,
864
+ "loss": 0.2919,
865
+ "rewards/accuracies": 0.7850000262260437,
866
+ "rewards/chosen": 0.012420935556292534,
867
+ "rewards/margins": 0.022166112437844276,
868
+ "rewards/rejected": -0.009745175018906593,
869
+ "step": 510
870
+ },
871
+ {
872
+ "epoch": 2.04,
873
+ "grad_norm": 72.0,
874
+ "learning_rate": 3.6779236768468647e-06,
875
+ "logits/chosen": -2.416080951690674,
876
+ "logits/rejected": -2.3825385570526123,
877
+ "logps/chosen": -266.83453369140625,
878
+ "logps/rejected": -257.94635009765625,
879
+ "loss": 0.0944,
880
+ "rewards/accuracies": 0.8500000238418579,
881
+ "rewards/chosen": 0.017391610890626907,
882
+ "rewards/margins": 0.030041953548789024,
883
+ "rewards/rejected": -0.012650340795516968,
884
+ "step": 520
885
+ },
886
+ {
887
+ "epoch": 2.08,
888
+ "grad_norm": 40.75,
889
+ "learning_rate": 3.6168779410383905e-06,
890
+ "logits/chosen": -2.4022631645202637,
891
+ "logits/rejected": -2.366995334625244,
892
+ "logps/chosen": -274.7615661621094,
893
+ "logps/rejected": -253.46835327148438,
894
+ "loss": 0.0944,
895
+ "rewards/accuracies": 0.824999988079071,
896
+ "rewards/chosen": 0.014151136390864849,
897
+ "rewards/margins": 0.029283767566084862,
898
+ "rewards/rejected": -0.015132628381252289,
899
+ "step": 530
900
+ },
901
+ {
902
+ "epoch": 2.12,
903
+ "grad_norm": 109.0,
904
+ "learning_rate": 3.554988509876747e-06,
905
+ "logits/chosen": -2.411635637283325,
906
+ "logits/rejected": -2.379657030105591,
907
+ "logps/chosen": -264.20758056640625,
908
+ "logps/rejected": -248.7351837158203,
909
+ "loss": 0.1176,
910
+ "rewards/accuracies": 0.8475000262260437,
911
+ "rewards/chosen": 0.01760762929916382,
912
+ "rewards/margins": 0.03199198096990585,
913
+ "rewards/rejected": -0.014384354464709759,
914
+ "step": 540
915
+ },
916
+ {
917
+ "epoch": 2.16,
918
+ "grad_norm": 162.0,
919
+ "learning_rate": 3.4923021349572183e-06,
920
+ "logits/chosen": -2.4204351902008057,
921
+ "logits/rejected": -2.342064619064331,
922
+ "logps/chosen": -293.338623046875,
923
+ "logps/rejected": -249.83984375,
924
+ "loss": 0.1199,
925
+ "rewards/accuracies": 0.8449999690055847,
926
+ "rewards/chosen": 0.016306212171912193,
927
+ "rewards/margins": 0.03437874838709831,
928
+ "rewards/rejected": -0.01807253621518612,
929
+ "step": 550
930
+ },
931
+ {
932
+ "epoch": 2.2,
933
+ "grad_norm": 156.0,
934
+ "learning_rate": 3.428866169890511e-06,
935
+ "logits/chosen": -2.4187042713165283,
936
+ "logits/rejected": -2.3788833618164062,
937
+ "logps/chosen": -280.169921875,
938
+ "logps/rejected": -266.49139404296875,
939
+ "loss": 0.1396,
940
+ "rewards/accuracies": 0.8550000190734863,
941
+ "rewards/chosen": 0.020975306630134583,
942
+ "rewards/margins": 0.0341680608689785,
943
+ "rewards/rejected": -0.013192756101489067,
944
+ "step": 560
945
+ },
946
+ {
947
+ "epoch": 2.24,
948
+ "grad_norm": 52.5,
949
+ "learning_rate": 3.3647285345315933e-06,
950
+ "logits/chosen": -2.426948308944702,
951
+ "logits/rejected": -2.3513236045837402,
952
+ "logps/chosen": -301.64703369140625,
953
+ "logps/rejected": -252.04910278320312,
954
+ "loss": 0.1179,
955
+ "rewards/accuracies": 0.8324999809265137,
956
+ "rewards/chosen": 0.02232900820672512,
957
+ "rewards/margins": 0.03726055473089218,
958
+ "rewards/rejected": -0.01493154652416706,
959
+ "step": 570
960
+ },
961
+ {
962
+ "epoch": 2.28,
963
+ "grad_norm": 63.0,
964
+ "learning_rate": 3.299937678780786e-06,
965
+ "logits/chosen": -2.3919901847839355,
966
+ "logits/rejected": -2.376873016357422,
967
+ "logps/chosen": -270.5113830566406,
968
+ "logps/rejected": -262.92120361328125,
969
+ "loss": 0.1103,
970
+ "rewards/accuracies": 0.8525000810623169,
971
+ "rewards/chosen": 0.01391147542744875,
972
+ "rewards/margins": 0.03057609498500824,
973
+ "rewards/rejected": -0.016664620488882065,
974
+ "step": 580
975
+ },
976
+ {
977
+ "epoch": 2.32,
978
+ "grad_norm": 38.25,
979
+ "learning_rate": 3.234542545984464e-06,
980
+ "logits/chosen": -2.3860366344451904,
981
+ "logits/rejected": -2.3532588481903076,
982
+ "logps/chosen": -279.0345764160156,
983
+ "logps/rejected": -268.3638000488281,
984
+ "loss": 0.113,
985
+ "rewards/accuracies": 0.8125,
986
+ "rewards/chosen": 0.013167209923267365,
987
+ "rewards/margins": 0.03231758996844292,
988
+ "rewards/rejected": -0.0191503819078207,
989
+ "step": 590
990
+ },
991
+ {
992
+ "epoch": 2.36,
993
+ "grad_norm": 95.5,
994
+ "learning_rate": 3.1685925359629928e-06,
995
+ "logits/chosen": -2.382845401763916,
996
+ "logits/rejected": -2.345613479614258,
997
+ "logps/chosen": -270.888427734375,
998
+ "logps/rejected": -262.33251953125,
999
+ "loss": 0.1355,
1000
+ "rewards/accuracies": 0.8575000762939453,
1001
+ "rewards/chosen": 0.01834903098642826,
1002
+ "rewards/margins": 0.034947365522384644,
1003
+ "rewards/rejected": -0.016598336398601532,
1004
+ "step": 600
1005
+ },
1006
+ {
1007
+ "epoch": 2.36,
1008
+ "eval_logits/chosen": -2.4177558422088623,
1009
+ "eval_logits/rejected": -2.3796002864837646,
1010
+ "eval_logps/chosen": -267.20611572265625,
1011
+ "eval_logps/rejected": -242.52117919921875,
1012
+ "eval_loss": 0.9650812745094299,
1013
+ "eval_rewards/accuracies": 0.6160714030265808,
1014
+ "eval_rewards/chosen": 0.004725400358438492,
1015
+ "eval_rewards/margins": 0.00598777923732996,
1016
+ "eval_rewards/rejected": -0.0012623785296455026,
1017
+ "eval_runtime": 123.1103,
1018
+ "eval_samples_per_second": 16.246,
1019
+ "eval_steps_per_second": 0.341,
1020
+ "step": 600
1021
+ },
1022
+ {
1023
+ "epoch": 2.39,
1024
+ "grad_norm": 79.5,
1025
+ "learning_rate": 3.102137467693858e-06,
1026
+ "logits/chosen": -2.3922505378723145,
1027
+ "logits/rejected": -2.3382246494293213,
1028
+ "logps/chosen": -273.4150390625,
1029
+ "logps/rejected": -258.9840393066406,
1030
+ "loss": 0.252,
1031
+ "rewards/accuracies": 0.8725000619888306,
1032
+ "rewards/chosen": 0.01791740581393242,
1033
+ "rewards/margins": 0.032240770757198334,
1034
+ "rewards/rejected": -0.014323368668556213,
1035
+ "step": 610
1036
+ },
1037
+ {
1038
+ "epoch": 2.43,
1039
+ "grad_norm": 120.0,
1040
+ "learning_rate": 3.0352275416781465e-06,
1041
+ "logits/chosen": -2.416335344314575,
1042
+ "logits/rejected": -2.379333019256592,
1043
+ "logps/chosen": -273.5201110839844,
1044
+ "logps/rejected": -258.6203918457031,
1045
+ "loss": 0.1567,
1046
+ "rewards/accuracies": 0.8400000333786011,
1047
+ "rewards/chosen": 0.02479313686490059,
1048
+ "rewards/margins": 0.0343189537525177,
1049
+ "rewards/rejected": -0.00952581875026226,
1050
+ "step": 620
1051
+ },
1052
+ {
1053
+ "epoch": 2.47,
1054
+ "grad_norm": 106.0,
1055
+ "learning_rate": 2.96791330201883e-06,
1056
+ "logits/chosen": -2.421025514602661,
1057
+ "logits/rejected": -2.3913843631744385,
1058
+ "logps/chosen": -266.0569763183594,
1059
+ "logps/rejected": -255.9671173095703,
1060
+ "loss": 0.1255,
1061
+ "rewards/accuracies": 0.8274999856948853,
1062
+ "rewards/chosen": 0.021568376570940018,
1063
+ "rewards/margins": 0.03504693880677223,
1064
+ "rewards/rejected": -0.013478565029799938,
1065
+ "step": 630
1066
+ },
1067
+ {
1068
+ "epoch": 2.51,
1069
+ "grad_norm": 94.5,
1070
+ "learning_rate": 2.9002455982394946e-06,
1071
+ "logits/chosen": -2.3834731578826904,
1072
+ "logits/rejected": -2.3404629230499268,
1073
+ "logps/chosen": -279.171630859375,
1074
+ "logps/rejected": -251.27841186523438,
1075
+ "loss": 0.1115,
1076
+ "rewards/accuracies": 0.8674999475479126,
1077
+ "rewards/chosen": 0.020785773172974586,
1078
+ "rewards/margins": 0.03259057179093361,
1079
+ "rewards/rejected": -0.011804800480604172,
1080
+ "step": 640
1081
+ },
1082
+ {
1083
+ "epoch": 2.55,
1084
+ "grad_norm": 61.0,
1085
+ "learning_rate": 2.832275546872339e-06,
1086
+ "logits/chosen": -2.401367664337158,
1087
+ "logits/rejected": -2.3691532611846924,
1088
+ "logps/chosen": -261.18377685546875,
1089
+ "logps/rejected": -267.6328125,
1090
+ "loss": 0.0953,
1091
+ "rewards/accuracies": 0.8850000500679016,
1092
+ "rewards/chosen": 0.018077706918120384,
1093
+ "rewards/margins": 0.03268102556467056,
1094
+ "rewards/rejected": -0.014603319577872753,
1095
+ "step": 650
1096
+ },
1097
+ {
1098
+ "epoch": 2.59,
1099
+ "grad_norm": 130.0,
1100
+ "learning_rate": 2.7640544928444927e-06,
1101
+ "logits/chosen": -2.418788194656372,
1102
+ "logits/rejected": -2.3343942165374756,
1103
+ "logps/chosen": -288.7831726074219,
1104
+ "logps/rejected": -252.0687713623047,
1105
+ "loss": 0.1191,
1106
+ "rewards/accuracies": 0.8675000071525574,
1107
+ "rewards/chosen": 0.020471712574362755,
1108
+ "rewards/margins": 0.03600749000906944,
1109
+ "rewards/rejected": -0.015535781159996986,
1110
+ "step": 660
1111
+ },
1112
+ {
1113
+ "epoch": 2.63,
1114
+ "grad_norm": 86.0,
1115
+ "learning_rate": 2.695633970691786e-06,
1116
+ "logits/chosen": -2.3701846599578857,
1117
+ "logits/rejected": -2.351933240890503,
1118
+ "logps/chosen": -257.39752197265625,
1119
+ "logps/rejected": -252.69287109375,
1120
+ "loss": 0.0849,
1121
+ "rewards/accuracies": 0.8550000190734863,
1122
+ "rewards/chosen": 0.019604947417974472,
1123
+ "rewards/margins": 0.031910307705402374,
1124
+ "rewards/rejected": -0.012305359356105328,
1125
+ "step": 670
1126
+ },
1127
+ {
1128
+ "epoch": 2.67,
1129
+ "grad_norm": 101.5,
1130
+ "learning_rate": 2.6270656656293007e-06,
1131
+ "logits/chosen": -2.394273281097412,
1132
+ "logits/rejected": -2.348475694656372,
1133
+ "logps/chosen": -264.9925537109375,
1134
+ "logps/rejected": -248.74368286132812,
1135
+ "loss": 0.0955,
1136
+ "rewards/accuracies": 0.8574999570846558,
1137
+ "rewards/chosen": 0.020610950887203217,
1138
+ "rewards/margins": 0.03306712210178375,
1139
+ "rewards/rejected": -0.012456170283257961,
1140
+ "step": 680
1141
+ },
1142
+ {
1143
+ "epoch": 2.71,
1144
+ "grad_norm": 63.25,
1145
+ "learning_rate": 2.558401374508089e-06,
1146
+ "logits/chosen": -2.402439594268799,
1147
+ "logits/rejected": -2.3409905433654785,
1148
+ "logps/chosen": -276.2132873535156,
1149
+ "logps/rejected": -251.0520782470703,
1150
+ "loss": 0.0996,
1151
+ "rewards/accuracies": 0.8650000691413879,
1152
+ "rewards/chosen": 0.02115057222545147,
1153
+ "rewards/margins": 0.030901487916707993,
1154
+ "rewards/rejected": -0.009750919416546822,
1155
+ "step": 690
1156
+ },
1157
+ {
1158
+ "epoch": 2.75,
1159
+ "grad_norm": 57.75,
1160
+ "learning_rate": 2.4896929666875665e-06,
1161
+ "logits/chosen": -2.4019179344177246,
1162
+ "logits/rejected": -2.3663971424102783,
1163
+ "logps/chosen": -274.6024475097656,
1164
+ "logps/rejected": -264.2455139160156,
1165
+ "loss": 0.1327,
1166
+ "rewards/accuracies": 0.8574999570846558,
1167
+ "rewards/chosen": 0.016096513718366623,
1168
+ "rewards/margins": 0.03081604465842247,
1169
+ "rewards/rejected": -0.014719529077410698,
1170
+ "step": 700
1171
+ },
1172
+ {
1173
+ "epoch": 2.75,
1174
+ "eval_logits/chosen": -2.4065868854522705,
1175
+ "eval_logits/rejected": -2.369014263153076,
1176
+ "eval_logps/chosen": -267.2229919433594,
1177
+ "eval_logps/rejected": -242.58834838867188,
1178
+ "eval_loss": 0.9984952211380005,
1179
+ "eval_rewards/accuracies": 0.6339285969734192,
1180
+ "eval_rewards/chosen": 0.004556288011372089,
1181
+ "eval_rewards/margins": 0.006490407045930624,
1182
+ "eval_rewards/rejected": -0.001934119500219822,
1183
+ "eval_runtime": 122.9942,
1184
+ "eval_samples_per_second": 16.261,
1185
+ "eval_steps_per_second": 0.341,
1186
+ "step": 700
1187
+ },
1188
+ {
1189
+ "epoch": 2.79,
1190
+ "grad_norm": 104.0,
1191
+ "learning_rate": 2.420992344853132e-06,
1192
+ "logits/chosen": -2.4031834602355957,
1193
+ "logits/rejected": -2.380056142807007,
1194
+ "logps/chosen": -276.49700927734375,
1195
+ "logps/rejected": -262.06341552734375,
1196
+ "loss": 0.1394,
1197
+ "rewards/accuracies": 0.8399999737739563,
1198
+ "rewards/chosen": 0.019898083060979843,
1199
+ "rewards/margins": 0.033728718757629395,
1200
+ "rewards/rejected": -0.013830636627972126,
1201
+ "step": 710
1202
+ },
1203
+ {
1204
+ "epoch": 2.83,
1205
+ "grad_norm": 111.0,
1206
+ "learning_rate": 2.3523514058086093e-06,
1207
+ "logits/chosen": -2.410182237625122,
1208
+ "logits/rejected": -2.326798915863037,
1209
+ "logps/chosen": -288.55609130859375,
1210
+ "logps/rejected": -250.171630859375,
1211
+ "loss": 0.1191,
1212
+ "rewards/accuracies": 0.8675000071525574,
1213
+ "rewards/chosen": 0.01868962123990059,
1214
+ "rewards/margins": 0.02993825078010559,
1215
+ "rewards/rejected": -0.011248626746237278,
1216
+ "step": 720
1217
+ },
1218
+ {
1219
+ "epoch": 2.87,
1220
+ "grad_norm": 63.5,
1221
+ "learning_rate": 2.2838220012731365e-06,
1222
+ "logits/chosen": -2.3818917274475098,
1223
+ "logits/rejected": -2.3685965538024902,
1224
+ "logps/chosen": -270.9535217285156,
1225
+ "logps/rejected": -267.3423767089844,
1226
+ "loss": 0.1279,
1227
+ "rewards/accuracies": 0.8825000524520874,
1228
+ "rewards/chosen": 0.023929597809910774,
1229
+ "rewards/margins": 0.041905276477336884,
1230
+ "rewards/rejected": -0.01797567494213581,
1231
+ "step": 730
1232
+ },
1233
+ {
1234
+ "epoch": 2.9,
1235
+ "grad_norm": 136.0,
1236
+ "learning_rate": 2.2154558987121054e-06,
1237
+ "logits/chosen": -2.3983840942382812,
1238
+ "logits/rejected": -2.3515264987945557,
1239
+ "logps/chosen": -274.8974609375,
1240
+ "logps/rejected": -253.7775115966797,
1241
+ "loss": 0.1044,
1242
+ "rewards/accuracies": 0.8500000238418579,
1243
+ "rewards/chosen": 0.025471847504377365,
1244
+ "rewards/margins": 0.03200577199459076,
1245
+ "rewards/rejected": -0.006533923093229532,
1246
+ "step": 740
1247
+ },
1248
+ {
1249
+ "epoch": 2.94,
1250
+ "grad_norm": 208.0,
1251
+ "learning_rate": 2.147304742231758e-06,
1252
+ "logits/chosen": -2.3778913021087646,
1253
+ "logits/rejected": -2.3485236167907715,
1254
+ "logps/chosen": -254.48635864257812,
1255
+ "logps/rejected": -267.0797119140625,
1256
+ "loss": 0.1637,
1257
+ "rewards/accuracies": 0.8550001382827759,
1258
+ "rewards/chosen": 0.018020575866103172,
1259
+ "rewards/margins": 0.030409198254346848,
1260
+ "rewards/rejected": -0.012388622388243675,
1261
+ "step": 750
1262
+ },
1263
+ {
1264
+ "epoch": 2.98,
1265
+ "grad_norm": 62.5,
1266
+ "learning_rate": 2.0794200135669586e-06,
1267
+ "logits/chosen": -2.399770498275757,
1268
+ "logits/rejected": -2.364065647125244,
1269
+ "logps/chosen": -277.4521484375,
1270
+ "logps/rejected": -267.6507568359375,
1271
+ "loss": 0.1466,
1272
+ "rewards/accuracies": 0.8574999570846558,
1273
+ "rewards/chosen": 0.021130980923771858,
1274
+ "rewards/margins": 0.036186493933200836,
1275
+ "rewards/rejected": -0.015055513009428978,
1276
+ "step": 760
1277
+ },
1278
+ {
1279
+ "epoch": 3.02,
1280
+ "grad_norm": 43.25,
1281
+ "learning_rate": 2.011852993191625e-06,
1282
+ "logits/chosen": -2.3711681365966797,
1283
+ "logits/rejected": -2.331266403198242,
1284
+ "logps/chosen": -284.1138916015625,
1285
+ "logps/rejected": -270.9895324707031,
1286
+ "loss": 0.0563,
1287
+ "rewards/accuracies": 0.9100000262260437,
1288
+ "rewards/chosen": 0.02099769189953804,
1289
+ "rewards/margins": 0.03560823202133179,
1290
+ "rewards/rejected": -0.014610541984438896,
1291
+ "step": 770
1292
+ },
1293
+ {
1294
+ "epoch": 3.06,
1295
+ "grad_norm": 91.5,
1296
+ "learning_rate": 1.944654721581196e-06,
1297
+ "logits/chosen": -2.3276844024658203,
1298
+ "logits/rejected": -2.3009562492370605,
1299
+ "logps/chosen": -260.98626708984375,
1300
+ "logps/rejected": -247.24685668945312,
1301
+ "loss": 0.0353,
1302
+ "rewards/accuracies": 0.9300001263618469,
1303
+ "rewards/chosen": 0.024031776934862137,
1304
+ "rewards/margins": 0.04021488502621651,
1305
+ "rewards/rejected": -0.01618310809135437,
1306
+ "step": 780
1307
+ },
1308
+ {
1309
+ "epoch": 3.1,
1310
+ "grad_norm": 29.5,
1311
+ "learning_rate": 1.877875960656394e-06,
1312
+ "logits/chosen": -2.3537003993988037,
1313
+ "logits/rejected": -2.3234972953796387,
1314
+ "logps/chosen": -275.46453857421875,
1315
+ "logps/rejected": -260.1392517089844,
1316
+ "loss": 0.0298,
1317
+ "rewards/accuracies": 0.9125000238418579,
1318
+ "rewards/chosen": 0.021290091797709465,
1319
+ "rewards/margins": 0.032937195152044296,
1320
+ "rewards/rejected": -0.011647104285657406,
1321
+ "step": 790
1322
+ },
1323
+ {
1324
+ "epoch": 3.14,
1325
+ "grad_norm": 42.5,
1326
+ "learning_rate": 1.8115671554374067e-06,
1327
+ "logits/chosen": -2.399651050567627,
1328
+ "logits/rejected": -2.3859167098999023,
1329
+ "logps/chosen": -268.92803955078125,
1330
+ "logps/rejected": -275.9405822753906,
1331
+ "loss": 0.0389,
1332
+ "rewards/accuracies": 0.9375,
1333
+ "rewards/chosen": 0.025255614891648293,
1334
+ "rewards/margins": 0.04190623760223389,
1335
+ "rewards/rejected": -0.016650624573230743,
1336
+ "step": 800
1337
+ },
1338
+ {
1339
+ "epoch": 3.14,
1340
+ "eval_logits/chosen": -2.3946948051452637,
1341
+ "eval_logits/rejected": -2.3562896251678467,
1342
+ "eval_logps/chosen": -266.8748474121094,
1343
+ "eval_logps/rejected": -242.36962890625,
1344
+ "eval_loss": 0.8932417035102844,
1345
+ "eval_rewards/accuracies": 0.6517857313156128,
1346
+ "eval_rewards/chosen": 0.008037895895540714,
1347
+ "eval_rewards/margins": 0.007784782908856869,
1348
+ "eval_rewards/rejected": 0.00025311243371106684,
1349
+ "eval_runtime": 123.1142,
1350
+ "eval_samples_per_second": 16.245,
1351
+ "eval_steps_per_second": 0.341,
1352
+ "step": 800
1353
+ },
1354
+ {
1355
+ "epoch": 3.18,
1356
+ "grad_norm": 19.25,
1357
+ "learning_rate": 1.7457783959374585e-06,
1358
+ "logits/chosen": -2.404486894607544,
1359
+ "logits/rejected": -2.3604061603546143,
1360
+ "logps/chosen": -278.95855712890625,
1361
+ "logps/rejected": -251.1371307373047,
1362
+ "loss": 0.0348,
1363
+ "rewards/accuracies": 0.9275000691413879,
1364
+ "rewards/chosen": 0.025924455374479294,
1365
+ "rewards/margins": 0.037114791572093964,
1366
+ "rewards/rejected": -0.011190338991582394,
1367
+ "step": 810
1368
+ },
1369
+ {
1370
+ "epoch": 3.22,
1371
+ "grad_norm": 32.25,
1372
+ "learning_rate": 1.680559379324558e-06,
1373
+ "logits/chosen": -2.390227794647217,
1374
+ "logits/rejected": -2.3385822772979736,
1375
+ "logps/chosen": -292.6279296875,
1376
+ "logps/rejected": -254.8878173828125,
1377
+ "loss": 0.0299,
1378
+ "rewards/accuracies": 0.9099999666213989,
1379
+ "rewards/chosen": 0.02545427717268467,
1380
+ "rewards/margins": 0.03745580464601517,
1381
+ "rewards/rejected": -0.012001526542007923,
1382
+ "step": 820
1383
+ },
1384
+ {
1385
+ "epoch": 3.26,
1386
+ "grad_norm": 60.5,
1387
+ "learning_rate": 1.6159593723800013e-06,
1388
+ "logits/chosen": -2.4059481620788574,
1389
+ "logits/rejected": -2.3442025184631348,
1390
+ "logps/chosen": -264.06060791015625,
1391
+ "logps/rejected": -246.68521118164062,
1392
+ "loss": 0.0307,
1393
+ "rewards/accuracies": 0.9125000238418579,
1394
+ "rewards/chosen": 0.0275394506752491,
1395
+ "rewards/margins": 0.036764778196811676,
1396
+ "rewards/rejected": -0.00922533217817545,
1397
+ "step": 830
1398
+ },
1399
+ {
1400
+ "epoch": 3.3,
1401
+ "grad_norm": 49.75,
1402
+ "learning_rate": 1.5520271742819883e-06,
1403
+ "logits/chosen": -2.389446973800659,
1404
+ "logits/rejected": -2.3524551391601562,
1405
+ "logps/chosen": -271.63641357421875,
1406
+ "logps/rejected": -255.3404998779297,
1407
+ "loss": 0.0301,
1408
+ "rewards/accuracies": 0.877500057220459,
1409
+ "rewards/chosen": 0.02547125145792961,
1410
+ "rewards/margins": 0.03725877031683922,
1411
+ "rewards/rejected": -0.01178752165287733,
1412
+ "step": 840
1413
+ },
1414
+ {
1415
+ "epoch": 3.34,
1416
+ "grad_norm": 24.625,
1417
+ "learning_rate": 1.4888110797424783e-06,
1418
+ "logits/chosen": -2.4469919204711914,
1419
+ "logits/rejected": -2.3791050910949707,
1420
+ "logps/chosen": -315.74066162109375,
1421
+ "logps/rejected": -277.9239501953125,
1422
+ "loss": 0.0354,
1423
+ "rewards/accuracies": 0.9025000333786011,
1424
+ "rewards/chosen": 0.03072303533554077,
1425
+ "rewards/margins": 0.048735830932855606,
1426
+ "rewards/rejected": -0.018012793734669685,
1427
+ "step": 850
1428
+ },
1429
+ {
1430
+ "epoch": 3.38,
1431
+ "grad_norm": 54.0,
1432
+ "learning_rate": 1.4263588425251052e-06,
1433
+ "logits/chosen": -2.4028658866882324,
1434
+ "logits/rejected": -2.3509697914123535,
1435
+ "logps/chosen": -289.78485107421875,
1436
+ "logps/rejected": -251.5,
1437
+ "loss": 0.0267,
1438
+ "rewards/accuracies": 0.9375001192092896,
1439
+ "rewards/chosen": 0.028129303827881813,
1440
+ "rewards/margins": 0.04094386473298073,
1441
+ "rewards/rejected": -0.01281456183642149,
1442
+ "step": 860
1443
+ },
1444
+ {
1445
+ "epoch": 3.42,
1446
+ "grad_norm": 25.625,
1447
+ "learning_rate": 1.3647176393717509e-06,
1448
+ "logits/chosen": -2.4022791385650635,
1449
+ "logits/rejected": -2.3641726970672607,
1450
+ "logps/chosen": -278.85333251953125,
1451
+ "logps/rejected": -270.0050964355469,
1452
+ "loss": 0.0217,
1453
+ "rewards/accuracies": 0.9199999570846558,
1454
+ "rewards/chosen": 0.025644132867455482,
1455
+ "rewards/margins": 0.03728828951716423,
1456
+ "rewards/rejected": -0.011644158512353897,
1457
+ "step": 870
1458
+ },
1459
+ {
1460
+ "epoch": 3.45,
1461
+ "grad_norm": 30.875,
1462
+ "learning_rate": 1.303934034364983e-06,
1463
+ "logits/chosen": -2.3777599334716797,
1464
+ "logits/rejected": -2.3236684799194336,
1465
+ "logps/chosen": -261.53216552734375,
1466
+ "logps/rejected": -239.6273651123047,
1467
+ "loss": 0.0216,
1468
+ "rewards/accuracies": 0.9025000333786011,
1469
+ "rewards/chosen": 0.024678941816091537,
1470
+ "rewards/margins": 0.03661385923624039,
1471
+ "rewards/rejected": -0.011934916488826275,
1472
+ "step": 880
1473
+ },
1474
+ {
1475
+ "epoch": 3.49,
1476
+ "grad_norm": 87.0,
1477
+ "learning_rate": 1.2440539437533075e-06,
1478
+ "logits/chosen": -2.352806806564331,
1479
+ "logits/rejected": -2.3354265689849854,
1480
+ "logps/chosen": -269.5535583496094,
1481
+ "logps/rejected": -268.3061218261719,
1482
+ "loss": 0.0273,
1483
+ "rewards/accuracies": 0.9300000071525574,
1484
+ "rewards/chosen": 0.022415757179260254,
1485
+ "rewards/margins": 0.036310791969299316,
1486
+ "rewards/rejected": -0.013895031996071339,
1487
+ "step": 890
1488
+ },
1489
+ {
1490
+ "epoch": 3.53,
1491
+ "grad_norm": 33.25,
1492
+ "learning_rate": 1.1851226012658015e-06,
1493
+ "logits/chosen": -2.366988182067871,
1494
+ "logits/rejected": -2.323378562927246,
1495
+ "logps/chosen": -264.4871520996094,
1496
+ "logps/rejected": -251.3701934814453,
1497
+ "loss": 0.029,
1498
+ "rewards/accuracies": 0.9175000190734863,
1499
+ "rewards/chosen": 0.023964881896972656,
1500
+ "rewards/margins": 0.039505355060100555,
1501
+ "rewards/rejected": -0.015540470369160175,
1502
+ "step": 900
1503
+ },
1504
+ {
1505
+ "epoch": 3.53,
1506
+ "eval_logits/chosen": -2.411829948425293,
1507
+ "eval_logits/rejected": -2.3752424716949463,
1508
+ "eval_logps/chosen": -266.7797546386719,
1509
+ "eval_logps/rejected": -242.3114013671875,
1510
+ "eval_loss": 0.9391952157020569,
1511
+ "eval_rewards/accuracies": 0.6577380895614624,
1512
+ "eval_rewards/chosen": 0.008988723158836365,
1513
+ "eval_rewards/margins": 0.008153370581567287,
1514
+ "eval_rewards/rejected": 0.0008353526936843991,
1515
+ "eval_runtime": 123.1579,
1516
+ "eval_samples_per_second": 16.239,
1517
+ "eval_steps_per_second": 0.341,
1518
+ "step": 900
1519
+ },
1520
+ {
1521
+ "epoch": 3.57,
1522
+ "grad_norm": 16.5,
1523
+ "learning_rate": 1.1271845239423196e-06,
1524
+ "logits/chosen": -2.4022092819213867,
1525
+ "logits/rejected": -2.357339382171631,
1526
+ "logps/chosen": -289.2544250488281,
1527
+ "logps/rejected": -258.82379150390625,
1528
+ "loss": 0.0269,
1529
+ "rewards/accuracies": 0.9175000190734863,
1530
+ "rewards/chosen": 0.022589916363358498,
1531
+ "rewards/margins": 0.03589435666799545,
1532
+ "rewards/rejected": -0.013304440304636955,
1533
+ "step": 910
1534
+ },
1535
+ {
1536
+ "epoch": 3.61,
1537
+ "grad_norm": 24.25,
1538
+ "learning_rate": 1.0702834785050893e-06,
1539
+ "logits/chosen": -2.3661084175109863,
1540
+ "logits/rejected": -2.3483455181121826,
1541
+ "logps/chosen": -276.6420593261719,
1542
+ "logps/rejected": -278.82080078125,
1543
+ "loss": 0.033,
1544
+ "rewards/accuracies": 0.9250000715255737,
1545
+ "rewards/chosen": 0.021236615255475044,
1546
+ "rewards/margins": 0.038433950394392014,
1547
+ "rewards/rejected": -0.017197338864207268,
1548
+ "step": 920
1549
+ },
1550
+ {
1551
+ "epoch": 3.65,
1552
+ "grad_norm": 40.5,
1553
+ "learning_rate": 1.0144624482971082e-06,
1554
+ "logits/chosen": -2.4388625621795654,
1555
+ "logits/rejected": -2.380392551422119,
1556
+ "logps/chosen": -271.2420349121094,
1557
+ "logps/rejected": -256.2777404785156,
1558
+ "loss": 0.0275,
1559
+ "rewards/accuracies": 0.9350000619888306,
1560
+ "rewards/chosen": 0.022215455770492554,
1561
+ "rewards/margins": 0.03798101097345352,
1562
+ "rewards/rejected": -0.01576555334031582,
1563
+ "step": 930
1564
+ },
1565
+ {
1566
+ "epoch": 3.69,
1567
+ "grad_norm": 14.0,
1568
+ "learning_rate": 9.597636008123052e-07,
1569
+ "logits/chosen": -2.4123058319091797,
1570
+ "logits/rejected": -2.3628923892974854,
1571
+ "logps/chosen": -308.3174743652344,
1572
+ "logps/rejected": -278.7733459472656,
1573
+ "loss": 0.0278,
1574
+ "rewards/accuracies": 0.942500114440918,
1575
+ "rewards/chosen": 0.024744439870119095,
1576
+ "rewards/margins": 0.038902923464775085,
1577
+ "rewards/rejected": -0.014158482663333416,
1578
+ "step": 940
1579
+ },
1580
+ {
1581
+ "epoch": 3.73,
1582
+ "grad_norm": 20.375,
1583
+ "learning_rate": 9.06228255841991e-07,
1584
+ "logits/chosen": -2.382359504699707,
1585
+ "logits/rejected": -2.3492817878723145,
1586
+ "logps/chosen": -264.2523193359375,
1587
+ "logps/rejected": -256.5533752441406,
1588
+ "loss": 0.0279,
1589
+ "rewards/accuracies": 0.8999999761581421,
1590
+ "rewards/chosen": 0.022538714110851288,
1591
+ "rewards/margins": 0.03617415204644203,
1592
+ "rewards/rejected": -0.013635434210300446,
1593
+ "step": 950
1594
+ },
1595
+ {
1596
+ "epoch": 3.77,
1597
+ "grad_norm": 36.25,
1598
+ "learning_rate": 8.538968542616846e-07,
1599
+ "logits/chosen": -2.41325044631958,
1600
+ "logits/rejected": -2.375253677368164,
1601
+ "logps/chosen": -281.48248291015625,
1602
+ "logps/rejected": -265.0972900390625,
1603
+ "loss": 0.0179,
1604
+ "rewards/accuracies": 0.9075000882148743,
1605
+ "rewards/chosen": 0.025317683815956116,
1606
+ "rewards/margins": 0.038605697453022,
1607
+ "rewards/rejected": -0.013288016431033611,
1608
+ "step": 960
1609
+ },
1610
+ {
1611
+ "epoch": 3.81,
1612
+ "grad_norm": 29.625,
1613
+ "learning_rate": 8.028089274818624e-07,
1614
+ "logits/chosen": -2.4157140254974365,
1615
+ "logits/rejected": -2.377472162246704,
1616
+ "logps/chosen": -278.72564697265625,
1617
+ "logps/rejected": -257.7362365722656,
1618
+ "loss": 0.034,
1619
+ "rewards/accuracies": 0.9274999499320984,
1620
+ "rewards/chosen": 0.023796474561095238,
1621
+ "rewards/margins": 0.03968465328216553,
1622
+ "rewards/rejected": -0.01588817685842514,
1623
+ "step": 970
1624
+ },
1625
+ {
1626
+ "epoch": 3.85,
1627
+ "grad_norm": 24.5,
1628
+ "learning_rate": 7.530030675857252e-07,
1629
+ "logits/chosen": -2.371452569961548,
1630
+ "logits/rejected": -2.33616304397583,
1631
+ "logps/chosen": -278.2137756347656,
1632
+ "logps/rejected": -254.2333984375,
1633
+ "loss": 0.0245,
1634
+ "rewards/accuracies": 0.9350000619888306,
1635
+ "rewards/chosen": 0.028386935591697693,
1636
+ "rewards/margins": 0.044838014990091324,
1637
+ "rewards/rejected": -0.01645107939839363,
1638
+ "step": 980
1639
+ },
1640
+ {
1641
+ "epoch": 3.89,
1642
+ "grad_norm": 31.5,
1643
+ "learning_rate": 7.045168981765427e-07,
1644
+ "logits/chosen": -2.4061717987060547,
1645
+ "logits/rejected": -2.367501735687256,
1646
+ "logps/chosen": -277.0457763671875,
1647
+ "logps/rejected": -248.8456268310547,
1648
+ "loss": 0.0248,
1649
+ "rewards/accuracies": 0.9325000047683716,
1650
+ "rewards/chosen": 0.025927498936653137,
1651
+ "rewards/margins": 0.03844950348138809,
1652
+ "rewards/rejected": -0.012522002682089806,
1653
+ "step": 990
1654
+ },
1655
+ {
1656
+ "epoch": 3.93,
1657
+ "grad_norm": 12.875,
1658
+ "learning_rate": 6.573870459565907e-07,
1659
+ "logits/chosen": -2.381437301635742,
1660
+ "logits/rejected": -2.3325188159942627,
1661
+ "logps/chosen": -293.3424377441406,
1662
+ "logps/rejected": -262.5635681152344,
1663
+ "loss": 0.0198,
1664
+ "rewards/accuracies": 0.9225000143051147,
1665
+ "rewards/chosen": 0.02644011378288269,
1666
+ "rewards/margins": 0.04168093949556351,
1667
+ "rewards/rejected": -0.015240825712680817,
1668
+ "step": 1000
1669
+ },
1670
+ {
1671
+ "epoch": 3.93,
1672
+ "eval_logits/chosen": -2.4145004749298096,
1673
+ "eval_logits/rejected": -2.3780155181884766,
1674
+ "eval_logps/chosen": -266.8046875,
1675
+ "eval_logps/rejected": -242.29171752929688,
1676
+ "eval_loss": 0.820038914680481,
1677
+ "eval_rewards/accuracies": 0.6577380895614624,
1678
+ "eval_rewards/chosen": 0.008739516139030457,
1679
+ "eval_rewards/margins": 0.007707077078521252,
1680
+ "eval_rewards/rejected": 0.0010324395261704922,
1681
+ "eval_runtime": 123.2067,
1682
+ "eval_samples_per_second": 16.233,
1683
+ "eval_steps_per_second": 0.341,
1684
+ "step": 1000
1685
+ },
1686
+ {
1687
+ "epoch": 3.96,
1688
+ "grad_norm": 33.75,
1689
+ "learning_rate": 6.116491130591478e-07,
1690
+ "logits/chosen": -2.410226821899414,
1691
+ "logits/rejected": -2.366502285003662,
1692
+ "logps/chosen": -279.39166259765625,
1693
+ "logps/rejected": -251.89364624023438,
1694
+ "loss": 0.0233,
1695
+ "rewards/accuracies": 0.9175001382827759,
1696
+ "rewards/chosen": 0.024083226919174194,
1697
+ "rewards/margins": 0.03586304560303688,
1698
+ "rewards/rejected": -0.011779818683862686,
1699
+ "step": 1010
1700
+ },
1701
+ {
1702
+ "epoch": 4.0,
1703
+ "grad_norm": 3.4375,
1704
+ "learning_rate": 5.673376501544641e-07,
1705
+ "logits/chosen": -2.41102933883667,
1706
+ "logits/rejected": -2.36838960647583,
1707
+ "logps/chosen": -274.3736877441406,
1708
+ "logps/rejected": -243.4824981689453,
1709
+ "loss": 0.017,
1710
+ "rewards/accuracies": 0.9175000190734863,
1711
+ "rewards/chosen": 0.023644987493753433,
1712
+ "rewards/margins": 0.0368778295814991,
1713
+ "rewards/rejected": -0.013232842087745667,
1714
+ "step": 1020
1715
+ },
1716
+ {
1717
+ "epoch": 4.04,
1718
+ "grad_norm": 20.5,
1719
+ "learning_rate": 5.244861303500026e-07,
1720
+ "logits/chosen": -2.413541793823242,
1721
+ "logits/rejected": -2.362837314605713,
1722
+ "logps/chosen": -272.99359130859375,
1723
+ "logps/rejected": -239.32608032226562,
1724
+ "loss": 0.008,
1725
+ "rewards/accuracies": 0.9325000643730164,
1726
+ "rewards/chosen": 0.021824661642313004,
1727
+ "rewards/margins": 0.0353974774479866,
1728
+ "rewards/rejected": -0.013572819530963898,
1729
+ "step": 1030
1730
+ },
1731
+ {
1732
+ "epoch": 4.08,
1733
+ "grad_norm": 3.765625,
1734
+ "learning_rate": 4.831269239046851e-07,
1735
+ "logits/chosen": -2.4089815616607666,
1736
+ "logits/rejected": -2.366555690765381,
1737
+ "logps/chosen": -266.49798583984375,
1738
+ "logps/rejected": -252.52145385742188,
1739
+ "loss": 0.0043,
1740
+ "rewards/accuracies": 0.949999988079071,
1741
+ "rewards/chosen": 0.025229623541235924,
1742
+ "rewards/margins": 0.03910643607378006,
1743
+ "rewards/rejected": -0.013876812532544136,
1744
+ "step": 1040
1745
+ },
1746
+ {
1747
+ "epoch": 4.12,
1748
+ "grad_norm": 18.75,
1749
+ "learning_rate": 4.4329127377623127e-07,
1750
+ "logits/chosen": -2.380474328994751,
1751
+ "logits/rejected": -2.3557381629943848,
1752
+ "logps/chosen": -275.6029968261719,
1753
+ "logps/rejected": -260.39776611328125,
1754
+ "loss": 0.0054,
1755
+ "rewards/accuracies": 0.9550000429153442,
1756
+ "rewards/chosen": 0.025818094611167908,
1757
+ "rewards/margins": 0.038945622742176056,
1758
+ "rewards/rejected": -0.013127523474395275,
1759
+ "step": 1050
1760
+ },
1761
+ {
1762
+ "epoch": 4.16,
1763
+ "grad_norm": 14.8125,
1764
+ "learning_rate": 4.050092720200638e-07,
1765
+ "logits/chosen": -2.384019374847412,
1766
+ "logits/rejected": -2.338085889816284,
1767
+ "logps/chosen": -280.4701232910156,
1768
+ "logps/rejected": -250.2019805908203,
1769
+ "loss": 0.0052,
1770
+ "rewards/accuracies": 0.9575001001358032,
1771
+ "rewards/chosen": 0.028378132730722427,
1772
+ "rewards/margins": 0.042529620230197906,
1773
+ "rewards/rejected": -0.014151493087410927,
1774
+ "step": 1060
1775
+ },
1776
+ {
1777
+ "epoch": 4.2,
1778
+ "grad_norm": 8.375,
1779
+ "learning_rate": 3.683098370576196e-07,
1780
+ "logits/chosen": -2.406979560852051,
1781
+ "logits/rejected": -2.3714377880096436,
1782
+ "logps/chosen": -287.4407958984375,
1783
+ "logps/rejected": -259.043212890625,
1784
+ "loss": 0.0062,
1785
+ "rewards/accuracies": 0.9449999928474426,
1786
+ "rewards/chosen": 0.024499880149960518,
1787
+ "rewards/margins": 0.03765181452035904,
1788
+ "rewards/rejected": -0.013151939027011395,
1789
+ "step": 1070
1790
+ },
1791
+ {
1792
+ "epoch": 4.24,
1793
+ "grad_norm": 11.6875,
1794
+ "learning_rate": 3.3322069183122253e-07,
1795
+ "logits/chosen": -2.415555477142334,
1796
+ "logits/rejected": -2.3626112937927246,
1797
+ "logps/chosen": -270.87249755859375,
1798
+ "logps/rejected": -251.2595977783203,
1799
+ "loss": 0.0049,
1800
+ "rewards/accuracies": 0.9399999380111694,
1801
+ "rewards/chosen": 0.028504956513643265,
1802
+ "rewards/margins": 0.043677303940057755,
1803
+ "rewards/rejected": -0.015172350220382214,
1804
+ "step": 1080
1805
+ },
1806
+ {
1807
+ "epoch": 4.28,
1808
+ "grad_norm": 60.75,
1809
+ "learning_rate": 2.997683428620296e-07,
1810
+ "logits/chosen": -2.4119620323181152,
1811
+ "logits/rejected": -2.3438758850097656,
1812
+ "logps/chosen": -288.2101745605469,
1813
+ "logps/rejected": -261.5445251464844,
1814
+ "loss": 0.0101,
1815
+ "rewards/accuracies": 0.9550000429153442,
1816
+ "rewards/chosen": 0.026679161936044693,
1817
+ "rewards/margins": 0.042851291596889496,
1818
+ "rewards/rejected": -0.016172129660844803,
1819
+ "step": 1090
1820
+ },
1821
+ {
1822
+ "epoch": 4.32,
1823
+ "grad_norm": 23.25,
1824
+ "learning_rate": 2.6797806022686835e-07,
1825
+ "logits/chosen": -2.3794476985931396,
1826
+ "logits/rejected": -2.350893974304199,
1827
+ "logps/chosen": -262.9915771484375,
1828
+ "logps/rejected": -261.4454650878906,
1829
+ "loss": 0.0059,
1830
+ "rewards/accuracies": 0.9449998736381531,
1831
+ "rewards/chosen": 0.024703029543161392,
1832
+ "rewards/margins": 0.04311930388212204,
1833
+ "rewards/rejected": -0.018416276201605797,
1834
+ "step": 1100
1835
+ },
1836
+ {
1837
+ "epoch": 4.32,
1838
+ "eval_logits/chosen": -2.410806894302368,
1839
+ "eval_logits/rejected": -2.3743793964385986,
1840
+ "eval_logps/chosen": -266.8759765625,
1841
+ "eval_logps/rejected": -242.3739471435547,
1842
+ "eval_loss": 0.8903548717498779,
1843
+ "eval_rewards/accuracies": 0.6577380895614624,
1844
+ "eval_rewards/chosen": 0.008026321418583393,
1845
+ "eval_rewards/margins": 0.007816384546458721,
1846
+ "eval_rewards/rejected": 0.00020993576617911458,
1847
+ "eval_runtime": 123.126,
1848
+ "eval_samples_per_second": 16.244,
1849
+ "eval_steps_per_second": 0.341,
1850
+ "step": 1100
1851
+ },
1852
+ {
1853
+ "epoch": 4.36,
1854
+ "grad_norm": 33.5,
1855
+ "learning_rate": 2.378738584690926e-07,
1856
+ "logits/chosen": -2.3809187412261963,
1857
+ "logits/rejected": -2.345142126083374,
1858
+ "logps/chosen": -274.513916015625,
1859
+ "logps/rejected": -259.05767822265625,
1860
+ "loss": 0.0073,
1861
+ "rewards/accuracies": 0.9474999308586121,
1862
+ "rewards/chosen": 0.024536920711398125,
1863
+ "rewards/margins": 0.0396781824529171,
1864
+ "rewards/rejected": -0.015141261741518974,
1865
+ "step": 1110
1866
+ },
1867
+ {
1868
+ "epoch": 4.4,
1869
+ "grad_norm": 6.09375,
1870
+ "learning_rate": 2.0947847845787073e-07,
1871
+ "logits/chosen": -2.380807638168335,
1872
+ "logits/rejected": -2.3750548362731934,
1873
+ "logps/chosen": -273.5718688964844,
1874
+ "logps/rejected": -271.99774169921875,
1875
+ "loss": 0.0056,
1876
+ "rewards/accuracies": 0.9699999690055847,
1877
+ "rewards/chosen": 0.025182534009218216,
1878
+ "rewards/margins": 0.04230727255344391,
1879
+ "rewards/rejected": -0.017124736681580544,
1880
+ "step": 1120
1881
+ },
1882
+ {
1883
+ "epoch": 4.44,
1884
+ "grad_norm": 12.75,
1885
+ "learning_rate": 1.828133702096152e-07,
1886
+ "logits/chosen": -2.4007954597473145,
1887
+ "logits/rejected": -2.3510959148406982,
1888
+ "logps/chosen": -297.80023193359375,
1889
+ "logps/rejected": -267.09271240234375,
1890
+ "loss": 0.0058,
1891
+ "rewards/accuracies": 0.9350000619888306,
1892
+ "rewards/chosen": 0.027246862649917603,
1893
+ "rewards/margins": 0.04566134512424469,
1894
+ "rewards/rejected": -0.01841447874903679,
1895
+ "step": 1130
1896
+ },
1897
+ {
1898
+ "epoch": 4.47,
1899
+ "grad_norm": 6.8125,
1900
+ "learning_rate": 1.5789867668453224e-07,
1901
+ "logits/chosen": -2.359222650527954,
1902
+ "logits/rejected": -2.316779613494873,
1903
+ "logps/chosen": -256.35321044921875,
1904
+ "logps/rejected": -244.2194366455078,
1905
+ "loss": 0.005,
1906
+ "rewards/accuracies": 0.940000057220459,
1907
+ "rewards/chosen": 0.02366521954536438,
1908
+ "rewards/margins": 0.03908833488821983,
1909
+ "rewards/rejected": -0.015423113480210304,
1910
+ "step": 1140
1911
+ },
1912
+ {
1913
+ "epoch": 4.51,
1914
+ "grad_norm": 10.625,
1915
+ "learning_rate": 1.3475321857052387e-07,
1916
+ "logits/chosen": -2.3972084522247314,
1917
+ "logits/rejected": -2.3675570487976074,
1918
+ "logps/chosen": -278.0527038574219,
1919
+ "logps/rejected": -256.3372497558594,
1920
+ "loss": 0.0053,
1921
+ "rewards/accuracies": 0.9575001001358032,
1922
+ "rewards/chosen": 0.025035608559846878,
1923
+ "rewards/margins": 0.039394162595272064,
1924
+ "rewards/rejected": -0.014358552172780037,
1925
+ "step": 1150
1926
+ },
1927
+ {
1928
+ "epoch": 4.55,
1929
+ "grad_norm": 24.5,
1930
+ "learning_rate": 1.1339448006594284e-07,
1931
+ "logits/chosen": -2.3742661476135254,
1932
+ "logits/rejected": -2.3620691299438477,
1933
+ "logps/chosen": -269.88934326171875,
1934
+ "logps/rejected": -263.372314453125,
1935
+ "loss": 0.0056,
1936
+ "rewards/accuracies": 0.9574999809265137,
1937
+ "rewards/chosen": 0.02747185155749321,
1938
+ "rewards/margins": 0.04529104381799698,
1939
+ "rewards/rejected": -0.01781919226050377,
1940
+ "step": 1160
1941
+ },
1942
+ {
1943
+ "epoch": 4.59,
1944
+ "grad_norm": 15.125,
1945
+ "learning_rate": 9.383859567194148e-08,
1946
+ "logits/chosen": -2.4092886447906494,
1947
+ "logits/rejected": -2.3820230960845947,
1948
+ "logps/chosen": -292.05755615234375,
1949
+ "logps/rejected": -274.00262451171875,
1950
+ "loss": 0.0076,
1951
+ "rewards/accuracies": 0.9600000381469727,
1952
+ "rewards/chosen": 0.02904806099832058,
1953
+ "rewards/margins": 0.04705999046564102,
1954
+ "rewards/rejected": -0.018011927604675293,
1955
+ "step": 1170
1956
+ },
1957
+ {
1958
+ "epoch": 4.63,
1959
+ "grad_norm": 6.125,
1960
+ "learning_rate": 7.610033800438343e-08,
1961
+ "logits/chosen": -2.404353141784668,
1962
+ "logits/rejected": -2.3535656929016113,
1963
+ "logps/chosen": -279.27001953125,
1964
+ "logps/rejected": -261.3420104980469,
1965
+ "loss": 0.0036,
1966
+ "rewards/accuracies": 0.9624999761581421,
1967
+ "rewards/chosen": 0.023024918511509895,
1968
+ "rewards/margins": 0.038382213562726974,
1969
+ "rewards/rejected": -0.015357298776507378,
1970
+ "step": 1180
1971
+ },
1972
+ {
1973
+ "epoch": 4.67,
1974
+ "grad_norm": 15.3125,
1975
+ "learning_rate": 6.019310663453654e-08,
1976
+ "logits/chosen": -2.379361867904663,
1977
+ "logits/rejected": -2.348564624786377,
1978
+ "logps/chosen": -272.57965087890625,
1979
+ "logps/rejected": -280.6869201660156,
1980
+ "loss": 0.0045,
1981
+ "rewards/accuracies": 0.9474999308586121,
1982
+ "rewards/chosen": 0.02704106830060482,
1983
+ "rewards/margins": 0.04761399328708649,
1984
+ "rewards/rejected": -0.02057291939854622,
1985
+ "step": 1190
1986
+ },
1987
+ {
1988
+ "epoch": 4.71,
1989
+ "grad_norm": 6.5625,
1990
+ "learning_rate": 4.6128917966964394e-08,
1991
+ "logits/chosen": -2.3975164890289307,
1992
+ "logits/rejected": -2.354431390762329,
1993
+ "logps/chosen": -264.81683349609375,
1994
+ "logps/rejected": -240.6919403076172,
1995
+ "loss": 0.0042,
1996
+ "rewards/accuracies": 0.9375,
1997
+ "rewards/chosen": 0.023663988336920738,
1998
+ "rewards/margins": 0.037333834916353226,
1999
+ "rewards/rejected": -0.013669842854142189,
2000
+ "step": 1200
2001
+ },
2002
+ {
2003
+ "epoch": 4.71,
2004
+ "eval_logits/chosen": -2.4119250774383545,
2005
+ "eval_logits/rejected": -2.375300407409668,
2006
+ "eval_logps/chosen": -266.87713623046875,
2007
+ "eval_logps/rejected": -242.38916015625,
2008
+ "eval_loss": 0.8778771162033081,
2009
+ "eval_rewards/accuracies": 0.6517857313156128,
2010
+ "eval_rewards/chosen": 0.00801478698849678,
2011
+ "eval_rewards/margins": 0.007957086898386478,
2012
+ "eval_rewards/rejected": 5.7699922763276845e-05,
2013
+ "eval_runtime": 123.014,
2014
+ "eval_samples_per_second": 16.258,
2015
+ "eval_steps_per_second": 0.341,
2016
+ "step": 1200
2017
+ },
2018
+ {
2019
+ "epoch": 4.75,
2020
+ "grad_norm": 27.0,
2021
+ "learning_rate": 3.3918396162275214e-08,
2022
+ "logits/chosen": -2.429567337036133,
2023
+ "logits/rejected": -2.401862621307373,
2024
+ "logps/chosen": -265.3700866699219,
2025
+ "logps/rejected": -255.75082397460938,
2026
+ "loss": 0.0077,
2027
+ "rewards/accuracies": 0.9475001096725464,
2028
+ "rewards/chosen": 0.02286478877067566,
2029
+ "rewards/margins": 0.037350136786699295,
2030
+ "rewards/rejected": -0.01448534894734621,
2031
+ "step": 1210
2032
+ },
2033
+ {
2034
+ "epoch": 4.79,
2035
+ "grad_norm": 29.625,
2036
+ "learning_rate": 2.3570765111574357e-08,
2037
+ "logits/chosen": -2.420926570892334,
2038
+ "logits/rejected": -2.3869175910949707,
2039
+ "logps/chosen": -275.02593994140625,
2040
+ "logps/rejected": -250.9207000732422,
2041
+ "loss": 0.0076,
2042
+ "rewards/accuracies": 0.9325000643730164,
2043
+ "rewards/chosen": 0.025391753762960434,
2044
+ "rewards/margins": 0.04114841669797897,
2045
+ "rewards/rejected": -0.01575666293501854,
2046
+ "step": 1220
2047
+ },
2048
+ {
2049
+ "epoch": 4.83,
2050
+ "grad_norm": 12.8125,
2051
+ "learning_rate": 1.5093841468690473e-08,
2052
+ "logits/chosen": -2.378108501434326,
2053
+ "logits/rejected": -2.3375566005706787,
2054
+ "logps/chosen": -278.16180419921875,
2055
+ "logps/rejected": -249.80557250976562,
2056
+ "loss": 0.0043,
2057
+ "rewards/accuracies": 0.9550000429153442,
2058
+ "rewards/chosen": 0.02722669579088688,
2059
+ "rewards/margins": 0.042906779795885086,
2060
+ "rewards/rejected": -0.015680085867643356,
2061
+ "step": 1230
2062
+ },
2063
+ {
2064
+ "epoch": 4.87,
2065
+ "grad_norm": 6.65625,
2066
+ "learning_rate": 8.494028745434368e-09,
2067
+ "logits/chosen": -2.4071974754333496,
2068
+ "logits/rejected": -2.3638107776641846,
2069
+ "logps/chosen": -272.5426940917969,
2070
+ "logps/rejected": -254.20095825195312,
2071
+ "loss": 0.0031,
2072
+ "rewards/accuracies": 0.9424999952316284,
2073
+ "rewards/chosen": 0.024697447195649147,
2074
+ "rewards/margins": 0.0481376014649868,
2075
+ "rewards/rejected": -0.023440156131982803,
2076
+ "step": 1240
2077
+ },
2078
+ {
2079
+ "epoch": 4.91,
2080
+ "grad_norm": 13.625,
2081
+ "learning_rate": 3.776312474353394e-09,
2082
+ "logits/chosen": -2.382949113845825,
2083
+ "logits/rejected": -2.3320465087890625,
2084
+ "logps/chosen": -262.14154052734375,
2085
+ "logps/rejected": -247.5254669189453,
2086
+ "loss": 0.0039,
2087
+ "rewards/accuracies": 0.9550000429153442,
2088
+ "rewards/chosen": 0.022636910900473595,
2089
+ "rewards/margins": 0.03510580584406853,
2090
+ "rewards/rejected": -0.012468894943594933,
2091
+ "step": 1250
2092
+ },
2093
+ {
2094
+ "epoch": 4.95,
2095
+ "grad_norm": 6.78125,
2096
+ "learning_rate": 9.442564426342949e-10,
2097
+ "logits/chosen": -2.3739681243896484,
2098
+ "logits/rejected": -2.3734383583068848,
2099
+ "logps/chosen": -248.2006072998047,
2100
+ "logps/rejected": -255.94161987304688,
2101
+ "loss": 0.0033,
2102
+ "rewards/accuracies": 0.9275000691413879,
2103
+ "rewards/chosen": 0.025133823975920677,
2104
+ "rewards/margins": 0.04240426793694496,
2105
+ "rewards/rejected": -0.017270449548959732,
2106
+ "step": 1260
2107
+ },
2108
+ {
2109
+ "epoch": 4.99,
2110
+ "grad_norm": 13.1875,
2111
+ "learning_rate": 0.0,
2112
+ "logits/chosen": -2.393690586090088,
2113
+ "logits/rejected": -2.3561182022094727,
2114
+ "logps/chosen": -263.6891174316406,
2115
+ "logps/rejected": -243.8829345703125,
2116
+ "loss": 0.0063,
2117
+ "rewards/accuracies": 0.949999988079071,
2118
+ "rewards/chosen": 0.024886978790163994,
2119
+ "rewards/margins": 0.0397338829934597,
2120
+ "rewards/rejected": -0.014846903271973133,
2121
+ "step": 1270
2122
+ },
2123
+ {
2124
+ "epoch": 4.99,
2125
+ "step": 1270,
2126
+ "total_flos": 0.0,
2127
+ "train_loss": 0.164279118501603,
2128
+ "train_runtime": 43545.4617,
2129
+ "train_samples_per_second": 7.02,
2130
+ "train_steps_per_second": 0.029
2131
+ }
2132
+ ],
2133
+ "logging_steps": 10,
2134
+ "max_steps": 1270,
2135
+ "num_input_tokens_seen": 0,
2136
+ "num_train_epochs": 5,
2137
+ "save_steps": 100,
2138
+ "total_flos": 0.0,
2139
+ "train_batch_size": 10,
2140
+ "trial_name": null,
2141
+ "trial_params": null
2142
+ }
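
The JSON above is the tail of a `trainer_state.json`-style training log: one entry per 10 optimizer steps (loss, grad norm, learning rate, and reward statistics), an `eval_*` entry every 100 steps, and a final aggregate summary (`train_loss`, `train_runtime`, throughput). As a quick way to pull the evaluation curve out of such a file, here is a minimal sketch; the local path `trainer_state.json` and the use of the standard-library `json` module are illustrative assumptions, not part of this commit:

```python
import json

# Minimal sketch: load a locally saved copy of the training state
# (the file name is an assumption for illustration, not part of the commit).
with open("trainer_state.json") as f:
    state = json.load(f)

# Evaluation entries are the ones that carry an "eval_loss" key;
# the remaining entries are the per-10-step training logs.
eval_logs = [entry for entry in state["log_history"] if "eval_loss" in entry]

for entry in eval_logs:
    print(
        f"step {entry['step']:>4}  "
        f"eval_loss {entry['eval_loss']:.4f}  "
        f"accuracy {entry['eval_rewards/accuracies']:.4f}  "
        f"margin {entry['eval_rewards/margins']:.4f}"
    )
```

Swapping the `print` loop for a plotting call would give the eval-loss curve directly; filtering with `"eval_loss" not in entry` instead recovers the per-step training logs.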