lole25 committed on
Commit
a7d8dc6
1 Parent(s): 9eb75df

Model save

README.md ADDED
@@ -0,0 +1,84 @@
1
+ ---
2
+ license: apache-2.0
3
+ library_name: peft
4
+ tags:
5
+ - trl
6
+ - dpo
7
+ - generated_from_trainer
8
+ base_model: mistralai/Mistral-7B-v0.1
9
+ model-index:
10
+ - name: zephyr-7b-gpo-u3-i1
11
+ results: []
12
+ ---
13
+
14
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
15
+ should probably proofread and complete it, then remove this comment. -->
16
+
17
+ # zephyr-7b-gpo-u3-i1
18
+
19
+ This model is a fine-tuned version of [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) on an unspecified dataset.
20
+ It achieves the following results on the evaluation set:
21
+ - Loss: 0.0976
22
+ - Rewards/chosen: -0.2046
23
+ - Rewards/rejected: -0.1684
24
+ - Rewards/accuracies: 0.3440
25
+ - Rewards/margins: -0.0362
26
+ - Logps/rejected: -271.7846
27
+ - Logps/chosen: -287.1580
28
+ - Logits/rejected: -1.8253
29
+ - Logits/chosen: -1.9851
30
+
31
+ ## Model description
32
+
33
+ More information needed
34
+
35
+ ## Intended uses & limitations
36
+
37
+ More information needed
38
+
39
+ ## Training and evaluation data
40
+
41
+ More information needed
42
+
43
+ ## Training procedure
44
+
45
+ ### Training hyperparameters
46
+
47
+ The following hyperparameters were used during training:
48
+ - learning_rate: 5e-06
49
+ - train_batch_size: 2
50
+ - eval_batch_size: 2
51
+ - seed: 42
52
+ - distributed_type: multi-GPU
53
+ - gradient_accumulation_steps: 2
54
+ - total_train_batch_size: 4
55
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
56
+ - lr_scheduler_type: cosine
57
+ - lr_scheduler_warmup_ratio: 0.1
58
+ - num_epochs: 5
59
+
60
+ ### Training results
61
+
62
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
63
+ |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
64
+ | 0.3803 | 0.4 | 100 | 0.0537 | 0.0 | 0.0 | 0.0 | 0.0 | -254.9398 | -266.6976 | -1.8067 | -1.9618 |
65
+ | 0.2732 | 0.8 | 200 | 0.0585 | -0.0406 | -0.0433 | 0.4405 | 0.0028 | -259.2744 | -270.7553 | -1.8367 | -1.9952 |
66
+ | 0.3013 | 1.2 | 300 | 0.0800 | -0.3312 | -0.3632 | 0.4645 | 0.0319 | -291.2575 | -299.8226 | -1.8131 | -1.9752 |
67
+ | 0.3433 | 1.6 | 400 | 0.0812 | -0.3364 | -0.3695 | 0.4675 | 0.0331 | -291.8892 | -300.3361 | -1.8102 | -1.9721 |
68
+ | 0.3606 | 2.0 | 500 | 0.1100 | -0.3181 | -0.2920 | 0.3735 | -0.0262 | -284.1371 | -298.5123 | -1.8348 | -1.9970 |
69
+ | 0.3038 | 2.4 | 600 | 0.1092 | -0.3233 | -0.2979 | 0.3770 | -0.0254 | -284.7261 | -299.0256 | -1.8317 | -1.9936 |
70
+ | 0.3161 | 2.8 | 700 | 0.1069 | -0.3172 | -0.2929 | 0.3800 | -0.0243 | -284.2322 | -298.4158 | -1.8345 | -1.9966 |
71
+ | 0.3852 | 3.2 | 800 | 0.0918 | -0.2304 | -0.2057 | 0.3685 | -0.0247 | -275.5103 | -289.7388 | -1.8409 | -2.0019 |
72
+ | 0.3359 | 3.6 | 900 | 0.0983 | -0.2063 | -0.1696 | 0.3430 | -0.0368 | -271.8958 | -287.3323 | -1.8240 | -1.9838 |
73
+ | 0.3701 | 4.0 | 1000 | 0.0982 | -0.2062 | -0.1693 | 0.3455 | -0.0368 | -271.8734 | -287.3159 | -1.8241 | -1.9838 |
74
+ | 0.4025 | 4.4 | 1100 | 0.0975 | -0.2047 | -0.1687 | 0.3455 | -0.0359 | -271.8127 | -287.1649 | -1.8260 | -1.9858 |
75
+ | 0.3754 | 4.8 | 1200 | 0.0974 | -0.2044 | -0.1685 | 0.3440 | -0.0359 | -271.7890 | -287.1331 | -1.8256 | -1.9853 |
76
+
77
+
78
+ ### Framework versions
79
+
80
+ - PEFT 0.7.1
81
+ - Transformers 4.36.2
82
+ - Pytorch 2.1.2+cu121
83
+ - Datasets 2.14.6
84
+ - Tokenizers 0.15.2
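
The hyperparameter list in the card maps almost one-to-one onto a `transformers` `TrainingArguments` object. Below is a minimal sketch of that mapping; the `trl`/`dpo` tags suggest the run sat on top of TRL's `DPOTrainer`, but that, along with the `output_dir`, is an assumption rather than something stated in the diff.

```python
from transformers import TrainingArguments

# Sketch of the arguments implied by the "Training hyperparameters" list above.
# output_dir is a placeholder; the Adam betas/epsilon shown in the card are the
# transformers defaults, so no explicit optimizer override is needed.
training_args = TrainingArguments(
    output_dir="zephyr-7b-gpo-u3-i1",   # placeholder
    learning_rate=5e-6,
    per_device_train_batch_size=2,
    per_device_eval_batch_size=2,
    gradient_accumulation_steps=2,       # 2 x 2 -> total train batch size 4, as listed
    num_train_epochs=5,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    seed=42,
)
```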
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:507c84ca06ab4bff646e501e604c8320dac73708474720a6faad1a4573d4a4cf
3
  size 671150064
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:953eaa577d856aeb2a7f8a1a8ea24963aa97118ad28751ebd904f4a57279b806
3
  size 671150064
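
The `adapter_model.safetensors` pointer above is a ~671 MB PEFT adapter rather than a full checkpoint (the card lists `library_name: peft` and `base_model: mistralai/Mistral-7B-v0.1`), so it is meant to be loaded on top of the base model. A minimal loading sketch follows; the adapter repo id is a placeholder, since the full Hub id is not shown in this diff.

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "mistralai/Mistral-7B-v0.1"        # base model named in the card
adapter_id = "<user>/zephyr-7b-gpo-u3-i1"    # placeholder: this repo's Hub id

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.bfloat16)
model = PeftModel.from_pretrained(base, adapter_id)  # attach the adapter weights

prompt = "Explain DPO in one sentence."
inputs = tokenizer(prompt, return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```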
all_results.json ADDED
@@ -0,0 +1,21 @@
1
+ {
2
+ "epoch": 5.0,
3
+ "eval_logits/chosen": -1.985146403312683,
4
+ "eval_logits/rejected": -1.8253288269042969,
5
+ "eval_logps/chosen": -287.1579895019531,
6
+ "eval_logps/rejected": -271.7846374511719,
7
+ "eval_loss": 0.09761956334114075,
8
+ "eval_rewards/accuracies": 0.3440000116825104,
9
+ "eval_rewards/chosen": -0.20460382103919983,
10
+ "eval_rewards/margins": -0.03615570068359375,
11
+ "eval_rewards/rejected": -0.16844810545444489,
12
+ "eval_runtime": 700.8137,
13
+ "eval_samples": 2000,
14
+ "eval_samples_per_second": 2.854,
15
+ "eval_steps_per_second": 1.427,
16
+ "train_loss": 0.3515237546205521,
17
+ "train_runtime": 12848.6235,
18
+ "train_samples": 61135,
19
+ "train_samples_per_second": 0.389,
20
+ "train_steps_per_second": 0.097
21
+ }
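
The speed metrics in `all_results.json` are internally consistent and can be cross-checked with a little arithmetic, assuming the usual `Trainer` convention that `*_samples_per_second` counts examples actually processed:

```python
# Cross-check of the speed metrics reported above (values copied from the JSON).
eval_runtime = 700.8137
eval_samples = 2000
print(eval_samples / eval_runtime)               # ~2.854 == eval_samples_per_second

train_runtime = 12848.6235
train_steps_per_second = 0.097
train_samples_per_second = 0.389
print(train_steps_per_second * train_runtime)    # ~1250 optimizer steps (matches global_step in trainer_state.json)
print(train_samples_per_second * train_runtime)  # ~5000 examples processed = 1250 steps x total batch size 4,
                                                 # i.e. far fewer than train_samples = 61135
```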
eval_results.json ADDED
@@ -0,0 +1,16 @@
1
+ {
2
+ "epoch": 5.0,
3
+ "eval_logits/chosen": -1.985146403312683,
4
+ "eval_logits/rejected": -1.8253288269042969,
5
+ "eval_logps/chosen": -287.1579895019531,
6
+ "eval_logps/rejected": -271.7846374511719,
7
+ "eval_loss": 0.09761956334114075,
8
+ "eval_rewards/accuracies": 0.3440000116825104,
9
+ "eval_rewards/chosen": -0.20460382103919983,
10
+ "eval_rewards/margins": -0.03615570068359375,
11
+ "eval_rewards/rejected": -0.16844810545444489,
12
+ "eval_runtime": 700.8137,
13
+ "eval_samples": 2000,
14
+ "eval_samples_per_second": 2.854,
15
+ "eval_steps_per_second": 1.427
16
+ }
runs/Apr09_00-15-49_gpu4-119-5/events.out.tfevents.1712585821.gpu4-119-5.62377.0 CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:9ba67976bee853f5dca0ce112c3cd9db48c98a584929bd29c846136875b49fab
3
- size 90004
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:17c4cdfd9f1d73928b23aa491fa399d4e8992f00397d92888969d54f364cdd70
3
+ size 93528
runs/Apr09_00-15-49_gpu4-119-5/events.out.tfevents.1712599370.gpu4-119-5.62377.1 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eec89bcc991eabf2b45665dde3bfe1a30c8c6ad5e48ba72f6ea3be5af2fd1b27
3
+ size 828
train_results.json ADDED
@@ -0,0 +1,8 @@
1
+ {
2
+ "epoch": 5.0,
3
+ "train_loss": 0.3515237546205521,
4
+ "train_runtime": 12848.6235,
5
+ "train_samples": 61135,
6
+ "train_samples_per_second": 0.389,
7
+ "train_steps_per_second": 0.097
8
+ }
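
`trainer_state.json`, added below (and shown truncated here), holds the full `log_history` that the README's "Training results" table was generated from. A minimal sketch, using only the standard library, of pulling the evaluation curve back out of it:

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training logs and evaluation logs; eval entries carry "eval_loss".
eval_logs = [entry for entry in state["log_history"] if "eval_loss" in entry]
for entry in eval_logs:
    print(f'step {entry["step"]:>5}  epoch {entry["epoch"]:.1f}  eval_loss {entry["eval_loss"]:.4f}')
```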
trainer_state.json ADDED
@@ -0,0 +1,1986 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 5.0,
5
+ "eval_steps": 100,
6
+ "global_step": 1250,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.0,
13
+ "learning_rate": 4e-08,
14
+ "logits/chosen": -1.8503975868225098,
15
+ "logits/rejected": -1.8503975868225098,
16
+ "logps/chosen": 0.0,
17
+ "logps/rejected": 0.0,
18
+ "loss": 0.4075,
19
+ "rewards/accuracies": 0.0,
20
+ "rewards/chosen": 0.0,
21
+ "rewards/margins": 0.0,
22
+ "rewards/rejected": 0.0,
23
+ "step": 1
24
+ },
25
+ {
26
+ "epoch": 0.04,
27
+ "learning_rate": 4.0000000000000003e-07,
28
+ "logits/chosen": -1.8588156700134277,
29
+ "logits/rejected": -1.8588156700134277,
30
+ "logps/chosen": 0.0,
31
+ "logps/rejected": 0.0,
32
+ "loss": 0.3636,
33
+ "rewards/accuracies": 0.0,
34
+ "rewards/chosen": 0.0,
35
+ "rewards/margins": 0.0,
36
+ "rewards/rejected": 0.0,
37
+ "step": 10
38
+ },
39
+ {
40
+ "epoch": 0.08,
41
+ "learning_rate": 8.000000000000001e-07,
42
+ "logits/chosen": -1.970517873764038,
43
+ "logits/rejected": -1.970517873764038,
44
+ "logps/chosen": 0.0,
45
+ "logps/rejected": 0.0,
46
+ "loss": 0.3902,
47
+ "rewards/accuracies": 0.0,
48
+ "rewards/chosen": 0.0,
49
+ "rewards/margins": 0.0,
50
+ "rewards/rejected": 0.0,
51
+ "step": 20
52
+ },
53
+ {
54
+ "epoch": 0.12,
55
+ "learning_rate": 1.2000000000000002e-06,
56
+ "logits/chosen": -1.9209930896759033,
57
+ "logits/rejected": -1.9209930896759033,
58
+ "logps/chosen": 0.0,
59
+ "logps/rejected": 0.0,
60
+ "loss": 0.3482,
61
+ "rewards/accuracies": 0.0,
62
+ "rewards/chosen": 0.0,
63
+ "rewards/margins": 0.0,
64
+ "rewards/rejected": 0.0,
65
+ "step": 30
66
+ },
67
+ {
68
+ "epoch": 0.16,
69
+ "learning_rate": 1.6000000000000001e-06,
70
+ "logits/chosen": -1.883547067642212,
71
+ "logits/rejected": -1.883547067642212,
72
+ "logps/chosen": 0.0,
73
+ "logps/rejected": 0.0,
74
+ "loss": 0.3507,
75
+ "rewards/accuracies": 0.0,
76
+ "rewards/chosen": 0.0,
77
+ "rewards/margins": 0.0,
78
+ "rewards/rejected": 0.0,
79
+ "step": 40
80
+ },
81
+ {
82
+ "epoch": 0.2,
83
+ "learning_rate": 2.0000000000000003e-06,
84
+ "logits/chosen": -1.9128715991973877,
85
+ "logits/rejected": -1.9128715991973877,
86
+ "logps/chosen": 0.0,
87
+ "logps/rejected": 0.0,
88
+ "loss": 0.3359,
89
+ "rewards/accuracies": 0.0,
90
+ "rewards/chosen": 0.0,
91
+ "rewards/margins": 0.0,
92
+ "rewards/rejected": 0.0,
93
+ "step": 50
94
+ },
95
+ {
96
+ "epoch": 0.24,
97
+ "learning_rate": 2.4000000000000003e-06,
98
+ "logits/chosen": -2.0107295513153076,
99
+ "logits/rejected": -2.0107295513153076,
100
+ "logps/chosen": 0.0,
101
+ "logps/rejected": 0.0,
102
+ "loss": 0.3828,
103
+ "rewards/accuracies": 0.0,
104
+ "rewards/chosen": 0.0,
105
+ "rewards/margins": 0.0,
106
+ "rewards/rejected": 0.0,
107
+ "step": 60
108
+ },
109
+ {
110
+ "epoch": 0.28,
111
+ "learning_rate": 2.8000000000000003e-06,
112
+ "logits/chosen": -1.9920228719711304,
113
+ "logits/rejected": -1.9920228719711304,
114
+ "logps/chosen": 0.0,
115
+ "logps/rejected": 0.0,
116
+ "loss": 0.3112,
117
+ "rewards/accuracies": 0.0,
118
+ "rewards/chosen": 0.0,
119
+ "rewards/margins": 0.0,
120
+ "rewards/rejected": 0.0,
121
+ "step": 70
122
+ },
123
+ {
124
+ "epoch": 0.32,
125
+ "learning_rate": 3.2000000000000003e-06,
126
+ "logits/chosen": -1.8801155090332031,
127
+ "logits/rejected": -1.8801155090332031,
128
+ "logps/chosen": 0.0,
129
+ "logps/rejected": 0.0,
130
+ "loss": 0.3778,
131
+ "rewards/accuracies": 0.0,
132
+ "rewards/chosen": 0.0,
133
+ "rewards/margins": 0.0,
134
+ "rewards/rejected": 0.0,
135
+ "step": 80
136
+ },
137
+ {
138
+ "epoch": 0.36,
139
+ "learning_rate": 3.6000000000000003e-06,
140
+ "logits/chosen": -2.050198793411255,
141
+ "logits/rejected": -2.050198793411255,
142
+ "logps/chosen": 0.0,
143
+ "logps/rejected": 0.0,
144
+ "loss": 0.3655,
145
+ "rewards/accuracies": 0.0,
146
+ "rewards/chosen": 0.0,
147
+ "rewards/margins": 0.0,
148
+ "rewards/rejected": 0.0,
149
+ "step": 90
150
+ },
151
+ {
152
+ "epoch": 0.4,
153
+ "learning_rate": 4.000000000000001e-06,
154
+ "logits/chosen": -1.8852717876434326,
155
+ "logits/rejected": -1.8852717876434326,
156
+ "logps/chosen": 0.0,
157
+ "logps/rejected": 0.0,
158
+ "loss": 0.3803,
159
+ "rewards/accuracies": 0.0,
160
+ "rewards/chosen": 0.0,
161
+ "rewards/margins": 0.0,
162
+ "rewards/rejected": 0.0,
163
+ "step": 100
164
+ },
165
+ {
166
+ "epoch": 0.4,
167
+ "eval_logits/chosen": -1.9617642164230347,
168
+ "eval_logits/rejected": -1.8066532611846924,
169
+ "eval_logps/chosen": -266.6976013183594,
170
+ "eval_logps/rejected": -254.9398193359375,
171
+ "eval_loss": 0.053734518587589264,
172
+ "eval_rewards/accuracies": 0.0,
173
+ "eval_rewards/chosen": 0.0,
174
+ "eval_rewards/margins": 0.0,
175
+ "eval_rewards/rejected": 0.0,
176
+ "eval_runtime": 700.7393,
177
+ "eval_samples_per_second": 2.854,
178
+ "eval_steps_per_second": 1.427,
179
+ "step": 100
180
+ },
181
+ {
182
+ "epoch": 0.44,
183
+ "learning_rate": 4.4e-06,
184
+ "logits/chosen": -1.731688141822815,
185
+ "logits/rejected": -1.731688141822815,
186
+ "logps/chosen": 0.0,
187
+ "logps/rejected": 0.0,
188
+ "loss": 0.2717,
189
+ "rewards/accuracies": 0.0,
190
+ "rewards/chosen": 0.0,
191
+ "rewards/margins": 0.0,
192
+ "rewards/rejected": 0.0,
193
+ "step": 110
194
+ },
195
+ {
196
+ "epoch": 0.48,
197
+ "learning_rate": 4.800000000000001e-06,
198
+ "logits/chosen": -1.8530235290527344,
199
+ "logits/rejected": -1.8530235290527344,
200
+ "logps/chosen": 0.0,
201
+ "logps/rejected": 0.0,
202
+ "loss": 0.3482,
203
+ "rewards/accuracies": 0.0,
204
+ "rewards/chosen": 0.0,
205
+ "rewards/margins": 0.0,
206
+ "rewards/rejected": 0.0,
207
+ "step": 120
208
+ },
209
+ {
210
+ "epoch": 0.52,
211
+ "learning_rate": 4.999756310023261e-06,
212
+ "logits/chosen": -2.0225424766540527,
213
+ "logits/rejected": -2.0225424766540527,
214
+ "logps/chosen": 0.0,
215
+ "logps/rejected": 0.0,
216
+ "loss": 0.3507,
217
+ "rewards/accuracies": 0.0,
218
+ "rewards/chosen": 0.0,
219
+ "rewards/margins": 0.0,
220
+ "rewards/rejected": 0.0,
221
+ "step": 130
222
+ },
223
+ {
224
+ "epoch": 0.56,
225
+ "learning_rate": 4.997807075247147e-06,
226
+ "logits/chosen": -1.8995482921600342,
227
+ "logits/rejected": -1.8995482921600342,
228
+ "logps/chosen": 0.0,
229
+ "logps/rejected": 0.0,
230
+ "loss": 0.3186,
231
+ "rewards/accuracies": 0.0,
232
+ "rewards/chosen": 0.0,
233
+ "rewards/margins": 0.0,
234
+ "rewards/rejected": 0.0,
235
+ "step": 140
236
+ },
237
+ {
238
+ "epoch": 0.6,
239
+ "learning_rate": 4.993910125649561e-06,
240
+ "logits/chosen": -1.8702564239501953,
241
+ "logits/rejected": -1.8493874073028564,
242
+ "logps/chosen": -4.896004676818848,
243
+ "logps/rejected": -1.6084611415863037,
244
+ "loss": 0.3112,
245
+ "rewards/accuracies": 0.0,
246
+ "rewards/chosen": 0.0,
247
+ "rewards/margins": 0.0,
248
+ "rewards/rejected": 0.0,
249
+ "step": 150
250
+ },
251
+ {
252
+ "epoch": 0.64,
253
+ "learning_rate": 4.988068499954578e-06,
254
+ "logits/chosen": -2.04287052154541,
255
+ "logits/rejected": -2.04287052154541,
256
+ "logps/chosen": 0.0,
257
+ "logps/rejected": 0.0,
258
+ "loss": 0.3383,
259
+ "rewards/accuracies": 0.0,
260
+ "rewards/chosen": 0.0,
261
+ "rewards/margins": 0.0,
262
+ "rewards/rejected": 0.0,
263
+ "step": 160
264
+ },
265
+ {
266
+ "epoch": 0.68,
267
+ "learning_rate": 4.980286753286196e-06,
268
+ "logits/chosen": -1.8564621210098267,
269
+ "logits/rejected": -1.8564621210098267,
270
+ "logps/chosen": 0.0,
271
+ "logps/rejected": 0.0,
272
+ "loss": 0.3531,
273
+ "rewards/accuracies": 0.0,
274
+ "rewards/chosen": 0.0,
275
+ "rewards/margins": 0.0,
276
+ "rewards/rejected": 0.0,
277
+ "step": 170
278
+ },
279
+ {
280
+ "epoch": 0.72,
281
+ "learning_rate": 4.970570953616383e-06,
282
+ "logits/chosen": -1.9510726928710938,
283
+ "logits/rejected": -1.9173896312713623,
284
+ "logps/chosen": -12.76134967803955,
285
+ "logps/rejected": -5.861204624176025,
286
+ "loss": 0.3393,
287
+ "rewards/accuracies": 0.02500000037252903,
288
+ "rewards/chosen": -0.029967620968818665,
289
+ "rewards/margins": 0.002692684531211853,
290
+ "rewards/rejected": -0.03266030550003052,
291
+ "step": 180
292
+ },
293
+ {
294
+ "epoch": 0.76,
295
+ "learning_rate": 4.958928677033465e-06,
296
+ "logits/chosen": -1.8616416454315186,
297
+ "logits/rejected": -1.8616416454315186,
298
+ "logps/chosen": 0.0,
299
+ "logps/rejected": 0.0,
300
+ "loss": 0.3383,
301
+ "rewards/accuracies": 0.0,
302
+ "rewards/chosen": 0.0,
303
+ "rewards/margins": 0.0,
304
+ "rewards/rejected": 0.0,
305
+ "step": 190
306
+ },
307
+ {
308
+ "epoch": 0.8,
309
+ "learning_rate": 4.9453690018345144e-06,
310
+ "logits/chosen": -1.911077857017517,
311
+ "logits/rejected": -1.9127223491668701,
312
+ "logps/chosen": -5.607743740081787,
313
+ "logps/rejected": -6.2597527503967285,
314
+ "loss": 0.2732,
315
+ "rewards/accuracies": 0.0,
316
+ "rewards/chosen": -9.019851859193295e-05,
317
+ "rewards/margins": -0.0019743461161851883,
318
+ "rewards/rejected": 0.0018841475248336792,
319
+ "step": 200
320
+ },
321
+ {
322
+ "epoch": 0.8,
323
+ "eval_logits/chosen": -1.9952213764190674,
324
+ "eval_logits/rejected": -1.8367009162902832,
325
+ "eval_logps/chosen": -270.7552795410156,
326
+ "eval_logps/rejected": -259.2743835449219,
327
+ "eval_loss": 0.05847138166427612,
328
+ "eval_rewards/accuracies": 0.4404999911785126,
329
+ "eval_rewards/chosen": -0.040576834231615067,
330
+ "eval_rewards/margins": 0.0027689056005328894,
331
+ "eval_rewards/rejected": -0.04334573447704315,
332
+ "eval_runtime": 702.202,
333
+ "eval_samples_per_second": 2.848,
334
+ "eval_steps_per_second": 1.424,
335
+ "step": 200
336
+ },
337
+ {
338
+ "epoch": 0.84,
339
+ "learning_rate": 4.9299025014463665e-06,
340
+ "logits/chosen": -1.871522307395935,
341
+ "logits/rejected": -1.871522307395935,
342
+ "logps/chosen": 0.0,
343
+ "logps/rejected": 0.0,
344
+ "loss": 0.3062,
345
+ "rewards/accuracies": 0.0,
346
+ "rewards/chosen": 0.0,
347
+ "rewards/margins": 0.0,
348
+ "rewards/rejected": 0.0,
349
+ "step": 210
350
+ },
351
+ {
352
+ "epoch": 0.88,
353
+ "learning_rate": 4.912541236180779e-06,
354
+ "logits/chosen": -2.014587640762329,
355
+ "logits/rejected": -2.014587640762329,
356
+ "logps/chosen": 0.0,
357
+ "logps/rejected": 0.0,
358
+ "loss": 0.3408,
359
+ "rewards/accuracies": 0.0,
360
+ "rewards/chosen": 0.0,
361
+ "rewards/margins": 0.0,
362
+ "rewards/rejected": 0.0,
363
+ "step": 220
364
+ },
365
+ {
366
+ "epoch": 0.92,
367
+ "learning_rate": 4.893298743830168e-06,
368
+ "logits/chosen": -1.9391746520996094,
369
+ "logits/rejected": -1.9385459423065186,
370
+ "logps/chosen": -2.180995464324951,
371
+ "logps/rejected": -2.3552231788635254,
372
+ "loss": 0.3505,
373
+ "rewards/accuracies": 0.02500000037252903,
374
+ "rewards/chosen": -0.007617546711117029,
375
+ "rewards/margins": 0.00016982034139800817,
376
+ "rewards/rejected": -0.007787366863340139,
377
+ "step": 230
378
+ },
379
+ {
380
+ "epoch": 0.96,
381
+ "learning_rate": 4.8721900291112415e-06,
382
+ "logits/chosen": -2.1026082038879395,
383
+ "logits/rejected": -2.1026082038879395,
384
+ "logps/chosen": 0.0,
385
+ "logps/rejected": 0.0,
386
+ "loss": 0.3211,
387
+ "rewards/accuracies": 0.0,
388
+ "rewards/chosen": 0.0,
389
+ "rewards/margins": 0.0,
390
+ "rewards/rejected": 0.0,
391
+ "step": 240
392
+ },
393
+ {
394
+ "epoch": 1.0,
395
+ "learning_rate": 4.849231551964771e-06,
396
+ "logits/chosen": -1.923709511756897,
397
+ "logits/rejected": -1.923709511756897,
398
+ "logps/chosen": 0.0,
399
+ "logps/rejected": 0.0,
400
+ "loss": 0.3581,
401
+ "rewards/accuracies": 0.0,
402
+ "rewards/chosen": 0.0,
403
+ "rewards/margins": 0.0,
404
+ "rewards/rejected": 0.0,
405
+ "step": 250
406
+ },
407
+ {
408
+ "epoch": 1.04,
409
+ "learning_rate": 4.824441214720629e-06,
410
+ "logits/chosen": -1.7751576900482178,
411
+ "logits/rejected": -1.7751576900482178,
412
+ "logps/chosen": 0.0,
413
+ "logps/rejected": 0.0,
414
+ "loss": 0.3778,
415
+ "rewards/accuracies": 0.0,
416
+ "rewards/chosen": 0.0,
417
+ "rewards/margins": 0.0,
418
+ "rewards/rejected": 0.0,
419
+ "step": 260
420
+ },
421
+ {
422
+ "epoch": 1.08,
423
+ "learning_rate": 4.7978383481380865e-06,
424
+ "logits/chosen": -1.8949896097183228,
425
+ "logits/rejected": -1.8624740839004517,
426
+ "logps/chosen": -13.423696517944336,
427
+ "logps/rejected": -20.979846954345703,
428
+ "loss": 1.1297,
429
+ "rewards/accuracies": 0.02500000037252903,
430
+ "rewards/chosen": -0.036591093987226486,
431
+ "rewards/margins": 0.14725562930107117,
432
+ "rewards/rejected": -0.18384674191474915,
433
+ "step": 270
434
+ },
435
+ {
436
+ "epoch": 1.12,
437
+ "learning_rate": 4.769443696332272e-06,
438
+ "logits/chosen": -1.9459644556045532,
439
+ "logits/rejected": -1.9459644556045532,
440
+ "logps/chosen": 0.0,
441
+ "logps/rejected": 0.0,
442
+ "loss": 0.3754,
443
+ "rewards/accuracies": 0.0,
444
+ "rewards/chosen": 0.0,
445
+ "rewards/margins": 0.0,
446
+ "rewards/rejected": 0.0,
447
+ "step": 280
448
+ },
449
+ {
450
+ "epoch": 1.16,
451
+ "learning_rate": 4.7392794005985324e-06,
452
+ "logits/chosen": -1.8699764013290405,
453
+ "logits/rejected": -1.8699764013290405,
454
+ "logps/chosen": 0.0,
455
+ "logps/rejected": 0.0,
456
+ "loss": 0.3087,
457
+ "rewards/accuracies": 0.0,
458
+ "rewards/chosen": 0.0,
459
+ "rewards/margins": 0.0,
460
+ "rewards/rejected": 0.0,
461
+ "step": 290
462
+ },
463
+ {
464
+ "epoch": 1.2,
465
+ "learning_rate": 4.707368982147318e-06,
466
+ "logits/chosen": -1.9606857299804688,
467
+ "logits/rejected": -1.9606857299804688,
468
+ "logps/chosen": 0.0,
469
+ "logps/rejected": 0.0,
470
+ "loss": 0.3013,
471
+ "rewards/accuracies": 0.0,
472
+ "rewards/chosen": 0.0,
473
+ "rewards/margins": 0.0,
474
+ "rewards/rejected": 0.0,
475
+ "step": 300
476
+ },
477
+ {
478
+ "epoch": 1.2,
479
+ "eval_logits/chosen": -1.975152850151062,
480
+ "eval_logits/rejected": -1.813112497329712,
481
+ "eval_logps/chosen": -299.82257080078125,
482
+ "eval_logps/rejected": -291.25750732421875,
483
+ "eval_loss": 0.08001529425382614,
484
+ "eval_rewards/accuracies": 0.4645000100135803,
485
+ "eval_rewards/chosen": -0.3312495946884155,
486
+ "eval_rewards/margins": 0.031927283853292465,
487
+ "eval_rewards/rejected": -0.3631769120693207,
488
+ "eval_runtime": 703.8743,
489
+ "eval_samples_per_second": 2.841,
490
+ "eval_steps_per_second": 1.421,
491
+ "step": 300
492
+ },
493
+ {
494
+ "epoch": 1.24,
495
+ "learning_rate": 4.673737323763048e-06,
496
+ "logits/chosen": -1.8876497745513916,
497
+ "logits/rejected": -1.8876497745513916,
498
+ "logps/chosen": 0.0,
499
+ "logps/rejected": 0.0,
500
+ "loss": 0.3383,
501
+ "rewards/accuracies": 0.0,
502
+ "rewards/chosen": 0.0,
503
+ "rewards/margins": 0.0,
504
+ "rewards/rejected": 0.0,
505
+ "step": 310
506
+ },
507
+ {
508
+ "epoch": 1.28,
509
+ "learning_rate": 4.638410650401267e-06,
510
+ "logits/chosen": -1.9741500616073608,
511
+ "logits/rejected": -1.9741500616073608,
512
+ "logps/chosen": 0.0,
513
+ "logps/rejected": 0.0,
514
+ "loss": 0.3951,
515
+ "rewards/accuracies": 0.0,
516
+ "rewards/chosen": 0.0,
517
+ "rewards/margins": 0.0,
518
+ "rewards/rejected": 0.0,
519
+ "step": 320
520
+ },
521
+ {
522
+ "epoch": 1.32,
523
+ "learning_rate": 4.601416508739211e-06,
524
+ "logits/chosen": -1.9750694036483765,
525
+ "logits/rejected": -1.974765419960022,
526
+ "logps/chosen": -2.6847405433654785,
527
+ "logps/rejected": -4.218203067779541,
528
+ "loss": 0.3257,
529
+ "rewards/accuracies": 0.02500000037252903,
530
+ "rewards/chosen": -0.012654995545744896,
531
+ "rewards/margins": 0.01376216672360897,
532
+ "rewards/rejected": -0.026417162269353867,
533
+ "step": 330
534
+ },
535
+ {
536
+ "epoch": 1.36,
537
+ "learning_rate": 4.562783745695738e-06,
538
+ "logits/chosen": -1.7031259536743164,
539
+ "logits/rejected": -1.7031259536743164,
540
+ "logps/chosen": 0.0,
541
+ "logps/rejected": 0.0,
542
+ "loss": 0.3408,
543
+ "rewards/accuracies": 0.0,
544
+ "rewards/chosen": 0.0,
545
+ "rewards/margins": 0.0,
546
+ "rewards/rejected": 0.0,
547
+ "step": 340
548
+ },
549
+ {
550
+ "epoch": 1.4,
551
+ "learning_rate": 4.522542485937369e-06,
552
+ "logits/chosen": -1.9031295776367188,
553
+ "logits/rejected": -1.9031295776367188,
554
+ "logps/chosen": 0.0,
555
+ "logps/rejected": 0.0,
556
+ "loss": 0.3754,
557
+ "rewards/accuracies": 0.0,
558
+ "rewards/chosen": 0.0,
559
+ "rewards/margins": 0.0,
560
+ "rewards/rejected": 0.0,
561
+ "step": 350
562
+ },
563
+ {
564
+ "epoch": 1.44,
565
+ "learning_rate": 4.4807241083879774e-06,
566
+ "logits/chosen": -1.9317785501480103,
567
+ "logits/rejected": -1.9317785501480103,
568
+ "logps/chosen": 0.0,
569
+ "logps/rejected": 0.0,
570
+ "loss": 0.3778,
571
+ "rewards/accuracies": 0.0,
572
+ "rewards/chosen": 0.0,
573
+ "rewards/margins": 0.0,
574
+ "rewards/rejected": 0.0,
575
+ "step": 360
576
+ },
577
+ {
578
+ "epoch": 1.48,
579
+ "learning_rate": 4.437361221760449e-06,
580
+ "logits/chosen": -1.8960540294647217,
581
+ "logits/rejected": -1.8960540294647217,
582
+ "logps/chosen": 0.0,
583
+ "logps/rejected": 0.0,
584
+ "loss": 0.3852,
585
+ "rewards/accuracies": 0.0,
586
+ "rewards/chosen": 0.0,
587
+ "rewards/margins": 0.0,
588
+ "rewards/rejected": 0.0,
589
+ "step": 370
590
+ },
591
+ {
592
+ "epoch": 1.52,
593
+ "learning_rate": 4.3924876391293915e-06,
594
+ "logits/chosen": -1.8491512537002563,
595
+ "logits/rejected": -1.8491512537002563,
596
+ "logps/chosen": 0.0,
597
+ "logps/rejected": 0.0,
598
+ "loss": 0.3852,
599
+ "rewards/accuracies": 0.0,
600
+ "rewards/chosen": 0.0,
601
+ "rewards/margins": 0.0,
602
+ "rewards/rejected": 0.0,
603
+ "step": 380
604
+ },
605
+ {
606
+ "epoch": 1.56,
607
+ "learning_rate": 4.346138351564711e-06,
608
+ "logits/chosen": -1.8366947174072266,
609
+ "logits/rejected": -1.8366947174072266,
610
+ "logps/chosen": 0.0,
611
+ "logps/rejected": 0.0,
612
+ "loss": 0.3408,
613
+ "rewards/accuracies": 0.0,
614
+ "rewards/chosen": 0.0,
615
+ "rewards/margins": 0.0,
616
+ "rewards/rejected": 0.0,
617
+ "step": 390
618
+ },
619
+ {
620
+ "epoch": 1.6,
621
+ "learning_rate": 4.2983495008466285e-06,
622
+ "logits/chosen": -1.9089797735214233,
623
+ "logits/rejected": -1.9089797735214233,
624
+ "logps/chosen": 0.0,
625
+ "logps/rejected": 0.0,
626
+ "loss": 0.3433,
627
+ "rewards/accuracies": 0.0,
628
+ "rewards/chosen": 0.0,
629
+ "rewards/margins": 0.0,
630
+ "rewards/rejected": 0.0,
631
+ "step": 400
632
+ },
633
+ {
634
+ "epoch": 1.6,
635
+ "eval_logits/chosen": -1.9721235036849976,
636
+ "eval_logits/rejected": -1.8102209568023682,
637
+ "eval_logps/chosen": -300.3360595703125,
638
+ "eval_logps/rejected": -291.88916015625,
639
+ "eval_loss": 0.08119545131921768,
640
+ "eval_rewards/accuracies": 0.4675000011920929,
641
+ "eval_rewards/chosen": -0.33638474345207214,
642
+ "eval_rewards/margins": 0.03310885280370712,
643
+ "eval_rewards/rejected": -0.3694935739040375,
644
+ "eval_runtime": 703.1815,
645
+ "eval_samples_per_second": 2.844,
646
+ "eval_steps_per_second": 1.422,
647
+ "step": 400
648
+ },
649
+ {
650
+ "epoch": 1.64,
651
+ "learning_rate": 4.249158351283414e-06,
652
+ "logits/chosen": -1.8887426853179932,
653
+ "logits/rejected": -1.8887426853179932,
654
+ "logps/chosen": 0.0,
655
+ "logps/rejected": 0.0,
656
+ "loss": 0.3186,
657
+ "rewards/accuracies": 0.0,
658
+ "rewards/chosen": 0.0,
659
+ "rewards/margins": 0.0,
660
+ "rewards/rejected": 0.0,
661
+ "step": 410
662
+ },
663
+ {
664
+ "epoch": 1.68,
665
+ "learning_rate": 4.198603260653792e-06,
666
+ "logits/chosen": -1.6066404581069946,
667
+ "logits/rejected": -1.6066404581069946,
668
+ "logps/chosen": 0.0,
669
+ "logps/rejected": 0.0,
670
+ "loss": 0.284,
671
+ "rewards/accuracies": 0.0,
672
+ "rewards/chosen": 0.0,
673
+ "rewards/margins": 0.0,
674
+ "rewards/rejected": 0.0,
675
+ "step": 420
676
+ },
677
+ {
678
+ "epoch": 1.72,
679
+ "learning_rate": 4.146723650296701e-06,
680
+ "logits/chosen": -1.7861597537994385,
681
+ "logits/rejected": -1.764651894569397,
682
+ "logps/chosen": -6.981114864349365,
683
+ "logps/rejected": -4.9876909255981445,
684
+ "loss": 0.2566,
685
+ "rewards/accuracies": 0.02500000037252903,
686
+ "rewards/chosen": -0.020851103588938713,
687
+ "rewards/margins": 0.012941191904246807,
688
+ "rewards/rejected": -0.033792294561862946,
689
+ "step": 430
690
+ },
691
+ {
692
+ "epoch": 1.76,
693
+ "learning_rate": 4.093559974371725e-06,
694
+ "logits/chosen": -1.9894205331802368,
695
+ "logits/rejected": -1.9900938272476196,
696
+ "logps/chosen": -4.070672988891602,
697
+ "logps/rejected": -14.303924560546875,
698
+ "loss": 0.6156,
699
+ "rewards/accuracies": 0.02500000037252903,
700
+ "rewards/chosen": 0.015280509367585182,
701
+ "rewards/margins": 0.09383808076381683,
702
+ "rewards/rejected": -0.0785575658082962,
703
+ "step": 440
704
+ },
705
+ {
706
+ "epoch": 1.8,
707
+ "learning_rate": 4.039153688314146e-06,
708
+ "logits/chosen": -1.858513593673706,
709
+ "logits/rejected": -1.858513593673706,
710
+ "logps/chosen": 0.0,
711
+ "logps/rejected": 0.0,
712
+ "loss": 0.3408,
713
+ "rewards/accuracies": 0.0,
714
+ "rewards/chosen": 0.0,
715
+ "rewards/margins": 0.0,
716
+ "rewards/rejected": 0.0,
717
+ "step": 450
718
+ },
719
+ {
720
+ "epoch": 1.84,
721
+ "learning_rate": 3.983547216509254e-06,
722
+ "logits/chosen": -2.0184249877929688,
723
+ "logits/rejected": -2.0184249877929688,
724
+ "logps/chosen": 0.0,
725
+ "logps/rejected": 0.0,
726
+ "loss": 0.3211,
727
+ "rewards/accuracies": 0.0,
728
+ "rewards/chosen": 0.0,
729
+ "rewards/margins": 0.0,
730
+ "rewards/rejected": 0.0,
731
+ "step": 460
732
+ },
733
+ {
734
+ "epoch": 1.88,
735
+ "learning_rate": 3.92678391921108e-06,
736
+ "logits/chosen": -1.7378623485565186,
737
+ "logits/rejected": -1.7378623485565186,
738
+ "logps/chosen": 0.0,
739
+ "logps/rejected": 0.0,
740
+ "loss": 0.3087,
741
+ "rewards/accuracies": 0.0,
742
+ "rewards/chosen": 0.0,
743
+ "rewards/margins": 0.0,
744
+ "rewards/rejected": 0.0,
745
+ "step": 470
746
+ },
747
+ {
748
+ "epoch": 1.92,
749
+ "learning_rate": 3.868908058731376e-06,
750
+ "logits/chosen": -1.7385492324829102,
751
+ "logits/rejected": -1.7385492324829102,
752
+ "logps/chosen": 0.0,
753
+ "logps/rejected": 0.0,
754
+ "loss": 0.3606,
755
+ "rewards/accuracies": 0.0,
756
+ "rewards/chosen": 0.0,
757
+ "rewards/margins": 0.0,
758
+ "rewards/rejected": 0.0,
759
+ "step": 480
760
+ },
761
+ {
762
+ "epoch": 1.96,
763
+ "learning_rate": 3.8099647649251984e-06,
764
+ "logits/chosen": -1.9141228199005127,
765
+ "logits/rejected": -1.9141228199005127,
766
+ "logps/chosen": 0.0,
767
+ "logps/rejected": 0.0,
768
+ "loss": 0.3062,
769
+ "rewards/accuracies": 0.0,
770
+ "rewards/chosen": 0.0,
771
+ "rewards/margins": 0.0,
772
+ "rewards/rejected": 0.0,
773
+ "step": 490
774
+ },
775
+ {
776
+ "epoch": 2.0,
777
+ "learning_rate": 3.7500000000000005e-06,
778
+ "logits/chosen": -1.7758957147598267,
779
+ "logits/rejected": -1.7758957147598267,
780
+ "logps/chosen": 0.0,
781
+ "logps/rejected": 0.0,
782
+ "loss": 0.3606,
783
+ "rewards/accuracies": 0.0,
784
+ "rewards/chosen": 0.0,
785
+ "rewards/margins": 0.0,
786
+ "rewards/rejected": 0.0,
787
+ "step": 500
788
+ },
789
+ {
790
+ "epoch": 2.0,
791
+ "eval_logits/chosen": -1.9969795942306519,
792
+ "eval_logits/rejected": -1.8348422050476074,
793
+ "eval_logps/chosen": -298.5122985839844,
794
+ "eval_logps/rejected": -284.1371154785156,
795
+ "eval_loss": 0.10996392369270325,
796
+ "eval_rewards/accuracies": 0.3734999895095825,
797
+ "eval_rewards/chosen": -0.3181473910808563,
798
+ "eval_rewards/margins": -0.026174278929829597,
799
+ "eval_rewards/rejected": -0.2919731140136719,
800
+ "eval_runtime": 702.666,
801
+ "eval_samples_per_second": 2.846,
802
+ "eval_steps_per_second": 1.423,
803
+ "step": 500
804
+ },
805
+ {
806
+ "epoch": 2.04,
807
+ "learning_rate": 3.689060522675689e-06,
808
+ "logits/chosen": -1.7798951864242554,
809
+ "logits/rejected": -1.7795917987823486,
810
+ "logps/chosen": -2.3812079429626465,
811
+ "logps/rejected": -3.1179988384246826,
812
+ "loss": 0.2962,
813
+ "rewards/accuracies": 0.02500000037252903,
814
+ "rewards/chosen": -0.009619669988751411,
815
+ "rewards/margins": 0.005795452743768692,
816
+ "rewards/rejected": -0.015415122732520103,
817
+ "step": 510
818
+ },
819
+ {
820
+ "epoch": 2.08,
821
+ "learning_rate": 3.627193851723577e-06,
822
+ "logits/chosen": -1.9688339233398438,
823
+ "logits/rejected": -1.9688339233398438,
824
+ "logps/chosen": 0.0,
825
+ "logps/rejected": 0.0,
826
+ "loss": 0.3383,
827
+ "rewards/accuracies": 0.0,
828
+ "rewards/chosen": 0.0,
829
+ "rewards/margins": 0.0,
830
+ "rewards/rejected": 0.0,
831
+ "step": 520
832
+ },
833
+ {
834
+ "epoch": 2.12,
835
+ "learning_rate": 3.564448228912682e-06,
836
+ "logits/chosen": -2.014033794403076,
837
+ "logits/rejected": -2.014033794403076,
838
+ "logps/chosen": 0.0,
839
+ "logps/rejected": 0.0,
840
+ "loss": 0.3408,
841
+ "rewards/accuracies": 0.0,
842
+ "rewards/chosen": 0.0,
843
+ "rewards/margins": 0.0,
844
+ "rewards/rejected": 0.0,
845
+ "step": 530
846
+ },
847
+ {
848
+ "epoch": 2.16,
849
+ "learning_rate": 3.5008725813922383e-06,
850
+ "logits/chosen": -1.9099280834197998,
851
+ "logits/rejected": -1.9099280834197998,
852
+ "logps/chosen": 0.0,
853
+ "logps/rejected": 0.0,
854
+ "loss": 0.3926,
855
+ "rewards/accuracies": 0.0,
856
+ "rewards/chosen": 0.0,
857
+ "rewards/margins": 0.0,
858
+ "rewards/rejected": 0.0,
859
+ "step": 540
860
+ },
861
+ {
862
+ "epoch": 2.2,
863
+ "learning_rate": 3.436516483539781e-06,
864
+ "logits/chosen": -1.9664745330810547,
865
+ "logits/rejected": -1.9664745330810547,
866
+ "logps/chosen": 0.0,
867
+ "logps/rejected": 0.0,
868
+ "loss": 0.3235,
869
+ "rewards/accuracies": 0.0,
870
+ "rewards/chosen": 0.0,
871
+ "rewards/margins": 0.0,
872
+ "rewards/rejected": 0.0,
873
+ "step": 550
874
+ },
875
+ {
876
+ "epoch": 2.24,
877
+ "learning_rate": 3.3714301183045382e-06,
878
+ "logits/chosen": -1.767690658569336,
879
+ "logits/rejected": -1.767690658569336,
880
+ "logps/chosen": 0.0,
881
+ "logps/rejected": 0.0,
882
+ "loss": 0.3334,
883
+ "rewards/accuracies": 0.0,
884
+ "rewards/chosen": 0.0,
885
+ "rewards/margins": 0.0,
886
+ "rewards/rejected": 0.0,
887
+ "step": 560
888
+ },
889
+ {
890
+ "epoch": 2.28,
891
+ "learning_rate": 3.3056642380762783e-06,
892
+ "logits/chosen": -1.8395519256591797,
893
+ "logits/rejected": -1.8395519256591797,
894
+ "logps/chosen": 0.0,
895
+ "logps/rejected": 0.0,
896
+ "loss": 0.3581,
897
+ "rewards/accuracies": 0.0,
898
+ "rewards/chosen": 0.0,
899
+ "rewards/margins": 0.0,
900
+ "rewards/rejected": 0.0,
901
+ "step": 570
902
+ },
903
+ {
904
+ "epoch": 2.32,
905
+ "learning_rate": 3.2392701251101172e-06,
906
+ "logits/chosen": -1.9773021936416626,
907
+ "logits/rejected": -1.9773021936416626,
908
+ "logps/chosen": 0.0,
909
+ "logps/rejected": 0.0,
910
+ "loss": 0.3334,
911
+ "rewards/accuracies": 0.0,
912
+ "rewards/chosen": 0.0,
913
+ "rewards/margins": 0.0,
914
+ "rewards/rejected": 0.0,
915
+ "step": 580
916
+ },
917
+ {
918
+ "epoch": 2.36,
919
+ "learning_rate": 3.1722995515381644e-06,
920
+ "logits/chosen": -2.025939464569092,
921
+ "logits/rejected": -2.025939464569092,
922
+ "logps/chosen": 0.0,
923
+ "logps/rejected": 0.0,
924
+ "loss": 0.3754,
925
+ "rewards/accuracies": 0.0,
926
+ "rewards/chosen": 0.0,
927
+ "rewards/margins": 0.0,
928
+ "rewards/rejected": 0.0,
929
+ "step": 590
930
+ },
931
+ {
932
+ "epoch": 2.4,
933
+ "learning_rate": 3.1048047389991693e-06,
934
+ "logits/chosen": -2.046207904815674,
935
+ "logits/rejected": -2.046207904815674,
936
+ "logps/chosen": 0.0,
937
+ "logps/rejected": 0.0,
938
+ "loss": 0.3038,
939
+ "rewards/accuracies": 0.0,
940
+ "rewards/chosen": 0.0,
941
+ "rewards/margins": 0.0,
942
+ "rewards/rejected": 0.0,
943
+ "step": 600
944
+ },
945
+ {
946
+ "epoch": 2.4,
947
+ "eval_logits/chosen": -1.9935928583145142,
948
+ "eval_logits/rejected": -1.831691026687622,
949
+ "eval_logps/chosen": -299.02557373046875,
950
+ "eval_logps/rejected": -284.72607421875,
951
+ "eval_loss": 0.10917193442583084,
952
+ "eval_rewards/accuracies": 0.37700000405311584,
953
+ "eval_rewards/chosen": -0.3232795298099518,
954
+ "eval_rewards/margins": -0.025417106226086617,
955
+ "eval_rewards/rejected": -0.2978624105453491,
956
+ "eval_runtime": 703.1518,
957
+ "eval_samples_per_second": 2.844,
958
+ "eval_steps_per_second": 1.422,
959
+ "step": 600
960
+ },
961
+ {
962
+ "epoch": 2.44,
963
+ "learning_rate": 3.0368383179176584e-06,
964
+ "logits/chosen": -2.0336036682128906,
965
+ "logits/rejected": -2.0336036682128906,
966
+ "logps/chosen": 0.0,
967
+ "logps/rejected": 0.0,
968
+ "loss": 0.3211,
969
+ "rewards/accuracies": 0.0,
970
+ "rewards/chosen": 0.0,
971
+ "rewards/margins": 0.0,
972
+ "rewards/rejected": 0.0,
973
+ "step": 610
974
+ },
975
+ {
976
+ "epoch": 2.48,
977
+ "learning_rate": 2.9684532864643123e-06,
978
+ "logits/chosen": -1.7936891317367554,
979
+ "logits/rejected": -1.7936891317367554,
980
+ "logps/chosen": 0.0,
981
+ "logps/rejected": 0.0,
982
+ "loss": 0.3062,
983
+ "rewards/accuracies": 0.0,
984
+ "rewards/chosen": 0.0,
985
+ "rewards/margins": 0.0,
986
+ "rewards/rejected": 0.0,
987
+ "step": 620
988
+ },
989
+ {
990
+ "epoch": 2.52,
991
+ "learning_rate": 2.8997029692295875e-06,
992
+ "logits/chosen": -1.8989810943603516,
993
+ "logits/rejected": -1.8989810943603516,
994
+ "logps/chosen": 0.0,
995
+ "logps/rejected": 0.0,
996
+ "loss": 0.3457,
997
+ "rewards/accuracies": 0.0,
998
+ "rewards/chosen": 0.0,
999
+ "rewards/margins": 0.0,
1000
+ "rewards/rejected": 0.0,
1001
+ "step": 630
1002
+ },
1003
+ {
1004
+ "epoch": 2.56,
1005
+ "learning_rate": 2.8306409756428067e-06,
1006
+ "logits/chosen": -1.7690341472625732,
1007
+ "logits/rejected": -1.7690341472625732,
1008
+ "logps/chosen": 0.0,
1009
+ "logps/rejected": 0.0,
1010
+ "loss": 0.3359,
1011
+ "rewards/accuracies": 0.0,
1012
+ "rewards/chosen": 0.0,
1013
+ "rewards/margins": 0.0,
1014
+ "rewards/rejected": 0.0,
1015
+ "step": 640
1016
+ },
1017
+ {
1018
+ "epoch": 2.6,
1019
+ "learning_rate": 2.761321158169134e-06,
1020
+ "logits/chosen": -1.6524708271026611,
1021
+ "logits/rejected": -1.6524708271026611,
1022
+ "logps/chosen": 0.0,
1023
+ "logps/rejected": 0.0,
1024
+ "loss": 0.4099,
1025
+ "rewards/accuracies": 0.0,
1026
+ "rewards/chosen": 0.0,
1027
+ "rewards/margins": 0.0,
1028
+ "rewards/rejected": 0.0,
1029
+ "step": 650
1030
+ },
1031
+ {
1032
+ "epoch": 2.64,
1033
+ "learning_rate": 2.6917975703170466e-06,
1034
+ "logits/chosen": -1.835680365562439,
1035
+ "logits/rejected": -1.835680365562439,
1036
+ "logps/chosen": 0.0,
1037
+ "logps/rejected": 0.0,
1038
+ "loss": 0.3902,
1039
+ "rewards/accuracies": 0.0,
1040
+ "rewards/chosen": 0.0,
1041
+ "rewards/margins": 0.0,
1042
+ "rewards/rejected": 0.0,
1043
+ "step": 660
1044
+ },
1045
+ {
1046
+ "epoch": 2.68,
1047
+ "learning_rate": 2.6221244244890336e-06,
1048
+ "logits/chosen": -1.7938703298568726,
1049
+ "logits/rejected": -1.7727596759796143,
1050
+ "logps/chosen": -5.777490615844727,
1051
+ "logps/rejected": -3.1091294288635254,
1052
+ "loss": 0.296,
1053
+ "rewards/accuracies": 0.02500000037252903,
1054
+ "rewards/chosen": -0.008814861066639423,
1055
+ "rewards/margins": 0.006191821303218603,
1056
+ "rewards/rejected": -0.01500668190419674,
1057
+ "step": 670
1058
+ },
1059
+ {
1060
+ "epoch": 2.72,
1061
+ "learning_rate": 2.5523560497083927e-06,
1062
+ "logits/chosen": -1.8663837909698486,
1063
+ "logits/rejected": -1.8663837909698486,
1064
+ "logps/chosen": 0.0,
1065
+ "logps/rejected": 0.0,
1066
+ "loss": 0.3112,
1067
+ "rewards/accuracies": 0.0,
1068
+ "rewards/chosen": 0.0,
1069
+ "rewards/margins": 0.0,
1070
+ "rewards/rejected": 0.0,
1071
+ "step": 680
1072
+ },
1073
+ {
1074
+ "epoch": 2.76,
1075
+ "learning_rate": 2.482546849255096e-06,
1076
+ "logits/chosen": -1.8712513446807861,
1077
+ "logits/rejected": -1.8712513446807861,
1078
+ "logps/chosen": 0.0,
1079
+ "logps/rejected": 0.0,
1080
+ "loss": 0.3852,
1081
+ "rewards/accuracies": 0.0,
1082
+ "rewards/chosen": 0.0,
1083
+ "rewards/margins": 0.0,
1084
+ "rewards/rejected": 0.0,
1085
+ "step": 690
1086
+ },
1087
+ {
1088
+ "epoch": 2.8,
1089
+ "learning_rate": 2.4127512582437486e-06,
1090
+ "logits/chosen": -1.9196503162384033,
1091
+ "logits/rejected": -1.9196503162384033,
1092
+ "logps/chosen": 0.0,
1093
+ "logps/rejected": 0.0,
1094
+ "loss": 0.3161,
1095
+ "rewards/accuracies": 0.0,
1096
+ "rewards/chosen": 0.0,
1097
+ "rewards/margins": 0.0,
1098
+ "rewards/rejected": 0.0,
1099
+ "step": 700
1100
+ },
1101
+ {
1102
+ "epoch": 2.8,
1103
+ "eval_logits/chosen": -1.9965624809265137,
1104
+ "eval_logits/rejected": -1.8345471620559692,
1105
+ "eval_logps/chosen": -298.41583251953125,
1106
+ "eval_logps/rejected": -284.232177734375,
1107
+ "eval_loss": 0.10689055174589157,
1108
+ "eval_rewards/accuracies": 0.3799999952316284,
1109
+ "eval_rewards/chosen": -0.317182332277298,
1110
+ "eval_rewards/margins": -0.024258404970169067,
1111
+ "eval_rewards/rejected": -0.2929239571094513,
1112
+ "eval_runtime": 703.3493,
1113
+ "eval_samples_per_second": 2.844,
1114
+ "eval_steps_per_second": 1.422,
1115
+ "step": 700
1116
+ },
1117
+ {
1118
+ "epoch": 2.84,
1119
+ "learning_rate": 2.3430237011767166e-06,
1120
+ "logits/chosen": -1.9961488246917725,
1121
+ "logits/rejected": -1.9961488246917725,
1122
+ "logps/chosen": 0.0,
1123
+ "logps/rejected": 0.0,
1124
+ "loss": 0.3704,
1125
+ "rewards/accuracies": 0.0,
1126
+ "rewards/chosen": 0.0,
1127
+ "rewards/margins": 0.0,
1128
+ "rewards/rejected": 0.0,
1129
+ "step": 710
1130
+ },
1131
+ {
1132
+ "epoch": 2.88,
1133
+ "learning_rate": 2.2734185495055503e-06,
1134
+ "logits/chosen": -1.7007853984832764,
1135
+ "logits/rejected": -1.668602705001831,
1136
+ "logps/chosen": -22.714946746826172,
1137
+ "logps/rejected": -10.431631088256836,
1138
+ "loss": 0.5289,
1139
+ "rewards/accuracies": 0.0,
1140
+ "rewards/chosen": -0.12950357794761658,
1141
+ "rewards/margins": -0.05113900825381279,
1142
+ "rewards/rejected": -0.07836457341909409,
1143
+ "step": 720
1144
+ },
1145
+ {
1146
+ "epoch": 2.92,
1147
+ "learning_rate": 2.2039900792337477e-06,
1148
+ "logits/chosen": -1.8030157089233398,
1149
+ "logits/rejected": -1.8030157089233398,
1150
+ "logps/chosen": 0.0,
1151
+ "logps/rejected": 0.0,
1152
+ "loss": 0.3013,
1153
+ "rewards/accuracies": 0.0,
1154
+ "rewards/chosen": 0.0,
1155
+ "rewards/margins": 0.0,
1156
+ "rewards/rejected": 0.0,
1157
+ "step": 730
1158
+ },
1159
+ {
1160
+ "epoch": 2.96,
1161
+ "learning_rate": 2.134792428593971e-06,
1162
+ "logits/chosen": -1.9432337284088135,
1163
+ "logits/rejected": -1.9432337284088135,
1164
+ "logps/chosen": 0.0,
1165
+ "logps/rejected": 0.0,
1166
+ "loss": 0.3531,
1167
+ "rewards/accuracies": 0.0,
1168
+ "rewards/chosen": 0.0,
1169
+ "rewards/margins": 0.0,
1170
+ "rewards/rejected": 0.0,
1171
+ "step": 740
1172
+ },
1173
+ {
1174
+ "epoch": 3.0,
1175
+ "learning_rate": 2.0658795558326745e-06,
1176
+ "logits/chosen": -1.9992624521255493,
1177
+ "logits/rejected": -2.000507116317749,
1178
+ "logps/chosen": -6.309609889984131,
1179
+ "logps/rejected": -9.059834480285645,
1180
+ "loss": 0.2932,
1181
+ "rewards/accuracies": 0.02500000037252903,
1182
+ "rewards/chosen": -0.007108859717845917,
1183
+ "rewards/margins": 0.019007809460163116,
1184
+ "rewards/rejected": -0.026116669178009033,
1185
+ "step": 750
1186
+ },
1187
+ {
1188
+ "epoch": 3.04,
1189
+ "learning_rate": 1.997305197135089e-06,
1190
+ "logits/chosen": -1.9350688457489014,
1191
+ "logits/rejected": -1.9350688457489014,
1192
+ "logps/chosen": 0.0,
1193
+ "logps/rejected": 0.0,
1194
+ "loss": 0.3087,
1195
+ "rewards/accuracies": 0.0,
1196
+ "rewards/chosen": 0.0,
1197
+ "rewards/margins": 0.0,
1198
+ "rewards/rejected": 0.0,
1199
+ "step": 760
1200
+ },
1201
+ {
1202
+ "epoch": 3.08,
1203
+ "learning_rate": 1.9291228247233607e-06,
1204
+ "logits/chosen": -1.943377137184143,
1205
+ "logits/rejected": -1.943377137184143,
1206
+ "logps/chosen": 0.0,
1207
+ "logps/rejected": 0.0,
1208
+ "loss": 0.2939,
1209
+ "rewards/accuracies": 0.0,
1210
+ "rewards/chosen": 0.0,
1211
+ "rewards/margins": 0.0,
1212
+ "rewards/rejected": 0.0,
1213
+ "step": 770
1214
+ },
1215
+ {
1216
+ "epoch": 3.12,
1217
+ "learning_rate": 1.8613856051605242e-06,
1218
+ "logits/chosen": -1.9116928577423096,
1219
+ "logits/rejected": -1.9116928577423096,
1220
+ "logps/chosen": 0.0,
1221
+ "logps/rejected": 0.0,
1222
+ "loss": 0.4025,
1223
+ "rewards/accuracies": 0.0,
1224
+ "rewards/chosen": 0.0,
1225
+ "rewards/margins": 0.0,
1226
+ "rewards/rejected": 0.0,
1227
+ "step": 780
1228
+ },
1229
+ {
1230
+ "epoch": 3.16,
1231
+ "learning_rate": 1.7941463578928088e-06,
1232
+ "logits/chosen": -1.9572219848632812,
1233
+ "logits/rejected": -1.9572219848632812,
1234
+ "logps/chosen": 0.0,
1235
+ "logps/rejected": 0.0,
1236
+ "loss": 0.3038,
1237
+ "rewards/accuracies": 0.0,
1238
+ "rewards/chosen": 0.0,
1239
+ "rewards/margins": 0.0,
1240
+ "rewards/rejected": 0.0,
1241
+ "step": 790
1242
+ },
1243
+ {
1244
+ "epoch": 3.2,
1245
+ "learning_rate": 1.7274575140626318e-06,
1246
+ "logits/chosen": -2.023911952972412,
1247
+ "logits/rejected": -2.023911952972412,
1248
+ "logps/chosen": 0.0,
1249
+ "logps/rejected": 0.0,
1250
+ "loss": 0.3852,
1251
+ "rewards/accuracies": 0.0,
1252
+ "rewards/chosen": 0.0,
1253
+ "rewards/margins": 0.0,
1254
+ "rewards/rejected": 0.0,
1255
+ "step": 800
1256
+ },
1257
+ {
1258
+ "epoch": 3.2,
1259
+ "eval_logits/chosen": -2.001863479614258,
1260
+ "eval_logits/rejected": -1.8408547639846802,
1261
+ "eval_logps/chosen": -289.7388000488281,
1262
+ "eval_logps/rejected": -275.51025390625,
1263
+ "eval_loss": 0.09182017296552658,
1264
+ "eval_rewards/accuracies": 0.3684999942779541,
1265
+ "eval_rewards/chosen": -0.2304122895002365,
1266
+ "eval_rewards/margins": -0.02470785565674305,
1267
+ "eval_rewards/rejected": -0.2057044357061386,
1268
+ "eval_runtime": 704.5723,
1269
+ "eval_samples_per_second": 2.839,
1270
+ "eval_steps_per_second": 1.419,
1271
+ "step": 800
1272
+ },
1273
+ {
1274
+ "epoch": 3.24,
1275
+ "learning_rate": 1.661371075624363e-06,
1276
+ "logits/chosen": -1.9029079675674438,
1277
+ "logits/rejected": -1.9029079675674438,
1278
+ "logps/chosen": 0.0,
1279
+ "logps/rejected": 0.0,
1280
+ "loss": 0.2964,
1281
+ "rewards/accuracies": 0.0,
1282
+ "rewards/chosen": 0.0,
1283
+ "rewards/margins": 0.0,
1284
+ "rewards/rejected": 0.0,
1285
+ "step": 810
1286
+ },
1287
+ {
1288
+ "epoch": 3.28,
1289
+ "learning_rate": 1.5959385747947697e-06,
1290
+ "logits/chosen": -1.9884204864501953,
1291
+ "logits/rejected": -1.9884204864501953,
1292
+ "logps/chosen": 0.0,
1293
+ "logps/rejected": 0.0,
1294
+ "loss": 0.3433,
1295
+ "rewards/accuracies": 0.0,
1296
+ "rewards/chosen": 0.0,
1297
+ "rewards/margins": 0.0,
1298
+ "rewards/rejected": 0.0,
1299
+ "step": 820
1300
+ },
1301
+ {
1302
+ "epoch": 3.32,
1303
+ "learning_rate": 1.5312110338697427e-06,
1304
+ "logits/chosen": -1.9079113006591797,
1305
+ "logits/rejected": -1.9079113006591797,
1306
+ "logps/chosen": 0.0,
1307
+ "logps/rejected": 0.0,
1308
+ "loss": 0.3136,
1309
+ "rewards/accuracies": 0.0,
1310
+ "rewards/chosen": 0.0,
1311
+ "rewards/margins": 0.0,
1312
+ "rewards/rejected": 0.0,
1313
+ "step": 830
1314
+ },
1315
+ {
1316
+ "epoch": 3.36,
1317
+ "learning_rate": 1.467238925438646e-06,
1318
+ "logits/chosen": -1.6772514581680298,
1319
+ "logits/rejected": -1.6457149982452393,
1320
+ "logps/chosen": -14.745180130004883,
1321
+ "logps/rejected": -14.53711223602295,
1322
+ "loss": 0.4833,
1323
+ "rewards/accuracies": 0.02500000037252903,
1324
+ "rewards/chosen": -0.04980592057108879,
1325
+ "rewards/margins": 0.06961346417665482,
1326
+ "rewards/rejected": -0.11941938102245331,
1327
+ "step": 840
1328
+ },
1329
+ {
1330
+ "epoch": 3.4,
1331
+ "learning_rate": 1.4040721330273063e-06,
1332
+ "logits/chosen": -1.7772667407989502,
1333
+ "logits/rejected": -1.7781718969345093,
1334
+ "logps/chosen": -8.622848510742188,
1335
+ "logps/rejected": -12.08240032196045,
1336
+ "loss": 0.3293,
1337
+ "rewards/accuracies": 0.05000000074505806,
1338
+ "rewards/chosen": -0.01604883186519146,
1339
+ "rewards/margins": 0.024528637528419495,
1340
+ "rewards/rejected": -0.040577471256256104,
1341
+ "step": 850
1342
+ },
1343
+ {
1344
+ "epoch": 3.44,
1345
+ "learning_rate": 1.3417599122003464e-06,
1346
+ "logits/chosen": -1.9655358791351318,
1347
+ "logits/rejected": -1.9655358791351318,
1348
+ "logps/chosen": 0.0,
1349
+ "logps/rejected": 0.0,
1350
+ "loss": 0.3828,
1351
+ "rewards/accuracies": 0.0,
1352
+ "rewards/chosen": 0.0,
1353
+ "rewards/margins": 0.0,
1354
+ "rewards/rejected": 0.0,
1355
+ "step": 860
1356
+ },
1357
+ {
1358
+ "epoch": 3.48,
1359
+ "learning_rate": 1.280350852153168e-06,
1360
+ "logits/chosen": -2.0000901222229004,
1361
+ "logits/rejected": -2.0000901222229004,
1362
+ "logps/chosen": 0.0,
1363
+ "logps/rejected": 0.0,
1364
+ "loss": 0.3211,
1365
+ "rewards/accuracies": 0.0,
1366
+ "rewards/chosen": 0.0,
1367
+ "rewards/margins": 0.0,
1368
+ "rewards/rejected": 0.0,
1369
+ "step": 870
1370
+ },
1371
+ {
1372
+ "epoch": 3.52,
1373
+ "learning_rate": 1.2198928378235717e-06,
1374
+ "logits/chosen": -1.8563473224639893,
1375
+ "logits/rejected": -1.8563473224639893,
1376
+ "logps/chosen": 0.0,
1377
+ "logps/rejected": 0.0,
1378
+ "loss": 0.3408,
1379
+ "rewards/accuracies": 0.0,
1380
+ "rewards/chosen": 0.0,
1381
+ "rewards/margins": 0.0,
1382
+ "rewards/rejected": 0.0,
1383
+ "step": 880
1384
+ },
1385
+ {
1386
+ "epoch": 3.56,
1387
+ "learning_rate": 1.160433012552508e-06,
1388
+ "logits/chosen": -1.8875033855438232,
1389
+ "logits/rejected": -1.8875033855438232,
1390
+ "logps/chosen": 0.0,
1391
+ "logps/rejected": 0.0,
1392
+ "loss": 0.2791,
1393
+ "rewards/accuracies": 0.0,
1394
+ "rewards/chosen": 0.0,
1395
+ "rewards/margins": 0.0,
1396
+ "rewards/rejected": 0.0,
1397
+ "step": 890
1398
+ },
1399
+ {
1400
+ "epoch": 3.6,
1401
+ "learning_rate": 1.1020177413231334e-06,
1402
+ "logits/chosen": -1.8840911388397217,
1403
+ "logits/rejected": -1.8840911388397217,
1404
+ "logps/chosen": 0.0,
1405
+ "logps/rejected": 0.0,
1406
+ "loss": 0.3359,
1407
+ "rewards/accuracies": 0.0,
1408
+ "rewards/chosen": 0.0,
1409
+ "rewards/margins": 0.0,
1410
+ "rewards/rejected": 0.0,
1411
+ "step": 900
1412
+ },
1413
+ {
1414
+ "epoch": 3.6,
1415
+ "eval_logits/chosen": -1.983768343925476,
1416
+ "eval_logits/rejected": -1.823969841003418,
1417
+ "eval_logps/chosen": -287.3323059082031,
1418
+ "eval_logps/rejected": -271.89581298828125,
1419
+ "eval_loss": 0.09828919917345047,
1420
+ "eval_rewards/accuracies": 0.34299999475479126,
1421
+ "eval_rewards/chosen": -0.20634719729423523,
1422
+ "eval_rewards/margins": -0.036787137389183044,
1423
+ "eval_rewards/rejected": -0.16956007480621338,
1424
+ "eval_runtime": 702.0734,
1425
+ "eval_samples_per_second": 2.849,
1426
+ "eval_steps_per_second": 1.424,
1427
+ "step": 900
1428
+ },
1429
+ {
1430
+ "epoch": 3.64,
1431
+ "learning_rate": 1.0446925746067768e-06,
1432
+ "logits/chosen": -1.8285129070281982,
1433
+ "logits/rejected": -1.8285129070281982,
1434
+ "logps/chosen": 0.0,
1435
+ "logps/rejected": 0.0,
1436
+ "loss": 0.3383,
1437
+ "rewards/accuracies": 0.0,
1438
+ "rewards/chosen": 0.0,
1439
+ "rewards/margins": 0.0,
1440
+ "rewards/rejected": 0.0,
1441
+ "step": 910
1442
+ },
1443
+ {
1444
+ "epoch": 3.68,
1445
+ "learning_rate": 9.88502212844063e-07,
1446
+ "logits/chosen": -1.877623200416565,
1447
+ "logits/rejected": -1.877623200416565,
1448
+ "logps/chosen": 0.0,
1449
+ "logps/rejected": 0.0,
1450
+ "loss": 0.3433,
1451
+ "rewards/accuracies": 0.0,
1452
+ "rewards/chosen": 0.0,
1453
+ "rewards/margins": 0.0,
1454
+ "rewards/rejected": 0.0,
1455
+ "step": 920
1456
+ },
1457
+ {
1458
+ "epoch": 3.72,
1459
+ "learning_rate": 9.334904715888496e-07,
1460
+ "logits/chosen": -1.7445094585418701,
1461
+ "logits/rejected": -1.7445094585418701,
1462
+ "logps/chosen": 0.0,
1463
+ "logps/rejected": 0.0,
1464
+ "loss": 0.3606,
1465
+ "rewards/accuracies": 0.0,
1466
+ "rewards/chosen": 0.0,
1467
+ "rewards/margins": 0.0,
1468
+ "rewards/rejected": 0.0,
1469
+ "step": 930
1470
+ },
1471
+ {
1472
+ "epoch": 3.76,
1473
+ "learning_rate": 8.797002473421729e-07,
1474
+ "logits/chosen": -1.8405430316925049,
1475
+ "logits/rejected": -1.8405430316925049,
1476
+ "logps/chosen": 0.0,
1477
+ "logps/rejected": 0.0,
1478
+ "loss": 0.2939,
1479
+ "rewards/accuracies": 0.0,
1480
+ "rewards/chosen": 0.0,
1481
+ "rewards/margins": 0.0,
1482
+ "rewards/rejected": 0.0,
1483
+ "step": 940
1484
+ },
1485
+ {
1486
+ "epoch": 3.8,
1487
+ "learning_rate": 8.271734841028553e-07,
1488
+ "logits/chosen": -1.934522271156311,
1489
+ "logits/rejected": -1.934522271156311,
1490
+ "logps/chosen": 0.0,
1491
+ "logps/rejected": 0.0,
1492
+ "loss": 0.3334,
1493
+ "rewards/accuracies": 0.0,
1494
+ "rewards/chosen": 0.0,
1495
+ "rewards/margins": 0.0,
1496
+ "rewards/rejected": 0.0,
1497
+ "step": 950
1498
+ },
1499
+ {
1500
+ "epoch": 3.84,
1501
+ "learning_rate": 7.759511406608255e-07,
1502
+ "logits/chosen": -2.002326011657715,
1503
+ "logits/rejected": -2.002326011657715,
1504
+ "logps/chosen": 0.0,
1505
+ "logps/rejected": 0.0,
1506
+ "loss": 0.3556,
1507
+ "rewards/accuracies": 0.0,
1508
+ "rewards/chosen": 0.0,
1509
+ "rewards/margins": 0.0,
1510
+ "rewards/rejected": 0.0,
1511
+ "step": 960
1512
+ },
1513
+ {
1514
+ "epoch": 3.88,
1515
+ "learning_rate": 7.260731586586983e-07,
1516
+ "logits/chosen": -1.997032880783081,
1517
+ "logits/rejected": -1.997032880783081,
1518
+ "logps/chosen": 0.0,
1519
+ "logps/rejected": 0.0,
1520
+ "loss": 0.3408,
1521
+ "rewards/accuracies": 0.0,
1522
+ "rewards/chosen": 0.0,
1523
+ "rewards/margins": 0.0,
1524
+ "rewards/rejected": 0.0,
1525
+ "step": 970
1526
+ },
1527
+ {
1528
+ "epoch": 3.92,
1529
+ "learning_rate": 6.775784314464717e-07,
1530
+ "logits/chosen": -1.9426374435424805,
1531
+ "logits/rejected": -1.9426374435424805,
1532
+ "logps/chosen": 0.0,
1533
+ "logps/rejected": 0.0,
1534
+ "loss": 0.4149,
1535
+ "rewards/accuracies": 0.0,
1536
+ "rewards/chosen": 0.0,
1537
+ "rewards/margins": 0.0,
1538
+ "rewards/rejected": 0.0,
1539
+ "step": 980
1540
+ },
1541
+ {
1542
+ "epoch": 3.96,
1543
+ "learning_rate": 6.305047737536707e-07,
1544
+ "logits/chosen": -1.7942962646484375,
1545
+ "logits/rejected": -1.7942962646484375,
1546
+ "logps/chosen": 0.0,
1547
+ "logps/rejected": 0.0,
1548
+ "loss": 0.3927,
1549
+ "rewards/accuracies": 0.0,
1550
+ "rewards/chosen": 0.0,
1551
+ "rewards/margins": 0.0,
1552
+ "rewards/rejected": 0.0,
1553
+ "step": 990
1554
+ },
1555
+ {
1556
+ "epoch": 4.0,
1557
+ "learning_rate": 5.848888922025553e-07,
1558
+ "logits/chosen": -1.9319026470184326,
1559
+ "logits/rejected": -1.9107824563980103,
1560
+ "logps/chosen": -4.969229698181152,
1561
+ "logps/rejected": -3.057560682296753,
1562
+ "loss": 0.3701,
1563
+ "rewards/accuracies": 0.02500000037252903,
1564
+ "rewards/chosen": -0.0007322501624003053,
1565
+ "rewards/margins": 0.013758744113147259,
1566
+ "rewards/rejected": -0.01449099462479353,
1567
+ "step": 1000
1568
+ },
1569
+ {
1570
+ "epoch": 4.0,
1571
+ "eval_logits/chosen": -1.9837957620620728,
1572
+ "eval_logits/rejected": -1.8240842819213867,
1573
+ "eval_logps/chosen": -287.31591796875,
1574
+ "eval_logps/rejected": -271.8734130859375,
1575
+ "eval_loss": 0.09820234775543213,
1576
+ "eval_rewards/accuracies": 0.34549999237060547,
1577
+ "eval_rewards/chosen": -0.20618313550949097,
1578
+ "eval_rewards/margins": -0.03684700280427933,
1579
+ "eval_rewards/rejected": -0.16933614015579224,
1580
+ "eval_runtime": 705.2974,
1581
+ "eval_samples_per_second": 2.836,
1582
+ "eval_steps_per_second": 1.418,
1583
+ "step": 1000
1584
+ },
1585
+ {
1586
+ "epoch": 4.04,
1587
+ "learning_rate": 5.407663566854008e-07,
1588
+ "logits/chosen": -1.8698651790618896,
1589
+ "logits/rejected": -1.8484447002410889,
1590
+ "logps/chosen": -16.939109802246094,
1591
+ "logps/rejected": -9.7978515625,
1592
+ "loss": 0.311,
1593
+ "rewards/accuracies": 0.02500000037252903,
1594
+ "rewards/chosen": -0.07174522429704666,
1595
+ "rewards/margins": 0.0002815544721670449,
1596
+ "rewards/rejected": -0.07202677428722382,
1597
+ "step": 1010
1598
+ },
1599
+ {
1600
+ "epoch": 4.08,
1601
+ "learning_rate": 4.981715726281666e-07,
1602
+ "logits/chosen": -1.9195115566253662,
1603
+ "logits/rejected": -1.9195115566253662,
1604
+ "logps/chosen": 0.0,
1605
+ "logps/rejected": 0.0,
1606
+ "loss": 0.2618,
1607
+ "rewards/accuracies": 0.0,
1608
+ "rewards/chosen": 0.0,
1609
+ "rewards/margins": 0.0,
1610
+ "rewards/rejected": 0.0,
1611
+ "step": 1020
1612
+ },
1613
+ {
1614
+ "epoch": 4.12,
1615
+ "learning_rate": 4.5713775416217884e-07,
1616
+ "logits/chosen": -1.9020694494247437,
1617
+ "logits/rejected": -1.9020694494247437,
1618
+ "logps/chosen": 0.0,
1619
+ "logps/rejected": 0.0,
1620
+ "loss": 0.3655,
1621
+ "rewards/accuracies": 0.0,
1622
+ "rewards/chosen": 0.0,
1623
+ "rewards/margins": 0.0,
1624
+ "rewards/rejected": 0.0,
1625
+ "step": 1030
1626
+ },
1627
+ {
1628
+ "epoch": 4.16,
1629
+ "learning_rate": 4.1769689822475147e-07,
1630
+ "logits/chosen": -2.1050262451171875,
1631
+ "logits/rejected": -2.1050262451171875,
1632
+ "logps/chosen": 0.0,
1633
+ "logps/rejected": 0.0,
1634
+ "loss": 0.3359,
1635
+ "rewards/accuracies": 0.0,
1636
+ "rewards/chosen": 0.0,
1637
+ "rewards/margins": 0.0,
1638
+ "rewards/rejected": 0.0,
1639
+ "step": 1040
1640
+ },
1641
+ {
1642
+ "epoch": 4.2,
1643
+ "learning_rate": 3.798797596089351e-07,
1644
+ "logits/chosen": -2.110747814178467,
1645
+ "logits/rejected": -2.110747814178467,
1646
+ "logps/chosen": 0.0,
1647
+ "logps/rejected": 0.0,
1648
+ "loss": 0.3507,
1649
+ "rewards/accuracies": 0.0,
1650
+ "rewards/chosen": 0.0,
1651
+ "rewards/margins": 0.0,
1652
+ "rewards/rejected": 0.0,
1653
+ "step": 1050
1654
+ },
1655
+ {
1656
+ "epoch": 4.24,
1657
+ "learning_rate": 3.4371582698185636e-07,
1658
+ "logits/chosen": -1.8137989044189453,
1659
+ "logits/rejected": -1.8137989044189453,
1660
+ "logps/chosen": 0.0,
1661
+ "logps/rejected": 0.0,
1662
+ "loss": 0.3235,
1663
+ "rewards/accuracies": 0.0,
1664
+ "rewards/chosen": 0.0,
1665
+ "rewards/margins": 0.0,
1666
+ "rewards/rejected": 0.0,
1667
+ "step": 1060
1668
+ },
1669
+ {
1670
+ "epoch": 4.28,
1671
+ "learning_rate": 3.092332998903416e-07,
1672
+ "logits/chosen": -2.048144817352295,
1673
+ "logits/rejected": -2.048144817352295,
1674
+ "logps/chosen": 0.0,
1675
+ "logps/rejected": 0.0,
1676
+ "loss": 0.3606,
1677
+ "rewards/accuracies": 0.0,
1678
+ "rewards/chosen": 0.0,
1679
+ "rewards/margins": 0.0,
1680
+ "rewards/rejected": 0.0,
1681
+ "step": 1070
1682
+ },
1683
+ {
1684
+ "epoch": 4.32,
1685
+ "learning_rate": 2.764590667717562e-07,
1686
+ "logits/chosen": -2.115201473236084,
1687
+ "logits/rejected": -2.115201473236084,
1688
+ "logps/chosen": 0.0,
1689
+ "logps/rejected": 0.0,
1690
+ "loss": 0.3902,
1691
+ "rewards/accuracies": 0.0,
1692
+ "rewards/chosen": 0.0,
1693
+ "rewards/margins": 0.0,
1694
+ "rewards/rejected": 0.0,
1695
+ "step": 1080
1696
+ },
1697
+ {
1698
+ "epoch": 4.36,
1699
+ "learning_rate": 2.454186839872158e-07,
1700
+ "logits/chosen": -1.7584993839263916,
1701
+ "logits/rejected": -1.7584993839263916,
1702
+ "logps/chosen": 0.0,
1703
+ "logps/rejected": 0.0,
1704
+ "loss": 0.3803,
1705
+ "rewards/accuracies": 0.0,
1706
+ "rewards/chosen": 0.0,
1707
+ "rewards/margins": 0.0,
1708
+ "rewards/rejected": 0.0,
1709
+ "step": 1090
1710
+ },
1711
+ {
1712
+ "epoch": 4.4,
1713
+ "learning_rate": 2.1613635589349756e-07,
1714
+ "logits/chosen": -2.012417793273926,
1715
+ "logits/rejected": -2.012417793273926,
1716
+ "logps/chosen": 0.0,
1717
+ "logps/rejected": 0.0,
1718
+ "loss": 0.4025,
1719
+ "rewards/accuracies": 0.0,
1720
+ "rewards/chosen": 0.0,
1721
+ "rewards/margins": 0.0,
1722
+ "rewards/rejected": 0.0,
1723
+ "step": 1100
1724
+ },
1725
+ {
1726
+ "epoch": 4.4,
1727
+ "eval_logits/chosen": -1.9857844114303589,
1728
+ "eval_logits/rejected": -1.8259761333465576,
1729
+ "eval_logps/chosen": -287.1649475097656,
1730
+ "eval_logps/rejected": -271.81268310546875,
1731
+ "eval_loss": 0.09747015684843063,
1732
+ "eval_rewards/accuracies": 0.34549999237060547,
1733
+ "eval_rewards/chosen": -0.20467324554920197,
1734
+ "eval_rewards/margins": -0.03594454750418663,
1735
+ "eval_rewards/rejected": -0.16872867941856384,
1736
+ "eval_runtime": 705.0834,
1737
+ "eval_samples_per_second": 2.837,
1738
+ "eval_steps_per_second": 1.418,
1739
+ "step": 1100
1740
+ },
1741
+ {
1742
+ "epoch": 4.44,
1743
+ "learning_rate": 1.8863491596921745e-07,
1744
+ "logits/chosen": -1.8358827829360962,
1745
+ "logits/rejected": -1.8358827829360962,
1746
+ "logps/chosen": 0.0,
1747
+ "logps/rejected": 0.0,
1748
+ "loss": 0.3408,
1749
+ "rewards/accuracies": 0.0,
1750
+ "rewards/chosen": 0.0,
1751
+ "rewards/margins": 0.0,
1752
+ "rewards/rejected": 0.0,
1753
+ "step": 1110
1754
+ },
1755
+ {
1756
+ "epoch": 4.48,
1757
+ "learning_rate": 1.629358090099639e-07,
1758
+ "logits/chosen": -1.8439744710922241,
1759
+ "logits/rejected": -1.8439744710922241,
1760
+ "logps/chosen": 0.0,
1761
+ "logps/rejected": 0.0,
1762
+ "loss": 0.2692,
1763
+ "rewards/accuracies": 0.0,
1764
+ "rewards/chosen": 0.0,
1765
+ "rewards/margins": 0.0,
1766
+ "rewards/rejected": 0.0,
1767
+ "step": 1120
1768
+ },
1769
+ {
1770
+ "epoch": 4.52,
1771
+ "learning_rate": 1.3905907440629752e-07,
1772
+ "logits/chosen": -1.9981298446655273,
1773
+ "logits/rejected": -1.9981298446655273,
1774
+ "logps/chosen": 0.0,
1775
+ "logps/rejected": 0.0,
1776
+ "loss": 0.3112,
1777
+ "rewards/accuracies": 0.0,
1778
+ "rewards/chosen": 0.0,
1779
+ "rewards/margins": 0.0,
1780
+ "rewards/rejected": 0.0,
1781
+ "step": 1130
1782
+ },
1783
+ {
1784
+ "epoch": 4.56,
1785
+ "learning_rate": 1.1702333051763271e-07,
1786
+ "logits/chosen": -2.0399842262268066,
1787
+ "logits/rejected": -2.0399842262268066,
1788
+ "logps/chosen": 0.0,
1789
+ "logps/rejected": 0.0,
1790
+ "loss": 0.3581,
1791
+ "rewards/accuracies": 0.0,
1792
+ "rewards/chosen": 0.0,
1793
+ "rewards/margins": 0.0,
1794
+ "rewards/rejected": 0.0,
1795
+ "step": 1140
1796
+ },
1797
+ {
1798
+ "epoch": 4.6,
1799
+ "learning_rate": 9.684576015420277e-08,
1800
+ "logits/chosen": -1.9201631546020508,
1801
+ "logits/rejected": -1.9201631546020508,
1802
+ "logps/chosen": 0.0,
1803
+ "logps/rejected": 0.0,
1804
+ "loss": 0.3852,
1805
+ "rewards/accuracies": 0.0,
1806
+ "rewards/chosen": 0.0,
1807
+ "rewards/margins": 0.0,
1808
+ "rewards/rejected": 0.0,
1809
+ "step": 1150
1810
+ },
1811
+ {
1812
+ "epoch": 4.64,
1813
+ "learning_rate": 7.854209717842231e-08,
1814
+ "logits/chosen": -1.6203396320343018,
1815
+ "logits/rejected": -1.6199716329574585,
1816
+ "logps/chosen": -2.1602871417999268,
1817
+ "logps/rejected": -3.176687240600586,
1818
+ "loss": 0.3391,
1819
+ "rewards/accuracies": 0.02500000037252903,
1820
+ "rewards/chosen": -0.007410462014377117,
1821
+ "rewards/margins": 0.0085915457457304,
1822
+ "rewards/rejected": -0.016002008691430092,
1823
+ "step": 1160
1824
+ },
1825
+ {
1826
+ "epoch": 4.68,
1827
+ "learning_rate": 6.212661423609184e-08,
1828
+ "logits/chosen": -1.7701547145843506,
1829
+ "logits/rejected": -1.7701547145843506,
1830
+ "logps/chosen": 0.0,
1831
+ "logps/rejected": 0.0,
1832
+ "loss": 0.3655,
1833
+ "rewards/accuracies": 0.0,
1834
+ "rewards/chosen": 0.0,
1835
+ "rewards/margins": 0.0,
1836
+ "rewards/rejected": 0.0,
1837
+ "step": 1170
1838
+ },
1839
+ {
1840
+ "epoch": 4.72,
1841
+ "learning_rate": 4.761211162702117e-08,
1842
+ "logits/chosen": -1.9981458187103271,
1843
+ "logits/rejected": -1.976782202720642,
1844
+ "logps/chosen": -4.941376686096191,
1845
+ "logps/rejected": -3.0533976554870605,
1846
+ "loss": 0.3454,
1847
+ "rewards/accuracies": 0.02500000037252903,
1848
+ "rewards/chosen": -0.0004537239146884531,
1849
+ "rewards/margins": 0.013995639979839325,
1850
+ "rewards/rejected": -0.014449363574385643,
1851
+ "step": 1180
1852
+ },
1853
+ {
1854
+ "epoch": 4.76,
1855
+ "learning_rate": 3.5009907323737826e-08,
1856
+ "logits/chosen": -1.8368213176727295,
1857
+ "logits/rejected": -1.8368213176727295,
1858
+ "logps/chosen": 0.0,
1859
+ "logps/rejected": 0.0,
1860
+ "loss": 0.3334,
1861
+ "rewards/accuracies": 0.0,
1862
+ "rewards/chosen": 0.0,
1863
+ "rewards/margins": 0.0,
1864
+ "rewards/rejected": 0.0,
1865
+ "step": 1190
1866
+ },
1867
+ {
1868
+ "epoch": 4.8,
1869
+ "learning_rate": 2.4329828146074096e-08,
1870
+ "logits/chosen": -1.806884765625,
1871
+ "logits/rejected": -1.806884765625,
1872
+ "logps/chosen": 0.0,
1873
+ "logps/rejected": 0.0,
1874
+ "loss": 0.3754,
1875
+ "rewards/accuracies": 0.0,
1876
+ "rewards/chosen": 0.0,
1877
+ "rewards/margins": 0.0,
1878
+ "rewards/rejected": 0.0,
1879
+ "step": 1200
1880
+ },
1881
+ {
1882
+ "epoch": 4.8,
1883
+ "eval_logits/chosen": -1.9853414297103882,
1884
+ "eval_logits/rejected": -1.825589895248413,
1885
+ "eval_logps/chosen": -287.1330871582031,
1886
+ "eval_logps/rejected": -271.7890319824219,
1887
+ "eval_loss": 0.09735800325870514,
1888
+ "eval_rewards/accuracies": 0.3440000116825104,
1889
+ "eval_rewards/chosen": -0.20435477793216705,
1890
+ "eval_rewards/margins": -0.03586255759000778,
1891
+ "eval_rewards/rejected": -0.16849222779273987,
1892
+ "eval_runtime": 702.0059,
1893
+ "eval_samples_per_second": 2.849,
1894
+ "eval_steps_per_second": 1.424,
1895
+ "step": 1200
1896
+ },
1897
+ {
1898
+ "epoch": 4.84,
1899
+ "learning_rate": 1.5580202098509078e-08,
1900
+ "logits/chosen": -1.985327124595642,
1901
+ "logits/rejected": -1.9865849018096924,
1902
+ "logps/chosen": -6.517402648925781,
1903
+ "logps/rejected": -8.804891586303711,
1904
+ "loss": 0.3148,
1905
+ "rewards/accuracies": 0.02500000037252903,
1906
+ "rewards/chosen": -0.009186786599457264,
1907
+ "rewards/margins": 0.014380457811057568,
1908
+ "rewards/rejected": -0.02356724441051483,
1909
+ "step": 1210
1910
+ },
1911
+ {
1912
+ "epoch": 4.88,
1913
+ "learning_rate": 8.767851876239075e-09,
1914
+ "logits/chosen": -1.8783695697784424,
1915
+ "logits/rejected": -1.8783695697784424,
1916
+ "logps/chosen": 0.0,
1917
+ "logps/rejected": 0.0,
1918
+ "loss": 0.3112,
1919
+ "rewards/accuracies": 0.0,
1920
+ "rewards/chosen": 0.0,
1921
+ "rewards/margins": 0.0,
1922
+ "rewards/rejected": 0.0,
1923
+ "step": 1220
1924
+ },
1925
+ {
1926
+ "epoch": 4.92,
1927
+ "learning_rate": 3.8980895450474455e-09,
1928
+ "logits/chosen": -1.9094947576522827,
1929
+ "logits/rejected": -1.9094947576522827,
1930
+ "logps/chosen": 0.0,
1931
+ "logps/rejected": 0.0,
1932
+ "loss": 0.284,
1933
+ "rewards/accuracies": 0.0,
1934
+ "rewards/chosen": 0.0,
1935
+ "rewards/margins": 0.0,
1936
+ "rewards/rejected": 0.0,
1937
+ "step": 1230
1938
+ },
1939
+ {
1940
+ "epoch": 4.96,
1941
+ "learning_rate": 9.747123991141193e-10,
1942
+ "logits/chosen": -1.9814590215682983,
1943
+ "logits/rejected": -1.9814590215682983,
1944
+ "logps/chosen": 0.0,
1945
+ "logps/rejected": 0.0,
1946
+ "loss": 0.3606,
1947
+ "rewards/accuracies": 0.0,
1948
+ "rewards/chosen": 0.0,
1949
+ "rewards/margins": 0.0,
1950
+ "rewards/rejected": 0.0,
1951
+ "step": 1240
1952
+ },
1953
+ {
1954
+ "epoch": 5.0,
1955
+ "learning_rate": 0.0,
1956
+ "logits/chosen": -1.8839404582977295,
1957
+ "logits/rejected": -1.8839404582977295,
1958
+ "logps/chosen": 0.0,
1959
+ "logps/rejected": 0.0,
1960
+ "loss": 0.3383,
1961
+ "rewards/accuracies": 0.0,
1962
+ "rewards/chosen": 0.0,
1963
+ "rewards/margins": 0.0,
1964
+ "rewards/rejected": 0.0,
1965
+ "step": 1250
1966
+ },
1967
+ {
1968
+ "epoch": 5.0,
1969
+ "step": 1250,
1970
+ "total_flos": 0.0,
1971
+ "train_loss": 0.3515237546205521,
1972
+ "train_runtime": 12848.6235,
1973
+ "train_samples_per_second": 0.389,
1974
+ "train_steps_per_second": 0.097
1975
+ }
1976
+ ],
1977
+ "logging_steps": 10,
1978
+ "max_steps": 1250,
1979
+ "num_input_tokens_seen": 0,
1980
+ "num_train_epochs": 5,
1981
+ "save_steps": 100,
1982
+ "total_flos": 0.0,
1983
+ "train_batch_size": 2,
1984
+ "trial_name": null,
1985
+ "trial_params": null
1986
+ }