sambar committed
Commit 4fc4e61
1 Parent(s): e298040

Model save

README.md ADDED
@@ -0,0 +1,75 @@
+ ---
+ license: apache-2.0
+ base_model: mistralai/Mistral-7B-v0.1
+ tags:
+ - trl
+ - dpo
+ - generated_from_trainer
+ model-index:
+ - name: zephyr-7b-ipo-lora
+ results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # zephyr-7b-ipo-lora
+
+ This model is a fine-tuned version of [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) on an unspecified dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 18.3397
+ - Rewards/chosen: 0.0292
+ - Rewards/rejected: -0.1006
+ - Rewards/accuracies: 0.7200
+ - Rewards/margins: 0.1298
+ - Logps/rejected: -212.0379
+ - Logps/chosen: -255.2319
+ - Logits/rejected: -1.7967
+ - Logits/chosen: -2.0243
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-07
+ - train_batch_size: 2
+ - eval_batch_size: 4
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 4
+ - gradient_accumulation_steps: 32
+ - total_train_batch_size: 256
+ - total_eval_batch_size: 16
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 3
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
+ |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
+ | 19.3937 | 1.0 | 242 | 19.3450 | 0.0291 | -0.0729 | 0.7040 | 0.1020 | -211.7608 | -255.2333 | -1.7962 | -2.0237 |
+ | 19.376 | 2.0 | 484 | 18.8198 | 0.0270 | -0.0949 | 0.7020 | 0.1218 | -211.9809 | -255.2546 | -1.7954 | -2.0232 |
+ | 18.4503 | 3.0 | 726 | 18.3397 | 0.0292 | -0.1006 | 0.7200 | 0.1298 | -212.0379 | -255.2319 | -1.7967 | -2.0243 |
+
+
+ ### Framework versions
+
+ - Transformers 4.35.0
+ - Pytorch 2.1.2+cu121
+ - Datasets 2.14.6
+ - Tokenizers 0.14.1
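
Note: the card above describes a LoRA adapter for mistralai/Mistral-7B-v0.1, not a standalone model. Below is a minimal sketch of loading such an adapter for inference with transformers and peft; it is not part of the commit, and the adapter path is a placeholder for a local clone or download of this repository.

```python
# Minimal sketch: attach this LoRA adapter to the Mistral-7B base model for inference.
# ADAPTER_PATH is a hypothetical placeholder, not a confirmed repo id.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

ADAPTER_PATH = "./zephyr-7b-ipo-lora"  # placeholder: local path to this repository

base = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-v0.1", torch_dtype=torch.bfloat16, device_map="auto"
)
model = PeftModel.from_pretrained(base, ADAPTER_PATH)      # wrap base model with the adapter
tokenizer = AutoTokenizer.from_pretrained(ADAPTER_PATH)    # tokenizer files are in this commit

# Prompt format follows the chat template shipped in tokenizer_config.json.
prompt = "<|user|>\nWhat does IPO fine-tuning change compared to DPO?</s>\n<|assistant|>\n"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```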
adapter_config.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "mistralai/Mistral-7B-v0.1",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 64,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "q_proj",
+ "k_proj",
+ "o_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
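
For reference, the adapter_config.json above corresponds to the following peft `LoraConfig`. This is a sketch of how an equivalent config could be built in code, not something included in the commit.

```python
# Sketch: a peft LoraConfig mirroring the adapter_config.json above.
from peft import LoraConfig

lora_config = LoraConfig(
    r=64,                      # LoRA rank ("r" in adapter_config.json)
    lora_alpha=16,             # scaling factor
    lora_dropout=0.1,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],  # attention projections
)
```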
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:45b0ffe468c3e9243217ebbff5d9460b1ad173240bf33b1b453fc63c76a932d7
+ size 218138576
all_results.json ADDED
@@ -0,0 +1,21 @@
+ {
+ "epoch": 3.0,
+ "eval_logits/chosen": -2.0242695808410645,
+ "eval_logits/rejected": -1.7967454195022583,
+ "eval_logps/chosen": -255.23193359375,
+ "eval_logps/rejected": -212.03785705566406,
+ "eval_loss": 18.33973503112793,
+ "eval_rewards/accuracies": 0.7200000286102295,
+ "eval_rewards/chosen": 0.029216337949037552,
+ "eval_rewards/margins": 0.12978971004486084,
+ "eval_rewards/rejected": -0.10057336091995239,
+ "eval_runtime": 239.208,
+ "eval_samples": 2000,
+ "eval_samples_per_second": 8.361,
+ "eval_steps_per_second": 0.523,
+ "train_loss": 19.76867720969124,
+ "train_runtime": 32942.8373,
+ "train_samples": 61966,
+ "train_samples_per_second": 5.643,
+ "train_steps_per_second": 0.022
+ }
eval_results.json ADDED
@@ -0,0 +1,16 @@
+ {
+ "epoch": 3.0,
+ "eval_logits/chosen": -2.0242695808410645,
+ "eval_logits/rejected": -1.7967454195022583,
+ "eval_logps/chosen": -255.23193359375,
+ "eval_logps/rejected": -212.03785705566406,
+ "eval_loss": 18.33973503112793,
+ "eval_rewards/accuracies": 0.7200000286102295,
+ "eval_rewards/chosen": 0.029216337949037552,
+ "eval_rewards/margins": 0.12978971004486084,
+ "eval_rewards/rejected": -0.10057336091995239,
+ "eval_runtime": 239.208,
+ "eval_samples": 2000,
+ "eval_samples_per_second": 8.361,
+ "eval_steps_per_second": 0.523
+ }
runs/Jan04_14-18-11_node-0/events.out.tfevents.1704406980.node-0.388506.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d6a20c64982d0cf79e242b26a938737358c615a0eb021cfad072d8a491feda0e
+ size 53035
runs/Jan04_14-18-11_node-0/events.out.tfevents.1704440161.node-0.388506.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:531dbccecbdce53bad868619f1b73e4fac931019dc8a8e975bb952a76ad364f8
+ size 828
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,41 @@
+ {
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [],
+ "bos_token": "<s>",
+ "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": true,
+ "model_max_length": 2048,
+ "pad_token": "</s>",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false
+ }
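
The chat_template above renders conversations into <|system|>/<|user|>/<|assistant|> blocks, each terminated by the EOS token, with a trailing <|assistant|> cue when a generation prompt is requested. A small sketch of applying it (transformers 4.34+ provides apply_chat_template; the path is a placeholder for a local copy of this repository):

```python
# Sketch: render a conversation with the Jinja chat_template defined above.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./zephyr-7b-ipo-lora")  # hypothetical local path

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Summarize IPO fine-tuning in one sentence."},
]
# add_generation_prompt=True appends the trailing "<|assistant|>" cue from the template.
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)
```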
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 3.0,
+ "train_loss": 19.76867720969124,
+ "train_runtime": 32942.8373,
+ "train_samples": 61966,
+ "train_samples_per_second": 5.643,
+ "train_steps_per_second": 0.022
+ }
trainer_state.json ADDED
@@ -0,0 +1,1098 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 2.9992254066615027,
5
+ "eval_steps": 100,
6
+ "global_step": 726,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.0,
13
+ "learning_rate": 6.84931506849315e-09,
14
+ "logits/chosen": -2.2289741039276123,
15
+ "logits/rejected": -2.0226380825042725,
16
+ "logps/chosen": -263.3438720703125,
17
+ "logps/rejected": -201.271240234375,
18
+ "loss": 25.0,
19
+ "rewards/accuracies": 0.0,
20
+ "rewards/chosen": 0.0,
21
+ "rewards/margins": 0.0,
22
+ "rewards/rejected": 0.0,
23
+ "step": 1
24
+ },
25
+ {
26
+ "epoch": 0.04,
27
+ "learning_rate": 6.84931506849315e-08,
28
+ "logits/chosen": -2.266495943069458,
29
+ "logits/rejected": -1.9388139247894287,
30
+ "logps/chosen": -284.0570068359375,
31
+ "logps/rejected": -217.82611083984375,
32
+ "loss": 25.2097,
33
+ "rewards/accuracies": 0.4253472089767456,
34
+ "rewards/chosen": -0.0013144080294296145,
35
+ "rewards/margins": -0.0012916413834318519,
36
+ "rewards/rejected": -2.276669692946598e-05,
37
+ "step": 10
38
+ },
39
+ {
40
+ "epoch": 0.08,
41
+ "learning_rate": 1.36986301369863e-07,
42
+ "logits/chosen": -2.273313283920288,
43
+ "logits/rejected": -1.9863513708114624,
44
+ "logps/chosen": -269.41644287109375,
45
+ "logps/rejected": -208.65896606445312,
46
+ "loss": 25.3631,
47
+ "rewards/accuracies": 0.504687488079071,
48
+ "rewards/chosen": -0.00041677377885207534,
49
+ "rewards/margins": -0.0017736803274601698,
50
+ "rewards/rejected": 0.0013569066068157554,
51
+ "step": 20
52
+ },
53
+ {
54
+ "epoch": 0.12,
55
+ "learning_rate": 2.054794520547945e-07,
56
+ "logits/chosen": -2.218705177307129,
57
+ "logits/rejected": -1.993547797203064,
58
+ "logps/chosen": -243.9556427001953,
59
+ "logps/rejected": -199.10511779785156,
60
+ "loss": 25.202,
61
+ "rewards/accuracies": 0.5171874761581421,
62
+ "rewards/chosen": -0.0021037233527749777,
63
+ "rewards/margins": 0.0023984042927622795,
64
+ "rewards/rejected": -0.004502127878367901,
65
+ "step": 30
66
+ },
67
+ {
68
+ "epoch": 0.17,
69
+ "learning_rate": 2.73972602739726e-07,
70
+ "logits/chosen": -2.3098392486572266,
71
+ "logits/rejected": -1.996872901916504,
72
+ "logps/chosen": -274.44091796875,
73
+ "logps/rejected": -208.118896484375,
74
+ "loss": 25.1001,
75
+ "rewards/accuracies": 0.5062500238418579,
76
+ "rewards/chosen": 5.082110874354839e-05,
77
+ "rewards/margins": 0.0012452874798327684,
78
+ "rewards/rejected": -0.0011944664875045419,
79
+ "step": 40
80
+ },
81
+ {
82
+ "epoch": 0.21,
83
+ "learning_rate": 3.424657534246575e-07,
84
+ "logits/chosen": -2.2532970905303955,
85
+ "logits/rejected": -2.0162949562072754,
86
+ "logps/chosen": -258.4920959472656,
87
+ "logps/rejected": -215.529052734375,
88
+ "loss": 24.7793,
89
+ "rewards/accuracies": 0.53125,
90
+ "rewards/chosen": 0.0014612700324505568,
91
+ "rewards/margins": 0.004128460772335529,
92
+ "rewards/rejected": -0.0026671909727156162,
93
+ "step": 50
94
+ },
95
+ {
96
+ "epoch": 0.25,
97
+ "learning_rate": 4.10958904109589e-07,
98
+ "logits/chosen": -2.2521817684173584,
99
+ "logits/rejected": -1.9908069372177124,
100
+ "logps/chosen": -257.59222412109375,
101
+ "logps/rejected": -206.552490234375,
102
+ "loss": 24.2711,
103
+ "rewards/accuracies": 0.567187488079071,
104
+ "rewards/chosen": 0.008318379521369934,
105
+ "rewards/margins": 0.010114217177033424,
106
+ "rewards/rejected": -0.0017958376556634903,
107
+ "step": 60
108
+ },
109
+ {
110
+ "epoch": 0.29,
111
+ "learning_rate": 4.794520547945205e-07,
112
+ "logits/chosen": -2.2163357734680176,
113
+ "logits/rejected": -1.997807264328003,
114
+ "logps/chosen": -245.3988800048828,
115
+ "logps/rejected": -206.3892364501953,
116
+ "loss": 23.8848,
117
+ "rewards/accuracies": 0.604687511920929,
118
+ "rewards/chosen": 0.008774536661803722,
119
+ "rewards/margins": 0.015018345788121223,
120
+ "rewards/rejected": -0.006243808660656214,
121
+ "step": 70
122
+ },
123
+ {
124
+ "epoch": 0.33,
125
+ "learning_rate": 4.946401225114854e-07,
126
+ "logits/chosen": -2.272542715072632,
127
+ "logits/rejected": -1.9779363870620728,
128
+ "logps/chosen": -256.14208984375,
129
+ "logps/rejected": -210.4285125732422,
130
+ "loss": 23.3744,
131
+ "rewards/accuracies": 0.6187499761581421,
132
+ "rewards/chosen": 0.011616826988756657,
133
+ "rewards/margins": 0.023227987810969353,
134
+ "rewards/rejected": -0.01161116175353527,
135
+ "step": 80
136
+ },
137
+ {
138
+ "epoch": 0.37,
139
+ "learning_rate": 4.869831546707504e-07,
140
+ "logits/chosen": -2.310504913330078,
141
+ "logits/rejected": -2.018115520477295,
142
+ "logps/chosen": -262.4266052246094,
143
+ "logps/rejected": -216.05191040039062,
144
+ "loss": 22.5882,
145
+ "rewards/accuracies": 0.6484375,
146
+ "rewards/chosen": 0.02013319917023182,
147
+ "rewards/margins": 0.03405465558171272,
148
+ "rewards/rejected": -0.013921457342803478,
149
+ "step": 90
150
+ },
151
+ {
152
+ "epoch": 0.41,
153
+ "learning_rate": 4.793261868300153e-07,
154
+ "logits/chosen": -2.2590413093566895,
155
+ "logits/rejected": -1.9913495779037476,
156
+ "logps/chosen": -264.6045837402344,
157
+ "logps/rejected": -219.0672149658203,
158
+ "loss": 22.0702,
159
+ "rewards/accuracies": 0.6640625,
160
+ "rewards/chosen": 0.02459399588406086,
161
+ "rewards/margins": 0.04087246581912041,
162
+ "rewards/rejected": -0.016278475522994995,
163
+ "step": 100
164
+ },
165
+ {
166
+ "epoch": 0.45,
167
+ "learning_rate": 4.7166921898928023e-07,
168
+ "logits/chosen": -2.2836508750915527,
169
+ "logits/rejected": -2.039545774459839,
170
+ "logps/chosen": -243.48867797851562,
171
+ "logps/rejected": -214.2646942138672,
172
+ "loss": 21.7548,
173
+ "rewards/accuracies": 0.6656249761581421,
174
+ "rewards/chosen": 0.020637672394514084,
175
+ "rewards/margins": 0.043859176337718964,
176
+ "rewards/rejected": -0.02322150394320488,
177
+ "step": 110
178
+ },
179
+ {
180
+ "epoch": 0.5,
181
+ "learning_rate": 4.640122511485451e-07,
182
+ "logits/chosen": -2.2533435821533203,
183
+ "logits/rejected": -2.0158801078796387,
184
+ "logps/chosen": -247.85543823242188,
185
+ "logps/rejected": -211.3623504638672,
186
+ "loss": 21.608,
187
+ "rewards/accuracies": 0.635937511920929,
188
+ "rewards/chosen": 0.026100531220436096,
189
+ "rewards/margins": 0.05109390616416931,
190
+ "rewards/rejected": -0.024993373081088066,
191
+ "step": 120
192
+ },
193
+ {
194
+ "epoch": 0.54,
195
+ "learning_rate": 4.563552833078101e-07,
196
+ "logits/chosen": -2.2582602500915527,
197
+ "logits/rejected": -2.0190303325653076,
198
+ "logps/chosen": -256.9596252441406,
199
+ "logps/rejected": -217.3245849609375,
200
+ "loss": 20.8686,
201
+ "rewards/accuracies": 0.667187511920929,
202
+ "rewards/chosen": 0.028709357604384422,
203
+ "rewards/margins": 0.06462417542934418,
204
+ "rewards/rejected": -0.035914815962314606,
205
+ "step": 130
206
+ },
207
+ {
208
+ "epoch": 0.58,
209
+ "learning_rate": 4.4869831546707505e-07,
210
+ "logits/chosen": -2.2754273414611816,
211
+ "logits/rejected": -2.038130521774292,
212
+ "logps/chosen": -261.1288757324219,
213
+ "logps/rejected": -221.7465057373047,
214
+ "loss": 20.9236,
215
+ "rewards/accuracies": 0.71875,
216
+ "rewards/chosen": 0.03735818341374397,
217
+ "rewards/margins": 0.07491641491651535,
218
+ "rewards/rejected": -0.037558235228061676,
219
+ "step": 140
220
+ },
221
+ {
222
+ "epoch": 0.62,
223
+ "learning_rate": 4.4104134762633994e-07,
224
+ "logits/chosen": -2.2817165851593018,
225
+ "logits/rejected": -2.055285930633545,
226
+ "logps/chosen": -257.0137939453125,
227
+ "logps/rejected": -217.66183471679688,
228
+ "loss": 20.6598,
229
+ "rewards/accuracies": 0.6703125238418579,
230
+ "rewards/chosen": 0.03143654763698578,
231
+ "rewards/margins": 0.06876268237829208,
232
+ "rewards/rejected": -0.0373261384665966,
233
+ "step": 150
234
+ },
235
+ {
236
+ "epoch": 0.66,
237
+ "learning_rate": 4.333843797856049e-07,
238
+ "logits/chosen": -2.2860567569732666,
239
+ "logits/rejected": -1.999734878540039,
240
+ "logps/chosen": -270.0646057128906,
241
+ "logps/rejected": -229.95010375976562,
242
+ "loss": 19.9317,
243
+ "rewards/accuracies": 0.7015625238418579,
244
+ "rewards/chosen": 0.04298175126314163,
245
+ "rewards/margins": 0.08404376357793808,
246
+ "rewards/rejected": -0.04106200858950615,
247
+ "step": 160
248
+ },
249
+ {
250
+ "epoch": 0.7,
251
+ "learning_rate": 4.257274119448698e-07,
252
+ "logits/chosen": -2.3108818531036377,
253
+ "logits/rejected": -2.0907673835754395,
254
+ "logps/chosen": -261.53253173828125,
255
+ "logps/rejected": -230.0071563720703,
256
+ "loss": 20.2237,
257
+ "rewards/accuracies": 0.6546875238418579,
258
+ "rewards/chosen": 0.03258711099624634,
259
+ "rewards/margins": 0.07823493331670761,
260
+ "rewards/rejected": -0.04564782604575157,
261
+ "step": 170
262
+ },
263
+ {
264
+ "epoch": 0.74,
265
+ "learning_rate": 4.180704441041347e-07,
266
+ "logits/chosen": -2.2532687187194824,
267
+ "logits/rejected": -2.0309438705444336,
268
+ "logps/chosen": -247.6223907470703,
269
+ "logps/rejected": -216.98587036132812,
270
+ "loss": 20.6143,
271
+ "rewards/accuracies": 0.671875,
272
+ "rewards/chosen": 0.027079517021775246,
273
+ "rewards/margins": 0.08031971752643585,
274
+ "rewards/rejected": -0.05324019119143486,
275
+ "step": 180
276
+ },
277
+ {
278
+ "epoch": 0.78,
279
+ "learning_rate": 4.1041347626339966e-07,
280
+ "logits/chosen": -2.290727138519287,
281
+ "logits/rejected": -2.008113384246826,
282
+ "logps/chosen": -252.6389617919922,
283
+ "logps/rejected": -204.10769653320312,
284
+ "loss": 19.923,
285
+ "rewards/accuracies": 0.668749988079071,
286
+ "rewards/chosen": 0.02958676591515541,
287
+ "rewards/margins": 0.09163785725831985,
288
+ "rewards/rejected": -0.06205107644200325,
289
+ "step": 190
290
+ },
291
+ {
292
+ "epoch": 0.83,
293
+ "learning_rate": 4.027565084226646e-07,
294
+ "logits/chosen": -2.278743267059326,
295
+ "logits/rejected": -2.0434935092926025,
296
+ "logps/chosen": -251.91677856445312,
297
+ "logps/rejected": -208.04470825195312,
298
+ "loss": 19.5106,
299
+ "rewards/accuracies": 0.71875,
300
+ "rewards/chosen": 0.03315151482820511,
301
+ "rewards/margins": 0.09510111808776855,
302
+ "rewards/rejected": -0.061949603259563446,
303
+ "step": 200
304
+ },
305
+ {
306
+ "epoch": 0.87,
307
+ "learning_rate": 3.9509954058192954e-07,
308
+ "logits/chosen": -2.288477897644043,
309
+ "logits/rejected": -2.027742385864258,
310
+ "logps/chosen": -274.6870422363281,
311
+ "logps/rejected": -221.636962890625,
312
+ "loss": 19.5079,
313
+ "rewards/accuracies": 0.684374988079071,
314
+ "rewards/chosen": 0.03773919492959976,
315
+ "rewards/margins": 0.10934200137853622,
316
+ "rewards/rejected": -0.07160280644893646,
317
+ "step": 210
318
+ },
319
+ {
320
+ "epoch": 0.91,
321
+ "learning_rate": 3.874425727411945e-07,
322
+ "logits/chosen": -2.2594573497772217,
323
+ "logits/rejected": -2.008882999420166,
324
+ "logps/chosen": -267.6262512207031,
325
+ "logps/rejected": -219.49728393554688,
326
+ "loss": 19.6404,
327
+ "rewards/accuracies": 0.698437511920929,
328
+ "rewards/chosen": 0.04026947170495987,
329
+ "rewards/margins": 0.10674212872982025,
330
+ "rewards/rejected": -0.06647266447544098,
331
+ "step": 220
332
+ },
333
+ {
334
+ "epoch": 0.95,
335
+ "learning_rate": 3.797856049004594e-07,
336
+ "logits/chosen": -2.26505708694458,
337
+ "logits/rejected": -2.060857057571411,
338
+ "logps/chosen": -254.90005493164062,
339
+ "logps/rejected": -218.54141235351562,
340
+ "loss": 19.6698,
341
+ "rewards/accuracies": 0.6796875,
342
+ "rewards/chosen": 0.035312727093696594,
343
+ "rewards/margins": 0.0973130315542221,
344
+ "rewards/rejected": -0.062000297009944916,
345
+ "step": 230
346
+ },
347
+ {
348
+ "epoch": 0.99,
349
+ "learning_rate": 3.7212863705972436e-07,
350
+ "logits/chosen": -2.2956652641296387,
351
+ "logits/rejected": -2.047691822052002,
352
+ "logps/chosen": -261.9791564941406,
353
+ "logps/rejected": -212.89151000976562,
354
+ "loss": 19.3937,
355
+ "rewards/accuracies": 0.706250011920929,
356
+ "rewards/chosen": 0.035399921238422394,
357
+ "rewards/margins": 0.10358913987874985,
358
+ "rewards/rejected": -0.06818921864032745,
359
+ "step": 240
360
+ },
361
+ {
362
+ "epoch": 1.0,
363
+ "eval_logits/chosen": -2.0236928462982178,
364
+ "eval_logits/rejected": -1.7961955070495605,
365
+ "eval_logps/chosen": -255.23326110839844,
366
+ "eval_logps/rejected": -211.76084899902344,
367
+ "eval_loss": 19.3449764251709,
368
+ "eval_rewards/accuracies": 0.7039999961853027,
369
+ "eval_rewards/chosen": 0.029078969731926918,
370
+ "eval_rewards/margins": 0.1019509881734848,
371
+ "eval_rewards/rejected": -0.07287202030420303,
372
+ "eval_runtime": 239.3505,
373
+ "eval_samples_per_second": 8.356,
374
+ "eval_steps_per_second": 0.522,
375
+ "step": 242
376
+ },
377
+ {
378
+ "epoch": 1.03,
379
+ "learning_rate": 3.6447166921898925e-07,
380
+ "logits/chosen": -2.2042205333709717,
381
+ "logits/rejected": -2.014646053314209,
382
+ "logps/chosen": -248.44058227539062,
383
+ "logps/rejected": -200.34120178222656,
384
+ "loss": 19.9018,
385
+ "rewards/accuracies": 0.6812499761581421,
386
+ "rewards/chosen": 0.03346983343362808,
387
+ "rewards/margins": 0.10174000263214111,
388
+ "rewards/rejected": -0.06827016919851303,
389
+ "step": 250
390
+ },
391
+ {
392
+ "epoch": 1.07,
393
+ "learning_rate": 3.568147013782542e-07,
394
+ "logits/chosen": -2.2492403984069824,
395
+ "logits/rejected": -2.0223591327667236,
396
+ "logps/chosen": -252.7912139892578,
397
+ "logps/rejected": -216.12240600585938,
398
+ "loss": 19.0687,
399
+ "rewards/accuracies": 0.7265625,
400
+ "rewards/chosen": 0.03231377154588699,
401
+ "rewards/margins": 0.10541899502277374,
402
+ "rewards/rejected": -0.07310522347688675,
403
+ "step": 260
404
+ },
405
+ {
406
+ "epoch": 1.12,
407
+ "learning_rate": 3.4915773353751913e-07,
408
+ "logits/chosen": -2.26572847366333,
409
+ "logits/rejected": -1.9836113452911377,
410
+ "logps/chosen": -268.9698791503906,
411
+ "logps/rejected": -225.75338745117188,
412
+ "loss": 18.628,
413
+ "rewards/accuracies": 0.7484375238418579,
414
+ "rewards/chosen": 0.04413260146975517,
415
+ "rewards/margins": 0.1356559544801712,
416
+ "rewards/rejected": -0.09152336418628693,
417
+ "step": 270
418
+ },
419
+ {
420
+ "epoch": 1.16,
421
+ "learning_rate": 3.41500765696784e-07,
422
+ "logits/chosen": -2.270271062850952,
423
+ "logits/rejected": -2.0418267250061035,
424
+ "logps/chosen": -241.98959350585938,
425
+ "logps/rejected": -215.8519287109375,
426
+ "loss": 19.2251,
427
+ "rewards/accuracies": 0.734375,
428
+ "rewards/chosen": 0.03690015524625778,
429
+ "rewards/margins": 0.11016629636287689,
430
+ "rewards/rejected": -0.07326614856719971,
431
+ "step": 280
432
+ },
433
+ {
434
+ "epoch": 1.2,
435
+ "learning_rate": 3.33843797856049e-07,
436
+ "logits/chosen": -2.2635691165924072,
437
+ "logits/rejected": -2.0188465118408203,
438
+ "logps/chosen": -273.9185791015625,
439
+ "logps/rejected": -217.93167114257812,
440
+ "loss": 18.6598,
441
+ "rewards/accuracies": 0.737500011920929,
442
+ "rewards/chosen": 0.03699145466089249,
443
+ "rewards/margins": 0.12567397952079773,
444
+ "rewards/rejected": -0.08868252485990524,
445
+ "step": 290
446
+ },
447
+ {
448
+ "epoch": 1.24,
449
+ "learning_rate": 3.2618683001531396e-07,
450
+ "logits/chosen": -2.2175521850585938,
451
+ "logits/rejected": -1.9899898767471313,
452
+ "logps/chosen": -251.8654327392578,
453
+ "logps/rejected": -217.54421997070312,
454
+ "loss": 18.5271,
455
+ "rewards/accuracies": 0.714062511920929,
456
+ "rewards/chosen": 0.03108426369726658,
457
+ "rewards/margins": 0.1172715276479721,
458
+ "rewards/rejected": -0.08618726581335068,
459
+ "step": 300
460
+ },
461
+ {
462
+ "epoch": 1.28,
463
+ "learning_rate": 3.1852986217457885e-07,
464
+ "logits/chosen": -2.287581205368042,
465
+ "logits/rejected": -2.035792827606201,
466
+ "logps/chosen": -250.79638671875,
467
+ "logps/rejected": -218.07278442382812,
468
+ "loss": 19.1524,
469
+ "rewards/accuracies": 0.7124999761581421,
470
+ "rewards/chosen": 0.03225615620613098,
471
+ "rewards/margins": 0.11478684097528458,
472
+ "rewards/rejected": -0.082530677318573,
473
+ "step": 310
474
+ },
475
+ {
476
+ "epoch": 1.32,
477
+ "learning_rate": 3.108728943338438e-07,
478
+ "logits/chosen": -2.2732996940612793,
479
+ "logits/rejected": -2.085688829421997,
480
+ "logps/chosen": -243.0460662841797,
481
+ "logps/rejected": -227.0532989501953,
482
+ "loss": 19.1273,
483
+ "rewards/accuracies": 0.692187488079071,
484
+ "rewards/chosen": 0.02955133654177189,
485
+ "rewards/margins": 0.10330448299646378,
486
+ "rewards/rejected": -0.07375315576791763,
487
+ "step": 320
488
+ },
489
+ {
490
+ "epoch": 1.36,
491
+ "learning_rate": 3.0321592649310873e-07,
492
+ "logits/chosen": -2.2556710243225098,
493
+ "logits/rejected": -1.9700673818588257,
494
+ "logps/chosen": -243.98757934570312,
495
+ "logps/rejected": -214.067138671875,
496
+ "loss": 18.57,
497
+ "rewards/accuracies": 0.7281249761581421,
498
+ "rewards/chosen": 0.03395627439022064,
499
+ "rewards/margins": 0.1173306480050087,
500
+ "rewards/rejected": -0.08337438106536865,
501
+ "step": 330
502
+ },
503
+ {
504
+ "epoch": 1.4,
505
+ "learning_rate": 2.955589586523736e-07,
506
+ "logits/chosen": -2.285491466522217,
507
+ "logits/rejected": -2.017014503479004,
508
+ "logps/chosen": -256.3018493652344,
509
+ "logps/rejected": -215.5684051513672,
510
+ "loss": 19.3792,
511
+ "rewards/accuracies": 0.7265625,
512
+ "rewards/chosen": 0.02712482213973999,
513
+ "rewards/margins": 0.11975017935037613,
514
+ "rewards/rejected": -0.09262534976005554,
515
+ "step": 340
516
+ },
517
+ {
518
+ "epoch": 1.45,
519
+ "learning_rate": 2.8790199081163856e-07,
520
+ "logits/chosen": -2.2827515602111816,
521
+ "logits/rejected": -1.9982925653457642,
522
+ "logps/chosen": -256.5948181152344,
523
+ "logps/rejected": -214.377197265625,
524
+ "loss": 19.2092,
525
+ "rewards/accuracies": 0.6890624761581421,
526
+ "rewards/chosen": 0.02536213956773281,
527
+ "rewards/margins": 0.10417892783880234,
528
+ "rewards/rejected": -0.07881677150726318,
529
+ "step": 350
530
+ },
531
+ {
532
+ "epoch": 1.49,
533
+ "learning_rate": 2.802450229709035e-07,
534
+ "logits/chosen": -2.226412296295166,
535
+ "logits/rejected": -1.9456377029418945,
536
+ "logps/chosen": -245.56753540039062,
537
+ "logps/rejected": -202.68087768554688,
538
+ "loss": 18.5533,
539
+ "rewards/accuracies": 0.7015625238418579,
540
+ "rewards/chosen": 0.023562483489513397,
541
+ "rewards/margins": 0.11015045642852783,
542
+ "rewards/rejected": -0.08658796548843384,
543
+ "step": 360
544
+ },
545
+ {
546
+ "epoch": 1.53,
547
+ "learning_rate": 2.725880551301684e-07,
548
+ "logits/chosen": -2.2886481285095215,
549
+ "logits/rejected": -2.075536012649536,
550
+ "logps/chosen": -265.2494812011719,
551
+ "logps/rejected": -222.0449981689453,
552
+ "loss": 19.4234,
553
+ "rewards/accuracies": 0.690625011920929,
554
+ "rewards/chosen": 0.0262996107339859,
555
+ "rewards/margins": 0.11352996528148651,
556
+ "rewards/rejected": -0.08723036199808121,
557
+ "step": 370
558
+ },
559
+ {
560
+ "epoch": 1.57,
561
+ "learning_rate": 2.649310872894334e-07,
562
+ "logits/chosen": -2.2954063415527344,
563
+ "logits/rejected": -2.019110918045044,
564
+ "logps/chosen": -265.41534423828125,
565
+ "logps/rejected": -211.8571319580078,
566
+ "loss": 18.7522,
567
+ "rewards/accuracies": 0.734375,
568
+ "rewards/chosen": 0.030247170478105545,
569
+ "rewards/margins": 0.12689557671546936,
570
+ "rewards/rejected": -0.09664840996265411,
571
+ "step": 380
572
+ },
573
+ {
574
+ "epoch": 1.61,
575
+ "learning_rate": 2.572741194486983e-07,
576
+ "logits/chosen": -2.307234525680542,
577
+ "logits/rejected": -1.9544427394866943,
578
+ "logps/chosen": -263.83050537109375,
579
+ "logps/rejected": -196.88644409179688,
580
+ "loss": 18.7036,
581
+ "rewards/accuracies": 0.721875011920929,
582
+ "rewards/chosen": 0.02584310993552208,
583
+ "rewards/margins": 0.11804970353841782,
584
+ "rewards/rejected": -0.09220659732818604,
585
+ "step": 390
586
+ },
587
+ {
588
+ "epoch": 1.65,
589
+ "learning_rate": 2.496171516079632e-07,
590
+ "logits/chosen": -2.3390731811523438,
591
+ "logits/rejected": -2.020245313644409,
592
+ "logps/chosen": -283.559326171875,
593
+ "logps/rejected": -228.0408477783203,
594
+ "loss": 18.9635,
595
+ "rewards/accuracies": 0.734375,
596
+ "rewards/chosen": 0.03759358078241348,
597
+ "rewards/margins": 0.13340887427330017,
598
+ "rewards/rejected": -0.09581530094146729,
599
+ "step": 400
600
+ },
601
+ {
602
+ "epoch": 1.69,
603
+ "learning_rate": 2.4196018376722816e-07,
604
+ "logits/chosen": -2.25607967376709,
605
+ "logits/rejected": -2.005556344985962,
606
+ "logps/chosen": -268.84881591796875,
607
+ "logps/rejected": -216.67996215820312,
608
+ "loss": 19.2263,
609
+ "rewards/accuracies": 0.7265625,
610
+ "rewards/chosen": 0.027539696544408798,
611
+ "rewards/margins": 0.12173386663198471,
612
+ "rewards/rejected": -0.09419417381286621,
613
+ "step": 410
614
+ },
615
+ {
616
+ "epoch": 1.74,
617
+ "learning_rate": 2.343032159264931e-07,
618
+ "logits/chosen": -2.234701633453369,
619
+ "logits/rejected": -1.9779908657073975,
620
+ "logps/chosen": -259.31536865234375,
621
+ "logps/rejected": -212.6099853515625,
622
+ "loss": 18.7807,
623
+ "rewards/accuracies": 0.729687511920929,
624
+ "rewards/chosen": 0.03047511912882328,
625
+ "rewards/margins": 0.13214388489723206,
626
+ "rewards/rejected": -0.10166877508163452,
627
+ "step": 420
628
+ },
629
+ {
630
+ "epoch": 1.78,
631
+ "learning_rate": 2.26646248085758e-07,
632
+ "logits/chosen": -2.231544017791748,
633
+ "logits/rejected": -1.981184720993042,
634
+ "logps/chosen": -249.824951171875,
635
+ "logps/rejected": -205.8656005859375,
636
+ "loss": 18.6868,
637
+ "rewards/accuracies": 0.729687511920929,
638
+ "rewards/chosen": 0.026928503066301346,
639
+ "rewards/margins": 0.128057599067688,
640
+ "rewards/rejected": -0.10112909972667694,
641
+ "step": 430
642
+ },
643
+ {
644
+ "epoch": 1.82,
645
+ "learning_rate": 2.1898928024502298e-07,
646
+ "logits/chosen": -2.2951433658599854,
647
+ "logits/rejected": -2.026249408721924,
648
+ "logps/chosen": -253.8539581298828,
649
+ "logps/rejected": -210.1437225341797,
650
+ "loss": 18.3582,
651
+ "rewards/accuracies": 0.739062488079071,
652
+ "rewards/chosen": 0.03310644254088402,
653
+ "rewards/margins": 0.12848404049873352,
654
+ "rewards/rejected": -0.09537758678197861,
655
+ "step": 440
656
+ },
657
+ {
658
+ "epoch": 1.86,
659
+ "learning_rate": 2.113323124042879e-07,
660
+ "logits/chosen": -2.2722887992858887,
661
+ "logits/rejected": -2.0053603649139404,
662
+ "logps/chosen": -261.27166748046875,
663
+ "logps/rejected": -219.5053253173828,
664
+ "loss": 18.8048,
665
+ "rewards/accuracies": 0.7265625,
666
+ "rewards/chosen": 0.034187205135822296,
667
+ "rewards/margins": 0.1302899569272995,
668
+ "rewards/rejected": -0.0961027592420578,
669
+ "step": 450
670
+ },
671
+ {
672
+ "epoch": 1.9,
673
+ "learning_rate": 2.036753445635528e-07,
674
+ "logits/chosen": -2.279463052749634,
675
+ "logits/rejected": -1.9979301691055298,
676
+ "logps/chosen": -273.9200744628906,
677
+ "logps/rejected": -225.6119842529297,
678
+ "loss": 18.3929,
679
+ "rewards/accuracies": 0.7093750238418579,
680
+ "rewards/chosen": 0.0315621979534626,
681
+ "rewards/margins": 0.1322147399187088,
682
+ "rewards/rejected": -0.1006525382399559,
683
+ "step": 460
684
+ },
685
+ {
686
+ "epoch": 1.94,
687
+ "learning_rate": 1.9601837672281775e-07,
688
+ "logits/chosen": -2.261075735092163,
689
+ "logits/rejected": -2.026048183441162,
690
+ "logps/chosen": -260.4441223144531,
691
+ "logps/rejected": -224.8106231689453,
692
+ "loss": 18.4695,
693
+ "rewards/accuracies": 0.7359374761581421,
694
+ "rewards/chosen": 0.025445517152547836,
695
+ "rewards/margins": 0.12411808967590332,
696
+ "rewards/rejected": -0.09867255389690399,
697
+ "step": 470
698
+ },
699
+ {
700
+ "epoch": 1.98,
701
+ "learning_rate": 1.883614088820827e-07,
702
+ "logits/chosen": -2.296330213546753,
703
+ "logits/rejected": -1.9999678134918213,
704
+ "logps/chosen": -258.93145751953125,
705
+ "logps/rejected": -212.75003051757812,
706
+ "loss": 19.376,
707
+ "rewards/accuracies": 0.731249988079071,
708
+ "rewards/chosen": 0.027268463745713234,
709
+ "rewards/margins": 0.12198734283447266,
710
+ "rewards/rejected": -0.09471887350082397,
711
+ "step": 480
712
+ },
713
+ {
714
+ "epoch": 2.0,
715
+ "eval_logits/chosen": -2.023204803466797,
716
+ "eval_logits/rejected": -1.7954164743423462,
717
+ "eval_logps/chosen": -255.2545623779297,
718
+ "eval_logps/rejected": -211.98085021972656,
719
+ "eval_loss": 18.819795608520508,
720
+ "eval_rewards/accuracies": 0.7020000219345093,
721
+ "eval_rewards/chosen": 0.026950573548674583,
722
+ "eval_rewards/margins": 0.12182173132896423,
723
+ "eval_rewards/rejected": -0.0948711559176445,
724
+ "eval_runtime": 239.4946,
725
+ "eval_samples_per_second": 8.351,
726
+ "eval_steps_per_second": 0.522,
727
+ "step": 484
728
+ },
729
+ {
730
+ "epoch": 2.02,
731
+ "learning_rate": 1.807044410413476e-07,
732
+ "logits/chosen": -2.2521510124206543,
733
+ "logits/rejected": -2.021901845932007,
734
+ "logps/chosen": -248.35025024414062,
735
+ "logps/rejected": -224.5894775390625,
736
+ "loss": 18.8769,
737
+ "rewards/accuracies": 0.7359374761581421,
738
+ "rewards/chosen": 0.026256313547492027,
739
+ "rewards/margins": 0.1273646056652069,
740
+ "rewards/rejected": -0.10110831260681152,
741
+ "step": 490
742
+ },
743
+ {
744
+ "epoch": 2.07,
745
+ "learning_rate": 1.7304747320061255e-07,
746
+ "logits/chosen": -2.2362194061279297,
747
+ "logits/rejected": -2.0276010036468506,
748
+ "logps/chosen": -253.75991821289062,
749
+ "logps/rejected": -218.86349487304688,
750
+ "loss": 18.6885,
751
+ "rewards/accuracies": 0.7265625,
752
+ "rewards/chosen": 0.03784112259745598,
753
+ "rewards/margins": 0.12961548566818237,
754
+ "rewards/rejected": -0.0917743593454361,
755
+ "step": 500
756
+ },
757
+ {
758
+ "epoch": 2.11,
759
+ "learning_rate": 1.6539050535987747e-07,
760
+ "logits/chosen": -2.3277292251586914,
761
+ "logits/rejected": -2.005606174468994,
762
+ "logps/chosen": -260.1662902832031,
763
+ "logps/rejected": -212.98428344726562,
764
+ "loss": 18.2008,
765
+ "rewards/accuracies": 0.731249988079071,
766
+ "rewards/chosen": 0.030466770753264427,
767
+ "rewards/margins": 0.13587407767772675,
768
+ "rewards/rejected": -0.10540731251239777,
769
+ "step": 510
770
+ },
771
+ {
772
+ "epoch": 2.15,
773
+ "learning_rate": 1.5773353751914243e-07,
774
+ "logits/chosen": -2.354032516479492,
775
+ "logits/rejected": -2.0204906463623047,
776
+ "logps/chosen": -275.2497863769531,
777
+ "logps/rejected": -218.10595703125,
778
+ "loss": 18.827,
779
+ "rewards/accuracies": 0.7515624761581421,
780
+ "rewards/chosen": 0.0328175388276577,
781
+ "rewards/margins": 0.13892371952533722,
782
+ "rewards/rejected": -0.10610618442296982,
783
+ "step": 520
784
+ },
785
+ {
786
+ "epoch": 2.19,
787
+ "learning_rate": 1.5007656967840735e-07,
788
+ "logits/chosen": -2.227428436279297,
789
+ "logits/rejected": -1.9895546436309814,
790
+ "logps/chosen": -244.3919677734375,
791
+ "logps/rejected": -218.86886596679688,
792
+ "loss": 18.3559,
793
+ "rewards/accuracies": 0.753125011920929,
794
+ "rewards/chosen": 0.027428537607192993,
795
+ "rewards/margins": 0.13661186397075653,
796
+ "rewards/rejected": -0.10918332636356354,
797
+ "step": 530
798
+ },
799
+ {
800
+ "epoch": 2.23,
801
+ "learning_rate": 1.4241960183767226e-07,
802
+ "logits/chosen": -2.2682414054870605,
803
+ "logits/rejected": -1.9858787059783936,
804
+ "logps/chosen": -272.84637451171875,
805
+ "logps/rejected": -223.4131622314453,
806
+ "loss": 18.6971,
807
+ "rewards/accuracies": 0.7124999761581421,
808
+ "rewards/chosen": 0.024813225492835045,
809
+ "rewards/margins": 0.12149915844202042,
810
+ "rewards/rejected": -0.09668593108654022,
811
+ "step": 540
812
+ },
813
+ {
814
+ "epoch": 2.27,
815
+ "learning_rate": 1.347626339969372e-07,
816
+ "logits/chosen": -2.2588775157928467,
817
+ "logits/rejected": -2.093949794769287,
818
+ "logps/chosen": -261.2510986328125,
819
+ "logps/rejected": -230.35330200195312,
820
+ "loss": 18.6719,
821
+ "rewards/accuracies": 0.723437488079071,
822
+ "rewards/chosen": 0.034928444772958755,
823
+ "rewards/margins": 0.12344489991664886,
824
+ "rewards/rejected": -0.08851645886898041,
825
+ "step": 550
826
+ },
827
+ {
828
+ "epoch": 2.31,
829
+ "learning_rate": 1.2710566615620215e-07,
830
+ "logits/chosen": -2.2328484058380127,
831
+ "logits/rejected": -1.9578449726104736,
832
+ "logps/chosen": -264.450439453125,
833
+ "logps/rejected": -212.9776611328125,
834
+ "loss": 17.9566,
835
+ "rewards/accuracies": 0.734375,
836
+ "rewards/chosen": 0.026903945952653885,
837
+ "rewards/margins": 0.13341760635375977,
838
+ "rewards/rejected": -0.10651366412639618,
839
+ "step": 560
840
+ },
841
+ {
842
+ "epoch": 2.35,
843
+ "learning_rate": 1.1944869831546706e-07,
844
+ "logits/chosen": -2.237755060195923,
845
+ "logits/rejected": -2.0300750732421875,
846
+ "logps/chosen": -255.99853515625,
847
+ "logps/rejected": -226.13626098632812,
848
+ "loss": 18.5265,
849
+ "rewards/accuracies": 0.698437511920929,
850
+ "rewards/chosen": 0.023317929357290268,
851
+ "rewards/margins": 0.12482543289661407,
852
+ "rewards/rejected": -0.1015075072646141,
853
+ "step": 570
854
+ },
855
+ {
856
+ "epoch": 2.4,
857
+ "learning_rate": 1.11791730474732e-07,
858
+ "logits/chosen": -2.257673740386963,
859
+ "logits/rejected": -1.9982837438583374,
860
+ "logps/chosen": -252.7151336669922,
861
+ "logps/rejected": -213.78366088867188,
862
+ "loss": 18.0724,
863
+ "rewards/accuracies": 0.7593749761581421,
864
+ "rewards/chosen": 0.023391084745526314,
865
+ "rewards/margins": 0.14269840717315674,
866
+ "rewards/rejected": -0.11930731683969498,
867
+ "step": 580
868
+ },
869
+ {
870
+ "epoch": 2.44,
871
+ "learning_rate": 1.0413476263399694e-07,
872
+ "logits/chosen": -2.297306537628174,
873
+ "logits/rejected": -2.012129783630371,
874
+ "logps/chosen": -259.78497314453125,
875
+ "logps/rejected": -206.7128448486328,
876
+ "loss": 18.4955,
877
+ "rewards/accuracies": 0.7093750238418579,
878
+ "rewards/chosen": 0.017748359590768814,
879
+ "rewards/margins": 0.12208724021911621,
880
+ "rewards/rejected": -0.1043388843536377,
881
+ "step": 590
882
+ },
883
+ {
884
+ "epoch": 2.48,
885
+ "learning_rate": 9.647779479326186e-08,
886
+ "logits/chosen": -2.2344954013824463,
887
+ "logits/rejected": -2.0501608848571777,
888
+ "logps/chosen": -240.86679077148438,
889
+ "logps/rejected": -212.7677764892578,
890
+ "loss": 18.9662,
891
+ "rewards/accuracies": 0.7203124761581421,
892
+ "rewards/chosen": 0.02210834063589573,
893
+ "rewards/margins": 0.11001463979482651,
894
+ "rewards/rejected": -0.08790630102157593,
895
+ "step": 600
896
+ },
897
+ {
898
+ "epoch": 2.52,
899
+ "learning_rate": 8.88208269525268e-08,
900
+ "logits/chosen": -2.239448308944702,
901
+ "logits/rejected": -1.992950201034546,
902
+ "logps/chosen": -250.8835906982422,
903
+ "logps/rejected": -211.41421508789062,
904
+ "loss": 18.8806,
905
+ "rewards/accuracies": 0.7250000238418579,
906
+ "rewards/chosen": 0.02492038905620575,
907
+ "rewards/margins": 0.1194721907377243,
908
+ "rewards/rejected": -0.09455180168151855,
909
+ "step": 610
910
+ },
911
+ {
912
+ "epoch": 2.56,
913
+ "learning_rate": 8.116385911179173e-08,
914
+ "logits/chosen": -2.2440013885498047,
915
+ "logits/rejected": -1.960422158241272,
916
+ "logps/chosen": -270.11187744140625,
917
+ "logps/rejected": -207.26058959960938,
918
+ "loss": 18.2745,
919
+ "rewards/accuracies": 0.737500011920929,
920
+ "rewards/chosen": 0.03377198800444603,
921
+ "rewards/margins": 0.13419213891029358,
922
+ "rewards/rejected": -0.10042013972997665,
923
+ "step": 620
924
+ },
925
+ {
926
+ "epoch": 2.6,
927
+ "learning_rate": 7.350689127105667e-08,
928
+ "logits/chosen": -2.2937111854553223,
929
+ "logits/rejected": -1.987168550491333,
930
+ "logps/chosen": -262.86199951171875,
931
+ "logps/rejected": -219.86508178710938,
932
+ "loss": 18.1999,
933
+ "rewards/accuracies": 0.7093750238418579,
934
+ "rewards/chosen": 0.03628316521644592,
935
+ "rewards/margins": 0.14258472621440887,
936
+ "rewards/rejected": -0.10630156099796295,
937
+ "step": 630
938
+ },
939
+ {
940
+ "epoch": 2.64,
941
+ "learning_rate": 6.584992343032159e-08,
942
+ "logits/chosen": -2.2060506343841553,
943
+ "logits/rejected": -1.9665876626968384,
944
+ "logps/chosen": -253.2500457763672,
945
+ "logps/rejected": -216.85476684570312,
946
+ "loss": 18.8148,
947
+ "rewards/accuracies": 0.7203124761581421,
948
+ "rewards/chosen": 0.03216860815882683,
949
+ "rewards/margins": 0.12755930423736572,
950
+ "rewards/rejected": -0.0953906923532486,
951
+ "step": 640
952
+ },
953
+ {
954
+ "epoch": 2.69,
955
+ "learning_rate": 5.819295558958652e-08,
956
+ "logits/chosen": -2.2680506706237793,
957
+ "logits/rejected": -1.9561970233917236,
958
+ "logps/chosen": -277.43157958984375,
959
+ "logps/rejected": -209.50244140625,
960
+ "loss": 18.345,
961
+ "rewards/accuracies": 0.7437499761581421,
962
+ "rewards/chosen": 0.03760701045393944,
963
+ "rewards/margins": 0.14586040377616882,
964
+ "rewards/rejected": -0.10825341939926147,
965
+ "step": 650
966
+ },
967
+ {
968
+ "epoch": 2.73,
969
+ "learning_rate": 5.0535987748851455e-08,
970
+ "logits/chosen": -2.3038992881774902,
971
+ "logits/rejected": -2.04447078704834,
972
+ "logps/chosen": -265.35980224609375,
973
+ "logps/rejected": -219.22085571289062,
974
+ "loss": 18.1755,
975
+ "rewards/accuracies": 0.7421875,
976
+ "rewards/chosen": 0.027750462293624878,
977
+ "rewards/margins": 0.12442169338464737,
978
+ "rewards/rejected": -0.09667123109102249,
979
+ "step": 660
980
+ },
981
+ {
982
+ "epoch": 2.77,
983
+ "learning_rate": 4.287901990811638e-08,
984
+ "logits/chosen": -2.2642054557800293,
985
+ "logits/rejected": -2.0056357383728027,
986
+ "logps/chosen": -258.467041015625,
987
+ "logps/rejected": -232.66946411132812,
988
+ "loss": 18.3268,
989
+ "rewards/accuracies": 0.7203124761581421,
990
+ "rewards/chosen": 0.033985260874032974,
991
+ "rewards/margins": 0.14256197214126587,
992
+ "rewards/rejected": -0.1085767149925232,
993
+ "step": 670
994
+ },
995
+ {
996
+ "epoch": 2.81,
997
+ "learning_rate": 3.522205206738132e-08,
998
+ "logits/chosen": -2.2641890048980713,
999
+ "logits/rejected": -2.0118279457092285,
1000
+ "logps/chosen": -265.30517578125,
1001
+ "logps/rejected": -217.5371856689453,
1002
+ "loss": 18.4502,
1003
+ "rewards/accuracies": 0.7359374761581421,
1004
+ "rewards/chosen": 0.027841120958328247,
1005
+ "rewards/margins": 0.12672238051891327,
1006
+ "rewards/rejected": -0.09888125956058502,
1007
+ "step": 680
1008
+ },
1009
+ {
1010
+ "epoch": 2.85,
1011
+ "learning_rate": 2.7565084226646246e-08,
1012
+ "logits/chosen": -2.2387919425964355,
1013
+ "logits/rejected": -2.0168097019195557,
1014
+ "logps/chosen": -254.23617553710938,
1015
+ "logps/rejected": -224.2781219482422,
1016
+ "loss": 18.6327,
1017
+ "rewards/accuracies": 0.7281249761581421,
1018
+ "rewards/chosen": 0.03157157823443413,
1019
+ "rewards/margins": 0.13236665725708008,
1020
+ "rewards/rejected": -0.10079507529735565,
1021
+ "step": 690
1022
+ },
1023
+ {
1024
+ "epoch": 2.89,
1025
+ "learning_rate": 1.9908116385911178e-08,
1026
+ "logits/chosen": -2.2574362754821777,
1027
+ "logits/rejected": -2.012924909591675,
1028
+ "logps/chosen": -250.27371215820312,
1029
+ "logps/rejected": -215.03756713867188,
1030
+ "loss": 18.7916,
1031
+ "rewards/accuracies": 0.690625011920929,
1032
+ "rewards/chosen": 0.023055683821439743,
1033
+ "rewards/margins": 0.11122564226388931,
1034
+ "rewards/rejected": -0.08816995471715927,
1035
+ "step": 700
1036
+ },
1037
+ {
1038
+ "epoch": 2.93,
1039
+ "learning_rate": 1.225114854517611e-08,
1040
+ "logits/chosen": -2.309433937072754,
1041
+ "logits/rejected": -2.0074551105499268,
1042
+ "logps/chosen": -270.73040771484375,
1043
+ "logps/rejected": -209.7691192626953,
1044
+ "loss": 18.5426,
1045
+ "rewards/accuracies": 0.723437488079071,
1046
+ "rewards/chosen": 0.025719935074448586,
1047
+ "rewards/margins": 0.13181468844413757,
1048
+ "rewards/rejected": -0.10609474033117294,
1049
+ "step": 710
1050
+ },
1051
+ {
1052
+ "epoch": 2.97,
1053
+ "learning_rate": 4.594180704441042e-09,
1054
+ "logits/chosen": -2.2386531829833984,
1055
+ "logits/rejected": -2.058807849884033,
1056
+ "logps/chosen": -248.2914581298828,
1057
+ "logps/rejected": -213.177490234375,
1058
+ "loss": 18.4503,
1059
+ "rewards/accuracies": 0.7124999761581421,
1060
+ "rewards/chosen": 0.0301600880920887,
1061
+ "rewards/margins": 0.12142640352249146,
1062
+ "rewards/rejected": -0.09126633405685425,
1063
+ "step": 720
1064
+ },
1065
+ {
1066
+ "epoch": 3.0,
1067
+ "eval_logits/chosen": -2.0242695808410645,
1068
+ "eval_logits/rejected": -1.7967454195022583,
1069
+ "eval_logps/chosen": -255.23193359375,
1070
+ "eval_logps/rejected": -212.03785705566406,
1071
+ "eval_loss": 18.33973503112793,
1072
+ "eval_rewards/accuracies": 0.7200000286102295,
1073
+ "eval_rewards/chosen": 0.029216337949037552,
1074
+ "eval_rewards/margins": 0.12978971004486084,
1075
+ "eval_rewards/rejected": -0.10057336091995239,
1076
+ "eval_runtime": 239.5813,
1077
+ "eval_samples_per_second": 8.348,
1078
+ "eval_steps_per_second": 0.522,
1079
+ "step": 726
1080
+ },
1081
+ {
1082
+ "epoch": 3.0,
1083
+ "step": 726,
1084
+ "total_flos": 0.0,
1085
+ "train_loss": 19.76867720969124,
1086
+ "train_runtime": 32942.8373,
1087
+ "train_samples_per_second": 5.643,
1088
+ "train_steps_per_second": 0.022
1089
+ }
1090
+ ],
1091
+ "logging_steps": 10,
1092
+ "max_steps": 726,
1093
+ "num_train_epochs": 3,
1094
+ "save_steps": 500,
1095
+ "total_flos": 0.0,
1096
+ "trial_name": null,
1097
+ "trial_params": null
1098
+ }
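
The trainer state above, together with the trl/dpo tags and the model name, suggests the adapter was trained with TRL's DPOTrainer using the IPO loss variant, but the training script itself is not part of this commit. The sketch below is a rough reconstruction under that assumption, using the hyperparameters from the card; the effective batch size of 256 is train_batch_size 2 x 4 GPUs x 32 gradient-accumulation steps, and the dataset name and beta value are hypothetical.

```python
# Rough sketch (assumptions noted above): IPO-flavoured DPO training of a LoRA adapter with TRL.
from datasets import load_dataset
from peft import LoraConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from trl import DPOTrainer

model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
tokenizer.pad_token = tokenizer.eos_token  # matches pad_token "</s>" in tokenizer_config.json

# Hypothetical preference dataset with "prompt"/"chosen"/"rejected" columns.
dataset = load_dataset("some/preference-dataset")

peft_config = LoraConfig(
    r=64, lora_alpha=16, lora_dropout=0.1, bias="none",
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
)

args = TrainingArguments(
    output_dir="zephyr-7b-ipo-lora",
    learning_rate=5e-7,
    per_device_train_batch_size=2,
    per_device_eval_batch_size=4,
    gradient_accumulation_steps=32,   # 2 x 4 GPUs x 32 = effective batch size 256
    num_train_epochs=3,
    lr_scheduler_type="linear",
    warmup_ratio=0.1,
    seed=42,
)

trainer = DPOTrainer(
    model,
    ref_model=None,            # with a PEFT adapter, TRL can use the frozen base as reference
    args=args,
    beta=0.1,                  # hypothetical; not recorded in this commit
    loss_type="ipo",
    train_dataset=dataset["train"],
    eval_dataset=dataset["test"],
    tokenizer=tokenizer,
    peft_config=peft_config,
)
trainer.train()
```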
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7525c71e8ee671cdda1b95ff4483d3f6877e7054dcbf6b0bf28af57836196771
+ size 4728