Mastane committed on
Commit 3a36520
1 parent: aeb9318

Model save
README.md ADDED
@@ -0,0 +1,82 @@
+ ---
+ license: apache-2.0
+ library_name: peft
+ tags:
+ - trl-dpo
+ - generated_from_trainer
+ base_model: mistralai/Mistral-7B-v0.1
+ model-index:
+ - name: zephyr-7b-ipo-lora
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # zephyr-7b-ipo-lora
+
+ This model is a LoRA adapter fine-tuned from [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) with IPO preference optimization on an unspecified preference dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 18.0888
+ - Rewards/chosen: 0.0201
+ - Rewards/rejected: -0.1194
+ - Rewards/accuracies: 0.7320
+ - Rewards/margins: 0.1395
+ - Logps/rejected: -212.8609
+ - Logps/chosen: -268.5415
+ - Logits/rejected: -2.3003
+ - Logits/chosen: -2.6873
+
+ ## Model description
+
+ A LoRA adapter (r=64, lora_alpha=16, dropout 0.1 on the q/k/v/o attention projections) for Mistral-7B-v0.1, trained with TRL's preference-optimization trainer using the IPO loss. This repository stores only the adapter weights; the base model is loaded separately at inference time.
+
+ ## Intended uses & limitations
+
+ More information needed
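+
+ As a minimal loading sketch (the adapter repository id below is an assumption; substitute this repo's full Hub id), the adapter can be applied on top of the base model with PEFT and queried through the bundled chat template:
+
+ ```python
+ # Minimal inference sketch; "zephyr-7b-ipo-lora" is a placeholder for this repo's full Hub id.
+ import torch
+ from peft import PeftModel
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ base_id = "mistralai/Mistral-7B-v0.1"
+ adapter_id = "zephyr-7b-ipo-lora"  # assumption: replace with "<user>/zephyr-7b-ipo-lora"
+
+ tokenizer = AutoTokenizer.from_pretrained(adapter_id)
+ base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.bfloat16, device_map="auto")
+ model = PeftModel.from_pretrained(base, adapter_id)
+
+ # The tokenizer ships a Zephyr-style chat template (see tokenizer_config.json).
+ prompt = tokenizer.apply_chat_template(
+     [{"role": "user", "content": "Explain preference optimization in one sentence."}],
+     tokenize=False,
+     add_generation_prompt=True,
+ )
+ inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+ out = model.generate(**inputs, max_new_tokens=128)
+ print(tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
+ ```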
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-07
+ - train_batch_size: 1
+ - eval_batch_size: 1
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 8
+ - gradient_accumulation_steps: 32
+ - total_train_batch_size: 256
+ - total_eval_batch_size: 8
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 3
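+
+ As a point of reference only, a run with these hyperparameters could be reproduced with TRL's `DPOTrainer` configured for IPO roughly as sketched below; the β value and the placeholder dataset are assumptions, since neither is recorded in this card:
+
+ ```python
+ # Hedged training sketch: TRL DPOTrainer with loss_type="ipo"; beta and the dataset are assumptions.
+ from datasets import Dataset
+ from peft import LoraConfig
+ from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
+ from trl import DPOTrainer
+
+ base_id = "mistralai/Mistral-7B-v0.1"
+ tokenizer = AutoTokenizer.from_pretrained(base_id)
+ tokenizer.pad_token = tokenizer.eos_token
+ model = AutoModelForCausalLM.from_pretrained(base_id)
+
+ # LoRA settings mirroring adapter_config.json in this repo.
+ peft_config = LoraConfig(
+     r=64,
+     lora_alpha=16,
+     lora_dropout=0.1,
+     target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
+     task_type="CAUSAL_LM",
+ )
+
+ # Placeholder preference pairs; the real training dataset is not named in this card.
+ train_dataset = Dataset.from_dict({
+     "prompt": ["What is 2 + 2?"],
+     "chosen": ["2 + 2 equals 4."],
+     "rejected": ["2 + 2 equals 5."],
+ })
+
+ args = TrainingArguments(
+     output_dir="zephyr-7b-ipo-lora",
+     learning_rate=5e-7,
+     per_device_train_batch_size=1,
+     per_device_eval_batch_size=1,
+     gradient_accumulation_steps=32,
+     num_train_epochs=3,
+     lr_scheduler_type="linear",
+     warmup_ratio=0.1,
+     seed=42,
+ )
+
+ trainer = DPOTrainer(
+     model=model,
+     ref_model=None,   # with a PEFT adapter, the frozen base model serves as the reference
+     args=args,
+     beta=0.1,         # assumed; not stated in this card
+     loss_type="ipo",
+     train_dataset=train_dataset,
+     tokenizer=tokenizer,
+     peft_config=peft_config,
+ )
+ trainer.train()
+ ```
+
+ With 8 devices, per-device batch size 1, and 32 gradient-accumulation steps, this matches the effective train batch size of 256 reported above.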
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
+ |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
+ | 19.1303 | 1.0 | 242 | 18.9124 | 0.0302 | -0.0939 | 0.7360 | 0.1241 | -212.6061 | -268.4410 | -2.3029 | -2.6884 |
+ | 19.0824 | 2.0 | 484 | 18.2266 | 0.0186 | -0.1163 | 0.7600 | 0.1349 | -212.8297 | -268.5567 | -2.3011 | -2.6877 |
+ | 18.0366 | 3.0 | 726 | 18.0888 | 0.0201 | -0.1194 | 0.7320 | 0.1395 | -212.8609 | -268.5415 | -2.3003 | -2.6873 |
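+
+ For reading the loss and reward columns: the IPO objective penalizes the squared gap between the preference log-ratio margin and a target of $1/(2\beta)$,
+
+ $$
+ \mathcal{L}_{\mathrm{IPO}} = \left( \log\frac{\pi_\theta(y_w \mid x)}{\pi_{\mathrm{ref}}(y_w \mid x)} - \log\frac{\pi_\theta(y_l \mid x)}{\pi_{\mathrm{ref}}(y_l \mid x)} - \frac{1}{2\beta} \right)^{2}.
+ $$
+
+ At initialization the margin is zero, so the loss starts near $1/(4\beta^2)$; the first logged value of 25.0 in `trainer_state.json` is consistent with an assumed $\beta = 0.1$. The `Rewards/*` columns are, in TRL's logging convention, β-scaled log-probability ratios of the policy against the reference model.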
+
+
+ ### Framework versions
+
+ - PEFT 0.6.1
+ - Transformers 4.36.1
+ - Pytorch 2.1.2+cu121
+ - Datasets 2.14.6
+ - Tokenizers 0.15.0
adapter_config.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "mistralai/Mistral-7B-v0.1",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 16,
+   "lora_dropout": 0.1,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 64,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "k_proj",
+     "o_proj",
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
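
The adapter config above targets the attention projections (q/k/v/o) with rank 64. If PEFT is not wanted at inference time, the adapter can be merged into the base weights; a minimal sketch, where the adapter id and output path are illustrative:

```python
# Merge the LoRA adapter into the base model and save a standalone checkpoint.
from peft import PeftModel
from transformers import AutoModelForCausalLM

base = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
merged = PeftModel.from_pretrained(base, "zephyr-7b-ipo-lora").merge_and_unload()  # placeholder id
merged.save_pretrained("zephyr-7b-ipo-merged")
```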
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:738e66fb7a1f8c4673e32ab6bf7f25429395cf50e8aa2862bb87b459869d84b1
+ size 218138576
all_results.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "epoch": 3.0,
+   "eval_logits/chosen": -2.687349796295166,
+   "eval_logits/rejected": -2.300260305404663,
+   "eval_logps/chosen": -268.54150390625,
+   "eval_logps/rejected": -212.86087036132812,
+   "eval_loss": 18.088815689086914,
+   "eval_rewards/accuracies": 0.7319999933242798,
+   "eval_rewards/chosen": 0.02013408951461315,
+   "eval_rewards/margins": 0.13951165974140167,
+   "eval_rewards/rejected": -0.11937756091356277,
+   "eval_runtime": 350.1967,
+   "eval_samples": 2000,
+   "eval_samples_per_second": 5.711,
+   "eval_steps_per_second": 0.714,
+   "train_loss": 19.467516181882747,
+   "train_runtime": 42806.6761,
+   "train_samples": 61966,
+   "train_samples_per_second": 4.343,
+   "train_steps_per_second": 0.017
+ }
eval_results.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "epoch": 3.0,
+   "eval_logits/chosen": -2.687349796295166,
+   "eval_logits/rejected": -2.300260305404663,
+   "eval_logps/chosen": -268.54150390625,
+   "eval_logps/rejected": -212.86087036132812,
+   "eval_loss": 18.088815689086914,
+   "eval_rewards/accuracies": 0.7319999933242798,
+   "eval_rewards/chosen": 0.02013408951461315,
+   "eval_rewards/margins": 0.13951165974140167,
+   "eval_rewards/rejected": -0.11937756091356277,
+   "eval_runtime": 350.1967,
+   "eval_samples": 2000,
+   "eval_samples_per_second": 5.711,
+   "eval_steps_per_second": 0.714
+ }
runs/Dec23_06-36-03_ip-172-16-86-203.ec2.internal/events.out.tfevents.1703313736.ip-172-16-86-203.ec2.internal.47042.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9a340838bd1336c1af94b66efbe12d1e0d6e9d9232b55f51e61adce493e02824
+ size 53240
runs/Dec23_06-36-03_ip-172-16-86-203.ec2.internal/events.out.tfevents.1703356892.ip-172-16-86-203.ec2.internal.47042.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:750d984d6dfce32f30f9208c211925daa36662109b9db7cfea5154b477f1b37d
+ size 828
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "bos_token": "<s>",
+   "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": true,
+   "model_max_length": 2048,
+   "pad_token": "</s>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": true
+ }
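
The `chat_template` above defines a Zephyr-style format in which each turn is wrapped in `<|system|>`, `<|user|>`, or `<|assistant|>` markers and terminated by the EOS token, and the pad token is set equal to the EOS token (`</s>`). A quick way to inspect the exact prompt string it produces, assuming a placeholder repo id:

```python
# Render the chat template to a string to see the prompt format (no model weights needed).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("zephyr-7b-ipo-lora")  # placeholder for this repo's Hub id
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
print(tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True))
```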
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 3.0,
+   "train_loss": 19.467516181882747,
+   "train_runtime": 42806.6761,
+   "train_samples": 61966,
+   "train_samples_per_second": 4.343,
+   "train_steps_per_second": 0.017
+ }
trainer_state.json ADDED
@@ -0,0 +1,1100 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 2.9992254066615027,
5
+ "eval_steps": 100,
6
+ "global_step": 726,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.0,
13
+ "learning_rate": 6.84931506849315e-09,
14
+ "logits/chosen": -2.55204439163208,
15
+ "logits/rejected": -2.328748941421509,
16
+ "logps/chosen": -243.65313720703125,
17
+ "logps/rejected": -203.76687622070312,
18
+ "loss": 25.0,
19
+ "rewards/accuracies": 0.0,
20
+ "rewards/chosen": 0.0,
21
+ "rewards/margins": 0.0,
22
+ "rewards/rejected": 0.0,
23
+ "step": 1
24
+ },
25
+ {
26
+ "epoch": 0.04,
27
+ "learning_rate": 6.84931506849315e-08,
28
+ "logits/chosen": -2.6284379959106445,
29
+ "logits/rejected": -2.217939853668213,
30
+ "logps/chosen": -283.397705078125,
31
+ "logps/rejected": -223.02455139160156,
32
+ "loss": 25.2356,
33
+ "rewards/accuracies": 0.4652777910232544,
34
+ "rewards/chosen": 0.000989701016806066,
35
+ "rewards/margins": 0.0014238969888538122,
36
+ "rewards/rejected": -0.0004341956228017807,
37
+ "step": 10
38
+ },
39
+ {
40
+ "epoch": 0.08,
41
+ "learning_rate": 1.36986301369863e-07,
42
+ "logits/chosen": -2.6449296474456787,
43
+ "logits/rejected": -2.2949368953704834,
44
+ "logps/chosen": -276.48468017578125,
45
+ "logps/rejected": -204.2726593017578,
46
+ "loss": 25.4323,
47
+ "rewards/accuracies": 0.5093749761581421,
48
+ "rewards/chosen": 0.0012504293117672205,
49
+ "rewards/margins": 0.0009743297705426812,
50
+ "rewards/rejected": 0.0002760997449513525,
51
+ "step": 20
52
+ },
53
+ {
54
+ "epoch": 0.12,
55
+ "learning_rate": 2.054794520547945e-07,
56
+ "logits/chosen": -2.61030912399292,
57
+ "logits/rejected": -2.28235125541687,
58
+ "logps/chosen": -242.44509887695312,
59
+ "logps/rejected": -195.07154846191406,
60
+ "loss": 25.3196,
61
+ "rewards/accuracies": 0.5093749761581421,
62
+ "rewards/chosen": -0.0013605057029053569,
63
+ "rewards/margins": 0.0007983079412952065,
64
+ "rewards/rejected": -0.0021588136442005634,
65
+ "step": 30
66
+ },
67
+ {
68
+ "epoch": 0.17,
69
+ "learning_rate": 2.73972602739726e-07,
70
+ "logits/chosen": -2.676435947418213,
71
+ "logits/rejected": -2.2325174808502197,
72
+ "logps/chosen": -276.4580383300781,
73
+ "logps/rejected": -195.8528289794922,
74
+ "loss": 24.8252,
75
+ "rewards/accuracies": 0.546875,
76
+ "rewards/chosen": 0.004295675549656153,
77
+ "rewards/margins": 0.006117778830230236,
78
+ "rewards/rejected": -0.001822102814912796,
79
+ "step": 40
80
+ },
81
+ {
82
+ "epoch": 0.21,
83
+ "learning_rate": 3.424657534246575e-07,
84
+ "logits/chosen": -2.5881507396698,
85
+ "logits/rejected": -2.340297222137451,
86
+ "logps/chosen": -254.146484375,
87
+ "logps/rejected": -227.5242919921875,
88
+ "loss": 24.8309,
89
+ "rewards/accuracies": 0.484375,
90
+ "rewards/chosen": 0.0033471225760877132,
91
+ "rewards/margins": -0.0003331190091557801,
92
+ "rewards/rejected": 0.003680241061374545,
93
+ "step": 50
94
+ },
95
+ {
96
+ "epoch": 0.25,
97
+ "learning_rate": 4.10958904109589e-07,
98
+ "logits/chosen": -2.6254591941833496,
99
+ "logits/rejected": -2.264179229736328,
100
+ "logps/chosen": -268.96685791015625,
101
+ "logps/rejected": -212.42770385742188,
102
+ "loss": 23.9609,
103
+ "rewards/accuracies": 0.6156250238418579,
104
+ "rewards/chosen": 0.015944072976708412,
105
+ "rewards/margins": 0.016598206013441086,
106
+ "rewards/rejected": -0.0006541347247548401,
107
+ "step": 60
108
+ },
109
+ {
110
+ "epoch": 0.29,
111
+ "learning_rate": 4.794520547945205e-07,
112
+ "logits/chosen": -2.596461534500122,
113
+ "logits/rejected": -2.2788121700286865,
114
+ "logps/chosen": -244.193115234375,
115
+ "logps/rejected": -198.00149536132812,
116
+ "loss": 23.4551,
117
+ "rewards/accuracies": 0.550000011920929,
118
+ "rewards/chosen": 0.01318939495831728,
119
+ "rewards/margins": 0.014477437362074852,
120
+ "rewards/rejected": -0.0012880431022495031,
121
+ "step": 70
122
+ },
123
+ {
124
+ "epoch": 0.33,
125
+ "learning_rate": 4.946401225114854e-07,
126
+ "logits/chosen": -2.6231789588928223,
127
+ "logits/rejected": -2.248992443084717,
128
+ "logps/chosen": -256.1118469238281,
129
+ "logps/rejected": -212.2821502685547,
130
+ "loss": 22.8216,
131
+ "rewards/accuracies": 0.6625000238418579,
132
+ "rewards/chosen": 0.021900424733757973,
133
+ "rewards/margins": 0.034875936806201935,
134
+ "rewards/rejected": -0.012975512072443962,
135
+ "step": 80
136
+ },
137
+ {
138
+ "epoch": 0.37,
139
+ "learning_rate": 4.869831546707504e-07,
140
+ "logits/chosen": -2.6711127758026123,
141
+ "logits/rejected": -2.2961134910583496,
142
+ "logps/chosen": -265.5664978027344,
143
+ "logps/rejected": -222.6741180419922,
144
+ "loss": 22.2504,
145
+ "rewards/accuracies": 0.621874988079071,
146
+ "rewards/chosen": 0.037670500576496124,
147
+ "rewards/margins": 0.04158278554677963,
148
+ "rewards/rejected": -0.003912287298589945,
149
+ "step": 90
150
+ },
151
+ {
152
+ "epoch": 0.41,
153
+ "learning_rate": 4.793261868300153e-07,
154
+ "logits/chosen": -2.6314404010772705,
155
+ "logits/rejected": -2.367823362350464,
156
+ "logps/chosen": -272.08331298828125,
157
+ "logps/rejected": -240.5725555419922,
158
+ "loss": 21.7851,
159
+ "rewards/accuracies": 0.675000011920929,
160
+ "rewards/chosen": 0.04255244508385658,
161
+ "rewards/margins": 0.05080147832632065,
162
+ "rewards/rejected": -0.00824903603643179,
163
+ "step": 100
164
+ },
165
+ {
166
+ "epoch": 0.45,
167
+ "learning_rate": 4.7166921898928023e-07,
168
+ "logits/chosen": -2.665757417678833,
169
+ "logits/rejected": -2.239075183868408,
170
+ "logps/chosen": -236.4951934814453,
171
+ "logps/rejected": -197.68630981445312,
172
+ "loss": 21.284,
173
+ "rewards/accuracies": 0.715624988079071,
174
+ "rewards/chosen": 0.03496263176202774,
175
+ "rewards/margins": 0.06847874075174332,
176
+ "rewards/rejected": -0.03351610153913498,
177
+ "step": 110
178
+ },
179
+ {
180
+ "epoch": 0.5,
181
+ "learning_rate": 4.640122511485451e-07,
182
+ "logits/chosen": -2.61603045463562,
183
+ "logits/rejected": -2.319265127182007,
184
+ "logps/chosen": -263.4698791503906,
185
+ "logps/rejected": -221.0123748779297,
186
+ "loss": 20.8846,
187
+ "rewards/accuracies": 0.6312500238418579,
188
+ "rewards/chosen": 0.03651345893740654,
189
+ "rewards/margins": 0.06544128805398941,
190
+ "rewards/rejected": -0.02892782725393772,
191
+ "step": 120
192
+ },
193
+ {
194
+ "epoch": 0.54,
195
+ "learning_rate": 4.563552833078101e-07,
196
+ "logits/chosen": -2.642108201980591,
197
+ "logits/rejected": -2.2785372734069824,
198
+ "logps/chosen": -265.32977294921875,
199
+ "logps/rejected": -217.34768676757812,
200
+ "loss": 20.4002,
201
+ "rewards/accuracies": 0.671875,
202
+ "rewards/chosen": 0.039137523621320724,
203
+ "rewards/margins": 0.07437606900930405,
204
+ "rewards/rejected": -0.035238541662693024,
205
+ "step": 130
206
+ },
207
+ {
208
+ "epoch": 0.58,
209
+ "learning_rate": 4.4869831546707505e-07,
210
+ "logits/chosen": -2.6350350379943848,
211
+ "logits/rejected": -2.323685646057129,
212
+ "logps/chosen": -266.7952575683594,
213
+ "logps/rejected": -226.29617309570312,
214
+ "loss": 20.3918,
215
+ "rewards/accuracies": 0.706250011920929,
216
+ "rewards/chosen": 0.04871717840433121,
217
+ "rewards/margins": 0.0838700383901596,
218
+ "rewards/rejected": -0.0351528637111187,
219
+ "step": 140
220
+ },
221
+ {
222
+ "epoch": 0.62,
223
+ "learning_rate": 4.4104134762633994e-07,
224
+ "logits/chosen": -2.6673989295959473,
225
+ "logits/rejected": -2.305692195892334,
226
+ "logps/chosen": -267.29180908203125,
227
+ "logps/rejected": -207.8380889892578,
228
+ "loss": 19.8348,
229
+ "rewards/accuracies": 0.703125,
230
+ "rewards/chosen": 0.04811176657676697,
231
+ "rewards/margins": 0.09522075951099396,
232
+ "rewards/rejected": -0.047109004110097885,
233
+ "step": 150
234
+ },
235
+ {
236
+ "epoch": 0.66,
237
+ "learning_rate": 4.333843797856049e-07,
238
+ "logits/chosen": -2.664452314376831,
239
+ "logits/rejected": -2.3129963874816895,
240
+ "logps/chosen": -264.9499816894531,
241
+ "logps/rejected": -236.6409454345703,
242
+ "loss": 19.7381,
243
+ "rewards/accuracies": 0.6937500238418579,
244
+ "rewards/chosen": 0.045278288424015045,
245
+ "rewards/margins": 0.0909709632396698,
246
+ "rewards/rejected": -0.04569266736507416,
247
+ "step": 160
248
+ },
249
+ {
250
+ "epoch": 0.7,
251
+ "learning_rate": 4.257274119448698e-07,
252
+ "logits/chosen": -2.581209421157837,
253
+ "logits/rejected": -2.3164689540863037,
254
+ "logps/chosen": -269.43072509765625,
255
+ "logps/rejected": -230.83047485351562,
256
+ "loss": 19.8434,
257
+ "rewards/accuracies": 0.640625,
258
+ "rewards/chosen": 0.03498955816030502,
259
+ "rewards/margins": 0.08386311680078506,
260
+ "rewards/rejected": -0.048873551189899445,
261
+ "step": 170
262
+ },
263
+ {
264
+ "epoch": 0.74,
265
+ "learning_rate": 4.180704441041347e-07,
266
+ "logits/chosen": -2.648585557937622,
267
+ "logits/rejected": -2.3146426677703857,
268
+ "logps/chosen": -254.5563507080078,
269
+ "logps/rejected": -222.23922729492188,
270
+ "loss": 20.3028,
271
+ "rewards/accuracies": 0.6875,
272
+ "rewards/chosen": 0.03642386570572853,
273
+ "rewards/margins": 0.10294415056705475,
274
+ "rewards/rejected": -0.06652027368545532,
275
+ "step": 180
276
+ },
277
+ {
278
+ "epoch": 0.78,
279
+ "learning_rate": 4.1041347626339966e-07,
280
+ "logits/chosen": -2.684251308441162,
281
+ "logits/rejected": -2.282341241836548,
282
+ "logps/chosen": -262.4200744628906,
283
+ "logps/rejected": -213.4808807373047,
284
+ "loss": 20.0038,
285
+ "rewards/accuracies": 0.699999988079071,
286
+ "rewards/chosen": 0.03100644052028656,
287
+ "rewards/margins": 0.10564477741718292,
288
+ "rewards/rejected": -0.07463832199573517,
289
+ "step": 190
290
+ },
291
+ {
292
+ "epoch": 0.83,
293
+ "learning_rate": 4.027565084226646e-07,
294
+ "logits/chosen": -2.6774110794067383,
295
+ "logits/rejected": -2.31669020652771,
296
+ "logps/chosen": -242.54110717773438,
297
+ "logps/rejected": -201.0673370361328,
298
+ "loss": 18.8904,
299
+ "rewards/accuracies": 0.6812499761581421,
300
+ "rewards/chosen": 0.029109233990311623,
301
+ "rewards/margins": 0.09885577857494354,
302
+ "rewards/rejected": -0.06974655389785767,
303
+ "step": 200
304
+ },
305
+ {
306
+ "epoch": 0.87,
307
+ "learning_rate": 3.9509954058192954e-07,
308
+ "logits/chosen": -2.6493515968322754,
309
+ "logits/rejected": -2.2453055381774902,
310
+ "logps/chosen": -269.30072021484375,
311
+ "logps/rejected": -215.35482788085938,
312
+ "loss": 18.7438,
313
+ "rewards/accuracies": 0.6937500238418579,
314
+ "rewards/chosen": 0.03713355213403702,
315
+ "rewards/margins": 0.12264309078454971,
316
+ "rewards/rejected": -0.0855095237493515,
317
+ "step": 210
318
+ },
319
+ {
320
+ "epoch": 0.91,
321
+ "learning_rate": 3.874425727411945e-07,
322
+ "logits/chosen": -2.6454288959503174,
323
+ "logits/rejected": -2.2814364433288574,
324
+ "logps/chosen": -264.0574035644531,
325
+ "logps/rejected": -206.1600341796875,
326
+ "loss": 19.3111,
327
+ "rewards/accuracies": 0.71875,
328
+ "rewards/chosen": 0.03974270448088646,
329
+ "rewards/margins": 0.12600289285182953,
330
+ "rewards/rejected": -0.08626019209623337,
331
+ "step": 220
332
+ },
333
+ {
334
+ "epoch": 0.95,
335
+ "learning_rate": 3.797856049004594e-07,
336
+ "logits/chosen": -2.6513967514038086,
337
+ "logits/rejected": -2.437913179397583,
338
+ "logps/chosen": -261.82855224609375,
339
+ "logps/rejected": -220.0791778564453,
340
+ "loss": 19.1461,
341
+ "rewards/accuracies": 0.731249988079071,
342
+ "rewards/chosen": 0.04797480255365372,
343
+ "rewards/margins": 0.12008707225322723,
344
+ "rewards/rejected": -0.07211227715015411,
345
+ "step": 230
346
+ },
347
+ {
348
+ "epoch": 0.99,
349
+ "learning_rate": 3.7212863705972436e-07,
350
+ "logits/chosen": -2.664517879486084,
351
+ "logits/rejected": -2.3380627632141113,
352
+ "logps/chosen": -268.38922119140625,
353
+ "logps/rejected": -203.86264038085938,
354
+ "loss": 19.1303,
355
+ "rewards/accuracies": 0.7124999761581421,
356
+ "rewards/chosen": 0.037844784557819366,
357
+ "rewards/margins": 0.11463721841573715,
358
+ "rewards/rejected": -0.07679243385791779,
359
+ "step": 240
360
+ },
361
+ {
362
+ "epoch": 1.0,
363
+ "eval_logits/chosen": -2.688377618789673,
364
+ "eval_logits/rejected": -2.3029286861419678,
365
+ "eval_logps/chosen": -268.4410400390625,
366
+ "eval_logps/rejected": -212.60609436035156,
367
+ "eval_loss": 18.912391662597656,
368
+ "eval_rewards/accuracies": 0.7360000014305115,
369
+ "eval_rewards/chosen": 0.03018273413181305,
370
+ "eval_rewards/margins": 0.12408097833395004,
371
+ "eval_rewards/rejected": -0.09389825165271759,
372
+ "eval_runtime": 352.0426,
373
+ "eval_samples_per_second": 5.681,
374
+ "eval_steps_per_second": 0.71,
375
+ "step": 242
376
+ },
377
+ {
378
+ "epoch": 1.03,
379
+ "learning_rate": 3.6447166921898925e-07,
380
+ "logits/chosen": -2.585019588470459,
381
+ "logits/rejected": -2.2706055641174316,
382
+ "logps/chosen": -248.5968017578125,
383
+ "logps/rejected": -192.33859252929688,
384
+ "loss": 19.3292,
385
+ "rewards/accuracies": 0.7437499761581421,
386
+ "rewards/chosen": 0.026646222919225693,
387
+ "rewards/margins": 0.12298872321844101,
388
+ "rewards/rejected": -0.09634250402450562,
389
+ "step": 250
390
+ },
391
+ {
392
+ "epoch": 1.07,
393
+ "learning_rate": 3.568147013782542e-07,
394
+ "logits/chosen": -2.6509532928466797,
395
+ "logits/rejected": -2.2828612327575684,
396
+ "logps/chosen": -250.4876708984375,
397
+ "logps/rejected": -202.44654846191406,
398
+ "loss": 18.5341,
399
+ "rewards/accuracies": 0.734375,
400
+ "rewards/chosen": 0.04957858473062515,
401
+ "rewards/margins": 0.13254784047603607,
402
+ "rewards/rejected": -0.08296926319599152,
403
+ "step": 260
404
+ },
405
+ {
406
+ "epoch": 1.12,
407
+ "learning_rate": 3.4915773353751913e-07,
408
+ "logits/chosen": -2.6814422607421875,
409
+ "logits/rejected": -2.252516269683838,
410
+ "logps/chosen": -273.83038330078125,
411
+ "logps/rejected": -227.3008270263672,
412
+ "loss": 18.3748,
413
+ "rewards/accuracies": 0.768750011920929,
414
+ "rewards/chosen": 0.038934461772441864,
415
+ "rewards/margins": 0.14948731660842896,
416
+ "rewards/rejected": -0.11055286228656769,
417
+ "step": 270
418
+ },
419
+ {
420
+ "epoch": 1.16,
421
+ "learning_rate": 3.41500765696784e-07,
422
+ "logits/chosen": -2.6279919147491455,
423
+ "logits/rejected": -2.3474836349487305,
424
+ "logps/chosen": -230.10598754882812,
425
+ "logps/rejected": -215.902587890625,
426
+ "loss": 18.5872,
427
+ "rewards/accuracies": 0.71875,
428
+ "rewards/chosen": 0.03518626466393471,
429
+ "rewards/margins": 0.108550526201725,
430
+ "rewards/rejected": -0.0733642652630806,
431
+ "step": 280
432
+ },
433
+ {
434
+ "epoch": 1.2,
435
+ "learning_rate": 3.33843797856049e-07,
436
+ "logits/chosen": -2.625840663909912,
437
+ "logits/rejected": -2.388363838195801,
438
+ "logps/chosen": -287.68280029296875,
439
+ "logps/rejected": -237.7518768310547,
440
+ "loss": 18.1373,
441
+ "rewards/accuracies": 0.7437499761581421,
442
+ "rewards/chosen": 0.03980474919080734,
443
+ "rewards/margins": 0.125565305352211,
444
+ "rewards/rejected": -0.08576056361198425,
445
+ "step": 290
446
+ },
447
+ {
448
+ "epoch": 1.24,
449
+ "learning_rate": 3.2618683001531396e-07,
450
+ "logits/chosen": -2.592059373855591,
451
+ "logits/rejected": -2.242305278778076,
452
+ "logps/chosen": -239.9512939453125,
453
+ "logps/rejected": -212.0597686767578,
454
+ "loss": 18.2727,
455
+ "rewards/accuracies": 0.746874988079071,
456
+ "rewards/chosen": 0.030053243041038513,
457
+ "rewards/margins": 0.1290222704410553,
458
+ "rewards/rejected": -0.09896902740001678,
459
+ "step": 300
460
+ },
461
+ {
462
+ "epoch": 1.28,
463
+ "learning_rate": 3.1852986217457885e-07,
464
+ "logits/chosen": -2.6135897636413574,
465
+ "logits/rejected": -2.2893946170806885,
466
+ "logps/chosen": -252.56002807617188,
467
+ "logps/rejected": -218.57937622070312,
468
+ "loss": 18.9516,
469
+ "rewards/accuracies": 0.7124999761581421,
470
+ "rewards/chosen": 0.027646947652101517,
471
+ "rewards/margins": 0.11891412734985352,
472
+ "rewards/rejected": -0.0912671834230423,
473
+ "step": 310
474
+ },
475
+ {
476
+ "epoch": 1.32,
477
+ "learning_rate": 3.108728943338438e-07,
478
+ "logits/chosen": -2.6720099449157715,
479
+ "logits/rejected": -2.3789660930633545,
480
+ "logps/chosen": -255.5870361328125,
481
+ "logps/rejected": -232.6374969482422,
482
+ "loss": 18.993,
483
+ "rewards/accuracies": 0.684374988079071,
484
+ "rewards/chosen": 0.03462428227066994,
485
+ "rewards/margins": 0.11329145729541779,
486
+ "rewards/rejected": -0.07866715639829636,
487
+ "step": 320
488
+ },
489
+ {
490
+ "epoch": 1.36,
491
+ "learning_rate": 3.0321592649310873e-07,
492
+ "logits/chosen": -2.636521816253662,
493
+ "logits/rejected": -2.260267496109009,
494
+ "logps/chosen": -242.1477508544922,
495
+ "logps/rejected": -213.030029296875,
496
+ "loss": 18.1642,
497
+ "rewards/accuracies": 0.737500011920929,
498
+ "rewards/chosen": 0.04163060337305069,
499
+ "rewards/margins": 0.13628455996513367,
500
+ "rewards/rejected": -0.09465396404266357,
501
+ "step": 330
502
+ },
503
+ {
504
+ "epoch": 1.4,
505
+ "learning_rate": 2.955589586523736e-07,
506
+ "logits/chosen": -2.6310007572174072,
507
+ "logits/rejected": -2.3438334465026855,
508
+ "logps/chosen": -268.5300598144531,
509
+ "logps/rejected": -229.24057006835938,
510
+ "loss": 19.3254,
511
+ "rewards/accuracies": 0.734375,
512
+ "rewards/chosen": 0.03425261378288269,
513
+ "rewards/margins": 0.1407606601715088,
514
+ "rewards/rejected": -0.1065080538392067,
515
+ "step": 340
516
+ },
517
+ {
518
+ "epoch": 1.45,
519
+ "learning_rate": 2.8790199081163856e-07,
520
+ "logits/chosen": -2.6794886589050293,
521
+ "logits/rejected": -2.297974109649658,
522
+ "logps/chosen": -271.8599853515625,
523
+ "logps/rejected": -222.39688110351562,
524
+ "loss": 19.1924,
525
+ "rewards/accuracies": 0.684374988079071,
526
+ "rewards/chosen": 0.022753948345780373,
527
+ "rewards/margins": 0.10870112478733063,
528
+ "rewards/rejected": -0.0859471783041954,
529
+ "step": 350
530
+ },
531
+ {
532
+ "epoch": 1.49,
533
+ "learning_rate": 2.802450229709035e-07,
534
+ "logits/chosen": -2.635558843612671,
535
+ "logits/rejected": -2.196741819381714,
536
+ "logps/chosen": -250.3131103515625,
537
+ "logps/rejected": -201.41946411132812,
538
+ "loss": 18.4515,
539
+ "rewards/accuracies": 0.7250000238418579,
540
+ "rewards/chosen": 0.018490195274353027,
541
+ "rewards/margins": 0.12475041300058365,
542
+ "rewards/rejected": -0.10626020282506943,
543
+ "step": 360
544
+ },
545
+ {
546
+ "epoch": 1.53,
547
+ "learning_rate": 2.725880551301684e-07,
548
+ "logits/chosen": -2.6633524894714355,
549
+ "logits/rejected": -2.3473284244537354,
550
+ "logps/chosen": -271.9929504394531,
551
+ "logps/rejected": -228.14157104492188,
552
+ "loss": 19.0148,
553
+ "rewards/accuracies": 0.684374988079071,
554
+ "rewards/chosen": 0.015023022890090942,
555
+ "rewards/margins": 0.12562182545661926,
556
+ "rewards/rejected": -0.11059880256652832,
557
+ "step": 370
558
+ },
559
+ {
560
+ "epoch": 1.57,
561
+ "learning_rate": 2.649310872894334e-07,
562
+ "logits/chosen": -2.6451079845428467,
563
+ "logits/rejected": -2.3189663887023926,
564
+ "logps/chosen": -245.8695831298828,
565
+ "logps/rejected": -210.0269775390625,
566
+ "loss": 18.4524,
567
+ "rewards/accuracies": 0.7250000238418579,
568
+ "rewards/chosen": 0.007006034255027771,
569
+ "rewards/margins": 0.11740052700042725,
570
+ "rewards/rejected": -0.11039450019598007,
571
+ "step": 380
572
+ },
573
+ {
574
+ "epoch": 1.61,
575
+ "learning_rate": 2.572741194486983e-07,
576
+ "logits/chosen": -2.6605451107025146,
577
+ "logits/rejected": -2.206817626953125,
578
+ "logps/chosen": -275.4052429199219,
579
+ "logps/rejected": -203.68002319335938,
580
+ "loss": 18.4799,
581
+ "rewards/accuracies": 0.746874988079071,
582
+ "rewards/chosen": 0.017081182450056076,
583
+ "rewards/margins": 0.14246238768100739,
584
+ "rewards/rejected": -0.1253812164068222,
585
+ "step": 390
586
+ },
587
+ {
588
+ "epoch": 1.65,
589
+ "learning_rate": 2.496171516079632e-07,
590
+ "logits/chosen": -2.74629282951355,
591
+ "logits/rejected": -2.338416337966919,
592
+ "logps/chosen": -276.91033935546875,
593
+ "logps/rejected": -221.68307495117188,
594
+ "loss": 18.5723,
595
+ "rewards/accuracies": 0.7281249761581421,
596
+ "rewards/chosen": 0.018960032612085342,
597
+ "rewards/margins": 0.1522599160671234,
598
+ "rewards/rejected": -0.13329990208148956,
599
+ "step": 400
600
+ },
601
+ {
602
+ "epoch": 1.69,
603
+ "learning_rate": 2.4196018376722816e-07,
604
+ "logits/chosen": -2.616495370864868,
605
+ "logits/rejected": -2.2858164310455322,
606
+ "logps/chosen": -260.0771484375,
607
+ "logps/rejected": -216.95919799804688,
608
+ "loss": 18.8108,
609
+ "rewards/accuracies": 0.731249988079071,
610
+ "rewards/chosen": 0.022241920232772827,
611
+ "rewards/margins": 0.14108815789222717,
612
+ "rewards/rejected": -0.11884623765945435,
613
+ "step": 410
614
+ },
615
+ {
616
+ "epoch": 1.74,
617
+ "learning_rate": 2.343032159264931e-07,
618
+ "logits/chosen": -2.651947259902954,
619
+ "logits/rejected": -2.266605854034424,
620
+ "logps/chosen": -264.1819152832031,
621
+ "logps/rejected": -221.7666473388672,
622
+ "loss": 18.6108,
623
+ "rewards/accuracies": 0.746874988079071,
624
+ "rewards/chosen": 0.019768675789237022,
625
+ "rewards/margins": 0.14661307632923126,
626
+ "rewards/rejected": -0.1268443912267685,
627
+ "step": 420
628
+ },
629
+ {
630
+ "epoch": 1.78,
631
+ "learning_rate": 2.26646248085758e-07,
632
+ "logits/chosen": -2.6224217414855957,
633
+ "logits/rejected": -2.2512006759643555,
634
+ "logps/chosen": -242.04757690429688,
635
+ "logps/rejected": -202.48556518554688,
636
+ "loss": 18.291,
637
+ "rewards/accuracies": 0.753125011920929,
638
+ "rewards/chosen": 0.020827816799283028,
639
+ "rewards/margins": 0.13378821313381195,
640
+ "rewards/rejected": -0.11296038329601288,
641
+ "step": 430
642
+ },
643
+ {
644
+ "epoch": 1.82,
645
+ "learning_rate": 2.1898928024502298e-07,
646
+ "logits/chosen": -2.658205509185791,
647
+ "logits/rejected": -2.3186447620391846,
648
+ "logps/chosen": -247.4537353515625,
649
+ "logps/rejected": -198.62149047851562,
650
+ "loss": 18.2582,
651
+ "rewards/accuracies": 0.706250011920929,
652
+ "rewards/chosen": 0.008651083335280418,
653
+ "rewards/margins": 0.12013272196054459,
654
+ "rewards/rejected": -0.11148162931203842,
655
+ "step": 440
656
+ },
657
+ {
658
+ "epoch": 1.86,
659
+ "learning_rate": 2.113323124042879e-07,
660
+ "logits/chosen": -2.6413986682891846,
661
+ "logits/rejected": -2.323753595352173,
662
+ "logps/chosen": -265.66241455078125,
663
+ "logps/rejected": -233.0228729248047,
664
+ "loss": 18.5032,
665
+ "rewards/accuracies": 0.734375,
666
+ "rewards/chosen": 0.01642756350338459,
667
+ "rewards/margins": 0.13231472671031952,
668
+ "rewards/rejected": -0.11588716506958008,
669
+ "step": 450
670
+ },
671
+ {
672
+ "epoch": 1.9,
673
+ "learning_rate": 2.036753445635528e-07,
674
+ "logits/chosen": -2.6705307960510254,
675
+ "logits/rejected": -2.3020169734954834,
676
+ "logps/chosen": -286.9468078613281,
677
+ "logps/rejected": -232.2310028076172,
678
+ "loss": 18.0061,
679
+ "rewards/accuracies": 0.71875,
680
+ "rewards/chosen": 0.01540865283459425,
681
+ "rewards/margins": 0.14157035946846008,
682
+ "rewards/rejected": -0.12616169452667236,
683
+ "step": 460
684
+ },
685
+ {
686
+ "epoch": 1.94,
687
+ "learning_rate": 1.9601837672281775e-07,
688
+ "logits/chosen": -2.645066022872925,
689
+ "logits/rejected": -2.2868847846984863,
690
+ "logps/chosen": -275.22344970703125,
691
+ "logps/rejected": -230.9308624267578,
692
+ "loss": 18.0886,
693
+ "rewards/accuracies": 0.7593749761581421,
694
+ "rewards/chosen": 0.025068962946534157,
695
+ "rewards/margins": 0.1567041575908661,
696
+ "rewards/rejected": -0.13163520395755768,
697
+ "step": 470
698
+ },
699
+ {
700
+ "epoch": 1.98,
701
+ "learning_rate": 1.883614088820827e-07,
702
+ "logits/chosen": -2.6855411529541016,
703
+ "logits/rejected": -2.2669596672058105,
704
+ "logps/chosen": -257.3984680175781,
705
+ "logps/rejected": -210.7415771484375,
706
+ "loss": 19.0824,
707
+ "rewards/accuracies": 0.737500011920929,
708
+ "rewards/chosen": 0.01750902086496353,
709
+ "rewards/margins": 0.12825700640678406,
710
+ "rewards/rejected": -0.11074797064065933,
711
+ "step": 480
712
+ },
713
+ {
714
+ "epoch": 2.0,
715
+ "eval_logits/chosen": -2.687664747238159,
716
+ "eval_logits/rejected": -2.3010897636413574,
717
+ "eval_logps/chosen": -268.55670166015625,
718
+ "eval_logps/rejected": -212.82968139648438,
719
+ "eval_loss": 18.22655487060547,
720
+ "eval_rewards/accuracies": 0.7599999904632568,
721
+ "eval_rewards/chosen": 0.01861482858657837,
722
+ "eval_rewards/margins": 0.13487249612808228,
723
+ "eval_rewards/rejected": -0.1162576824426651,
724
+ "eval_runtime": 352.6258,
725
+ "eval_samples_per_second": 5.672,
726
+ "eval_steps_per_second": 0.709,
727
+ "step": 484
728
+ },
729
+ {
730
+ "epoch": 2.02,
731
+ "learning_rate": 1.807044410413476e-07,
732
+ "logits/chosen": -2.63578200340271,
733
+ "logits/rejected": -2.335254430770874,
734
+ "logps/chosen": -252.0665740966797,
735
+ "logps/rejected": -221.12002563476562,
736
+ "loss": 18.6586,
737
+ "rewards/accuracies": 0.734375,
738
+ "rewards/chosen": 0.00841272622346878,
739
+ "rewards/margins": 0.12638814747333527,
740
+ "rewards/rejected": -0.11797541379928589,
741
+ "step": 490
742
+ },
743
+ {
744
+ "epoch": 2.07,
745
+ "learning_rate": 1.7304747320061255e-07,
746
+ "logits/chosen": -2.576451063156128,
747
+ "logits/rejected": -2.343707799911499,
748
+ "logps/chosen": -253.4521484375,
749
+ "logps/rejected": -228.43115234375,
750
+ "loss": 18.2798,
751
+ "rewards/accuracies": 0.706250011920929,
752
+ "rewards/chosen": 0.0265754796564579,
753
+ "rewards/margins": 0.1290186494588852,
754
+ "rewards/rejected": -0.1024431437253952,
755
+ "step": 500
756
+ },
757
+ {
758
+ "epoch": 2.11,
759
+ "learning_rate": 1.6539050535987747e-07,
760
+ "logits/chosen": -2.672646999359131,
761
+ "logits/rejected": -2.302988290786743,
762
+ "logps/chosen": -275.57440185546875,
763
+ "logps/rejected": -221.45455932617188,
764
+ "loss": 18.1,
765
+ "rewards/accuracies": 0.737500011920929,
766
+ "rewards/chosen": 0.02416495978832245,
767
+ "rewards/margins": 0.14235563576221466,
768
+ "rewards/rejected": -0.11819068342447281,
769
+ "step": 510
770
+ },
771
+ {
772
+ "epoch": 2.15,
773
+ "learning_rate": 1.5773353751914243e-07,
774
+ "logits/chosen": -2.759183406829834,
775
+ "logits/rejected": -2.3177244663238525,
776
+ "logps/chosen": -282.8927307128906,
777
+ "logps/rejected": -217.7075958251953,
778
+ "loss": 18.4222,
779
+ "rewards/accuracies": 0.7093750238418579,
780
+ "rewards/chosen": 0.021920911967754364,
781
+ "rewards/margins": 0.13253231346607208,
782
+ "rewards/rejected": -0.11061139404773712,
783
+ "step": 520
784
+ },
785
+ {
786
+ "epoch": 2.19,
787
+ "learning_rate": 1.5007656967840735e-07,
788
+ "logits/chosen": -2.6353068351745605,
789
+ "logits/rejected": -2.291774272918701,
790
+ "logps/chosen": -260.0713806152344,
791
+ "logps/rejected": -232.3697052001953,
792
+ "loss": 18.084,
793
+ "rewards/accuracies": 0.7749999761581421,
794
+ "rewards/chosen": 0.020517101511359215,
795
+ "rewards/margins": 0.15263554453849792,
796
+ "rewards/rejected": -0.13211843371391296,
797
+ "step": 530
798
+ },
799
+ {
800
+ "epoch": 2.23,
801
+ "learning_rate": 1.4241960183767226e-07,
802
+ "logits/chosen": -2.665095806121826,
803
+ "logits/rejected": -2.3143036365509033,
804
+ "logps/chosen": -270.5109558105469,
805
+ "logps/rejected": -228.31759643554688,
806
+ "loss": 18.2089,
807
+ "rewards/accuracies": 0.71875,
808
+ "rewards/chosen": 0.020305803045630455,
809
+ "rewards/margins": 0.12145675718784332,
810
+ "rewards/rejected": -0.10115096718072891,
811
+ "step": 540
812
+ },
813
+ {
814
+ "epoch": 2.27,
815
+ "learning_rate": 1.347626339969372e-07,
816
+ "logits/chosen": -2.6581876277923584,
817
+ "logits/rejected": -2.402988910675049,
818
+ "logps/chosen": -276.17437744140625,
819
+ "logps/rejected": -239.04080200195312,
820
+ "loss": 18.2431,
821
+ "rewards/accuracies": 0.753125011920929,
822
+ "rewards/chosen": 0.04555383324623108,
823
+ "rewards/margins": 0.13798871636390686,
824
+ "rewards/rejected": -0.09243487566709518,
825
+ "step": 550
826
+ },
827
+ {
828
+ "epoch": 2.31,
829
+ "learning_rate": 1.2710566615620215e-07,
830
+ "logits/chosen": -2.6366963386535645,
831
+ "logits/rejected": -2.2572150230407715,
832
+ "logps/chosen": -272.67034912109375,
833
+ "logps/rejected": -214.80441284179688,
834
+ "loss": 17.8127,
835
+ "rewards/accuracies": 0.71875,
836
+ "rewards/chosen": 0.020294126123189926,
837
+ "rewards/margins": 0.14420394599437714,
838
+ "rewards/rejected": -0.1239098310470581,
839
+ "step": 560
840
+ },
841
+ {
842
+ "epoch": 2.35,
843
+ "learning_rate": 1.1944869831546706e-07,
844
+ "logits/chosen": -2.663076162338257,
845
+ "logits/rejected": -2.3145666122436523,
846
+ "logps/chosen": -273.4615478515625,
847
+ "logps/rejected": -232.04428100585938,
848
+ "loss": 18.2306,
849
+ "rewards/accuracies": 0.753125011920929,
850
+ "rewards/chosen": 0.03313525393605232,
851
+ "rewards/margins": 0.13954436779022217,
852
+ "rewards/rejected": -0.10640911012887955,
853
+ "step": 570
854
+ },
855
+ {
856
+ "epoch": 2.4,
857
+ "learning_rate": 1.11791730474732e-07,
858
+ "logits/chosen": -2.637507915496826,
859
+ "logits/rejected": -2.294003963470459,
860
+ "logps/chosen": -254.7287139892578,
861
+ "logps/rejected": -214.94534301757812,
862
+ "loss": 17.9129,
863
+ "rewards/accuracies": 0.762499988079071,
864
+ "rewards/chosen": 0.023206666111946106,
865
+ "rewards/margins": 0.14315126836299896,
866
+ "rewards/rejected": -0.11994459480047226,
867
+ "step": 580
868
+ },
869
+ {
870
+ "epoch": 2.44,
871
+ "learning_rate": 1.0413476263399694e-07,
872
+ "logits/chosen": -2.5812573432922363,
873
+ "logits/rejected": -2.254936695098877,
874
+ "logps/chosen": -267.64752197265625,
875
+ "logps/rejected": -209.09072875976562,
876
+ "loss": 18.3503,
877
+ "rewards/accuracies": 0.7437499761581421,
878
+ "rewards/chosen": 0.01215180940926075,
879
+ "rewards/margins": 0.13401289284229279,
880
+ "rewards/rejected": -0.12186107784509659,
881
+ "step": 590
882
+ },
883
+ {
884
+ "epoch": 2.48,
885
+ "learning_rate": 9.647779479326186e-08,
886
+ "logits/chosen": -2.645982265472412,
887
+ "logits/rejected": -2.3950445652008057,
888
+ "logps/chosen": -253.10336303710938,
889
+ "logps/rejected": -217.19949340820312,
890
+ "loss": 19.0115,
891
+ "rewards/accuracies": 0.699999988079071,
892
+ "rewards/chosen": 0.022207045927643776,
893
+ "rewards/margins": 0.11882482469081879,
894
+ "rewards/rejected": -0.09661778062582016,
895
+ "step": 600
896
+ },
897
+ {
898
+ "epoch": 2.52,
899
+ "learning_rate": 8.88208269525268e-08,
900
+ "logits/chosen": -2.606689929962158,
901
+ "logits/rejected": -2.274811029434204,
902
+ "logps/chosen": -254.597412109375,
903
+ "logps/rejected": -207.0297088623047,
904
+ "loss": 18.634,
905
+ "rewards/accuracies": 0.721875011920929,
906
+ "rewards/chosen": 0.020870240405201912,
907
+ "rewards/margins": 0.11707819998264313,
908
+ "rewards/rejected": -0.09620794653892517,
909
+ "step": 610
910
+ },
911
+ {
912
+ "epoch": 2.56,
913
+ "learning_rate": 8.116385911179173e-08,
914
+ "logits/chosen": -2.6234843730926514,
915
+ "logits/rejected": -2.277554750442505,
916
+ "logps/chosen": -265.1994934082031,
917
+ "logps/rejected": -217.2080841064453,
918
+ "loss": 18.3686,
919
+ "rewards/accuracies": 0.7124999761581421,
920
+ "rewards/chosen": 0.03428371995687485,
921
+ "rewards/margins": 0.13564249873161316,
922
+ "rewards/rejected": -0.1013587936758995,
923
+ "step": 620
924
+ },
925
+ {
926
+ "epoch": 2.6,
927
+ "learning_rate": 7.350689127105667e-08,
928
+ "logits/chosen": -2.681215286254883,
929
+ "logits/rejected": -2.2755188941955566,
930
+ "logps/chosen": -263.49530029296875,
931
+ "logps/rejected": -226.5316619873047,
932
+ "loss": 18.1639,
933
+ "rewards/accuracies": 0.706250011920929,
934
+ "rewards/chosen": 0.024900907650589943,
935
+ "rewards/margins": 0.14352695643901825,
936
+ "rewards/rejected": -0.11862604320049286,
937
+ "step": 630
938
+ },
939
+ {
940
+ "epoch": 2.64,
941
+ "learning_rate": 6.584992343032159e-08,
942
+ "logits/chosen": -2.589116096496582,
943
+ "logits/rejected": -2.3119215965270996,
944
+ "logps/chosen": -255.6792449951172,
945
+ "logps/rejected": -225.32754516601562,
946
+ "loss": 18.7407,
947
+ "rewards/accuracies": 0.7437499761581421,
948
+ "rewards/chosen": 0.022887857630848885,
949
+ "rewards/margins": 0.13587646186351776,
950
+ "rewards/rejected": -0.11298861354589462,
951
+ "step": 640
952
+ },
953
+ {
954
+ "epoch": 2.69,
955
+ "learning_rate": 5.819295558958652e-08,
956
+ "logits/chosen": -2.664092779159546,
957
+ "logits/rejected": -2.245487928390503,
958
+ "logps/chosen": -287.26129150390625,
959
+ "logps/rejected": -202.8896942138672,
960
+ "loss": 17.9121,
961
+ "rewards/accuracies": 0.765625,
962
+ "rewards/chosen": 0.03479045629501343,
963
+ "rewards/margins": 0.17097266018390656,
964
+ "rewards/rejected": -0.13618221879005432,
965
+ "step": 650
966
+ },
967
+ {
968
+ "epoch": 2.73,
969
+ "learning_rate": 5.0535987748851455e-08,
970
+ "logits/chosen": -2.6955533027648926,
971
+ "logits/rejected": -2.341909646987915,
972
+ "logps/chosen": -276.48583984375,
973
+ "logps/rejected": -231.2989959716797,
974
+ "loss": 18.0214,
975
+ "rewards/accuracies": 0.7437499761581421,
976
+ "rewards/chosen": 0.03698002174496651,
977
+ "rewards/margins": 0.14650048315525055,
978
+ "rewards/rejected": -0.10952047258615494,
979
+ "step": 660
980
+ },
981
+ {
982
+ "epoch": 2.77,
983
+ "learning_rate": 4.287901990811638e-08,
984
+ "logits/chosen": -2.691222667694092,
985
+ "logits/rejected": -2.269242286682129,
986
+ "logps/chosen": -257.7073974609375,
987
+ "logps/rejected": -232.1117706298828,
988
+ "loss": 18.0439,
989
+ "rewards/accuracies": 0.7124999761581421,
990
+ "rewards/chosen": 0.030216818675398827,
991
+ "rewards/margins": 0.14609763026237488,
992
+ "rewards/rejected": -0.1158808022737503,
993
+ "step": 670
994
+ },
995
+ {
996
+ "epoch": 2.81,
997
+ "learning_rate": 3.522205206738132e-08,
998
+ "logits/chosen": -2.6416354179382324,
999
+ "logits/rejected": -2.310128688812256,
1000
+ "logps/chosen": -271.9048767089844,
1001
+ "logps/rejected": -223.27969360351562,
1002
+ "loss": 18.4412,
1003
+ "rewards/accuracies": 0.7437499761581421,
1004
+ "rewards/chosen": 0.02033752016723156,
1005
+ "rewards/margins": 0.12728351354599,
1006
+ "rewards/rejected": -0.10694599151611328,
1007
+ "step": 680
1008
+ },
1009
+ {
1010
+ "epoch": 2.85,
1011
+ "learning_rate": 2.7565084226646246e-08,
1012
+ "logits/chosen": -2.660130739212036,
1013
+ "logits/rejected": -2.3302083015441895,
1014
+ "logps/chosen": -269.9329833984375,
1015
+ "logps/rejected": -232.0976104736328,
1016
+ "loss": 18.3673,
1017
+ "rewards/accuracies": 0.746874988079071,
1018
+ "rewards/chosen": 0.030508223921060562,
1019
+ "rewards/margins": 0.14415839314460754,
1020
+ "rewards/rejected": -0.11365016549825668,
1021
+ "step": 690
1022
+ },
1023
+ {
1024
+ "epoch": 2.89,
1025
+ "learning_rate": 1.9908116385911178e-08,
1026
+ "logits/chosen": -2.6514875888824463,
1027
+ "logits/rejected": -2.333763599395752,
1028
+ "logps/chosen": -251.9337158203125,
1029
+ "logps/rejected": -223.32437133789062,
1030
+ "loss": 18.373,
1031
+ "rewards/accuracies": 0.684374988079071,
1032
+ "rewards/chosen": 0.020953018218278885,
1033
+ "rewards/margins": 0.114670529961586,
1034
+ "rewards/rejected": -0.09371750801801682,
1035
+ "step": 700
1036
+ },
1037
+ {
1038
+ "epoch": 2.93,
1039
+ "learning_rate": 1.225114854517611e-08,
1040
+ "logits/chosen": -2.6763319969177246,
1041
+ "logits/rejected": -2.2669501304626465,
1042
+ "logps/chosen": -274.1488037109375,
1043
+ "logps/rejected": -220.2371063232422,
1044
+ "loss": 18.4056,
1045
+ "rewards/accuracies": 0.7250000238418579,
1046
+ "rewards/chosen": 0.005063548684120178,
1047
+ "rewards/margins": 0.12724530696868896,
1048
+ "rewards/rejected": -0.12218175083398819,
1049
+ "step": 710
1050
+ },
1051
+ {
1052
+ "epoch": 2.97,
1053
+ "learning_rate": 4.594180704441042e-09,
1054
+ "logits/chosen": -2.5772223472595215,
1055
+ "logits/rejected": -2.3834898471832275,
1056
+ "logps/chosen": -257.3438720703125,
1057
+ "logps/rejected": -220.6257781982422,
1058
+ "loss": 18.0366,
1059
+ "rewards/accuracies": 0.71875,
1060
+ "rewards/chosen": 0.019895819947123528,
1061
+ "rewards/margins": 0.12051371484994888,
1062
+ "rewards/rejected": -0.10061788558959961,
1063
+ "step": 720
1064
+ },
1065
+ {
1066
+ "epoch": 3.0,
1067
+ "eval_logits/chosen": -2.687349796295166,
1068
+ "eval_logits/rejected": -2.300260305404663,
1069
+ "eval_logps/chosen": -268.54150390625,
1070
+ "eval_logps/rejected": -212.86087036132812,
1071
+ "eval_loss": 18.088815689086914,
1072
+ "eval_rewards/accuracies": 0.7319999933242798,
1073
+ "eval_rewards/chosen": 0.02013408951461315,
1074
+ "eval_rewards/margins": 0.13951165974140167,
1075
+ "eval_rewards/rejected": -0.11937756091356277,
1076
+ "eval_runtime": 350.3279,
1077
+ "eval_samples_per_second": 5.709,
1078
+ "eval_steps_per_second": 0.714,
1079
+ "step": 726
1080
+ },
1081
+ {
1082
+ "epoch": 3.0,
1083
+ "step": 726,
1084
+ "total_flos": 0.0,
1085
+ "train_loss": 19.467516181882747,
1086
+ "train_runtime": 42806.6761,
1087
+ "train_samples_per_second": 4.343,
1088
+ "train_steps_per_second": 0.017
1089
+ }
1090
+ ],
1091
+ "logging_steps": 10,
1092
+ "max_steps": 726,
1093
+ "num_input_tokens_seen": 0,
1094
+ "num_train_epochs": 3,
1095
+ "save_steps": 500,
1096
+ "total_flos": 0.0,
1097
+ "train_batch_size": 1,
1098
+ "trial_name": null,
1099
+ "trial_params": null
1100
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dcef5bbdb0029b8a95cad5773a8a63ce2c0d87560605567e421e5d172bfb3de9
+ size 4856