LoneStriker commited on
Commit
d58f2e2
1 Parent(s): 04a1658

Upload folder using huggingface_hub

Browse files
README.md ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: mit
3
+ datasets:
4
+ - HuggingFaceH4/ultrachat_200k
5
+ language:
6
+ - en
7
+ pipeline_tag: text-generation
8
+ ---
9
+ Self-Play Fine-Tuning Converts Weak Language Models to Strong Language Models (https://arxiv.org/abs/2401.01335)
10
+
11
+ # zephyr-7b-sft-full-spin-iter3
12
+
13
+ This model is a self-play fine-tuned model at iteration 3 from [alignment-handbook/zephyr-7b-sft-full](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full) using synthetic data based on the [HuggingFaceH4/ultrachat_200k](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k) dataset.
14
+
15
+ ## Model Details
16
+
17
+ ### Model Description
18
+
19
+ - Model type: A 7B parameter GPT-like model fine-tuned on synthetic datasets.
20
+ - Language(s) (NLP): Primarily English
21
+ - License: MIT
22
+ - Finetuned from model: alignment-handbook/zephyr-7b-sft-full (based on mistralai/Mistral-7B-v0.1)
23
+
24
+ ### Training hyperparameters
25
+ The following hyperparameters were used during training:
26
+
27
+ - learning_rate: 1e-07
28
+ - train_batch_size: 8
29
+ - seed: 42
30
+ - distributed_type: multi-GPU
31
+ - num_devices: 8
32
+ - total_train_batch_size: 64
33
+ - optimizer: RMSProp
34
+ - lr_scheduler_type: linear
35
+ - lr_scheduler_warmup_ratio: 0.1
36
+ - num_epochs: 2.0
37
+
38
+ ## [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
39
+ Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_UCLA-AGI__test_final)
40
+ | Metric | Value |
41
+ |-----------------------|---------------------------|
42
+ | Avg. | 63.70 |
43
+ | ARC (25-shot) | 66.13 |
44
+ | HellaSwag (10-shot) | 85.85 |
45
+ | MMLU (5-shot) | 61.51 |
46
+ | TruthfulQA (0-shot) | 57.89 |
47
+ | Winogrande (5-shot) | 76.64 |
48
+ | GSM8K (5-shot) | 34.19 |
49
+
50
+ ## Citation
51
+ ```
52
+ @misc{chen2024selfplay,
53
+ title={Self-Play Fine-Tuning Converts Weak Language Models to Strong Language Models},
54
+ author={Zixiang Chen and Yihe Deng and Huizhuo Yuan and Kaixuan Ji and Quanquan Gu},
55
+ year={2024},
56
+ eprint={2401.01335},
57
+ archivePrefix={arXiv},
58
+ primaryClass={cs.LG}
59
+ }
60
+ ```
config.json ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "checkpoint-3112",
3
+ "architectures": [
4
+ "MistralForCausalLM"
5
+ ],
6
+ "bos_token_id": 1,
7
+ "eos_token_id": 2,
8
+ "hidden_act": "silu",
9
+ "hidden_size": 4096,
10
+ "initializer_range": 0.02,
11
+ "intermediate_size": 14336,
12
+ "max_position_embeddings": 32768,
13
+ "model_type": "mistral",
14
+ "num_attention_heads": 32,
15
+ "num_hidden_layers": 32,
16
+ "num_key_value_heads": 8,
17
+ "rms_norm_eps": 1e-05,
18
+ "rope_theta": 10000.0,
19
+ "sliding_window": 4096,
20
+ "tie_word_embeddings": false,
21
+ "torch_dtype": "bfloat16",
22
+ "transformers_version": "4.35.0",
23
+ "use_cache": false,
24
+ "vocab_size": 32000
25
+ }
generation_config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 1,
4
+ "eos_token_id": 2,
5
+ "transformers_version": "4.35.0"
6
+ }
model.safetensors.index.json ADDED
@@ -0,0 +1,298 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "metadata": {
3
+ "total_size": 14483464192
4
+ },
5
+ "weight_map": {
6
+ "lm_head.weight": "model-00003-of-00003.safetensors",
7
+ "model.embed_tokens.weight": "model-00001-of-00003.safetensors",
8
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
9
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
10
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
11
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
12
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
13
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
14
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
15
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
16
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
17
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
18
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
19
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
20
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
21
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
22
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
23
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
24
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
25
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
26
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
27
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
28
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
29
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
30
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
31
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
32
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
33
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
34
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
35
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
36
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
37
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
38
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
39
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
40
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
41
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
42
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
43
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
44
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
45
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
46
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
47
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
48
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
49
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
50
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
51
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
52
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
53
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
54
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
55
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
56
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
57
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
58
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
59
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
60
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
61
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
62
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
63
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
64
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
65
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
66
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
67
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
68
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
69
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
70
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
71
+ "model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
72
+ "model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
73
+ "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
74
+ "model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
75
+ "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
76
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
77
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
78
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
79
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
80
+ "model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
81
+ "model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
82
+ "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
83
+ "model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
84
+ "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
85
+ "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
86
+ "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
87
+ "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
88
+ "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
89
+ "model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
90
+ "model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
91
+ "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
92
+ "model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
93
+ "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
94
+ "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
95
+ "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
96
+ "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
97
+ "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
98
+ "model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
99
+ "model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
100
+ "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
101
+ "model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
102
+ "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
103
+ "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
104
+ "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
105
+ "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
106
+ "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
107
+ "model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
108
+ "model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
109
+ "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
110
+ "model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
111
+ "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
112
+ "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
113
+ "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
114
+ "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
115
+ "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
116
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
117
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
118
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
119
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
120
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
121
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
122
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
123
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
124
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
125
+ "model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
126
+ "model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
127
+ "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
128
+ "model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
129
+ "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
130
+ "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
131
+ "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
132
+ "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
133
+ "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
134
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00003.safetensors",
135
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
136
+ "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
137
+ "model.layers.21.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
138
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
139
+ "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
140
+ "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
141
+ "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
142
+ "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
143
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00003.safetensors",
144
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
145
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
146
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
147
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
148
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
149
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
150
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
151
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
152
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
153
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
154
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
155
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
156
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
157
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
158
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
159
+ "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
160
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
161
+ "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
162
+ "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
163
+ "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
164
+ "model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
165
+ "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
166
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
167
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
168
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
169
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
170
+ "model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
171
+ "model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
172
+ "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
173
+ "model.layers.25.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
174
+ "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
175
+ "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
176
+ "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
177
+ "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
178
+ "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
179
+ "model.layers.26.input_layernorm.weight": "model-00003-of-00003.safetensors",
180
+ "model.layers.26.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
181
+ "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
182
+ "model.layers.26.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
183
+ "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
184
+ "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
185
+ "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
186
+ "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
187
+ "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
188
+ "model.layers.27.input_layernorm.weight": "model-00003-of-00003.safetensors",
189
+ "model.layers.27.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
190
+ "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
191
+ "model.layers.27.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
192
+ "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
193
+ "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
194
+ "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
195
+ "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
196
+ "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
197
+ "model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors",
198
+ "model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
199
+ "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
200
+ "model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
201
+ "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
202
+ "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
203
+ "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
204
+ "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
205
+ "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
206
+ "model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
207
+ "model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
208
+ "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
209
+ "model.layers.29.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
210
+ "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
211
+ "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
212
+ "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
213
+ "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
214
+ "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
215
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
216
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
217
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
218
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
219
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
220
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
221
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
222
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
223
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
224
+ "model.layers.30.input_layernorm.weight": "model-00003-of-00003.safetensors",
225
+ "model.layers.30.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
226
+ "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
227
+ "model.layers.30.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
228
+ "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
229
+ "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
230
+ "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
231
+ "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
232
+ "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
233
+ "model.layers.31.input_layernorm.weight": "model-00003-of-00003.safetensors",
234
+ "model.layers.31.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
235
+ "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
236
+ "model.layers.31.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
237
+ "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
238
+ "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
239
+ "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
240
+ "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
241
+ "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
242
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
243
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
244
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
245
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
246
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
247
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
248
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
249
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
250
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
251
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
252
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
253
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
254
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
255
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
256
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
257
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
258
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
259
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
260
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
261
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
262
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
263
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
264
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
265
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
266
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
267
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
268
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
269
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
270
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
271
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
272
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
273
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
274
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
275
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
276
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
277
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
278
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors",
279
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
280
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
281
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
282
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
283
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
284
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
285
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
286
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
287
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00003.safetensors",
288
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
289
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
290
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
291
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
292
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
293
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
294
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
295
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
296
+ "model.norm.weight": "model-00003-of-00003.safetensors"
297
+ }
298
+ }
output.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f95f953e134e2723a85d264c4fb93615676426bec3ff5ea61869fa6298dbf37a
3
+ size 4733287396
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "</s>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "</s>",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "unk_token": {
24
+ "content": "<unk>",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ }
30
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "<unk>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "1": {
12
+ "content": "<s>",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "2": {
20
+ "content": "</s>",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ }
27
+ },
28
+ "additional_special_tokens": [],
29
+ "bos_token": "<s>",
30
+ "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
31
+ "clean_up_tokenization_spaces": false,
32
+ "eos_token": "</s>",
33
+ "legacy": true,
34
+ "model_max_length": 2048,
35
+ "pad_token": "</s>",
36
+ "sp_model_kwargs": {},
37
+ "spaces_between_special_tokens": false,
38
+ "tokenizer_class": "LlamaTokenizer",
39
+ "unk_token": "<unk>",
40
+ "use_default_system_prompt": true
41
+ }
trainer_state.json ADDED
@@ -0,0 +1,2443 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 1.0,
5
+ "eval_steps": 100,
6
+ "global_step": 1556,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.0,
13
+ "learning_rate": 1.0706638115631692e-10,
14
+ "logits/chosen": 3.009334087371826,
15
+ "logits/rejected": 2.8860254287719727,
16
+ "logps/chosen": -363.7457580566406,
17
+ "logps/rejected": -405.4437255859375,
18
+ "loss": 0.6931,
19
+ "rewards/accuracies": 0.0,
20
+ "rewards/chosen": 0.0,
21
+ "rewards/margins": 0.0,
22
+ "rewards/rejected": 0.0,
23
+ "step": 1
24
+ },
25
+ {
26
+ "epoch": 0.01,
27
+ "learning_rate": 1.070663811563169e-09,
28
+ "logits/chosen": 2.383711338043213,
29
+ "logits/rejected": 3.0388379096984863,
30
+ "logps/chosen": -456.6111145019531,
31
+ "logps/rejected": -428.7125244140625,
32
+ "loss": 2.7789,
33
+ "rewards/accuracies": 0.4305555522441864,
34
+ "rewards/chosen": -0.10625362396240234,
35
+ "rewards/margins": -0.11497735977172852,
36
+ "rewards/rejected": 0.008723735809326172,
37
+ "step": 10
38
+ },
39
+ {
40
+ "epoch": 0.01,
41
+ "learning_rate": 2.141327623126338e-09,
42
+ "logits/chosen": 2.6908938884735107,
43
+ "logits/rejected": 3.1237950325012207,
44
+ "logps/chosen": -387.3503112792969,
45
+ "logps/rejected": -384.4349060058594,
46
+ "loss": 2.7716,
47
+ "rewards/accuracies": 0.4625000059604645,
48
+ "rewards/chosen": 0.43295717239379883,
49
+ "rewards/margins": 1.0383365154266357,
50
+ "rewards/rejected": -0.6053793430328369,
51
+ "step": 20
52
+ },
53
+ {
54
+ "epoch": 0.02,
55
+ "learning_rate": 3.2119914346895075e-09,
56
+ "logits/chosen": 2.479438543319702,
57
+ "logits/rejected": 2.9864134788513184,
58
+ "logps/chosen": -409.71563720703125,
59
+ "logps/rejected": -410.36260986328125,
60
+ "loss": 3.0305,
61
+ "rewards/accuracies": 0.574999988079071,
62
+ "rewards/chosen": 0.3105888366699219,
63
+ "rewards/margins": 0.5438008308410645,
64
+ "rewards/rejected": -0.23321199417114258,
65
+ "step": 30
66
+ },
67
+ {
68
+ "epoch": 0.03,
69
+ "learning_rate": 4.282655246252676e-09,
70
+ "logits/chosen": 2.513826847076416,
71
+ "logits/rejected": 3.7642135620117188,
72
+ "logps/chosen": -412.05780029296875,
73
+ "logps/rejected": -408.3034973144531,
74
+ "loss": 2.3915,
75
+ "rewards/accuracies": 0.512499988079071,
76
+ "rewards/chosen": -0.4330732822418213,
77
+ "rewards/margins": 0.3383500576019287,
78
+ "rewards/rejected": -0.77142333984375,
79
+ "step": 40
80
+ },
81
+ {
82
+ "epoch": 0.03,
83
+ "learning_rate": 5.353319057815846e-09,
84
+ "logits/chosen": 2.6957950592041016,
85
+ "logits/rejected": 3.350551128387451,
86
+ "logps/chosen": -428.7166442871094,
87
+ "logps/rejected": -425.9712829589844,
88
+ "loss": 2.6906,
89
+ "rewards/accuracies": 0.5375000238418579,
90
+ "rewards/chosen": 0.6460269093513489,
91
+ "rewards/margins": 0.9008991122245789,
92
+ "rewards/rejected": -0.25487232208251953,
93
+ "step": 50
94
+ },
95
+ {
96
+ "epoch": 0.04,
97
+ "learning_rate": 6.423982869379015e-09,
98
+ "logits/chosen": 1.877236008644104,
99
+ "logits/rejected": 3.345506191253662,
100
+ "logps/chosen": -423.69549560546875,
101
+ "logps/rejected": -409.8067321777344,
102
+ "loss": 1.8376,
103
+ "rewards/accuracies": 0.6875,
104
+ "rewards/chosen": 2.4741673469543457,
105
+ "rewards/margins": 4.329440116882324,
106
+ "rewards/rejected": -1.8552722930908203,
107
+ "step": 60
108
+ },
109
+ {
110
+ "epoch": 0.04,
111
+ "learning_rate": 7.494646680942184e-09,
112
+ "logits/chosen": 2.3659510612487793,
113
+ "logits/rejected": 3.347115993499756,
114
+ "logps/chosen": -419.614990234375,
115
+ "logps/rejected": -404.15386962890625,
116
+ "loss": 1.8117,
117
+ "rewards/accuracies": 0.637499988079071,
118
+ "rewards/chosen": 1.2335608005523682,
119
+ "rewards/margins": 1.696711778640747,
120
+ "rewards/rejected": -0.4631509780883789,
121
+ "step": 70
122
+ },
123
+ {
124
+ "epoch": 0.05,
125
+ "learning_rate": 8.565310492505352e-09,
126
+ "logits/chosen": 1.8567355871200562,
127
+ "logits/rejected": 3.529881238937378,
128
+ "logps/chosen": -454.572998046875,
129
+ "logps/rejected": -433.12152099609375,
130
+ "loss": 1.4964,
131
+ "rewards/accuracies": 0.737500011920929,
132
+ "rewards/chosen": 1.8937450647354126,
133
+ "rewards/margins": 4.073466777801514,
134
+ "rewards/rejected": -2.1797218322753906,
135
+ "step": 80
136
+ },
137
+ {
138
+ "epoch": 0.06,
139
+ "learning_rate": 9.635974304068522e-09,
140
+ "logits/chosen": 2.379859447479248,
141
+ "logits/rejected": 3.260948657989502,
142
+ "logps/chosen": -451.9266052246094,
143
+ "logps/rejected": -383.00042724609375,
144
+ "loss": 1.6543,
145
+ "rewards/accuracies": 0.6625000238418579,
146
+ "rewards/chosen": 2.33457612991333,
147
+ "rewards/margins": 3.6442272663116455,
148
+ "rewards/rejected": -1.3096508979797363,
149
+ "step": 90
150
+ },
151
+ {
152
+ "epoch": 0.06,
153
+ "learning_rate": 1.0706638115631692e-08,
154
+ "logits/chosen": 2.4745795726776123,
155
+ "logits/rejected": 3.668903350830078,
156
+ "logps/chosen": -402.97393798828125,
157
+ "logps/rejected": -378.31890869140625,
158
+ "loss": 1.5408,
159
+ "rewards/accuracies": 0.675000011920929,
160
+ "rewards/chosen": 0.7632139921188354,
161
+ "rewards/margins": 2.8585281372070312,
162
+ "rewards/rejected": -2.0953145027160645,
163
+ "step": 100
164
+ },
165
+ {
166
+ "epoch": 0.06,
167
+ "eval_logits/chosen": 1.7967612743377686,
168
+ "eval_logits/rejected": 2.483816146850586,
169
+ "eval_logps/chosen": -403.54827880859375,
170
+ "eval_logps/rejected": -402.92413330078125,
171
+ "eval_loss": 1.0591531991958618,
172
+ "eval_rewards/accuracies": 0.8125,
173
+ "eval_rewards/chosen": 2.333465814590454,
174
+ "eval_rewards/margins": 5.841272830963135,
175
+ "eval_rewards/rejected": -3.5078072547912598,
176
+ "eval_runtime": 76.9287,
177
+ "eval_samples_per_second": 12.999,
178
+ "eval_steps_per_second": 0.416,
179
+ "step": 100
180
+ },
181
+ {
182
+ "epoch": 0.07,
183
+ "learning_rate": 1.177730192719486e-08,
184
+ "logits/chosen": 2.7680611610412598,
185
+ "logits/rejected": 3.0494232177734375,
186
+ "logps/chosen": -384.3839111328125,
187
+ "logps/rejected": -407.72918701171875,
188
+ "loss": 1.0018,
189
+ "rewards/accuracies": 0.75,
190
+ "rewards/chosen": 2.2038276195526123,
191
+ "rewards/margins": 6.380339622497559,
192
+ "rewards/rejected": -4.176511764526367,
193
+ "step": 110
194
+ },
195
+ {
196
+ "epoch": 0.08,
197
+ "learning_rate": 1.284796573875803e-08,
198
+ "logits/chosen": 2.498812198638916,
199
+ "logits/rejected": 3.4484715461730957,
200
+ "logps/chosen": -401.28857421875,
201
+ "logps/rejected": -426.8399963378906,
202
+ "loss": 0.7845,
203
+ "rewards/accuracies": 0.8374999761581421,
204
+ "rewards/chosen": 2.2210142612457275,
205
+ "rewards/margins": 8.185151100158691,
206
+ "rewards/rejected": -5.964136123657227,
207
+ "step": 120
208
+ },
209
+ {
210
+ "epoch": 0.08,
211
+ "learning_rate": 1.3918629550321198e-08,
212
+ "logits/chosen": 2.083467483520508,
213
+ "logits/rejected": 2.998776435852051,
214
+ "logps/chosen": -437.83709716796875,
215
+ "logps/rejected": -462.96026611328125,
216
+ "loss": 0.9019,
217
+ "rewards/accuracies": 0.9125000238418579,
218
+ "rewards/chosen": 4.379509449005127,
219
+ "rewards/margins": 10.292404174804688,
220
+ "rewards/rejected": -5.912895202636719,
221
+ "step": 130
222
+ },
223
+ {
224
+ "epoch": 0.09,
225
+ "learning_rate": 1.4989293361884368e-08,
226
+ "logits/chosen": 2.5522398948669434,
227
+ "logits/rejected": 3.3716235160827637,
228
+ "logps/chosen": -422.5474548339844,
229
+ "logps/rejected": -406.94183349609375,
230
+ "loss": 0.8578,
231
+ "rewards/accuracies": 0.8374999761581421,
232
+ "rewards/chosen": 2.729921579360962,
233
+ "rewards/margins": 7.9893903732299805,
234
+ "rewards/rejected": -5.259469509124756,
235
+ "step": 140
236
+ },
237
+ {
238
+ "epoch": 0.1,
239
+ "learning_rate": 1.6059957173447535e-08,
240
+ "logits/chosen": 2.7751362323760986,
241
+ "logits/rejected": 2.9003946781158447,
242
+ "logps/chosen": -391.8503723144531,
243
+ "logps/rejected": -400.2032470703125,
244
+ "loss": 0.7248,
245
+ "rewards/accuracies": 0.800000011920929,
246
+ "rewards/chosen": 1.3115911483764648,
247
+ "rewards/margins": 8.482717514038086,
248
+ "rewards/rejected": -7.171125888824463,
249
+ "step": 150
250
+ },
251
+ {
252
+ "epoch": 0.1,
253
+ "learning_rate": 1.7130620985010704e-08,
254
+ "logits/chosen": 2.3851382732391357,
255
+ "logits/rejected": 3.175081968307495,
256
+ "logps/chosen": -482.95062255859375,
257
+ "logps/rejected": -422.1293029785156,
258
+ "loss": 0.6889,
259
+ "rewards/accuracies": 0.8999999761581421,
260
+ "rewards/chosen": 3.3662681579589844,
261
+ "rewards/margins": 10.24647331237793,
262
+ "rewards/rejected": -6.8802056312561035,
263
+ "step": 160
264
+ },
265
+ {
266
+ "epoch": 0.11,
267
+ "learning_rate": 1.8201284796573874e-08,
268
+ "logits/chosen": 2.398458957672119,
269
+ "logits/rejected": 3.4337353706359863,
270
+ "logps/chosen": -422.200927734375,
271
+ "logps/rejected": -402.37274169921875,
272
+ "loss": 0.8347,
273
+ "rewards/accuracies": 0.800000011920929,
274
+ "rewards/chosen": 0.8539754748344421,
275
+ "rewards/margins": 8.811124801635742,
276
+ "rewards/rejected": -7.957149505615234,
277
+ "step": 170
278
+ },
279
+ {
280
+ "epoch": 0.12,
281
+ "learning_rate": 1.9271948608137044e-08,
282
+ "logits/chosen": 2.4511101245880127,
283
+ "logits/rejected": 3.5071682929992676,
284
+ "logps/chosen": -444.2669372558594,
285
+ "logps/rejected": -460.7818908691406,
286
+ "loss": 0.7348,
287
+ "rewards/accuracies": 0.862500011920929,
288
+ "rewards/chosen": 0.6959193348884583,
289
+ "rewards/margins": 10.471872329711914,
290
+ "rewards/rejected": -9.77595329284668,
291
+ "step": 180
292
+ },
293
+ {
294
+ "epoch": 0.12,
295
+ "learning_rate": 2.0342612419700214e-08,
296
+ "logits/chosen": 2.3941550254821777,
297
+ "logits/rejected": 3.183414936065674,
298
+ "logps/chosen": -406.2020263671875,
299
+ "logps/rejected": -389.969970703125,
300
+ "loss": 0.6852,
301
+ "rewards/accuracies": 0.800000011920929,
302
+ "rewards/chosen": 0.565895140171051,
303
+ "rewards/margins": 9.187482833862305,
304
+ "rewards/rejected": -8.621587753295898,
305
+ "step": 190
306
+ },
307
+ {
308
+ "epoch": 0.13,
309
+ "learning_rate": 2.1413276231263384e-08,
310
+ "logits/chosen": 2.699617624282837,
311
+ "logits/rejected": 3.2629177570343018,
312
+ "logps/chosen": -370.5373840332031,
313
+ "logps/rejected": -373.650634765625,
314
+ "loss": 0.6805,
315
+ "rewards/accuracies": 0.824999988079071,
316
+ "rewards/chosen": 0.4384472370147705,
317
+ "rewards/margins": 9.281099319458008,
318
+ "rewards/rejected": -8.8426513671875,
319
+ "step": 200
320
+ },
321
+ {
322
+ "epoch": 0.13,
323
+ "eval_logits/chosen": 1.8075158596038818,
324
+ "eval_logits/rejected": 2.4975709915161133,
325
+ "eval_logps/chosen": -403.94219970703125,
326
+ "eval_logps/rejected": -404.73687744140625,
327
+ "eval_loss": 0.7941137552261353,
328
+ "eval_rewards/accuracies": 0.9140625,
329
+ "eval_rewards/chosen": 0.36384421586990356,
330
+ "eval_rewards/margins": 12.935296058654785,
331
+ "eval_rewards/rejected": -12.571451187133789,
332
+ "eval_runtime": 76.7386,
333
+ "eval_samples_per_second": 13.031,
334
+ "eval_steps_per_second": 0.417,
335
+ "step": 200
336
+ },
337
+ {
338
+ "epoch": 0.13,
339
+ "learning_rate": 2.248394004282655e-08,
340
+ "logits/chosen": 2.466681718826294,
341
+ "logits/rejected": 3.0487618446350098,
342
+ "logps/chosen": -413.58941650390625,
343
+ "logps/rejected": -422.9561462402344,
344
+ "loss": 0.5453,
345
+ "rewards/accuracies": 0.925000011920929,
346
+ "rewards/chosen": 1.9145727157592773,
347
+ "rewards/margins": 14.975469589233398,
348
+ "rewards/rejected": -13.06089973449707,
349
+ "step": 210
350
+ },
351
+ {
352
+ "epoch": 0.14,
353
+ "learning_rate": 2.355460385438972e-08,
354
+ "logits/chosen": 2.4473984241485596,
355
+ "logits/rejected": 3.8849105834960938,
356
+ "logps/chosen": -435.60198974609375,
357
+ "logps/rejected": -394.08990478515625,
358
+ "loss": 0.4307,
359
+ "rewards/accuracies": 0.875,
360
+ "rewards/chosen": 1.1083576679229736,
361
+ "rewards/margins": 12.617959976196289,
362
+ "rewards/rejected": -11.509601593017578,
363
+ "step": 220
364
+ },
365
+ {
366
+ "epoch": 0.15,
367
+ "learning_rate": 2.462526766595289e-08,
368
+ "logits/chosen": 3.0809953212738037,
369
+ "logits/rejected": 3.275381088256836,
370
+ "logps/chosen": -387.32427978515625,
371
+ "logps/rejected": -418.32745361328125,
372
+ "loss": 0.4707,
373
+ "rewards/accuracies": 0.925000011920929,
374
+ "rewards/chosen": 2.789315700531006,
375
+ "rewards/margins": 15.172778129577637,
376
+ "rewards/rejected": -12.383462905883789,
377
+ "step": 230
378
+ },
379
+ {
380
+ "epoch": 0.15,
381
+ "learning_rate": 2.569593147751606e-08,
382
+ "logits/chosen": 2.8269176483154297,
383
+ "logits/rejected": 3.720735549926758,
384
+ "logps/chosen": -400.30743408203125,
385
+ "logps/rejected": -414.60430908203125,
386
+ "loss": 0.4476,
387
+ "rewards/accuracies": 0.9125000238418579,
388
+ "rewards/chosen": 1.7549598217010498,
389
+ "rewards/margins": 15.389744758605957,
390
+ "rewards/rejected": -13.634783744812012,
391
+ "step": 240
392
+ },
393
+ {
394
+ "epoch": 0.16,
395
+ "learning_rate": 2.676659528907923e-08,
396
+ "logits/chosen": 2.572218894958496,
397
+ "logits/rejected": 2.84545636177063,
398
+ "logps/chosen": -398.96356201171875,
399
+ "logps/rejected": -404.20635986328125,
400
+ "loss": 0.3304,
401
+ "rewards/accuracies": 0.8999999761581421,
402
+ "rewards/chosen": 1.083133578300476,
403
+ "rewards/margins": 14.877154350280762,
404
+ "rewards/rejected": -13.79401969909668,
405
+ "step": 250
406
+ },
407
+ {
408
+ "epoch": 0.17,
409
+ "learning_rate": 2.7837259100642396e-08,
410
+ "logits/chosen": 2.4352571964263916,
411
+ "logits/rejected": 3.324097156524658,
412
+ "logps/chosen": -389.4182434082031,
413
+ "logps/rejected": -390.0982666015625,
414
+ "loss": 0.363,
415
+ "rewards/accuracies": 0.8999999761581421,
416
+ "rewards/chosen": 0.10746519267559052,
417
+ "rewards/margins": 14.993916511535645,
418
+ "rewards/rejected": -14.886451721191406,
419
+ "step": 260
420
+ },
421
+ {
422
+ "epoch": 0.17,
423
+ "learning_rate": 2.890792291220557e-08,
424
+ "logits/chosen": 2.1370952129364014,
425
+ "logits/rejected": 3.407914400100708,
426
+ "logps/chosen": -434.8224182128906,
427
+ "logps/rejected": -440.6358947753906,
428
+ "loss": 0.4309,
429
+ "rewards/accuracies": 0.925000011920929,
430
+ "rewards/chosen": 2.220722198486328,
431
+ "rewards/margins": 18.430742263793945,
432
+ "rewards/rejected": -16.210018157958984,
433
+ "step": 270
434
+ },
435
+ {
436
+ "epoch": 0.18,
437
+ "learning_rate": 2.9978586723768736e-08,
438
+ "logits/chosen": 2.740478992462158,
439
+ "logits/rejected": 3.258228302001953,
440
+ "logps/chosen": -433.20782470703125,
441
+ "logps/rejected": -437.18719482421875,
442
+ "loss": 0.3487,
443
+ "rewards/accuracies": 0.8999999761581421,
444
+ "rewards/chosen": 2.553795576095581,
445
+ "rewards/margins": 19.554113388061523,
446
+ "rewards/rejected": -17.00031852722168,
447
+ "step": 280
448
+ },
449
+ {
450
+ "epoch": 0.19,
451
+ "learning_rate": 3.1049250535331906e-08,
452
+ "logits/chosen": 2.1817595958709717,
453
+ "logits/rejected": 3.1689507961273193,
454
+ "logps/chosen": -440.03863525390625,
455
+ "logps/rejected": -409.65240478515625,
456
+ "loss": 0.3633,
457
+ "rewards/accuracies": 0.9375,
458
+ "rewards/chosen": 1.1603565216064453,
459
+ "rewards/margins": 17.322303771972656,
460
+ "rewards/rejected": -16.161945343017578,
461
+ "step": 290
462
+ },
463
+ {
464
+ "epoch": 0.19,
465
+ "learning_rate": 3.211991434689507e-08,
466
+ "logits/chosen": 2.570457935333252,
467
+ "logits/rejected": 3.2502989768981934,
468
+ "logps/chosen": -454.56890869140625,
469
+ "logps/rejected": -416.1253356933594,
470
+ "loss": 0.4447,
471
+ "rewards/accuracies": 0.925000011920929,
472
+ "rewards/chosen": 3.8409790992736816,
473
+ "rewards/margins": 22.155860900878906,
474
+ "rewards/rejected": -18.31488037109375,
475
+ "step": 300
476
+ },
477
+ {
478
+ "epoch": 0.19,
479
+ "eval_logits/chosen": 1.8170030117034912,
480
+ "eval_logits/rejected": 2.5056939125061035,
481
+ "eval_logps/chosen": -403.96087646484375,
482
+ "eval_logps/rejected": -405.89984130859375,
483
+ "eval_loss": 0.7104232311248779,
484
+ "eval_rewards/accuracies": 0.9296875,
485
+ "eval_rewards/chosen": 0.27033889293670654,
486
+ "eval_rewards/margins": 18.656814575195312,
487
+ "eval_rewards/rejected": -18.386476516723633,
488
+ "eval_runtime": 76.9022,
489
+ "eval_samples_per_second": 13.004,
490
+ "eval_steps_per_second": 0.416,
491
+ "step": 300
492
+ },
493
+ {
494
+ "epoch": 0.2,
495
+ "learning_rate": 3.3190578158458246e-08,
496
+ "logits/chosen": 2.6424567699432373,
497
+ "logits/rejected": 3.5541133880615234,
498
+ "logps/chosen": -389.58636474609375,
499
+ "logps/rejected": -417.542236328125,
500
+ "loss": 0.1892,
501
+ "rewards/accuracies": 0.9750000238418579,
502
+ "rewards/chosen": 2.047499179840088,
503
+ "rewards/margins": 22.001834869384766,
504
+ "rewards/rejected": -19.954334259033203,
505
+ "step": 310
506
+ },
507
+ {
508
+ "epoch": 0.21,
509
+ "learning_rate": 3.426124197002141e-08,
510
+ "logits/chosen": 2.256901264190674,
511
+ "logits/rejected": 3.4731545448303223,
512
+ "logps/chosen": -414.03875732421875,
513
+ "logps/rejected": -427.070556640625,
514
+ "loss": 0.3266,
515
+ "rewards/accuracies": 0.925000011920929,
516
+ "rewards/chosen": 0.8677770495414734,
517
+ "rewards/margins": 22.491207122802734,
518
+ "rewards/rejected": -21.623430252075195,
519
+ "step": 320
520
+ },
521
+ {
522
+ "epoch": 0.21,
523
+ "learning_rate": 3.533190578158458e-08,
524
+ "logits/chosen": 2.77763295173645,
525
+ "logits/rejected": 3.112457752227783,
526
+ "logps/chosen": -360.58135986328125,
527
+ "logps/rejected": -414.3995666503906,
528
+ "loss": 0.2844,
529
+ "rewards/accuracies": 0.949999988079071,
530
+ "rewards/chosen": 2.416537046432495,
531
+ "rewards/margins": 22.439632415771484,
532
+ "rewards/rejected": -20.023096084594727,
533
+ "step": 330
534
+ },
535
+ {
536
+ "epoch": 0.22,
537
+ "learning_rate": 3.640256959314775e-08,
538
+ "logits/chosen": 2.5908098220825195,
539
+ "logits/rejected": 3.282468795776367,
540
+ "logps/chosen": -449.01922607421875,
541
+ "logps/rejected": -422.73016357421875,
542
+ "loss": 0.3479,
543
+ "rewards/accuracies": 0.9750000238418579,
544
+ "rewards/chosen": 2.354691505432129,
545
+ "rewards/margins": 22.229778289794922,
546
+ "rewards/rejected": -19.875089645385742,
547
+ "step": 340
548
+ },
549
+ {
550
+ "epoch": 0.22,
551
+ "learning_rate": 3.747323340471092e-08,
552
+ "logits/chosen": 2.534389019012451,
553
+ "logits/rejected": 3.5590693950653076,
554
+ "logps/chosen": -405.7064514160156,
555
+ "logps/rejected": -434.4012756347656,
556
+ "loss": 0.4823,
557
+ "rewards/accuracies": 0.925000011920929,
558
+ "rewards/chosen": 1.6845805644989014,
559
+ "rewards/margins": 23.965158462524414,
560
+ "rewards/rejected": -22.280576705932617,
561
+ "step": 350
562
+ },
563
+ {
564
+ "epoch": 0.23,
565
+ "learning_rate": 3.854389721627409e-08,
566
+ "logits/chosen": 2.5174474716186523,
567
+ "logits/rejected": 3.512732744216919,
568
+ "logps/chosen": -435.5006408691406,
569
+ "logps/rejected": -422.19366455078125,
570
+ "loss": 0.204,
571
+ "rewards/accuracies": 0.9750000238418579,
572
+ "rewards/chosen": 3.582836151123047,
573
+ "rewards/margins": 24.59599494934082,
574
+ "rewards/rejected": -21.013154983520508,
575
+ "step": 360
576
+ },
577
+ {
578
+ "epoch": 0.24,
579
+ "learning_rate": 3.961456102783726e-08,
580
+ "logits/chosen": 2.3562188148498535,
581
+ "logits/rejected": 3.3465442657470703,
582
+ "logps/chosen": -470.4930725097656,
583
+ "logps/rejected": -405.6280822753906,
584
+ "loss": 0.3122,
585
+ "rewards/accuracies": 0.9624999761581421,
586
+ "rewards/chosen": 1.8608239889144897,
587
+ "rewards/margins": 23.556621551513672,
588
+ "rewards/rejected": -21.6957950592041,
589
+ "step": 370
590
+ },
591
+ {
592
+ "epoch": 0.24,
593
+ "learning_rate": 4.068522483940043e-08,
594
+ "logits/chosen": 2.7507357597351074,
595
+ "logits/rejected": 3.5644543170928955,
596
+ "logps/chosen": -368.4208068847656,
597
+ "logps/rejected": -416.85302734375,
598
+ "loss": 0.2533,
599
+ "rewards/accuracies": 0.9375,
600
+ "rewards/chosen": 1.0723613500595093,
601
+ "rewards/margins": 24.53675651550293,
602
+ "rewards/rejected": -23.464397430419922,
603
+ "step": 380
604
+ },
605
+ {
606
+ "epoch": 0.25,
607
+ "learning_rate": 4.175588865096359e-08,
608
+ "logits/chosen": 2.4924309253692627,
609
+ "logits/rejected": 3.5116946697235107,
610
+ "logps/chosen": -383.086181640625,
611
+ "logps/rejected": -417.8291931152344,
612
+ "loss": 0.2178,
613
+ "rewards/accuracies": 0.887499988079071,
614
+ "rewards/chosen": 4.934910774230957,
615
+ "rewards/margins": 26.8948974609375,
616
+ "rewards/rejected": -21.959985733032227,
617
+ "step": 390
618
+ },
619
+ {
620
+ "epoch": 0.26,
621
+ "learning_rate": 4.282655246252677e-08,
622
+ "logits/chosen": 2.5591559410095215,
623
+ "logits/rejected": 3.345097780227661,
624
+ "logps/chosen": -449.8941955566406,
625
+ "logps/rejected": -431.88818359375,
626
+ "loss": 0.2468,
627
+ "rewards/accuracies": 0.9624999761581421,
628
+ "rewards/chosen": 5.43572473526001,
629
+ "rewards/margins": 28.937246322631836,
630
+ "rewards/rejected": -23.501522064208984,
631
+ "step": 400
632
+ },
633
+ {
634
+ "epoch": 0.26,
635
+ "eval_logits/chosen": 1.8141372203826904,
636
+ "eval_logits/rejected": 2.503908395767212,
637
+ "eval_logps/chosen": -403.28955078125,
638
+ "eval_logps/rejected": -406.5526123046875,
639
+ "eval_loss": 0.5629487037658691,
640
+ "eval_rewards/accuracies": 0.9140625,
641
+ "eval_rewards/chosen": 3.626873731613159,
642
+ "eval_rewards/margins": 25.27718162536621,
643
+ "eval_rewards/rejected": -21.650310516357422,
644
+ "eval_runtime": 76.66,
645
+ "eval_samples_per_second": 13.045,
646
+ "eval_steps_per_second": 0.417,
647
+ "step": 400
648
+ },
649
+ {
650
+ "epoch": 0.26,
651
+ "learning_rate": 4.389721627408993e-08,
652
+ "logits/chosen": 2.353135585784912,
653
+ "logits/rejected": 3.1894092559814453,
654
+ "logps/chosen": -386.232666015625,
655
+ "logps/rejected": -399.92218017578125,
656
+ "loss": 0.1749,
657
+ "rewards/accuracies": 0.9624999761581421,
658
+ "rewards/chosen": 5.276652812957764,
659
+ "rewards/margins": 27.45583724975586,
660
+ "rewards/rejected": -22.17918586730957,
661
+ "step": 410
662
+ },
663
+ {
664
+ "epoch": 0.27,
665
+ "learning_rate": 4.49678800856531e-08,
666
+ "logits/chosen": 1.99558424949646,
667
+ "logits/rejected": 3.1743838787078857,
668
+ "logps/chosen": -412.32110595703125,
669
+ "logps/rejected": -421.9241638183594,
670
+ "loss": 0.272,
671
+ "rewards/accuracies": 0.9750000238418579,
672
+ "rewards/chosen": 6.199848175048828,
673
+ "rewards/margins": 27.825729370117188,
674
+ "rewards/rejected": -21.62588119506836,
675
+ "step": 420
676
+ },
677
+ {
678
+ "epoch": 0.28,
679
+ "learning_rate": 4.603854389721627e-08,
680
+ "logits/chosen": 2.605555772781372,
681
+ "logits/rejected": 3.407613754272461,
682
+ "logps/chosen": -434.2098083496094,
683
+ "logps/rejected": -447.4507751464844,
684
+ "loss": 0.3003,
685
+ "rewards/accuracies": 0.9624999761581421,
686
+ "rewards/chosen": 8.46845531463623,
687
+ "rewards/margins": 32.25395584106445,
688
+ "rewards/rejected": -23.785497665405273,
689
+ "step": 430
690
+ },
691
+ {
692
+ "epoch": 0.28,
693
+ "learning_rate": 4.710920770877944e-08,
694
+ "logits/chosen": 2.1723690032958984,
695
+ "logits/rejected": 3.847750186920166,
696
+ "logps/chosen": -421.90155029296875,
697
+ "logps/rejected": -414.0150451660156,
698
+ "loss": 0.2367,
699
+ "rewards/accuracies": 0.9375,
700
+ "rewards/chosen": 8.460034370422363,
701
+ "rewards/margins": 31.530038833618164,
702
+ "rewards/rejected": -23.070003509521484,
703
+ "step": 440
704
+ },
705
+ {
706
+ "epoch": 0.29,
707
+ "learning_rate": 4.817987152034261e-08,
708
+ "logits/chosen": 2.460693120956421,
709
+ "logits/rejected": 2.9349396228790283,
710
+ "logps/chosen": -502.804931640625,
711
+ "logps/rejected": -431.50335693359375,
712
+ "loss": 0.2369,
713
+ "rewards/accuracies": 0.9750000238418579,
714
+ "rewards/chosen": 7.705231666564941,
715
+ "rewards/margins": 32.00004959106445,
716
+ "rewards/rejected": -24.294815063476562,
717
+ "step": 450
718
+ },
719
+ {
720
+ "epoch": 0.3,
721
+ "learning_rate": 4.925053533190578e-08,
722
+ "logits/chosen": 2.333646297454834,
723
+ "logits/rejected": 4.089064598083496,
724
+ "logps/chosen": -422.8240661621094,
725
+ "logps/rejected": -442.1726989746094,
726
+ "loss": 0.1691,
727
+ "rewards/accuracies": 0.987500011920929,
728
+ "rewards/chosen": 4.384838581085205,
729
+ "rewards/margins": 29.339359283447266,
730
+ "rewards/rejected": -24.954524993896484,
731
+ "step": 460
732
+ },
733
+ {
734
+ "epoch": 0.3,
735
+ "learning_rate": 5.032119914346895e-08,
736
+ "logits/chosen": 2.252077579498291,
737
+ "logits/rejected": 3.398280382156372,
738
+ "logps/chosen": -426.0081481933594,
739
+ "logps/rejected": -431.38623046875,
740
+ "loss": 0.2549,
741
+ "rewards/accuracies": 0.9750000238418579,
742
+ "rewards/chosen": 3.1347286701202393,
743
+ "rewards/margins": 29.128936767578125,
744
+ "rewards/rejected": -25.994205474853516,
745
+ "step": 470
746
+ },
747
+ {
748
+ "epoch": 0.31,
749
+ "learning_rate": 5.139186295503212e-08,
750
+ "logits/chosen": 1.9574702978134155,
751
+ "logits/rejected": 3.0777688026428223,
752
+ "logps/chosen": -445.21435546875,
753
+ "logps/rejected": -431.9685974121094,
754
+ "loss": 0.2022,
755
+ "rewards/accuracies": 0.9375,
756
+ "rewards/chosen": 5.082431793212891,
757
+ "rewards/margins": 33.55067825317383,
758
+ "rewards/rejected": -28.468246459960938,
759
+ "step": 480
760
+ },
761
+ {
762
+ "epoch": 0.31,
763
+ "learning_rate": 5.246252676659528e-08,
764
+ "logits/chosen": 2.4008655548095703,
765
+ "logits/rejected": 3.1396138668060303,
766
+ "logps/chosen": -396.2805480957031,
767
+ "logps/rejected": -431.75994873046875,
768
+ "loss": 0.1925,
769
+ "rewards/accuracies": 0.987500011920929,
770
+ "rewards/chosen": 3.1321897506713867,
771
+ "rewards/margins": 31.742828369140625,
772
+ "rewards/rejected": -28.610637664794922,
773
+ "step": 490
774
+ },
775
+ {
776
+ "epoch": 0.32,
777
+ "learning_rate": 5.353319057815846e-08,
778
+ "logits/chosen": 2.6065423488616943,
779
+ "logits/rejected": 3.01694917678833,
780
+ "logps/chosen": -441.64752197265625,
781
+ "logps/rejected": -450.52337646484375,
782
+ "loss": 0.238,
783
+ "rewards/accuracies": 0.9375,
784
+ "rewards/chosen": 5.9151458740234375,
785
+ "rewards/margins": 37.43492889404297,
786
+ "rewards/rejected": -31.5197811126709,
787
+ "step": 500
788
+ },
789
+ {
790
+ "epoch": 0.32,
791
+ "eval_logits/chosen": 1.8356637954711914,
792
+ "eval_logits/rejected": 2.518232822418213,
793
+ "eval_logps/chosen": -403.12066650390625,
794
+ "eval_logps/rejected": -407.75311279296875,
795
+ "eval_loss": 0.40245920419692993,
796
+ "eval_rewards/accuracies": 0.9296875,
797
+ "eval_rewards/chosen": 4.471410751342773,
798
+ "eval_rewards/margins": 32.124061584472656,
799
+ "eval_rewards/rejected": -27.652652740478516,
800
+ "eval_runtime": 76.7691,
801
+ "eval_samples_per_second": 13.026,
802
+ "eval_steps_per_second": 0.417,
803
+ "step": 500
804
+ },
805
+ {
806
+ "epoch": 0.33,
807
+ "learning_rate": 5.460385438972163e-08,
808
+ "logits/chosen": 2.370246410369873,
809
+ "logits/rejected": 3.2074241638183594,
810
+ "logps/chosen": -463.4814453125,
811
+ "logps/rejected": -450.365478515625,
812
+ "loss": 0.1896,
813
+ "rewards/accuracies": 0.9750000238418579,
814
+ "rewards/chosen": 4.760863304138184,
815
+ "rewards/margins": 35.59904098510742,
816
+ "rewards/rejected": -30.838184356689453,
817
+ "step": 510
818
+ },
819
+ {
820
+ "epoch": 0.33,
821
+ "learning_rate": 5.567451820128479e-08,
822
+ "logits/chosen": 2.4717459678649902,
823
+ "logits/rejected": 3.241302967071533,
824
+ "logps/chosen": -476.26690673828125,
825
+ "logps/rejected": -425.7784729003906,
826
+ "loss": 0.2277,
827
+ "rewards/accuracies": 0.9375,
828
+ "rewards/chosen": 5.998467922210693,
829
+ "rewards/margins": 37.100624084472656,
830
+ "rewards/rejected": -31.102157592773438,
831
+ "step": 520
832
+ },
833
+ {
834
+ "epoch": 0.34,
835
+ "learning_rate": 5.6745182012847956e-08,
836
+ "logits/chosen": 2.8885834217071533,
837
+ "logits/rejected": 3.374998092651367,
838
+ "logps/chosen": -399.87249755859375,
839
+ "logps/rejected": -406.6166687011719,
840
+ "loss": 0.2215,
841
+ "rewards/accuracies": 0.9125000238418579,
842
+ "rewards/chosen": 5.892712116241455,
843
+ "rewards/margins": 36.44768524169922,
844
+ "rewards/rejected": -30.554973602294922,
845
+ "step": 530
846
+ },
847
+ {
848
+ "epoch": 0.35,
849
+ "learning_rate": 5.781584582441114e-08,
850
+ "logits/chosen": 2.536708116531372,
851
+ "logits/rejected": 3.2403769493103027,
852
+ "logps/chosen": -429.4972229003906,
853
+ "logps/rejected": -423.7460021972656,
854
+ "loss": 0.1918,
855
+ "rewards/accuracies": 0.9624999761581421,
856
+ "rewards/chosen": 7.94210958480835,
857
+ "rewards/margins": 36.776371002197266,
858
+ "rewards/rejected": -28.83426284790039,
859
+ "step": 540
860
+ },
861
+ {
862
+ "epoch": 0.35,
863
+ "learning_rate": 5.88865096359743e-08,
864
+ "logits/chosen": 1.9649451971054077,
865
+ "logits/rejected": 3.1596691608428955,
866
+ "logps/chosen": -472.22119140625,
867
+ "logps/rejected": -425.32763671875,
868
+ "loss": 0.15,
869
+ "rewards/accuracies": 0.9750000238418579,
870
+ "rewards/chosen": 12.239465713500977,
871
+ "rewards/margins": 48.21662139892578,
872
+ "rewards/rejected": -35.977149963378906,
873
+ "step": 550
874
+ },
875
+ {
876
+ "epoch": 0.36,
877
+ "learning_rate": 5.995717344753747e-08,
878
+ "logits/chosen": 3.032383441925049,
879
+ "logits/rejected": 3.257586717605591,
880
+ "logps/chosen": -416.17071533203125,
881
+ "logps/rejected": -410.1018981933594,
882
+ "loss": 0.1132,
883
+ "rewards/accuracies": 0.9750000238418579,
884
+ "rewards/chosen": 7.306866645812988,
885
+ "rewards/margins": 37.124568939208984,
886
+ "rewards/rejected": -29.81770896911621,
887
+ "step": 560
888
+ },
889
+ {
890
+ "epoch": 0.37,
891
+ "learning_rate": 6.102783725910064e-08,
892
+ "logits/chosen": 2.7644500732421875,
893
+ "logits/rejected": 3.5682806968688965,
894
+ "logps/chosen": -383.52093505859375,
895
+ "logps/rejected": -394.8160705566406,
896
+ "loss": 0.2133,
897
+ "rewards/accuracies": 0.949999988079071,
898
+ "rewards/chosen": 5.922687530517578,
899
+ "rewards/margins": 37.88574981689453,
900
+ "rewards/rejected": -31.963062286376953,
901
+ "step": 570
902
+ },
903
+ {
904
+ "epoch": 0.37,
905
+ "learning_rate": 6.209850107066381e-08,
906
+ "logits/chosen": 2.341892957687378,
907
+ "logits/rejected": 3.260730028152466,
908
+ "logps/chosen": -394.4947814941406,
909
+ "logps/rejected": -405.29498291015625,
910
+ "loss": 0.082,
911
+ "rewards/accuracies": 0.9375,
912
+ "rewards/chosen": 6.424149990081787,
913
+ "rewards/margins": 38.39518737792969,
914
+ "rewards/rejected": -31.971033096313477,
915
+ "step": 580
916
+ },
917
+ {
918
+ "epoch": 0.38,
919
+ "learning_rate": 6.316916488222698e-08,
920
+ "logits/chosen": 2.526381015777588,
921
+ "logits/rejected": 3.192876100540161,
922
+ "logps/chosen": -426.3685607910156,
923
+ "logps/rejected": -403.46759033203125,
924
+ "loss": 0.1342,
925
+ "rewards/accuracies": 0.9750000238418579,
926
+ "rewards/chosen": 3.9063773155212402,
927
+ "rewards/margins": 39.68169021606445,
928
+ "rewards/rejected": -35.77531814575195,
929
+ "step": 590
930
+ },
931
+ {
932
+ "epoch": 0.39,
933
+ "learning_rate": 6.423982869379014e-08,
934
+ "logits/chosen": 2.456986427307129,
935
+ "logits/rejected": 2.974177837371826,
936
+ "logps/chosen": -430.700927734375,
937
+ "logps/rejected": -460.1724548339844,
938
+ "loss": 0.0835,
939
+ "rewards/accuracies": 0.987500011920929,
940
+ "rewards/chosen": 2.9916865825653076,
941
+ "rewards/margins": 47.707908630371094,
942
+ "rewards/rejected": -44.716224670410156,
943
+ "step": 600
944
+ },
945
+ {
946
+ "epoch": 0.39,
947
+ "eval_logits/chosen": 1.8376859426498413,
948
+ "eval_logits/rejected": 2.519270658493042,
949
+ "eval_logps/chosen": -404.12213134765625,
950
+ "eval_logps/rejected": -409.9280700683594,
951
+ "eval_loss": 0.5330207943916321,
952
+ "eval_rewards/accuracies": 0.9453125,
953
+ "eval_rewards/chosen": -0.5357855558395386,
954
+ "eval_rewards/margins": 37.99186325073242,
955
+ "eval_rewards/rejected": -38.52764892578125,
956
+ "eval_runtime": 76.8308,
957
+ "eval_samples_per_second": 13.016,
958
+ "eval_steps_per_second": 0.416,
959
+ "step": 600
960
+ },
961
+ {
962
+ "epoch": 0.39,
963
+ "learning_rate": 6.531049250535332e-08,
964
+ "logits/chosen": 2.4591498374938965,
965
+ "logits/rejected": 3.2855117321014404,
966
+ "logps/chosen": -401.3484802246094,
967
+ "logps/rejected": -434.5301208496094,
968
+ "loss": 0.142,
969
+ "rewards/accuracies": 0.9375,
970
+ "rewards/chosen": -0.02256155014038086,
971
+ "rewards/margins": 46.42057418823242,
972
+ "rewards/rejected": -46.443138122558594,
973
+ "step": 610
974
+ },
975
+ {
976
+ "epoch": 0.4,
977
+ "learning_rate": 6.638115631691649e-08,
978
+ "logits/chosen": 2.539071559906006,
979
+ "logits/rejected": 3.2421278953552246,
980
+ "logps/chosen": -442.18243408203125,
981
+ "logps/rejected": -444.18878173828125,
982
+ "loss": 0.1058,
983
+ "rewards/accuracies": 1.0,
984
+ "rewards/chosen": 2.043628692626953,
985
+ "rewards/margins": 43.48805618286133,
986
+ "rewards/rejected": -41.444427490234375,
987
+ "step": 620
988
+ },
989
+ {
990
+ "epoch": 0.4,
991
+ "learning_rate": 6.745182012847965e-08,
992
+ "logits/chosen": 2.5688178539276123,
993
+ "logits/rejected": 3.445737838745117,
994
+ "logps/chosen": -411.5978088378906,
995
+ "logps/rejected": -407.0299072265625,
996
+ "loss": 0.1525,
997
+ "rewards/accuracies": 0.9375,
998
+ "rewards/chosen": 1.022148609161377,
999
+ "rewards/margins": 39.893680572509766,
1000
+ "rewards/rejected": -38.87153244018555,
1001
+ "step": 630
1002
+ },
1003
+ {
1004
+ "epoch": 0.41,
1005
+ "learning_rate": 6.852248394004282e-08,
1006
+ "logits/chosen": 2.5901875495910645,
1007
+ "logits/rejected": 2.911647319793701,
1008
+ "logps/chosen": -420.1778869628906,
1009
+ "logps/rejected": -382.272216796875,
1010
+ "loss": 0.2718,
1011
+ "rewards/accuracies": 0.9624999761581421,
1012
+ "rewards/chosen": 3.5251822471618652,
1013
+ "rewards/margins": 41.309791564941406,
1014
+ "rewards/rejected": -37.784610748291016,
1015
+ "step": 640
1016
+ },
1017
+ {
1018
+ "epoch": 0.42,
1019
+ "learning_rate": 6.9593147751606e-08,
1020
+ "logits/chosen": 2.287266492843628,
1021
+ "logits/rejected": 3.33722186088562,
1022
+ "logps/chosen": -452.6976013183594,
1023
+ "logps/rejected": -439.2931213378906,
1024
+ "loss": 0.0169,
1025
+ "rewards/accuracies": 1.0,
1026
+ "rewards/chosen": 6.000566482543945,
1027
+ "rewards/margins": 46.02397918701172,
1028
+ "rewards/rejected": -40.023414611816406,
1029
+ "step": 650
1030
+ },
1031
+ {
1032
+ "epoch": 0.42,
1033
+ "learning_rate": 7.066381156316916e-08,
1034
+ "logits/chosen": 2.2046732902526855,
1035
+ "logits/rejected": 3.6667628288269043,
1036
+ "logps/chosen": -403.92205810546875,
1037
+ "logps/rejected": -408.7096862792969,
1038
+ "loss": 0.2138,
1039
+ "rewards/accuracies": 0.9624999761581421,
1040
+ "rewards/chosen": 6.08463191986084,
1041
+ "rewards/margins": 44.13505554199219,
1042
+ "rewards/rejected": -38.0504264831543,
1043
+ "step": 660
1044
+ },
1045
+ {
1046
+ "epoch": 0.43,
1047
+ "learning_rate": 7.173447537473233e-08,
1048
+ "logits/chosen": 2.4322450160980225,
1049
+ "logits/rejected": 3.5664360523223877,
1050
+ "logps/chosen": -416.0870056152344,
1051
+ "logps/rejected": -415.59197998046875,
1052
+ "loss": 0.1237,
1053
+ "rewards/accuracies": 0.9750000238418579,
1054
+ "rewards/chosen": 4.357824325561523,
1055
+ "rewards/margins": 44.89923858642578,
1056
+ "rewards/rejected": -40.541419982910156,
1057
+ "step": 670
1058
+ },
1059
+ {
1060
+ "epoch": 0.44,
1061
+ "learning_rate": 7.28051391862955e-08,
1062
+ "logits/chosen": 2.468834638595581,
1063
+ "logits/rejected": 3.187020778656006,
1064
+ "logps/chosen": -395.1986083984375,
1065
+ "logps/rejected": -432.7027282714844,
1066
+ "loss": 0.1312,
1067
+ "rewards/accuracies": 0.9624999761581421,
1068
+ "rewards/chosen": 7.26413631439209,
1069
+ "rewards/margins": 49.74661636352539,
1070
+ "rewards/rejected": -42.48247528076172,
1071
+ "step": 680
1072
+ },
1073
+ {
1074
+ "epoch": 0.44,
1075
+ "learning_rate": 7.387580299785867e-08,
1076
+ "logits/chosen": 2.341510772705078,
1077
+ "logits/rejected": 3.5096206665039062,
1078
+ "logps/chosen": -391.4788513183594,
1079
+ "logps/rejected": -392.6119384765625,
1080
+ "loss": 0.1998,
1081
+ "rewards/accuracies": 0.949999988079071,
1082
+ "rewards/chosen": 6.877418518066406,
1083
+ "rewards/margins": 45.81779479980469,
1084
+ "rewards/rejected": -38.94037628173828,
1085
+ "step": 690
1086
+ },
1087
+ {
1088
+ "epoch": 0.45,
1089
+ "learning_rate": 7.494646680942184e-08,
1090
+ "logits/chosen": 2.7313547134399414,
1091
+ "logits/rejected": 3.659938097000122,
1092
+ "logps/chosen": -375.4259033203125,
1093
+ "logps/rejected": -431.1378479003906,
1094
+ "loss": 0.1294,
1095
+ "rewards/accuracies": 0.987500011920929,
1096
+ "rewards/chosen": 4.9859724044799805,
1097
+ "rewards/margins": 47.63801574707031,
1098
+ "rewards/rejected": -42.65203857421875,
1099
+ "step": 700
1100
+ },
1101
+ {
1102
+ "epoch": 0.45,
1103
+ "eval_logits/chosen": 1.8345167636871338,
1104
+ "eval_logits/rejected": 2.522487163543701,
1105
+ "eval_logps/chosen": -403.2203674316406,
1106
+ "eval_logps/rejected": -410.374755859375,
1107
+ "eval_loss": 0.40866366028785706,
1108
+ "eval_rewards/accuracies": 0.953125,
1109
+ "eval_rewards/chosen": 3.9727911949157715,
1110
+ "eval_rewards/margins": 44.73374938964844,
1111
+ "eval_rewards/rejected": -40.760955810546875,
1112
+ "eval_runtime": 76.9261,
1113
+ "eval_samples_per_second": 12.999,
1114
+ "eval_steps_per_second": 0.416,
1115
+ "step": 700
1116
+ },
1117
+ {
1118
+ "epoch": 0.46,
1119
+ "learning_rate": 7.601713062098501e-08,
1120
+ "logits/chosen": 2.3793327808380127,
1121
+ "logits/rejected": 3.142347574234009,
1122
+ "logps/chosen": -423.20880126953125,
1123
+ "logps/rejected": -401.803955078125,
1124
+ "loss": 0.1473,
1125
+ "rewards/accuracies": 0.9750000238418579,
1126
+ "rewards/chosen": 8.15241527557373,
1127
+ "rewards/margins": 47.408851623535156,
1128
+ "rewards/rejected": -39.256431579589844,
1129
+ "step": 710
1130
+ },
1131
+ {
1132
+ "epoch": 0.46,
1133
+ "learning_rate": 7.708779443254818e-08,
1134
+ "logits/chosen": 2.7316575050354004,
1135
+ "logits/rejected": 3.029636859893799,
1136
+ "logps/chosen": -434.23480224609375,
1137
+ "logps/rejected": -403.28717041015625,
1138
+ "loss": 0.1684,
1139
+ "rewards/accuracies": 1.0,
1140
+ "rewards/chosen": 10.977544784545898,
1141
+ "rewards/margins": 50.05615234375,
1142
+ "rewards/rejected": -39.07860565185547,
1143
+ "step": 720
1144
+ },
1145
+ {
1146
+ "epoch": 0.47,
1147
+ "learning_rate": 7.815845824411135e-08,
1148
+ "logits/chosen": 2.5688631534576416,
1149
+ "logits/rejected": 3.0123863220214844,
1150
+ "logps/chosen": -449.0638732910156,
1151
+ "logps/rejected": -398.37152099609375,
1152
+ "loss": 0.0823,
1153
+ "rewards/accuracies": 0.9624999761581421,
1154
+ "rewards/chosen": 8.119828224182129,
1155
+ "rewards/margins": 45.32478332519531,
1156
+ "rewards/rejected": -37.2049560546875,
1157
+ "step": 730
1158
+ },
1159
+ {
1160
+ "epoch": 0.48,
1161
+ "learning_rate": 7.922912205567452e-08,
1162
+ "logits/chosen": 2.6267566680908203,
1163
+ "logits/rejected": 3.3151562213897705,
1164
+ "logps/chosen": -390.72698974609375,
1165
+ "logps/rejected": -381.4679260253906,
1166
+ "loss": 0.2223,
1167
+ "rewards/accuracies": 0.9750000238418579,
1168
+ "rewards/chosen": 6.810037136077881,
1169
+ "rewards/margins": 40.27370071411133,
1170
+ "rewards/rejected": -33.46365737915039,
1171
+ "step": 740
1172
+ },
1173
+ {
1174
+ "epoch": 0.48,
1175
+ "learning_rate": 8.029978586723767e-08,
1176
+ "logits/chosen": 2.355883836746216,
1177
+ "logits/rejected": 3.5080723762512207,
1178
+ "logps/chosen": -395.70184326171875,
1179
+ "logps/rejected": -403.0619812011719,
1180
+ "loss": 0.1436,
1181
+ "rewards/accuracies": 1.0,
1182
+ "rewards/chosen": 9.782814025878906,
1183
+ "rewards/margins": 49.49211883544922,
1184
+ "rewards/rejected": -39.70930480957031,
1185
+ "step": 750
1186
+ },
1187
+ {
1188
+ "epoch": 0.49,
1189
+ "learning_rate": 8.137044967880086e-08,
1190
+ "logits/chosen": 2.538801670074463,
1191
+ "logits/rejected": 2.962019205093384,
1192
+ "logps/chosen": -395.83062744140625,
1193
+ "logps/rejected": -439.6985778808594,
1194
+ "loss": 0.1789,
1195
+ "rewards/accuracies": 1.0,
1196
+ "rewards/chosen": 6.836484432220459,
1197
+ "rewards/margins": 59.103851318359375,
1198
+ "rewards/rejected": -52.267372131347656,
1199
+ "step": 760
1200
+ },
1201
+ {
1202
+ "epoch": 0.49,
1203
+ "learning_rate": 8.244111349036403e-08,
1204
+ "logits/chosen": 2.372373104095459,
1205
+ "logits/rejected": 3.526844024658203,
1206
+ "logps/chosen": -391.49090576171875,
1207
+ "logps/rejected": -436.17193603515625,
1208
+ "loss": 0.1816,
1209
+ "rewards/accuracies": 0.987500011920929,
1210
+ "rewards/chosen": 1.2613162994384766,
1211
+ "rewards/margins": 54.581298828125,
1212
+ "rewards/rejected": -53.31998825073242,
1213
+ "step": 770
1214
+ },
1215
+ {
1216
+ "epoch": 0.5,
1217
+ "learning_rate": 8.351177730192718e-08,
1218
+ "logits/chosen": 2.641442060470581,
1219
+ "logits/rejected": 3.504525661468506,
1220
+ "logps/chosen": -429.4436950683594,
1221
+ "logps/rejected": -426.1357421875,
1222
+ "loss": 0.0799,
1223
+ "rewards/accuracies": 0.987500011920929,
1224
+ "rewards/chosen": -0.4186324179172516,
1225
+ "rewards/margins": 56.703216552734375,
1226
+ "rewards/rejected": -57.121849060058594,
1227
+ "step": 780
1228
+ },
1229
+ {
1230
+ "epoch": 0.51,
1231
+ "learning_rate": 8.458244111349035e-08,
1232
+ "logits/chosen": 2.8256888389587402,
1233
+ "logits/rejected": 3.2680656909942627,
1234
+ "logps/chosen": -432.7039489746094,
1235
+ "logps/rejected": -435.9918518066406,
1236
+ "loss": 0.1115,
1237
+ "rewards/accuracies": 1.0,
1238
+ "rewards/chosen": 0.19469785690307617,
1239
+ "rewards/margins": 58.439208984375,
1240
+ "rewards/rejected": -58.2445182800293,
1241
+ "step": 790
1242
+ },
1243
+ {
1244
+ "epoch": 0.51,
1245
+ "learning_rate": 8.565310492505354e-08,
1246
+ "logits/chosen": 2.2154085636138916,
1247
+ "logits/rejected": 3.348832607269287,
1248
+ "logps/chosen": -443.20269775390625,
1249
+ "logps/rejected": -411.3150939941406,
1250
+ "loss": 0.2809,
1251
+ "rewards/accuracies": 1.0,
1252
+ "rewards/chosen": 7.068073272705078,
1253
+ "rewards/margins": 56.71687698364258,
1254
+ "rewards/rejected": -49.648807525634766,
1255
+ "step": 800
1256
+ },
1257
+ {
1258
+ "epoch": 0.51,
1259
+ "eval_logits/chosen": 1.8377827405929565,
1260
+ "eval_logits/rejected": 2.5198471546173096,
1261
+ "eval_logps/chosen": -403.1432189941406,
1262
+ "eval_logps/rejected": -412.51531982421875,
1263
+ "eval_loss": 0.5127639770507812,
1264
+ "eval_rewards/accuracies": 0.953125,
1265
+ "eval_rewards/chosen": 4.35873556137085,
1266
+ "eval_rewards/margins": 55.822689056396484,
1267
+ "eval_rewards/rejected": -51.463958740234375,
1268
+ "eval_runtime": 76.8497,
1269
+ "eval_samples_per_second": 13.012,
1270
+ "eval_steps_per_second": 0.416,
1271
+ "step": 800
1272
+ },
1273
+ {
1274
+ "epoch": 0.52,
1275
+ "learning_rate": 8.672376873661669e-08,
1276
+ "logits/chosen": 2.567979335784912,
1277
+ "logits/rejected": 3.7970213890075684,
1278
+ "logps/chosen": -379.3563232421875,
1279
+ "logps/rejected": -417.9363708496094,
1280
+ "loss": 0.1456,
1281
+ "rewards/accuracies": 0.9624999761581421,
1282
+ "rewards/chosen": 6.566949367523193,
1283
+ "rewards/margins": 58.88117218017578,
1284
+ "rewards/rejected": -52.31422805786133,
1285
+ "step": 810
1286
+ },
1287
+ {
1288
+ "epoch": 0.53,
1289
+ "learning_rate": 8.779443254817986e-08,
1290
+ "logits/chosen": 2.987666130065918,
1291
+ "logits/rejected": 3.0821828842163086,
1292
+ "logps/chosen": -375.0899353027344,
1293
+ "logps/rejected": -380.730712890625,
1294
+ "loss": 0.113,
1295
+ "rewards/accuracies": 0.9750000238418579,
1296
+ "rewards/chosen": 4.425510406494141,
1297
+ "rewards/margins": 57.55885696411133,
1298
+ "rewards/rejected": -53.13335037231445,
1299
+ "step": 820
1300
+ },
1301
+ {
1302
+ "epoch": 0.53,
1303
+ "learning_rate": 8.886509635974304e-08,
1304
+ "logits/chosen": 2.3225650787353516,
1305
+ "logits/rejected": 3.374905824661255,
1306
+ "logps/chosen": -404.91314697265625,
1307
+ "logps/rejected": -406.26690673828125,
1308
+ "loss": 0.221,
1309
+ "rewards/accuracies": 0.9750000238418579,
1310
+ "rewards/chosen": 0.956459641456604,
1311
+ "rewards/margins": 58.1107177734375,
1312
+ "rewards/rejected": -57.15425491333008,
1313
+ "step": 830
1314
+ },
1315
+ {
1316
+ "epoch": 0.54,
1317
+ "learning_rate": 8.99357601713062e-08,
1318
+ "logits/chosen": 2.3407421112060547,
1319
+ "logits/rejected": 3.029287815093994,
1320
+ "logps/chosen": -389.4285583496094,
1321
+ "logps/rejected": -422.2571716308594,
1322
+ "loss": 0.123,
1323
+ "rewards/accuracies": 1.0,
1324
+ "rewards/chosen": 5.533617973327637,
1325
+ "rewards/margins": 57.36924362182617,
1326
+ "rewards/rejected": -51.83562469482422,
1327
+ "step": 840
1328
+ },
1329
+ {
1330
+ "epoch": 0.55,
1331
+ "learning_rate": 9.100642398286937e-08,
1332
+ "logits/chosen": 2.4234728813171387,
1333
+ "logits/rejected": 3.6180338859558105,
1334
+ "logps/chosen": -384.3294982910156,
1335
+ "logps/rejected": -382.1348876953125,
1336
+ "loss": 0.1292,
1337
+ "rewards/accuracies": 0.9624999761581421,
1338
+ "rewards/chosen": 5.42165994644165,
1339
+ "rewards/margins": 53.802391052246094,
1340
+ "rewards/rejected": -48.38072967529297,
1341
+ "step": 850
1342
+ },
1343
+ {
1344
+ "epoch": 0.55,
1345
+ "learning_rate": 9.207708779443254e-08,
1346
+ "logits/chosen": 2.336247444152832,
1347
+ "logits/rejected": 3.8205437660217285,
1348
+ "logps/chosen": -427.49468994140625,
1349
+ "logps/rejected": -434.2718811035156,
1350
+ "loss": 0.0955,
1351
+ "rewards/accuracies": 0.987500011920929,
1352
+ "rewards/chosen": 17.532028198242188,
1353
+ "rewards/margins": 70.969482421875,
1354
+ "rewards/rejected": -53.437461853027344,
1355
+ "step": 860
1356
+ },
1357
+ {
1358
+ "epoch": 0.56,
1359
+ "learning_rate": 9.314775160599571e-08,
1360
+ "logits/chosen": 2.201402187347412,
1361
+ "logits/rejected": 3.6681246757507324,
1362
+ "logps/chosen": -437.94091796875,
1363
+ "logps/rejected": -429.28021240234375,
1364
+ "loss": 0.0989,
1365
+ "rewards/accuracies": 1.0,
1366
+ "rewards/chosen": 10.197607040405273,
1367
+ "rewards/margins": 63.349510192871094,
1368
+ "rewards/rejected": -53.15190505981445,
1369
+ "step": 870
1370
+ },
1371
+ {
1372
+ "epoch": 0.57,
1373
+ "learning_rate": 9.421841541755888e-08,
1374
+ "logits/chosen": 2.64699125289917,
1375
+ "logits/rejected": 2.797672986984253,
1376
+ "logps/chosen": -419.5973205566406,
1377
+ "logps/rejected": -427.1859436035156,
1378
+ "loss": 0.2619,
1379
+ "rewards/accuracies": 0.987500011920929,
1380
+ "rewards/chosen": 5.267810344696045,
1381
+ "rewards/margins": 64.8418197631836,
1382
+ "rewards/rejected": -59.573997497558594,
1383
+ "step": 880
1384
+ },
1385
+ {
1386
+ "epoch": 0.57,
1387
+ "learning_rate": 9.528907922912205e-08,
1388
+ "logits/chosen": 2.1525919437408447,
1389
+ "logits/rejected": 3.06669545173645,
1390
+ "logps/chosen": -447.0118103027344,
1391
+ "logps/rejected": -425.5939025878906,
1392
+ "loss": 0.1568,
1393
+ "rewards/accuracies": 1.0,
1394
+ "rewards/chosen": 12.983186721801758,
1395
+ "rewards/margins": 62.67449188232422,
1396
+ "rewards/rejected": -49.69130325317383,
1397
+ "step": 890
1398
+ },
1399
+ {
1400
+ "epoch": 0.58,
1401
+ "learning_rate": 9.635974304068522e-08,
1402
+ "logits/chosen": 2.4299418926239014,
1403
+ "logits/rejected": 3.4242959022521973,
1404
+ "logps/chosen": -390.6571350097656,
1405
+ "logps/rejected": -455.71331787109375,
1406
+ "loss": 0.2051,
1407
+ "rewards/accuracies": 0.9624999761581421,
1408
+ "rewards/chosen": 5.089010715484619,
1409
+ "rewards/margins": 74.85060119628906,
1410
+ "rewards/rejected": -69.76158905029297,
1411
+ "step": 900
1412
+ },
1413
+ {
1414
+ "epoch": 0.58,
1415
+ "eval_logits/chosen": 1.8021771907806396,
1416
+ "eval_logits/rejected": 2.491039752960205,
1417
+ "eval_logps/chosen": -400.8381042480469,
1418
+ "eval_logps/rejected": -412.0938720703125,
1419
+ "eval_loss": 0.5601515173912048,
1420
+ "eval_rewards/accuracies": 0.9375,
1421
+ "eval_rewards/chosen": 15.884079933166504,
1422
+ "eval_rewards/margins": 65.24065399169922,
1423
+ "eval_rewards/rejected": -49.3565673828125,
1424
+ "eval_runtime": 77.0047,
1425
+ "eval_samples_per_second": 12.986,
1426
+ "eval_steps_per_second": 0.416,
1427
+ "step": 900
1428
+ },
1429
+ {
1430
+ "epoch": 0.58,
1431
+ "learning_rate": 9.743040685224839e-08,
1432
+ "logits/chosen": 2.6785802841186523,
1433
+ "logits/rejected": 3.158878803253174,
1434
+ "logps/chosen": -364.9392395019531,
1435
+ "logps/rejected": -421.75604248046875,
1436
+ "loss": 0.1121,
1437
+ "rewards/accuracies": 0.987500011920929,
1438
+ "rewards/chosen": 15.941950798034668,
1439
+ "rewards/margins": 70.44316101074219,
1440
+ "rewards/rejected": -54.501220703125,
1441
+ "step": 910
1442
+ },
1443
+ {
1444
+ "epoch": 0.59,
1445
+ "learning_rate": 9.850107066381156e-08,
1446
+ "logits/chosen": 2.1345481872558594,
1447
+ "logits/rejected": 3.1029703617095947,
1448
+ "logps/chosen": -467.07318115234375,
1449
+ "logps/rejected": -428.45220947265625,
1450
+ "loss": 0.2077,
1451
+ "rewards/accuracies": 1.0,
1452
+ "rewards/chosen": 8.983004570007324,
1453
+ "rewards/margins": 70.48181915283203,
1454
+ "rewards/rejected": -61.49879837036133,
1455
+ "step": 920
1456
+ },
1457
+ {
1458
+ "epoch": 0.6,
1459
+ "learning_rate": 9.957173447537473e-08,
1460
+ "logits/chosen": 2.542916774749756,
1461
+ "logits/rejected": 3.5035908222198486,
1462
+ "logps/chosen": -423.0205993652344,
1463
+ "logps/rejected": -416.65509033203125,
1464
+ "loss": 0.2814,
1465
+ "rewards/accuracies": 0.987500011920929,
1466
+ "rewards/chosen": 5.553844928741455,
1467
+ "rewards/margins": 68.67607116699219,
1468
+ "rewards/rejected": -63.122222900390625,
1469
+ "step": 930
1470
+ },
1471
+ {
1472
+ "epoch": 0.6,
1473
+ "learning_rate": 9.992858843132586e-08,
1474
+ "logits/chosen": 2.491159439086914,
1475
+ "logits/rejected": 3.259873867034912,
1476
+ "logps/chosen": -475.2666931152344,
1477
+ "logps/rejected": -461.07061767578125,
1478
+ "loss": 0.1522,
1479
+ "rewards/accuracies": 0.987500011920929,
1480
+ "rewards/chosen": 4.259446144104004,
1481
+ "rewards/margins": 78.9701919555664,
1482
+ "rewards/rejected": -74.71073913574219,
1483
+ "step": 940
1484
+ },
1485
+ {
1486
+ "epoch": 0.61,
1487
+ "learning_rate": 9.980956915020233e-08,
1488
+ "logits/chosen": 2.68623423576355,
1489
+ "logits/rejected": 3.0718812942504883,
1490
+ "logps/chosen": -361.89483642578125,
1491
+ "logps/rejected": -413.39288330078125,
1492
+ "loss": 0.3246,
1493
+ "rewards/accuracies": 0.9624999761581421,
1494
+ "rewards/chosen": 8.68491268157959,
1495
+ "rewards/margins": 75.4022445678711,
1496
+ "rewards/rejected": -66.71733093261719,
1497
+ "step": 950
1498
+ },
1499
+ {
1500
+ "epoch": 0.62,
1501
+ "learning_rate": 9.969054986907879e-08,
1502
+ "logits/chosen": 2.752331495285034,
1503
+ "logits/rejected": 3.481398344039917,
1504
+ "logps/chosen": -392.8135986328125,
1505
+ "logps/rejected": -413.65313720703125,
1506
+ "loss": 0.2217,
1507
+ "rewards/accuracies": 1.0,
1508
+ "rewards/chosen": 3.2605032920837402,
1509
+ "rewards/margins": 72.24468994140625,
1510
+ "rewards/rejected": -68.98418426513672,
1511
+ "step": 960
1512
+ },
1513
+ {
1514
+ "epoch": 0.62,
1515
+ "learning_rate": 9.957153058795524e-08,
1516
+ "logits/chosen": 2.0877063274383545,
1517
+ "logits/rejected": 3.1967997550964355,
1518
+ "logps/chosen": -398.50030517578125,
1519
+ "logps/rejected": -458.2386779785156,
1520
+ "loss": 0.1414,
1521
+ "rewards/accuracies": 0.9750000238418579,
1522
+ "rewards/chosen": 4.164697170257568,
1523
+ "rewards/margins": 79.06629943847656,
1524
+ "rewards/rejected": -74.90159606933594,
1525
+ "step": 970
1526
+ },
1527
+ {
1528
+ "epoch": 0.63,
1529
+ "learning_rate": 9.94525113068317e-08,
1530
+ "logits/chosen": 2.6983773708343506,
1531
+ "logits/rejected": 3.458531141281128,
1532
+ "logps/chosen": -400.029296875,
1533
+ "logps/rejected": -411.06903076171875,
1534
+ "loss": 0.2352,
1535
+ "rewards/accuracies": 0.9750000238418579,
1536
+ "rewards/chosen": 4.437505722045898,
1537
+ "rewards/margins": 78.08453369140625,
1538
+ "rewards/rejected": -73.64701843261719,
1539
+ "step": 980
1540
+ },
1541
+ {
1542
+ "epoch": 0.64,
1543
+ "learning_rate": 9.933349202570817e-08,
1544
+ "logits/chosen": 2.2401540279388428,
1545
+ "logits/rejected": 3.9224307537078857,
1546
+ "logps/chosen": -416.910400390625,
1547
+ "logps/rejected": -441.75482177734375,
1548
+ "loss": 0.1331,
1549
+ "rewards/accuracies": 1.0,
1550
+ "rewards/chosen": 7.2108893394470215,
1551
+ "rewards/margins": 89.5989990234375,
1552
+ "rewards/rejected": -82.38809967041016,
1553
+ "step": 990
1554
+ },
1555
+ {
1556
+ "epoch": 0.64,
1557
+ "learning_rate": 9.921447274458463e-08,
1558
+ "logits/chosen": 2.333087682723999,
1559
+ "logits/rejected": 3.3145065307617188,
1560
+ "logps/chosen": -430.5704650878906,
1561
+ "logps/rejected": -428.00384521484375,
1562
+ "loss": 0.2009,
1563
+ "rewards/accuracies": 1.0,
1564
+ "rewards/chosen": -3.077216863632202,
1565
+ "rewards/margins": 81.33201599121094,
1566
+ "rewards/rejected": -84.40922546386719,
1567
+ "step": 1000
1568
+ },
1569
+ {
1570
+ "epoch": 0.64,
1571
+ "eval_logits/chosen": 1.85965096950531,
1572
+ "eval_logits/rejected": 2.5712876319885254,
1573
+ "eval_logps/chosen": -405.8722839355469,
1574
+ "eval_logps/rejected": -420.0833740234375,
1575
+ "eval_loss": 0.873622715473175,
1576
+ "eval_rewards/accuracies": 0.9296875,
1577
+ "eval_rewards/chosen": -9.286641120910645,
1578
+ "eval_rewards/margins": 80.01751708984375,
1579
+ "eval_rewards/rejected": -89.30416107177734,
1580
+ "eval_runtime": 76.9649,
1581
+ "eval_samples_per_second": 12.993,
1582
+ "eval_steps_per_second": 0.416,
1583
+ "step": 1000
1584
+ },
1585
+ {
1586
+ "epoch": 0.65,
1587
+ "learning_rate": 9.909545346346108e-08,
1588
+ "logits/chosen": 2.4723129272460938,
1589
+ "logits/rejected": 3.478045701980591,
1590
+ "logps/chosen": -388.63848876953125,
1591
+ "logps/rejected": -405.98748779296875,
1592
+ "loss": 0.3397,
1593
+ "rewards/accuracies": 0.987500011920929,
1594
+ "rewards/chosen": 3.27624773979187,
1595
+ "rewards/margins": 83.57379150390625,
1596
+ "rewards/rejected": -80.29755401611328,
1597
+ "step": 1010
1598
+ },
1599
+ {
1600
+ "epoch": 0.66,
1601
+ "learning_rate": 9.897643418233753e-08,
1602
+ "logits/chosen": 2.1651980876922607,
1603
+ "logits/rejected": 3.202363967895508,
1604
+ "logps/chosen": -439.97894287109375,
1605
+ "logps/rejected": -464.5369567871094,
1606
+ "loss": 0.296,
1607
+ "rewards/accuracies": 0.987500011920929,
1608
+ "rewards/chosen": 9.399145126342773,
1609
+ "rewards/margins": 104.0501480102539,
1610
+ "rewards/rejected": -94.65100860595703,
1611
+ "step": 1020
1612
+ },
1613
+ {
1614
+ "epoch": 0.66,
1615
+ "learning_rate": 9.885741490121398e-08,
1616
+ "logits/chosen": 2.6700689792633057,
1617
+ "logits/rejected": 3.753943681716919,
1618
+ "logps/chosen": -412.857666015625,
1619
+ "logps/rejected": -408.2137756347656,
1620
+ "loss": 0.267,
1621
+ "rewards/accuracies": 1.0,
1622
+ "rewards/chosen": 17.997615814208984,
1623
+ "rewards/margins": 86.39213562011719,
1624
+ "rewards/rejected": -68.39451599121094,
1625
+ "step": 1030
1626
+ },
1627
+ {
1628
+ "epoch": 0.67,
1629
+ "learning_rate": 9.873839562009045e-08,
1630
+ "logits/chosen": 2.110105514526367,
1631
+ "logits/rejected": 3.5830421447753906,
1632
+ "logps/chosen": -426.0606994628906,
1633
+ "logps/rejected": -430.07073974609375,
1634
+ "loss": 0.1166,
1635
+ "rewards/accuracies": 0.987500011920929,
1636
+ "rewards/chosen": 16.306238174438477,
1637
+ "rewards/margins": 97.02738952636719,
1638
+ "rewards/rejected": -80.72114562988281,
1639
+ "step": 1040
1640
+ },
1641
+ {
1642
+ "epoch": 0.67,
1643
+ "learning_rate": 9.861937633896691e-08,
1644
+ "logits/chosen": 2.830747127532959,
1645
+ "logits/rejected": 3.9411635398864746,
1646
+ "logps/chosen": -391.10858154296875,
1647
+ "logps/rejected": -417.4032287597656,
1648
+ "loss": 0.3818,
1649
+ "rewards/accuracies": 0.949999988079071,
1650
+ "rewards/chosen": 4.93004846572876,
1651
+ "rewards/margins": 88.80459594726562,
1652
+ "rewards/rejected": -83.87454223632812,
1653
+ "step": 1050
1654
+ },
1655
+ {
1656
+ "epoch": 0.68,
1657
+ "learning_rate": 9.850035705784336e-08,
1658
+ "logits/chosen": 2.3416190147399902,
1659
+ "logits/rejected": 3.0985264778137207,
1660
+ "logps/chosen": -461.04718017578125,
1661
+ "logps/rejected": -448.1722106933594,
1662
+ "loss": 0.0969,
1663
+ "rewards/accuracies": 0.9624999761581421,
1664
+ "rewards/chosen": 14.76380443572998,
1665
+ "rewards/margins": 97.76150512695312,
1666
+ "rewards/rejected": -82.99769592285156,
1667
+ "step": 1060
1668
+ },
1669
+ {
1670
+ "epoch": 0.69,
1671
+ "learning_rate": 9.838133777671982e-08,
1672
+ "logits/chosen": 2.414294719696045,
1673
+ "logits/rejected": 3.4742074012756348,
1674
+ "logps/chosen": -384.5827941894531,
1675
+ "logps/rejected": -398.83050537109375,
1676
+ "loss": 0.4072,
1677
+ "rewards/accuracies": 0.9375,
1678
+ "rewards/chosen": 10.109460830688477,
1679
+ "rewards/margins": 92.21754455566406,
1680
+ "rewards/rejected": -82.10807037353516,
1681
+ "step": 1070
1682
+ },
1683
+ {
1684
+ "epoch": 0.69,
1685
+ "learning_rate": 9.826231849559629e-08,
1686
+ "logits/chosen": 2.7465944290161133,
1687
+ "logits/rejected": 3.1911470890045166,
1688
+ "logps/chosen": -422.5624084472656,
1689
+ "logps/rejected": -440.286376953125,
1690
+ "loss": 0.1751,
1691
+ "rewards/accuracies": 1.0,
1692
+ "rewards/chosen": 17.269311904907227,
1693
+ "rewards/margins": 104.30892181396484,
1694
+ "rewards/rejected": -87.03959655761719,
1695
+ "step": 1080
1696
+ },
1697
+ {
1698
+ "epoch": 0.7,
1699
+ "learning_rate": 9.814329921447275e-08,
1700
+ "logits/chosen": 2.822819232940674,
1701
+ "logits/rejected": 3.005201816558838,
1702
+ "logps/chosen": -391.0602111816406,
1703
+ "logps/rejected": -419.813720703125,
1704
+ "loss": 0.3021,
1705
+ "rewards/accuracies": 1.0,
1706
+ "rewards/chosen": 13.785235404968262,
1707
+ "rewards/margins": 121.23738098144531,
1708
+ "rewards/rejected": -107.45216369628906,
1709
+ "step": 1090
1710
+ },
1711
+ {
1712
+ "epoch": 0.71,
1713
+ "learning_rate": 9.80242799333492e-08,
1714
+ "logits/chosen": 2.8149497509002686,
1715
+ "logits/rejected": 3.3617329597473145,
1716
+ "logps/chosen": -402.799560546875,
1717
+ "logps/rejected": -425.45343017578125,
1718
+ "loss": 0.2838,
1719
+ "rewards/accuracies": 0.925000011920929,
1720
+ "rewards/chosen": 14.5247163772583,
1721
+ "rewards/margins": 105.98973083496094,
1722
+ "rewards/rejected": -91.46500396728516,
1723
+ "step": 1100
1724
+ },
1725
+ {
1726
+ "epoch": 0.71,
1727
+ "eval_logits/chosen": 1.8558049201965332,
1728
+ "eval_logits/rejected": 2.5366647243499756,
1729
+ "eval_logps/chosen": -402.8280944824219,
1730
+ "eval_logps/rejected": -421.0693054199219,
1731
+ "eval_loss": 0.8600718975067139,
1732
+ "eval_rewards/accuracies": 0.953125,
1733
+ "eval_rewards/chosen": 5.934433460235596,
1734
+ "eval_rewards/margins": 100.16818237304688,
1735
+ "eval_rewards/rejected": -94.23374938964844,
1736
+ "eval_runtime": 76.8109,
1737
+ "eval_samples_per_second": 13.019,
1738
+ "eval_steps_per_second": 0.417,
1739
+ "step": 1100
1740
+ },
1741
+ {
1742
+ "epoch": 0.71,
1743
+ "learning_rate": 9.790526065222565e-08,
1744
+ "logits/chosen": 2.653266429901123,
1745
+ "logits/rejected": 3.1961002349853516,
1746
+ "logps/chosen": -445.80322265625,
1747
+ "logps/rejected": -429.64373779296875,
1748
+ "loss": 0.5293,
1749
+ "rewards/accuracies": 0.9750000238418579,
1750
+ "rewards/chosen": 21.07932472229004,
1751
+ "rewards/margins": 100.654052734375,
1752
+ "rewards/rejected": -79.57472229003906,
1753
+ "step": 1110
1754
+ },
1755
+ {
1756
+ "epoch": 0.72,
1757
+ "learning_rate": 9.778624137110211e-08,
1758
+ "logits/chosen": 2.4288971424102783,
1759
+ "logits/rejected": 3.4981236457824707,
1760
+ "logps/chosen": -452.72967529296875,
1761
+ "logps/rejected": -437.428955078125,
1762
+ "loss": 0.1353,
1763
+ "rewards/accuracies": 0.987500011920929,
1764
+ "rewards/chosen": 19.18131446838379,
1765
+ "rewards/margins": 117.1941146850586,
1766
+ "rewards/rejected": -98.01280212402344,
1767
+ "step": 1120
1768
+ },
1769
+ {
1770
+ "epoch": 0.73,
1771
+ "learning_rate": 9.766722208997857e-08,
1772
+ "logits/chosen": 2.525935173034668,
1773
+ "logits/rejected": 3.092148780822754,
1774
+ "logps/chosen": -402.27886962890625,
1775
+ "logps/rejected": -428.43280029296875,
1776
+ "loss": 0.1894,
1777
+ "rewards/accuracies": 0.9750000238418579,
1778
+ "rewards/chosen": 20.851654052734375,
1779
+ "rewards/margins": 118.8115234375,
1780
+ "rewards/rejected": -97.95986938476562,
1781
+ "step": 1130
1782
+ },
1783
+ {
1784
+ "epoch": 0.73,
1785
+ "learning_rate": 9.754820280885503e-08,
1786
+ "logits/chosen": 2.715815782546997,
1787
+ "logits/rejected": 4.007362365722656,
1788
+ "logps/chosen": -399.8186340332031,
1789
+ "logps/rejected": -423.14862060546875,
1790
+ "loss": 0.1684,
1791
+ "rewards/accuracies": 0.9750000238418579,
1792
+ "rewards/chosen": 9.72397232055664,
1793
+ "rewards/margins": 105.40447998046875,
1794
+ "rewards/rejected": -95.68050384521484,
1795
+ "step": 1140
1796
+ },
1797
+ {
1798
+ "epoch": 0.74,
1799
+ "learning_rate": 9.742918352773148e-08,
1800
+ "logits/chosen": 2.2262580394744873,
1801
+ "logits/rejected": 3.2408432960510254,
1802
+ "logps/chosen": -477.47711181640625,
1803
+ "logps/rejected": -495.76043701171875,
1804
+ "loss": 0.1725,
1805
+ "rewards/accuracies": 0.987500011920929,
1806
+ "rewards/chosen": 6.365964412689209,
1807
+ "rewards/margins": 132.43780517578125,
1808
+ "rewards/rejected": -126.07185363769531,
1809
+ "step": 1150
1810
+ },
1811
+ {
1812
+ "epoch": 0.75,
1813
+ "learning_rate": 9.731016424660795e-08,
1814
+ "logits/chosen": 2.7380359172821045,
1815
+ "logits/rejected": 3.5110669136047363,
1816
+ "logps/chosen": -398.9952392578125,
1817
+ "logps/rejected": -426.838623046875,
1818
+ "loss": 0.2338,
1819
+ "rewards/accuracies": 1.0,
1820
+ "rewards/chosen": 12.770757675170898,
1821
+ "rewards/margins": 123.94891357421875,
1822
+ "rewards/rejected": -111.17814636230469,
1823
+ "step": 1160
1824
+ },
1825
+ {
1826
+ "epoch": 0.75,
1827
+ "learning_rate": 9.719114496548441e-08,
1828
+ "logits/chosen": 2.0883517265319824,
1829
+ "logits/rejected": 3.5194995403289795,
1830
+ "logps/chosen": -424.92803955078125,
1831
+ "logps/rejected": -390.85784912109375,
1832
+ "loss": 0.3672,
1833
+ "rewards/accuracies": 0.9624999761581421,
1834
+ "rewards/chosen": 9.357166290283203,
1835
+ "rewards/margins": 103.42280578613281,
1836
+ "rewards/rejected": -94.06563568115234,
1837
+ "step": 1170
1838
+ },
1839
+ {
1840
+ "epoch": 0.76,
1841
+ "learning_rate": 9.707212568436087e-08,
1842
+ "logits/chosen": 2.170081615447998,
1843
+ "logits/rejected": 3.3753578662872314,
1844
+ "logps/chosen": -437.91082763671875,
1845
+ "logps/rejected": -430.90966796875,
1846
+ "loss": 0.0796,
1847
+ "rewards/accuracies": 1.0,
1848
+ "rewards/chosen": 8.644025802612305,
1849
+ "rewards/margins": 108.9352798461914,
1850
+ "rewards/rejected": -100.2912368774414,
1851
+ "step": 1180
1852
+ },
1853
+ {
1854
+ "epoch": 0.76,
1855
+ "learning_rate": 9.695310640323732e-08,
1856
+ "logits/chosen": 2.7604737281799316,
1857
+ "logits/rejected": 3.7521843910217285,
1858
+ "logps/chosen": -370.9176025390625,
1859
+ "logps/rejected": -435.0440979003906,
1860
+ "loss": 0.2187,
1861
+ "rewards/accuracies": 1.0,
1862
+ "rewards/chosen": 10.98758316040039,
1863
+ "rewards/margins": 134.98072814941406,
1864
+ "rewards/rejected": -123.9931411743164,
1865
+ "step": 1190
1866
+ },
1867
+ {
1868
+ "epoch": 0.77,
1869
+ "learning_rate": 9.683408712211378e-08,
1870
+ "logits/chosen": 2.3693931102752686,
1871
+ "logits/rejected": 3.17862606048584,
1872
+ "logps/chosen": -467.9139099121094,
1873
+ "logps/rejected": -460.11297607421875,
1874
+ "loss": 0.3489,
1875
+ "rewards/accuracies": 0.987500011920929,
1876
+ "rewards/chosen": 3.4328982830047607,
1877
+ "rewards/margins": 135.82644653320312,
1878
+ "rewards/rejected": -132.3935546875,
1879
+ "step": 1200
1880
+ },
1881
+ {
1882
+ "epoch": 0.77,
1883
+ "eval_logits/chosen": 1.8742191791534424,
1884
+ "eval_logits/rejected": 2.560530662536621,
1885
+ "eval_logps/chosen": -404.9606628417969,
1886
+ "eval_logps/rejected": -424.3955383300781,
1887
+ "eval_loss": 1.1546683311462402,
1888
+ "eval_rewards/accuracies": 0.9453125,
1889
+ "eval_rewards/chosen": -4.728623390197754,
1890
+ "eval_rewards/margins": 106.13644409179688,
1891
+ "eval_rewards/rejected": -110.86505889892578,
1892
+ "eval_runtime": 77.052,
1893
+ "eval_samples_per_second": 12.978,
1894
+ "eval_steps_per_second": 0.415,
1895
+ "step": 1200
1896
+ },
1897
+ {
1898
+ "epoch": 0.78,
1899
+ "learning_rate": 9.671506784099024e-08,
1900
+ "logits/chosen": 2.997239589691162,
1901
+ "logits/rejected": 3.5606212615966797,
1902
+ "logps/chosen": -392.54229736328125,
1903
+ "logps/rejected": -423.11322021484375,
1904
+ "loss": 0.0937,
1905
+ "rewards/accuracies": 0.987500011920929,
1906
+ "rewards/chosen": -0.05036654323339462,
1907
+ "rewards/margins": 117.02787780761719,
1908
+ "rewards/rejected": -117.07823181152344,
1909
+ "step": 1210
1910
+ },
1911
+ {
1912
+ "epoch": 0.78,
1913
+ "learning_rate": 9.659604855986669e-08,
1914
+ "logits/chosen": 2.1404788494110107,
1915
+ "logits/rejected": 3.323446273803711,
1916
+ "logps/chosen": -427.2289123535156,
1917
+ "logps/rejected": -433.6624450683594,
1918
+ "loss": 0.2752,
1919
+ "rewards/accuracies": 1.0,
1920
+ "rewards/chosen": 12.247556686401367,
1921
+ "rewards/margins": 107.3965835571289,
1922
+ "rewards/rejected": -95.14904022216797,
1923
+ "step": 1220
1924
+ },
1925
+ {
1926
+ "epoch": 0.79,
1927
+ "learning_rate": 9.647702927874315e-08,
1928
+ "logits/chosen": 2.563352346420288,
1929
+ "logits/rejected": 3.0540013313293457,
1930
+ "logps/chosen": -447.8446350097656,
1931
+ "logps/rejected": -452.87969970703125,
1932
+ "loss": 0.0435,
1933
+ "rewards/accuracies": 0.9750000238418579,
1934
+ "rewards/chosen": 23.387487411499023,
1935
+ "rewards/margins": 119.50389099121094,
1936
+ "rewards/rejected": -96.11639404296875,
1937
+ "step": 1230
1938
+ },
1939
+ {
1940
+ "epoch": 0.8,
1941
+ "learning_rate": 9.63580099976196e-08,
1942
+ "logits/chosen": 2.1849985122680664,
1943
+ "logits/rejected": 3.4553771018981934,
1944
+ "logps/chosen": -387.8475341796875,
1945
+ "logps/rejected": -393.53192138671875,
1946
+ "loss": 0.2143,
1947
+ "rewards/accuracies": 1.0,
1948
+ "rewards/chosen": 26.278972625732422,
1949
+ "rewards/margins": 115.37886810302734,
1950
+ "rewards/rejected": -89.09989929199219,
1951
+ "step": 1240
1952
+ },
1953
+ {
1954
+ "epoch": 0.8,
1955
+ "learning_rate": 9.623899071649607e-08,
1956
+ "logits/chosen": 3.139633893966675,
1957
+ "logits/rejected": 3.37255859375,
1958
+ "logps/chosen": -398.74005126953125,
1959
+ "logps/rejected": -418.0592346191406,
1960
+ "loss": 0.0799,
1961
+ "rewards/accuracies": 0.9750000238418579,
1962
+ "rewards/chosen": 22.002050399780273,
1963
+ "rewards/margins": 125.72208404541016,
1964
+ "rewards/rejected": -103.72003173828125,
1965
+ "step": 1250
1966
+ },
1967
+ {
1968
+ "epoch": 0.81,
1969
+ "learning_rate": 9.611997143537253e-08,
1970
+ "logits/chosen": 2.665964126586914,
1971
+ "logits/rejected": 2.8448374271392822,
1972
+ "logps/chosen": -427.25677490234375,
1973
+ "logps/rejected": -430.95025634765625,
1974
+ "loss": 0.3788,
1975
+ "rewards/accuracies": 0.9624999761581421,
1976
+ "rewards/chosen": 21.40536880493164,
1977
+ "rewards/margins": 111.3967056274414,
1978
+ "rewards/rejected": -89.9913330078125,
1979
+ "step": 1260
1980
+ },
1981
+ {
1982
+ "epoch": 0.82,
1983
+ "learning_rate": 9.600095215424899e-08,
1984
+ "logits/chosen": 2.7928450107574463,
1985
+ "logits/rejected": 3.595740795135498,
1986
+ "logps/chosen": -382.04327392578125,
1987
+ "logps/rejected": -431.33734130859375,
1988
+ "loss": 0.3577,
1989
+ "rewards/accuracies": 0.9624999761581421,
1990
+ "rewards/chosen": 16.352216720581055,
1991
+ "rewards/margins": 119.71681213378906,
1992
+ "rewards/rejected": -103.3646011352539,
1993
+ "step": 1270
1994
+ },
1995
+ {
1996
+ "epoch": 0.82,
1997
+ "learning_rate": 9.588193287312544e-08,
1998
+ "logits/chosen": 2.237015962600708,
1999
+ "logits/rejected": 2.9541878700256348,
2000
+ "logps/chosen": -421.40032958984375,
2001
+ "logps/rejected": -427.09649658203125,
2002
+ "loss": 0.0439,
2003
+ "rewards/accuracies": 1.0,
2004
+ "rewards/chosen": 19.52680015563965,
2005
+ "rewards/margins": 119.0426254272461,
2006
+ "rewards/rejected": -99.51582336425781,
2007
+ "step": 1280
2008
+ },
2009
+ {
2010
+ "epoch": 0.83,
2011
+ "learning_rate": 9.57629135920019e-08,
2012
+ "logits/chosen": 2.776695728302002,
2013
+ "logits/rejected": 3.7990450859069824,
2014
+ "logps/chosen": -398.505859375,
2015
+ "logps/rejected": -432.1246032714844,
2016
+ "loss": 0.5895,
2017
+ "rewards/accuracies": 1.0,
2018
+ "rewards/chosen": 19.98464012145996,
2019
+ "rewards/margins": 124.7027816772461,
2020
+ "rewards/rejected": -104.7181625366211,
2021
+ "step": 1290
2022
+ },
2023
+ {
2024
+ "epoch": 0.84,
2025
+ "learning_rate": 9.564389431087836e-08,
2026
+ "logits/chosen": 2.5397887229919434,
2027
+ "logits/rejected": 3.605865478515625,
2028
+ "logps/chosen": -403.42132568359375,
2029
+ "logps/rejected": -455.809814453125,
2030
+ "loss": 0.1571,
2031
+ "rewards/accuracies": 1.0,
2032
+ "rewards/chosen": 29.50467872619629,
2033
+ "rewards/margins": 132.95152282714844,
2034
+ "rewards/rejected": -103.44685363769531,
2035
+ "step": 1300
2036
+ },
2037
+ {
2038
+ "epoch": 0.84,
2039
+ "eval_logits/chosen": 1.7718256711959839,
2040
+ "eval_logits/rejected": 2.484117031097412,
2041
+ "eval_logps/chosen": -400.06805419921875,
2042
+ "eval_logps/rejected": -421.99560546875,
2043
+ "eval_loss": 0.9890532493591309,
2044
+ "eval_rewards/accuracies": 0.9453125,
2045
+ "eval_rewards/chosen": 19.734703063964844,
2046
+ "eval_rewards/margins": 118.60000610351562,
2047
+ "eval_rewards/rejected": -98.86530303955078,
2048
+ "eval_runtime": 76.9799,
2049
+ "eval_samples_per_second": 12.99,
2050
+ "eval_steps_per_second": 0.416,
2051
+ "step": 1300
2052
+ },
2053
+ {
2054
+ "epoch": 0.84,
2055
+ "learning_rate": 9.552487502975481e-08,
2056
+ "logits/chosen": 2.1767947673797607,
2057
+ "logits/rejected": 3.4365925788879395,
2058
+ "logps/chosen": -440.6006774902344,
2059
+ "logps/rejected": -473.36700439453125,
2060
+ "loss": 0.3716,
2061
+ "rewards/accuracies": 0.987500011920929,
2062
+ "rewards/chosen": 15.208317756652832,
2063
+ "rewards/margins": 130.98887634277344,
2064
+ "rewards/rejected": -115.78055572509766,
2065
+ "step": 1310
2066
+ },
2067
+ {
2068
+ "epoch": 0.85,
2069
+ "learning_rate": 9.540585574863127e-08,
2070
+ "logits/chosen": 2.517340898513794,
2071
+ "logits/rejected": 3.1486434936523438,
2072
+ "logps/chosen": -437.44854736328125,
2073
+ "logps/rejected": -439.794921875,
2074
+ "loss": 0.2643,
2075
+ "rewards/accuracies": 0.9624999761581421,
2076
+ "rewards/chosen": 2.325850009918213,
2077
+ "rewards/margins": 115.98299407958984,
2078
+ "rewards/rejected": -113.65716552734375,
2079
+ "step": 1320
2080
+ },
2081
+ {
2082
+ "epoch": 0.85,
2083
+ "learning_rate": 9.528683646750774e-08,
2084
+ "logits/chosen": 2.605825185775757,
2085
+ "logits/rejected": 3.62730073928833,
2086
+ "logps/chosen": -415.30926513671875,
2087
+ "logps/rejected": -434.9833068847656,
2088
+ "loss": 0.1454,
2089
+ "rewards/accuracies": 0.9750000238418579,
2090
+ "rewards/chosen": 12.735153198242188,
2091
+ "rewards/margins": 126.29862213134766,
2092
+ "rewards/rejected": -113.56346130371094,
2093
+ "step": 1330
2094
+ },
2095
+ {
2096
+ "epoch": 0.86,
2097
+ "learning_rate": 9.51678171863842e-08,
2098
+ "logits/chosen": 2.488985776901245,
2099
+ "logits/rejected": 3.484701156616211,
2100
+ "logps/chosen": -426.5611877441406,
2101
+ "logps/rejected": -440.25946044921875,
2102
+ "loss": 0.5792,
2103
+ "rewards/accuracies": 0.9624999761581421,
2104
+ "rewards/chosen": 8.252630233764648,
2105
+ "rewards/margins": 123.63459777832031,
2106
+ "rewards/rejected": -115.3819580078125,
2107
+ "step": 1340
2108
+ },
2109
+ {
2110
+ "epoch": 0.87,
2111
+ "learning_rate": 9.504879790526065e-08,
2112
+ "logits/chosen": 2.908905506134033,
2113
+ "logits/rejected": 3.2668673992156982,
2114
+ "logps/chosen": -379.87969970703125,
2115
+ "logps/rejected": -445.2496643066406,
2116
+ "loss": 0.3025,
2117
+ "rewards/accuracies": 1.0,
2118
+ "rewards/chosen": 7.975564479827881,
2119
+ "rewards/margins": 144.19297790527344,
2120
+ "rewards/rejected": -136.21739196777344,
2121
+ "step": 1350
2122
+ },
2123
+ {
2124
+ "epoch": 0.87,
2125
+ "learning_rate": 9.49297786241371e-08,
2126
+ "logits/chosen": 2.915165424346924,
2127
+ "logits/rejected": 3.0565438270568848,
2128
+ "logps/chosen": -418.11669921875,
2129
+ "logps/rejected": -470.24554443359375,
2130
+ "loss": 0.1609,
2131
+ "rewards/accuracies": 0.987500011920929,
2132
+ "rewards/chosen": 5.699041843414307,
2133
+ "rewards/margins": 146.64541625976562,
2134
+ "rewards/rejected": -140.94638061523438,
2135
+ "step": 1360
2136
+ },
2137
+ {
2138
+ "epoch": 0.88,
2139
+ "learning_rate": 9.481075934301356e-08,
2140
+ "logits/chosen": 2.8868582248687744,
2141
+ "logits/rejected": 3.6842854022979736,
2142
+ "logps/chosen": -373.21185302734375,
2143
+ "logps/rejected": -419.702392578125,
2144
+ "loss": 0.1811,
2145
+ "rewards/accuracies": 0.9624999761581421,
2146
+ "rewards/chosen": -9.702957153320312,
2147
+ "rewards/margins": 123.20098876953125,
2148
+ "rewards/rejected": -132.90394592285156,
2149
+ "step": 1370
2150
+ },
2151
+ {
2152
+ "epoch": 0.89,
2153
+ "learning_rate": 9.469174006189002e-08,
2154
+ "logits/chosen": 2.415966749191284,
2155
+ "logits/rejected": 3.7415695190429688,
2156
+ "logps/chosen": -477.23406982421875,
2157
+ "logps/rejected": -459.58343505859375,
2158
+ "loss": 0.1843,
2159
+ "rewards/accuracies": 0.987500011920929,
2160
+ "rewards/chosen": 7.450096130371094,
2161
+ "rewards/margins": 136.1929931640625,
2162
+ "rewards/rejected": -128.74290466308594,
2163
+ "step": 1380
2164
+ },
2165
+ {
2166
+ "epoch": 0.89,
2167
+ "learning_rate": 9.457272078076648e-08,
2168
+ "logits/chosen": 2.6312685012817383,
2169
+ "logits/rejected": 3.0948596000671387,
2170
+ "logps/chosen": -434.4181213378906,
2171
+ "logps/rejected": -447.225341796875,
2172
+ "loss": 0.3219,
2173
+ "rewards/accuracies": 0.9750000238418579,
2174
+ "rewards/chosen": 3.2366714477539062,
2175
+ "rewards/margins": 148.01669311523438,
2176
+ "rewards/rejected": -144.780029296875,
2177
+ "step": 1390
2178
+ },
2179
+ {
2180
+ "epoch": 0.9,
2181
+ "learning_rate": 9.445370149964293e-08,
2182
+ "logits/chosen": 2.789755344390869,
2183
+ "logits/rejected": 3.598536729812622,
2184
+ "logps/chosen": -400.4031982421875,
2185
+ "logps/rejected": -415.2806091308594,
2186
+ "loss": 0.2459,
2187
+ "rewards/accuracies": 0.949999988079071,
2188
+ "rewards/chosen": 13.055984497070312,
2189
+ "rewards/margins": 139.63107299804688,
2190
+ "rewards/rejected": -126.5750961303711,
2191
+ "step": 1400
2192
+ },
2193
+ {
2194
+ "epoch": 0.9,
2195
+ "eval_logits/chosen": 1.8071796894073486,
2196
+ "eval_logits/rejected": 2.5058929920196533,
2197
+ "eval_logps/chosen": -401.0693359375,
2198
+ "eval_logps/rejected": -428.1545715332031,
2199
+ "eval_loss": 1.071203589439392,
2200
+ "eval_rewards/accuracies": 0.9375,
2201
+ "eval_rewards/chosen": 14.728099822998047,
2202
+ "eval_rewards/margins": 144.3882293701172,
2203
+ "eval_rewards/rejected": -129.66012573242188,
2204
+ "eval_runtime": 76.8152,
2205
+ "eval_samples_per_second": 13.018,
2206
+ "eval_steps_per_second": 0.417,
2207
+ "step": 1400
2208
+ },
2209
+ {
2210
+ "epoch": 0.91,
2211
+ "learning_rate": 9.43346822185194e-08,
2212
+ "logits/chosen": 2.2163331508636475,
2213
+ "logits/rejected": 3.5338504314422607,
2214
+ "logps/chosen": -404.0025329589844,
2215
+ "logps/rejected": -437.22613525390625,
2216
+ "loss": 0.2835,
2217
+ "rewards/accuracies": 1.0,
2218
+ "rewards/chosen": 19.502986907958984,
2219
+ "rewards/margins": 152.5010528564453,
2220
+ "rewards/rejected": -132.998046875,
2221
+ "step": 1410
2222
+ },
2223
+ {
2224
+ "epoch": 0.91,
2225
+ "learning_rate": 9.421566293739586e-08,
2226
+ "logits/chosen": 2.900045156478882,
2227
+ "logits/rejected": 3.8935494422912598,
2228
+ "logps/chosen": -371.714599609375,
2229
+ "logps/rejected": -410.403564453125,
2230
+ "loss": 0.1478,
2231
+ "rewards/accuracies": 0.987500011920929,
2232
+ "rewards/chosen": -17.428253173828125,
2233
+ "rewards/margins": 138.4210968017578,
2234
+ "rewards/rejected": -155.84933471679688,
2235
+ "step": 1420
2236
+ },
2237
+ {
2238
+ "epoch": 0.92,
2239
+ "learning_rate": 9.409664365627231e-08,
2240
+ "logits/chosen": 2.534538984298706,
2241
+ "logits/rejected": 2.9124529361724854,
2242
+ "logps/chosen": -439.07183837890625,
2243
+ "logps/rejected": -433.35064697265625,
2244
+ "loss": 0.2963,
2245
+ "rewards/accuracies": 0.9624999761581421,
2246
+ "rewards/chosen": 5.632425308227539,
2247
+ "rewards/margins": 152.6386260986328,
2248
+ "rewards/rejected": -147.00619506835938,
2249
+ "step": 1430
2250
+ },
2251
+ {
2252
+ "epoch": 0.93,
2253
+ "learning_rate": 9.397762437514877e-08,
2254
+ "logits/chosen": 2.745168685913086,
2255
+ "logits/rejected": 3.2941577434539795,
2256
+ "logps/chosen": -414.8768005371094,
2257
+ "logps/rejected": -474.46258544921875,
2258
+ "loss": 0.1725,
2259
+ "rewards/accuracies": 0.9750000238418579,
2260
+ "rewards/chosen": -17.173574447631836,
2261
+ "rewards/margins": 160.98873901367188,
2262
+ "rewards/rejected": -178.16232299804688,
2263
+ "step": 1440
2264
+ },
2265
+ {
2266
+ "epoch": 0.93,
2267
+ "learning_rate": 9.385860509402523e-08,
2268
+ "logits/chosen": 2.712263584136963,
2269
+ "logits/rejected": 3.576184034347534,
2270
+ "logps/chosen": -399.8708801269531,
2271
+ "logps/rejected": -430.77703857421875,
2272
+ "loss": 0.159,
2273
+ "rewards/accuracies": 0.9624999761581421,
2274
+ "rewards/chosen": -4.633294582366943,
2275
+ "rewards/margins": 155.65737915039062,
2276
+ "rewards/rejected": -160.29067993164062,
2277
+ "step": 1450
2278
+ },
2279
+ {
2280
+ "epoch": 0.94,
2281
+ "learning_rate": 9.373958581290168e-08,
2282
+ "logits/chosen": 2.4558093547821045,
2283
+ "logits/rejected": 3.536351442337036,
2284
+ "logps/chosen": -453.64324951171875,
2285
+ "logps/rejected": -463.8458557128906,
2286
+ "loss": 0.166,
2287
+ "rewards/accuracies": 0.987500011920929,
2288
+ "rewards/chosen": 0.3881942629814148,
2289
+ "rewards/margins": 154.9169464111328,
2290
+ "rewards/rejected": -154.52874755859375,
2291
+ "step": 1460
2292
+ },
2293
+ {
2294
+ "epoch": 0.94,
2295
+ "learning_rate": 9.362056653177814e-08,
2296
+ "logits/chosen": 2.787285566329956,
2297
+ "logits/rejected": 3.214555025100708,
2298
+ "logps/chosen": -444.3168029785156,
2299
+ "logps/rejected": -416.84820556640625,
2300
+ "loss": 0.3787,
2301
+ "rewards/accuracies": 0.925000011920929,
2302
+ "rewards/chosen": 8.255861282348633,
2303
+ "rewards/margins": 150.24636840820312,
2304
+ "rewards/rejected": -141.99050903320312,
2305
+ "step": 1470
2306
+ },
2307
+ {
2308
+ "epoch": 0.95,
2309
+ "learning_rate": 9.35015472506546e-08,
2310
+ "logits/chosen": 2.6908648014068604,
2311
+ "logits/rejected": 3.7777984142303467,
2312
+ "logps/chosen": -413.5292053222656,
2313
+ "logps/rejected": -414.0494079589844,
2314
+ "loss": 0.4045,
2315
+ "rewards/accuracies": 0.9624999761581421,
2316
+ "rewards/chosen": 13.289766311645508,
2317
+ "rewards/margins": 143.24343872070312,
2318
+ "rewards/rejected": -129.95367431640625,
2319
+ "step": 1480
2320
+ },
2321
+ {
2322
+ "epoch": 0.96,
2323
+ "learning_rate": 9.338252796953105e-08,
2324
+ "logits/chosen": 2.289970636367798,
2325
+ "logits/rejected": 3.352263927459717,
2326
+ "logps/chosen": -403.9881591796875,
2327
+ "logps/rejected": -431.7518005371094,
2328
+ "loss": 0.378,
2329
+ "rewards/accuracies": 0.987500011920929,
2330
+ "rewards/chosen": 20.252124786376953,
2331
+ "rewards/margins": 172.3944549560547,
2332
+ "rewards/rejected": -152.142333984375,
2333
+ "step": 1490
2334
+ },
2335
+ {
2336
+ "epoch": 0.96,
2337
+ "learning_rate": 9.326350868840752e-08,
2338
+ "logits/chosen": 3.019411563873291,
2339
+ "logits/rejected": 3.345460891723633,
2340
+ "logps/chosen": -434.126220703125,
2341
+ "logps/rejected": -452.2032165527344,
2342
+ "loss": 0.537,
2343
+ "rewards/accuracies": 1.0,
2344
+ "rewards/chosen": 17.267078399658203,
2345
+ "rewards/margins": 171.45938110351562,
2346
+ "rewards/rejected": -154.19232177734375,
2347
+ "step": 1500
2348
+ },
2349
+ {
2350
+ "epoch": 0.96,
2351
+ "eval_logits/chosen": 1.8465936183929443,
2352
+ "eval_logits/rejected": 2.4808287620544434,
2353
+ "eval_logps/chosen": -400.0705871582031,
2354
+ "eval_logps/rejected": -427.70770263671875,
2355
+ "eval_loss": 1.2171391248703003,
2356
+ "eval_rewards/accuracies": 0.9296875,
2357
+ "eval_rewards/chosen": 19.721826553344727,
2358
+ "eval_rewards/margins": 147.1477813720703,
2359
+ "eval_rewards/rejected": -127.42596435546875,
2360
+ "eval_runtime": 76.9797,
2361
+ "eval_samples_per_second": 12.99,
2362
+ "eval_steps_per_second": 0.416,
2363
+ "step": 1500
2364
+ },
2365
+ {
2366
+ "epoch": 0.97,
2367
+ "learning_rate": 9.314448940728398e-08,
2368
+ "logits/chosen": 2.212067127227783,
2369
+ "logits/rejected": 3.642113447189331,
2370
+ "logps/chosen": -408.46063232421875,
2371
+ "logps/rejected": -423.57464599609375,
2372
+ "loss": 0.1855,
2373
+ "rewards/accuracies": 0.9750000238418579,
2374
+ "rewards/chosen": 38.055782318115234,
2375
+ "rewards/margins": 152.7517547607422,
2376
+ "rewards/rejected": -114.69598388671875,
2377
+ "step": 1510
2378
+ },
2379
+ {
2380
+ "epoch": 0.98,
2381
+ "learning_rate": 9.302547012616043e-08,
2382
+ "logits/chosen": 2.540785074234009,
2383
+ "logits/rejected": 3.4431614875793457,
2384
+ "logps/chosen": -421.13995361328125,
2385
+ "logps/rejected": -475.09716796875,
2386
+ "loss": 0.2447,
2387
+ "rewards/accuracies": 0.9624999761581421,
2388
+ "rewards/chosen": 10.023767471313477,
2389
+ "rewards/margins": 167.34268188476562,
2390
+ "rewards/rejected": -157.3189239501953,
2391
+ "step": 1520
2392
+ },
2393
+ {
2394
+ "epoch": 0.98,
2395
+ "learning_rate": 9.290645084503689e-08,
2396
+ "logits/chosen": 2.7672924995422363,
2397
+ "logits/rejected": 3.700913190841675,
2398
+ "logps/chosen": -417.6861877441406,
2399
+ "logps/rejected": -453.88818359375,
2400
+ "loss": 0.2154,
2401
+ "rewards/accuracies": 0.949999988079071,
2402
+ "rewards/chosen": 11.756277084350586,
2403
+ "rewards/margins": 158.40457153320312,
2404
+ "rewards/rejected": -146.64828491210938,
2405
+ "step": 1530
2406
+ },
2407
+ {
2408
+ "epoch": 0.99,
2409
+ "learning_rate": 9.278743156391336e-08,
2410
+ "logits/chosen": 2.3169944286346436,
2411
+ "logits/rejected": 3.3370776176452637,
2412
+ "logps/chosen": -397.16961669921875,
2413
+ "logps/rejected": -429.99554443359375,
2414
+ "loss": 0.5247,
2415
+ "rewards/accuracies": 0.9750000238418579,
2416
+ "rewards/chosen": -8.517876625061035,
2417
+ "rewards/margins": 147.62835693359375,
2418
+ "rewards/rejected": -156.146240234375,
2419
+ "step": 1540
2420
+ },
2421
+ {
2422
+ "epoch": 1.0,
2423
+ "learning_rate": 9.26684122827898e-08,
2424
+ "logits/chosen": 2.920701026916504,
2425
+ "logits/rejected": 3.607771396636963,
2426
+ "logps/chosen": -409.669189453125,
2427
+ "logps/rejected": -427.26617431640625,
2428
+ "loss": 0.6167,
2429
+ "rewards/accuracies": 0.9750000238418579,
2430
+ "rewards/chosen": -11.549623489379883,
2431
+ "rewards/margins": 140.46511840820312,
2432
+ "rewards/rejected": -152.01473999023438,
2433
+ "step": 1550
2434
+ }
2435
+ ],
2436
+ "logging_steps": 10,
2437
+ "max_steps": 9336,
2438
+ "num_train_epochs": 6,
2439
+ "save_steps": 500,
2440
+ "total_flos": 0.0,
2441
+ "trial_name": null,
2442
+ "trial_params": null
2443
+ }