statking committed
Commit
473666a
1 Parent(s): d448622

Model save

README.md ADDED
@@ -0,0 +1,61 @@
+ ---
+ license: apache-2.0
+ base_model: mistralai/Mistral-7B-v0.1
+ tags:
+ - trl
+ - orpo
+ - generated_from_trainer
+ model-index:
+ - name: zephyr-7b-sft-full-orpo
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/statking/huggingface/runs/lw7rbi20)
+ # zephyr-7b-sft-full-orpo
+
+ This model is a fine-tuned version of [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) on an unknown dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 4
+ - gradient_accumulation_steps: 2
+ - total_train_batch_size: 64
+ - total_eval_batch_size: 32
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: inverse_sqrt
+ - lr_scheduler_warmup_steps: 100
+ - num_epochs: 1
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.41.0.dev0
+ - Pytorch 2.3.0+cu121
+ - Datasets 2.19.1
+ - Tokenizers 0.19.1
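
The card stops at hyperparameters, so here is a minimal sketch (not the author's script) of how such a run could be set up with TRL's `ORPOTrainer`. Only the hyperparameters listed above come from the card; the preference dataset is a placeholder, since the card does not name one, and `beta` is left at TRL's default.

```python
# Minimal sketch, assuming the TRL ORPO API of this era (~0.8.x).
import torch
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import ORPOConfig, ORPOTrainer

model_id = "mistralai/Mistral-7B-v0.1"
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.pad_token = tokenizer.eos_token  # this repo sets pad_token to "</s>"

# Placeholder dataset: ORPO expects prompt/chosen/rejected preference pairs.
train_dataset = load_dataset("your-org/your-preference-dataset", split="train")

args = ORPOConfig(
    output_dir="zephyr-7b-sft-full-orpo",
    learning_rate=2e-5,
    per_device_train_batch_size=8,   # 8 per device x 4 GPUs x 2 accumulation = 64 total
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=2,
    lr_scheduler_type="inverse_sqrt",
    warmup_steps=100,
    num_train_epochs=1,
    seed=42,
    bf16=True,
)

trainer = ORPOTrainer(model=model, args=args, train_dataset=train_dataset, tokenizer=tokenizer)
trainer.train()
```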
all_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 0.9994756161510225,
+   "total_flos": 0.0,
+   "train_loss": 0.56347276506494,
+   "train_runtime": 19079.6454,
+   "train_samples": 61005,
+   "train_samples_per_second": 3.197,
+   "train_steps_per_second": 0.05
+ }
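
These figures are internally consistent with the hyperparameters in the README; a quick arithmetic check (the 953-step count appears in trainer_state.json below):

```python
# Sanity-check the logged throughput and the fractional final epoch.
import math

train_samples = 61005
train_runtime = 19079.6454                 # seconds
per_device_batch, num_gpus, grad_accum = 8, 4, 2

batches_per_epoch = math.ceil(train_samples / (per_device_batch * num_gpus))  # 1907
steps_per_epoch = batches_per_epoch / grad_accum                              # 953.5
global_step = math.floor(steps_per_epoch)                                     # 953 full optimizer steps

print(global_step / steps_per_epoch)            # 0.99947561... = the logged final "epoch"
print(round(train_samples / train_runtime, 3))  # 3.197 samples/s, as logged
print(round(global_step / train_runtime, 2))    # 0.05 steps/s, as logged
```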
config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "_name_or_path": "mistralai/Mistral-7B-v0.1",
+   "architectures": [
+     "MistralForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 14336,
+   "max_position_embeddings": 32768,
+   "model_type": "mistral",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 8,
+   "rms_norm_eps": 1e-05,
+   "rope_theta": 10000.0,
+   "sliding_window": 4096,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.41.0.dev0",
+   "use_cache": false,
+   "vocab_size": 32000
+ }
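
This is the stock Mistral-7B-v0.1 geometry with `use_cache` disabled (a training-time setting); note the grouped-query attention: 32 query heads sharing 8 KV heads. A small sketch of reading these facts back with `AutoConfig`; the repo id is an assumption from the committer and model name, and any local path to these files works too.

```python
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("statking/zephyr-7b-sft-full-orpo")  # assumed repo id
head_dim = cfg.hidden_size // cfg.num_attention_heads        # 4096 / 32 = 128
kv_groups = cfg.num_attention_heads // cfg.num_key_value_heads  # 32 / 8 = 4 query heads per KV head
print(cfg.model_type, head_dim, kv_groups)                   # mistral 128 4
```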
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "transformers_version": "4.41.0.dev0"
+ }
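
The generation config carries only the special-token ids and no sampling defaults, so decoding parameters must be supplied at call time. A hedged usage sketch (repo id assumed as above; the sampling settings are illustrative, not from this repo):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "statking/zephyr-7b-sft-full-orpo"  # assumed repo id
tok = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo, torch_dtype=torch.bfloat16, device_map="auto")

messages = [{"role": "user", "content": "What is ORPO?"}]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tok(prompt, return_tensors="pt").to(model.device)

# No defaults come from generation_config.json, so pass them explicitly.
out = model.generate(**inputs, max_new_tokens=128, do_sample=True, temperature=0.7, top_p=0.95)
print(tok.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```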
model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f8de661845ca4af8b34b8ce2a45722bcc4b2834c916890a9672a109c08517fd6
+ size 4943162336
model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:41d93ae590bd8fedc2ce9c0d3406d28032086aa93efb3b8bd7351d4fc1684cda
+ size 4999819336
model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:327daa62025779f1d2ac5100edbf25a4b7774225203e30efa878921815a519f4
+ size 4540516344
model.safetensors.index.json ADDED
@@ -0,0 +1,298 @@
+ {
+   "metadata": {
+     "total_size": 14483464192
+   },
+   "weight_map": {
+     "lm_head.weight": "model-00003-of-00003.safetensors",
+     "model.embed_tokens.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.30.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.norm.weight": "model-00003-of-00003.safetensors"
+   }
+ }
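
At 2 bytes per bfloat16 parameter, the 14,483,464,192-byte total corresponds to roughly 7.24B parameters split across the three shards. A sketch of how a loader consumes this index (run next to the downloaded files):

```python
# The index maps each tensor name to the shard that stores it, so a loader
# only has to open the file(s) it actually needs.
import json
from collections import Counter

with open("model.safetensors.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"] / 2**30)   # ~13.5 GiB of bf16 weights
print(index["weight_map"]["lm_head.weight"])     # model-00003-of-00003.safetensors

# How the 291 tensor entries split across the 3 shard files.
print(Counter(index["weight_map"].values()))
```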
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "</s>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+ size 493443
tokenizer_config.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "bos_token": "<s>",
+   "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "model_max_length": 2048,
+   "pad_token": "</s>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
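
The chat template above is the Zephyr format: each turn is wrapped in a `<|system|>`/`<|user|>`/`<|assistant|>` header line and terminated with `</s>`, with the pad token reusing `</s>` and `model_max_length` set to 2048. A small sketch of rendering it (repo id assumed; a local directory containing these tokenizer files works too):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("statking/zephyr-7b-sft-full-orpo")  # assumed repo id
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
print(tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True))
# <|system|>
# You are a helpful assistant.</s>
# <|user|>
# Hello!</s>
# <|assistant|>
```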
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 0.9994756161510225,
+   "total_flos": 0.0,
+   "train_loss": 0.56347276506494,
+   "train_runtime": 19079.6454,
+   "train_samples": 61005,
+   "train_samples_per_second": 3.197,
+   "train_steps_per_second": 0.05
+ }
trainer_state.json ADDED
@@ -0,0 +1,1752 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.9994756161510225,
+   "eval_steps": 500,
+   "global_step": 953,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.01048767697954903,
+       "grad_norm": 12.504458138350461,
+       "learning_rate": 2.0000000000000003e-06,
+       "log_odds_chosen": 0.1660214066505432,
+       "log_odds_ratio": -0.6960338354110718,
+       "logits/chosen": -2.542905330657959,
+       "logits/rejected": -2.5316882133483887,
+       "logps/chosen": -0.9998037219047546,
+       "logps/rejected": -1.0999689102172852,
+       "loss": 2.7433,
+       "nll_loss": 2.6550583839416504,
+       "rewards/accuracies": 0.550000011920929,
+       "rewards/chosen": -0.04999018833041191,
+       "rewards/margins": 0.005008256994187832,
+       "rewards/rejected": -0.05499844625592232,
+       "step": 10
+     },
+     {
+       "epoch": 0.02097535395909806,
+       "grad_norm": 3.296398746092505,
+       "learning_rate": 4.000000000000001e-06,
+       "log_odds_chosen": 0.1942831575870514,
+       "log_odds_ratio": -0.6660380959510803,
+       "logits/chosen": -3.148456335067749,
+       "logits/rejected": -3.171660900115967,
+       "logps/chosen": -0.7626909613609314,
+       "logps/rejected": -0.8731427192687988,
+       "loss": 0.563,
+       "nll_loss": 0.5225270986557007,
+       "rewards/accuracies": 0.6000000238418579,
+       "rewards/chosen": -0.03813454881310463,
+       "rewards/margins": 0.00552258500829339,
+       "rewards/rejected": -0.04365713149309158,
+       "step": 20
+     },
+     {
+       "epoch": 0.03146303093864709,
+       "grad_norm": 2.4400188978085695,
+       "learning_rate": 6e-06,
+       "log_odds_chosen": 0.2339784801006317,
+       "log_odds_ratio": -0.6537522673606873,
+       "logits/chosen": -2.9630327224731445,
+       "logits/rejected": -2.9368481636047363,
+       "logps/chosen": -0.8345462679862976,
+       "logps/rejected": -0.9655241966247559,
+       "loss": 0.5355,
+       "nll_loss": 0.4940575659275055,
+       "rewards/accuracies": 0.6000000238418579,
+       "rewards/chosen": -0.04172731190919876,
+       "rewards/margins": 0.0065488978289067745,
+       "rewards/rejected": -0.04827621206641197,
+       "step": 30
+     },
+     {
+       "epoch": 0.04195070791819612,
+       "grad_norm": 2.765802378357493,
+       "learning_rate": 8.000000000000001e-06,
+       "log_odds_chosen": 0.15870003402233124,
+       "log_odds_ratio": -0.6969180107116699,
+       "logits/chosen": -2.8065195083618164,
+       "logits/rejected": -2.7910008430480957,
+       "logps/chosen": -0.8027766346931458,
+       "logps/rejected": -0.9165509343147278,
+       "loss": 0.5199,
+       "nll_loss": 0.48035889863967896,
+       "rewards/accuracies": 0.5625,
+       "rewards/chosen": -0.04013883322477341,
+       "rewards/margins": 0.005688714794814587,
+       "rewards/rejected": -0.04582754150032997,
+       "step": 40
+     },
+     {
+       "epoch": 0.05243838489774515,
+       "grad_norm": 2.7404814506796704,
+       "learning_rate": 1e-05,
+       "log_odds_chosen": 0.24872338771820068,
+       "log_odds_ratio": -0.680080771446228,
+       "logits/chosen": -2.7704856395721436,
+       "logits/rejected": -2.77298641204834,
+       "logps/chosen": -0.7987793684005737,
+       "logps/rejected": -0.9668463468551636,
+       "loss": 0.5424,
+       "nll_loss": 0.48421746492385864,
+       "rewards/accuracies": 0.59375,
+       "rewards/chosen": -0.03993896767497063,
+       "rewards/margins": 0.00840335339307785,
+       "rewards/rejected": -0.048342324793338776,
+       "step": 50
+     },
+     {
+       "epoch": 0.06292606187729417,
+       "grad_norm": 2.7601739927853473,
+       "learning_rate": 1.2e-05,
+       "log_odds_chosen": 0.21160352230072021,
+       "log_odds_ratio": -0.6764382123947144,
+       "logits/chosen": -3.0032615661621094,
+       "logits/rejected": -2.9960169792175293,
+       "logps/chosen": -0.7965995669364929,
+       "logps/rejected": -0.917363166809082,
+       "loss": 0.5463,
+       "nll_loss": 0.516124427318573,
+       "rewards/accuracies": 0.6000000238418579,
+       "rewards/chosen": -0.039829984307289124,
+       "rewards/margins": 0.006038171239197254,
+       "rewards/rejected": -0.045868150889873505,
+       "step": 60
+     },
+     {
+       "epoch": 0.07341373885684321,
+       "grad_norm": 3.2123267767300128,
+       "learning_rate": 1.4e-05,
+       "log_odds_chosen": 0.19886036217212677,
+       "log_odds_ratio": -0.690485417842865,
+       "logits/chosen": -2.978163719177246,
+       "logits/rejected": -3.0078656673431396,
+       "logps/chosen": -0.8206535577774048,
+       "logps/rejected": -0.9310994148254395,
+       "loss": 0.5403,
+       "nll_loss": 0.530234694480896,
+       "rewards/accuracies": 0.574999988079071,
+       "rewards/chosen": -0.04103267565369606,
+       "rewards/margins": 0.0055222949013113976,
+       "rewards/rejected": -0.046554967761039734,
+       "step": 70
+     },
+     {
+       "epoch": 0.08390141583639224,
+       "grad_norm": 3.267750524500123,
+       "learning_rate": 1.6000000000000003e-05,
+       "log_odds_chosen": 0.1725669652223587,
+       "log_odds_ratio": -0.689757764339447,
+       "logits/chosen": -2.963442087173462,
+       "logits/rejected": -2.953914165496826,
+       "logps/chosen": -0.8903671503067017,
+       "logps/rejected": -1.0184500217437744,
+       "loss": 0.5632,
+       "nll_loss": 0.48384732007980347,
+       "rewards/accuracies": 0.518750011920929,
+       "rewards/chosen": -0.0445183590054512,
+       "rewards/margins": 0.006404136773198843,
+       "rewards/rejected": -0.050922494381666183,
+       "step": 80
+     },
+     {
+       "epoch": 0.09438909281594127,
+       "grad_norm": 6.338896835312273,
+       "learning_rate": 1.8e-05,
+       "log_odds_chosen": 0.2590278387069702,
+       "log_odds_ratio": -0.6696828603744507,
+       "logits/chosen": -2.7556283473968506,
+       "logits/rejected": -2.759223461151123,
+       "logps/chosen": -0.8806008100509644,
+       "logps/rejected": -1.0427037477493286,
+       "loss": 0.5599,
+       "nll_loss": 0.49117976427078247,
+       "rewards/accuracies": 0.59375,
+       "rewards/chosen": -0.044030044227838516,
+       "rewards/margins": 0.008105142042040825,
+       "rewards/rejected": -0.05213518068194389,
+       "step": 90
+     },
+     {
+       "epoch": 0.1048767697954903,
+       "grad_norm": 2.844482964932932,
+       "learning_rate": 2e-05,
+       "log_odds_chosen": 0.20001336932182312,
+       "log_odds_ratio": -0.6672823429107666,
+       "logits/chosen": -2.836613178253174,
+       "logits/rejected": -2.826347827911377,
+       "logps/chosen": -0.8816211819648743,
+       "logps/rejected": -1.0050264596939087,
+       "loss": 0.5675,
+       "nll_loss": 0.5239149332046509,
+       "rewards/accuracies": 0.574999988079071,
+       "rewards/chosen": -0.044081058353185654,
+       "rewards/margins": 0.006170268170535564,
+       "rewards/rejected": -0.05025132745504379,
+       "step": 100
+     },
+     {
+       "epoch": 0.11536444677503933,
+       "grad_norm": 2.717573270122186,
+       "learning_rate": 1.9069251784911845e-05,
+       "log_odds_chosen": 0.26770642399787903,
+       "log_odds_ratio": -0.6399692296981812,
+       "logits/chosen": -2.8041529655456543,
+       "logits/rejected": -2.828374147415161,
+       "logps/chosen": -0.8482567071914673,
+       "logps/rejected": -1.021328330039978,
+       "loss": 0.568,
+       "nll_loss": 0.5094035863876343,
+       "rewards/accuracies": 0.625,
+       "rewards/chosen": -0.042412832379341125,
+       "rewards/margins": 0.008653589524328709,
+       "rewards/rejected": -0.05106641724705696,
+       "step": 110
+     },
+     {
+       "epoch": 0.12585212375458835,
+       "grad_norm": 2.3522582585650906,
+       "learning_rate": 1.825741858350554e-05,
+       "log_odds_chosen": 0.2770318388938904,
+       "log_odds_ratio": -0.6538770198822021,
+       "logits/chosen": -2.9046432971954346,
+       "logits/rejected": -2.921250343322754,
+       "logps/chosen": -0.8698671460151672,
+       "logps/rejected": -1.0593181848526,
+       "loss": 0.6048,
+       "nll_loss": 0.5620476007461548,
+       "rewards/accuracies": 0.6312500238418579,
+       "rewards/chosen": -0.0434933602809906,
+       "rewards/margins": 0.009472550824284554,
+       "rewards/rejected": -0.05296590179204941,
+       "step": 120
+     },
+     {
+       "epoch": 0.1363398007341374,
+       "grad_norm": 2.3512564845307704,
+       "learning_rate": 1.7541160386140587e-05,
+       "log_odds_chosen": 0.213302880525589,
+       "log_odds_ratio": -0.6861675977706909,
+       "logits/chosen": -2.926781177520752,
+       "logits/rejected": -2.930361747741699,
+       "logps/chosen": -0.9192083477973938,
+       "logps/rejected": -1.06519615650177,
+       "loss": 0.5923,
+       "nll_loss": 0.5574383735656738,
+       "rewards/accuracies": 0.5625,
+       "rewards/chosen": -0.04596042260527611,
+       "rewards/margins": 0.007299385964870453,
+       "rewards/rejected": -0.05325980857014656,
+       "step": 130
+     },
+     {
+       "epoch": 0.14682747771368643,
+       "grad_norm": 2.2489368047485705,
+       "learning_rate": 1.6903085094570334e-05,
+       "log_odds_chosen": 0.24789170920848846,
+       "log_odds_ratio": -0.655090868473053,
+       "logits/chosen": -2.9084389209747314,
+       "logits/rejected": -2.9173099994659424,
+       "logps/chosen": -0.9441210031509399,
+       "logps/rejected": -1.1045926809310913,
+       "loss": 0.5882,
+       "nll_loss": 0.5544429421424866,
+       "rewards/accuracies": 0.574999988079071,
+       "rewards/chosen": -0.047206051647663116,
+       "rewards/margins": 0.008023588918149471,
+       "rewards/rejected": -0.05522964149713516,
+       "step": 140
+     },
+     {
+       "epoch": 0.15731515469323545,
+       "grad_norm": 2.6715309670512903,
+       "learning_rate": 1.6329931618554523e-05,
+       "log_odds_chosen": 0.14654028415679932,
+       "log_odds_ratio": -0.7416929006576538,
+       "logits/chosen": -2.8286139965057373,
+       "logits/rejected": -2.842860698699951,
+       "logps/chosen": -0.9699670672416687,
+       "logps/rejected": -1.0669214725494385,
+       "loss": 0.5441,
+       "nll_loss": 0.5359360575675964,
+       "rewards/accuracies": 0.6000000238418579,
+       "rewards/chosen": -0.048498354852199554,
+       "rewards/margins": 0.004847715608775616,
+       "rewards/rejected": -0.053346067667007446,
+       "step": 150
+     },
+     {
+       "epoch": 0.16780283167278448,
+       "grad_norm": 2.4917874181934616,
+       "learning_rate": 1.5811388300841898e-05,
+       "log_odds_chosen": 0.19475655257701874,
+       "log_odds_ratio": -0.664051353931427,
+       "logits/chosen": -2.8252522945404053,
+       "logits/rejected": -2.839994192123413,
+       "logps/chosen": -0.9179447889328003,
+       "logps/rejected": -1.0352815389633179,
+       "loss": 0.6078,
+       "nll_loss": 0.5540346503257751,
+       "rewards/accuracies": 0.6312500238418579,
+       "rewards/chosen": -0.045897237956523895,
+       "rewards/margins": 0.005866840481758118,
+       "rewards/rejected": -0.05176408216357231,
+       "step": 160
+     },
+     {
+       "epoch": 0.1782905086523335,
+       "grad_norm": 2.493896039254152,
+       "learning_rate": 1.533929977694741e-05,
+       "log_odds_chosen": 0.25445470213890076,
+       "log_odds_ratio": -0.6574397087097168,
+       "logits/chosen": -2.895998477935791,
+       "logits/rejected": -2.9125123023986816,
+       "logps/chosen": -0.8917832374572754,
+       "logps/rejected": -1.0586717128753662,
+       "loss": 0.5884,
+       "nll_loss": 0.5544494986534119,
+       "rewards/accuracies": 0.581250011920929,
+       "rewards/chosen": -0.04458915814757347,
+       "rewards/margins": 0.008344428613781929,
+       "rewards/rejected": -0.05293358489871025,
+       "step": 170
+     },
+     {
+       "epoch": 0.18877818563188253,
+       "grad_norm": 2.368451448201635,
+       "learning_rate": 1.49071198499986e-05,
+       "log_odds_chosen": 0.2552924156188965,
+       "log_odds_ratio": -0.6543556451797485,
+       "logits/chosen": -2.8886399269104004,
+       "logits/rejected": -2.905686378479004,
+       "logps/chosen": -0.9206914901733398,
+       "logps/rejected": -1.091048240661621,
+       "loss": 0.5686,
+       "nll_loss": 0.551173985004425,
+       "rewards/accuracies": 0.606249988079071,
+       "rewards/chosen": -0.04603457450866699,
+       "rewards/margins": 0.008517834357917309,
+       "rewards/rejected": -0.054552413523197174,
+       "step": 180
+     },
+     {
+       "epoch": 0.19926586261143156,
+       "grad_norm": 4.734046585912702,
+       "learning_rate": 1.4509525002200235e-05,
+       "log_odds_chosen": 0.21173310279846191,
+       "log_odds_ratio": -0.6579927206039429,
+       "logits/chosen": -2.9355111122131348,
+       "logits/rejected": -2.952430009841919,
+       "logps/chosen": -0.9388859868049622,
+       "logps/rejected": -1.0733187198638916,
+       "loss": 0.5936,
+       "nll_loss": 0.6142745018005371,
+       "rewards/accuracies": 0.606249988079071,
+       "rewards/chosen": -0.04694430157542229,
+       "rewards/margins": 0.006721635349094868,
+       "rewards/rejected": -0.05366594344377518,
+       "step": 190
+     },
+     {
+       "epoch": 0.2097535395909806,
+       "grad_norm": 2.2391424397427073,
+       "learning_rate": 1.4142135623730951e-05,
+       "log_odds_chosen": 0.28418153524398804,
+       "log_odds_ratio": -0.6668760180473328,
+       "logits/chosen": -2.873599052429199,
+       "logits/rejected": -2.9066414833068848,
+       "logps/chosen": -0.9204713702201843,
+       "logps/rejected": -1.128112554550171,
+       "loss": 0.5689,
+       "nll_loss": 0.5723541975021362,
+       "rewards/accuracies": 0.581250011920929,
+       "rewards/chosen": -0.046023570001125336,
+       "rewards/margins": 0.010382059030234814,
+       "rewards/rejected": -0.056405626237392426,
+       "step": 200
+     },
+     {
+       "epoch": 0.22024121657052964,
+       "grad_norm": 2.1684330770876152,
+       "learning_rate": 1.3801311186847084e-05,
+       "log_odds_chosen": 0.11919783055782318,
+       "log_odds_ratio": -0.7173447012901306,
+       "logits/chosen": -2.884079933166504,
+       "logits/rejected": -2.8981668949127197,
+       "logps/chosen": -0.8726099729537964,
+       "logps/rejected": -0.9488958120346069,
+       "loss": 0.5693,
+       "nll_loss": 0.5325449109077454,
+       "rewards/accuracies": 0.606249988079071,
+       "rewards/chosen": -0.04363049939274788,
+       "rewards/margins": 0.0038142912089824677,
+       "rewards/rejected": -0.04744479060173035,
+       "step": 210
+     },
+     {
+       "epoch": 0.23072889355007867,
+       "grad_norm": 2.510753834710904,
+       "learning_rate": 1.3483997249264842e-05,
+       "log_odds_chosen": 0.18100012838840485,
+       "log_odds_ratio": -0.7047401666641235,
+       "logits/chosen": -2.8885810375213623,
+       "logits/rejected": -2.8980116844177246,
+       "logps/chosen": -0.8880792856216431,
+       "logps/rejected": -1.0071966648101807,
+       "loss": 0.5589,
+       "nll_loss": 0.5211626291275024,
+       "rewards/accuracies": 0.5874999761581421,
+       "rewards/chosen": -0.044403962790966034,
+       "rewards/margins": 0.005955878179520369,
+       "rewards/rejected": -0.05035984516143799,
+       "step": 220
+     },
+     {
+       "epoch": 0.2412165705296277,
+       "grad_norm": 2.0148191421861705,
+       "learning_rate": 1.3187609467915744e-05,
+       "log_odds_chosen": 0.2717307209968567,
+       "log_odds_ratio": -0.6763201951980591,
+       "logits/chosen": -2.829516887664795,
+       "logits/rejected": -2.842909574508667,
+       "logps/chosen": -0.9367680549621582,
+       "logps/rejected": -1.1125657558441162,
+       "loss": 0.5701,
+       "nll_loss": 0.5263533592224121,
+       "rewards/accuracies": 0.5625,
+       "rewards/chosen": -0.04683841019868851,
+       "rewards/margins": 0.008789879269897938,
+       "rewards/rejected": -0.05562828853726387,
+       "step": 230
+     },
+     {
+       "epoch": 0.2517042475091767,
+       "grad_norm": 2.286828850039024,
+       "learning_rate": 1.2909944487358057e-05,
+       "log_odds_chosen": 0.2564060091972351,
+       "log_odds_ratio": -0.651031494140625,
+       "logits/chosen": -2.979280471801758,
+       "logits/rejected": -3.0063037872314453,
+       "logps/chosen": -0.9010913968086243,
+       "logps/rejected": -1.065353512763977,
+       "loss": 0.5799,
+       "nll_loss": 0.5546143054962158,
+       "rewards/accuracies": 0.581250011920929,
+       "rewards/chosen": -0.04505457356572151,
+       "rewards/margins": 0.008213100023567677,
+       "rewards/rejected": -0.053267668932676315,
+       "step": 240
+     },
+     {
+       "epoch": 0.26219192448872575,
+       "grad_norm": 3.959216899336302,
+       "learning_rate": 1.2649110640673518e-05,
+       "log_odds_chosen": 0.2661912143230438,
+       "log_odds_ratio": -0.6746715307235718,
+       "logits/chosen": -2.9726908206939697,
+       "logits/rejected": -2.974113941192627,
+       "logps/chosen": -0.8829942941665649,
+       "logps/rejected": -1.0264866352081299,
+       "loss": 0.5502,
+       "nll_loss": 0.5201153755187988,
+       "rewards/accuracies": 0.5874999761581421,
+       "rewards/chosen": -0.04414971172809601,
+       "rewards/margins": 0.007174622267484665,
+       "rewards/rejected": -0.05132433772087097,
+       "step": 250
+     },
+     {
+       "epoch": 0.2726796014682748,
+       "grad_norm": 2.2699181039817,
+       "learning_rate": 1.2403473458920845e-05,
+       "log_odds_chosen": 0.2342940866947174,
+       "log_odds_ratio": -0.6783974766731262,
+       "logits/chosen": -2.9759726524353027,
+       "logits/rejected": -2.9923360347747803,
+       "logps/chosen": -0.9042210578918457,
+       "logps/rejected": -1.0481539964675903,
+       "loss": 0.5304,
+       "nll_loss": 0.45657747983932495,
+       "rewards/accuracies": 0.6312500238418579,
+       "rewards/chosen": -0.0452110581099987,
+       "rewards/margins": 0.007196647580713034,
+       "rewards/rejected": -0.052407700568437576,
+       "step": 260
+     },
+     {
+       "epoch": 0.2831672784478238,
+       "grad_norm": 2.380998150273162,
+       "learning_rate": 1.2171612389003691e-05,
+       "log_odds_chosen": 0.17961958050727844,
+       "log_odds_ratio": -0.6983593702316284,
+       "logits/chosen": -2.938765525817871,
+       "logits/rejected": -2.965757369995117,
+       "logps/chosen": -0.9548166990280151,
+       "logps/rejected": -1.0895111560821533,
+       "loss": 0.5673,
+       "nll_loss": 0.5430372357368469,
+       "rewards/accuracies": 0.4937500059604645,
+       "rewards/chosen": -0.04774082824587822,
+       "rewards/margins": 0.006734730210155249,
+       "rewards/rejected": -0.0544755645096302,
+       "step": 270
+     },
+     {
+       "epoch": 0.29365495542737285,
+       "grad_norm": 2.0870887262121323,
+       "learning_rate": 1.1952286093343936e-05,
+       "log_odds_chosen": 0.2291949987411499,
+       "log_odds_ratio": -0.6750219464302063,
+       "logits/chosen": -2.928527355194092,
+       "logits/rejected": -2.9543163776397705,
+       "logps/chosen": -0.9355181455612183,
+       "logps/rejected": -1.0729036331176758,
+       "loss": 0.5434,
+       "nll_loss": 0.47713321447372437,
+       "rewards/accuracies": 0.65625,
+       "rewards/chosen": -0.04677591472864151,
+       "rewards/margins": 0.006869266740977764,
+       "rewards/rejected": -0.05364518240094185,
+       "step": 280
+     },
+     {
+       "epoch": 0.30414263240692185,
+       "grad_norm": 2.661552133228645,
+       "learning_rate": 1.1744404390294071e-05,
+       "log_odds_chosen": 0.36491650342941284,
+       "log_odds_ratio": -0.620793879032135,
+       "logits/chosen": -2.880122661590576,
+       "logits/rejected": -2.8935391902923584,
+       "logps/chosen": -0.836012065410614,
+       "logps/rejected": -1.05286705493927,
+       "loss": 0.5596,
+       "nll_loss": 0.4885989725589752,
+       "rewards/accuracies": 0.6000000238418579,
+       "rewards/chosen": -0.0418006032705307,
+       "rewards/margins": 0.010842744261026382,
+       "rewards/rejected": -0.05264334753155708,
+       "step": 290
+     },
+     {
+       "epoch": 0.3146303093864709,
+       "grad_norm": 3.127285518362044,
+       "learning_rate": 1.1547005383792517e-05,
+       "log_odds_chosen": 0.255328893661499,
+       "log_odds_ratio": -0.6939107179641724,
+       "logits/chosen": -2.9603378772735596,
+       "logits/rejected": -2.992128372192383,
+       "logps/chosen": -0.8731514811515808,
+       "logps/rejected": -1.0526010990142822,
+       "loss": 0.5835,
+       "nll_loss": 0.5112031102180481,
+       "rewards/accuracies": 0.5625,
+       "rewards/chosen": -0.0436575748026371,
+       "rewards/margins": 0.008972481824457645,
+       "rewards/rejected": -0.052630048245191574,
+       "step": 300
+     },
+     {
+       "epoch": 0.3251179863660199,
+       "grad_norm": 2.013637214040506,
+       "learning_rate": 1.1359236684941297e-05,
+       "log_odds_chosen": 0.21040907502174377,
+       "log_odds_ratio": -0.688109278678894,
+       "logits/chosen": -2.9860305786132812,
+       "logits/rejected": -2.9820261001586914,
+       "logps/chosen": -0.9089478254318237,
+       "logps/rejected": -1.0382112264633179,
+       "loss": 0.585,
+       "nll_loss": 0.5399721264839172,
+       "rewards/accuracies": 0.6000000238418579,
+       "rewards/chosen": -0.045447397977113724,
+       "rewards/margins": 0.006463165394961834,
+       "rewards/rejected": -0.051910560578107834,
+       "step": 310
+     },
+     {
+       "epoch": 0.33560566334556896,
+       "grad_norm": 2.1577553752792995,
+       "learning_rate": 1.118033988749895e-05,
+       "log_odds_chosen": 0.27985960245132446,
+       "log_odds_ratio": -0.6601210832595825,
+       "logits/chosen": -3.0387003421783447,
+       "logits/rejected": -3.0464096069335938,
+       "logps/chosen": -0.9086373448371887,
+       "logps/rejected": -1.0836986303329468,
+       "loss": 0.5243,
+       "nll_loss": 0.4922841191291809,
+       "rewards/accuracies": 0.550000011920929,
+       "rewards/chosen": -0.045431867241859436,
+       "rewards/margins": 0.008753069676458836,
+       "rewards/rejected": -0.0541849359869957,
+       "step": 320
+     },
+     {
+       "epoch": 0.34609334032511796,
+       "grad_norm": 2.422690319169778,
+       "learning_rate": 1.1009637651263608e-05,
+       "log_odds_chosen": 0.28255337476730347,
+       "log_odds_ratio": -0.6909259557723999,
+       "logits/chosen": -2.950887441635132,
+       "logits/rejected": -2.9948947429656982,
+       "logps/chosen": -0.9054603576660156,
+       "logps/rejected": -1.0888211727142334,
+       "loss": 0.5544,
+       "nll_loss": 0.5376341342926025,
+       "rewards/accuracies": 0.581250011920929,
+       "rewards/chosen": -0.04527302458882332,
+       "rewards/margins": 0.009168041869997978,
+       "rewards/rejected": -0.05444106459617615,
+       "step": 330
+     },
+     {
+       "epoch": 0.356581017304667,
+       "grad_norm": 2.2975046406882798,
+       "learning_rate": 1.0846522890932809e-05,
+       "log_odds_chosen": 0.2153971642255783,
+       "log_odds_ratio": -0.6926898956298828,
+       "logits/chosen": -2.9686572551727295,
+       "logits/rejected": -3.0199432373046875,
+       "logps/chosen": -0.8590608835220337,
+       "logps/rejected": -1.00636887550354,
+       "loss": 0.5708,
+       "nll_loss": 0.5127817392349243,
+       "rewards/accuracies": 0.543749988079071,
+       "rewards/chosen": -0.042953044176101685,
+       "rewards/margins": 0.007365405559539795,
+       "rewards/rejected": -0.05031844973564148,
+       "step": 340
+     },
+     {
+       "epoch": 0.36706869428421607,
+       "grad_norm": 2.135727653321979,
+       "learning_rate": 1.0690449676496977e-05,
+       "log_odds_chosen": 0.2665565609931946,
+       "log_odds_ratio": -0.6829238533973694,
+       "logits/chosen": -3.044860363006592,
+       "logits/rejected": -3.0616378784179688,
+       "logps/chosen": -0.8791500329971313,
+       "logps/rejected": -1.0402672290802002,
+       "loss": 0.5495,
+       "nll_loss": 0.5228344202041626,
+       "rewards/accuracies": 0.574999988079071,
+       "rewards/chosen": -0.04395749792456627,
+       "rewards/margins": 0.00805586390197277,
+       "rewards/rejected": -0.05201335996389389,
+       "step": 350
+     },
+     {
+       "epoch": 0.37755637126376507,
+       "grad_norm": 3.150177435714442,
+       "learning_rate": 1.0540925533894598e-05,
+       "log_odds_chosen": 0.4033277928829193,
+       "log_odds_ratio": -0.602225124835968,
+       "logits/chosen": -2.9472672939300537,
+       "logits/rejected": -2.975858211517334,
+       "logps/chosen": -0.8669608235359192,
+       "logps/rejected": -1.110353708267212,
+       "loss": 0.5494,
+       "nll_loss": 0.5087054371833801,
+       "rewards/accuracies": 0.625,
+       "rewards/chosen": -0.0433480478823185,
+       "rewards/margins": 0.01216964516788721,
+       "rewards/rejected": -0.05551769211888313,
+       "step": 360
+     },
+     {
+       "epoch": 0.3880440482433141,
+       "grad_norm": 2.130197231019511,
+       "learning_rate": 1.0397504898200728e-05,
+       "log_odds_chosen": 0.3966829478740692,
+       "log_odds_ratio": -0.6142522096633911,
+       "logits/chosen": -3.0528526306152344,
+       "logits/rejected": -3.0623490810394287,
+       "logps/chosen": -0.8640265464782715,
+       "logps/rejected": -1.1243717670440674,
+       "loss": 0.5232,
+       "nll_loss": 0.5101068615913391,
+       "rewards/accuracies": 0.6000000238418579,
+       "rewards/chosen": -0.043201327323913574,
+       "rewards/margins": 0.013017257675528526,
+       "rewards/rejected": -0.05621858313679695,
+       "step": 370
+     },
+     {
+       "epoch": 0.3985317252228631,
+       "grad_norm": 2.415549044992692,
+       "learning_rate": 1.0259783520851543e-05,
+       "log_odds_chosen": 0.46208301186561584,
+       "log_odds_ratio": -0.5873923301696777,
+       "logits/chosen": -3.055903196334839,
+       "logits/rejected": -3.089763879776001,
+       "logps/chosen": -0.8685981035232544,
686
+ "logps/rejected": -1.1247217655181885,
687
+ "loss": 0.5376,
688
+ "nll_loss": 0.5167646408081055,
689
+ "rewards/accuracies": 0.668749988079071,
690
+ "rewards/chosen": -0.0434299036860466,
691
+ "rewards/margins": 0.01280617993324995,
692
+ "rewards/rejected": -0.056236088275909424,
693
+ "step": 380
694
+ },
695
+ {
696
+ "epoch": 0.4090194022024122,
697
+ "grad_norm": 2.4197618087673036,
698
+ "learning_rate": 1.0127393670836667e-05,
699
+ "log_odds_chosen": 0.08936772495508194,
700
+ "log_odds_ratio": -0.7186132073402405,
701
+ "logits/chosen": -2.998857021331787,
702
+ "logits/rejected": -3.021352529525757,
703
+ "logps/chosen": -0.9128287434577942,
704
+ "logps/rejected": -0.9754525423049927,
705
+ "loss": 0.5571,
706
+ "nll_loss": 0.5319759845733643,
707
+ "rewards/accuracies": 0.5625,
708
+ "rewards/chosen": -0.04564143717288971,
709
+ "rewards/margins": 0.0031311833299696445,
710
+ "rewards/rejected": -0.048772621899843216,
711
+ "step": 390
712
+ },
713
+ {
714
+ "epoch": 0.4195070791819612,
715
+ "grad_norm": 2.0748995530757424,
716
+ "learning_rate": 1e-05,
717
+ "log_odds_chosen": 0.23965713381767273,
718
+ "log_odds_ratio": -0.6899853348731995,
719
+ "logits/chosen": -2.883575201034546,
720
+ "logits/rejected": -2.908125400543213,
721
+ "logps/chosen": -0.9490350484848022,
722
+ "logps/rejected": -1.1106139421463013,
723
+ "loss": 0.5725,
724
+ "nll_loss": 0.5262094736099243,
725
+ "rewards/accuracies": 0.53125,
726
+ "rewards/chosen": -0.04745175316929817,
727
+ "rewards/margins": 0.00807894580066204,
728
+ "rewards/rejected": -0.05553068965673447,
729
+ "step": 400
730
+ },
731
+ {
732
+ "epoch": 0.4299947561615102,
733
+ "grad_norm": 2.0498490112152026,
734
+ "learning_rate": 9.877295966495898e-06,
735
+ "log_odds_chosen": 0.14244404435157776,
736
+ "log_odds_ratio": -0.7278560996055603,
737
+ "logits/chosen": -2.988100051879883,
738
+ "logits/rejected": -2.9914164543151855,
739
+ "logps/chosen": -0.8709594011306763,
740
+ "logps/rejected": -0.9773006439208984,
741
+ "loss": 0.5455,
742
+ "nll_loss": 0.4832683503627777,
743
+ "rewards/accuracies": 0.5687500238418579,
744
+ "rewards/chosen": -0.04354798048734665,
745
+ "rewards/margins": 0.0053170593455433846,
746
+ "rewards/rejected": -0.04886503517627716,
747
+ "step": 410
748
+ },
749
+ {
750
+ "epoch": 0.4404824331410593,
751
+ "grad_norm": 1.9311064341389872,
752
+ "learning_rate": 9.759000729485331e-06,
753
+ "log_odds_chosen": 0.30063071846961975,
754
+ "log_odds_ratio": -0.643203854560852,
755
+ "logits/chosen": -2.9488558769226074,
756
+ "logits/rejected": -2.9841551780700684,
757
+ "logps/chosen": -0.8707404136657715,
758
+ "logps/rejected": -1.0532442331314087,
759
+ "loss": 0.5355,
760
+ "nll_loss": 0.474843829870224,
761
+ "rewards/accuracies": 0.612500011920929,
762
+ "rewards/chosen": -0.04353701323270798,
763
+ "rewards/margins": 0.009125196374952793,
764
+ "rewards/rejected": -0.05266221612691879,
765
+ "step": 420
766
+ },
767
+ {
768
+ "epoch": 0.4509701101206083,
769
+ "grad_norm": 2.119895291758326,
770
+ "learning_rate": 9.644856443408244e-06,
771
+ "log_odds_chosen": 0.2837393879890442,
772
+ "log_odds_ratio": -0.6551750898361206,
773
+ "logits/chosen": -2.9840757846832275,
774
+ "logits/rejected": -2.9921929836273193,
775
+ "logps/chosen": -0.8468173146247864,
776
+ "logps/rejected": -1.0135347843170166,
777
+ "loss": 0.5557,
778
+ "nll_loss": 0.5443450212478638,
779
+ "rewards/accuracies": 0.6312500238418579,
780
+ "rewards/chosen": -0.04234086349606514,
781
+ "rewards/margins": 0.00833587534725666,
782
+ "rewards/rejected": -0.05067674070596695,
783
+ "step": 430
784
+ },
785
+ {
786
+ "epoch": 0.46145778710015734,
787
+ "grad_norm": 2.095435518308805,
788
+ "learning_rate": 9.534625892455923e-06,
789
+ "log_odds_chosen": 0.2355252504348755,
790
+ "log_odds_ratio": -0.6598283648490906,
791
+ "logits/chosen": -3.0252740383148193,
792
+ "logits/rejected": -3.045849323272705,
793
+ "logps/chosen": -0.8709392547607422,
794
+ "logps/rejected": -1.0179613828659058,
795
+ "loss": 0.5508,
796
+ "nll_loss": 0.5189236998558044,
797
+ "rewards/accuracies": 0.5874999761581421,
798
+ "rewards/chosen": -0.04354696720838547,
799
+ "rewards/margins": 0.0073511130176484585,
800
+ "rewards/rejected": -0.050898075103759766,
801
+ "step": 440
802
+ },
803
+ {
804
+ "epoch": 0.47194546407970633,
805
+ "grad_norm": 1.9017756846669818,
806
+ "learning_rate": 9.428090415820635e-06,
807
+ "log_odds_chosen": 0.34075412154197693,
808
+ "log_odds_ratio": -0.6583858728408813,
809
+ "logits/chosen": -3.0218703746795654,
810
+ "logits/rejected": -3.0481696128845215,
811
+ "logps/chosen": -0.8293315768241882,
812
+ "logps/rejected": -1.047191858291626,
813
+ "loss": 0.5286,
814
+ "nll_loss": 0.4964592456817627,
815
+ "rewards/accuracies": 0.6312500238418579,
816
+ "rewards/chosen": -0.04146658256649971,
817
+ "rewards/margins": 0.010893006809055805,
818
+ "rewards/rejected": -0.05235959216952324,
819
+ "step": 450
820
+ },
821
+ {
822
+ "epoch": 0.4824331410592554,
823
+ "grad_norm": 2.079766146123277,
824
+ "learning_rate": 9.325048082403139e-06,
825
+ "log_odds_chosen": 0.16855968534946442,
826
+ "log_odds_ratio": -0.711928129196167,
827
+ "logits/chosen": -3.0086510181427,
828
+ "logits/rejected": -3.0489156246185303,
829
+ "logps/chosen": -0.9442957043647766,
830
+ "logps/rejected": -1.072997808456421,
831
+ "loss": 0.5326,
832
+ "nll_loss": 0.5338221788406372,
833
+ "rewards/accuracies": 0.543749988079071,
834
+ "rewards/chosen": -0.04721478372812271,
835
+ "rewards/margins": 0.00643510278314352,
836
+ "rewards/rejected": -0.05364988371729851,
837
+ "step": 460
838
+ },
839
+ {
840
+ "epoch": 0.4929208180388044,
841
+ "grad_norm": 2.4868491558153085,
842
+ "learning_rate": 9.225312080288851e-06,
843
+ "log_odds_chosen": 0.23586861789226532,
844
+ "log_odds_ratio": -0.6902174949645996,
845
+ "logits/chosen": -2.986264705657959,
846
+ "logits/rejected": -3.0127644538879395,
847
+ "logps/chosen": -0.8882457613945007,
848
+ "logps/rejected": -1.034985899925232,
849
+ "loss": 0.5413,
850
+ "nll_loss": 0.5090312361717224,
851
+ "rewards/accuracies": 0.5562499761581421,
852
+ "rewards/chosen": -0.044412292540073395,
853
+ "rewards/margins": 0.007337009999901056,
854
+ "rewards/rejected": -0.051749296486377716,
855
+ "step": 470
856
+ },
857
+ {
858
+ "epoch": 0.5034084950183534,
859
+ "grad_norm": 2.0043501739666882,
860
+ "learning_rate": 9.12870929175277e-06,
861
+ "log_odds_chosen": 0.17604230344295502,
862
+ "log_odds_ratio": -0.707550048828125,
863
+ "logits/chosen": -3.088604211807251,
864
+ "logits/rejected": -3.12184476852417,
865
+ "logps/chosen": -0.8456010818481445,
866
+ "logps/rejected": -0.9586717486381531,
867
+ "loss": 0.5178,
868
+ "nll_loss": 0.5126105546951294,
869
+ "rewards/accuracies": 0.5625,
870
+ "rewards/chosen": -0.042280055582523346,
871
+ "rewards/margins": 0.005653535481542349,
872
+ "rewards/rejected": -0.047933585941791534,
873
+ "step": 480
874
+ },
875
+ {
876
+ "epoch": 0.5138961719979025,
877
+ "grad_norm": 1.9415978406566505,
878
+ "learning_rate": 9.035079029052514e-06,
879
+ "log_odds_chosen": 0.22476902604103088,
880
+ "log_odds_ratio": -0.6716736555099487,
881
+ "logits/chosen": -3.003417491912842,
882
+ "logits/rejected": -3.0048608779907227,
883
+ "logps/chosen": -0.9196673631668091,
884
+ "logps/rejected": -1.0358223915100098,
885
+ "loss": 0.5397,
886
+ "nll_loss": 0.5024985671043396,
887
+ "rewards/accuracies": 0.5874999761581421,
888
+ "rewards/chosen": -0.045983362942934036,
889
+ "rewards/margins": 0.005807754583656788,
890
+ "rewards/rejected": -0.051791124045848846,
891
+ "step": 490
892
+ },
893
+ {
894
+ "epoch": 0.5243838489774515,
895
+ "grad_norm": 2.2353701695425423,
896
+ "learning_rate": 8.94427190999916e-06,
897
+ "log_odds_chosen": 0.20684054493904114,
898
+ "log_odds_ratio": -0.698712944984436,
899
+ "logits/chosen": -3.0111751556396484,
900
+ "logits/rejected": -3.0036330223083496,
901
+ "logps/chosen": -0.8826943635940552,
902
+ "logps/rejected": -1.0074814558029175,
903
+ "loss": 0.548,
904
+ "nll_loss": 0.5235316157341003,
905
+ "rewards/accuracies": 0.625,
906
+ "rewards/chosen": -0.0441347137093544,
907
+ "rewards/margins": 0.006239361595362425,
908
+ "rewards/rejected": -0.050374072045087814,
909
+ "step": 500
910
+ },
911
+ {
912
+ "epoch": 0.5348715259570005,
913
+ "grad_norm": 1.742537477144132,
914
+ "learning_rate": 8.856148855400955e-06,
915
+ "log_odds_chosen": 0.3066679835319519,
916
+ "log_odds_ratio": -0.6453306674957275,
917
+ "logits/chosen": -2.9636032581329346,
918
+ "logits/rejected": -2.97407865524292,
919
+ "logps/chosen": -0.8404191136360168,
920
+ "logps/rejected": -1.0267155170440674,
921
+ "loss": 0.5264,
922
+ "nll_loss": 0.5354185104370117,
923
+ "rewards/accuracies": 0.606249988079071,
924
+ "rewards/chosen": -0.04202095791697502,
925
+ "rewards/margins": 0.009314822033047676,
926
+ "rewards/rejected": -0.05133577436208725,
927
+ "step": 510
928
+ },
929
+ {
930
+ "epoch": 0.5453592029365496,
931
+ "grad_norm": 1.6799388590726438,
932
+ "learning_rate": 8.770580193070294e-06,
933
+ "log_odds_chosen": 0.24468369781970978,
934
+ "log_odds_ratio": -0.6710330247879028,
935
+ "logits/chosen": -2.959213972091675,
936
+ "logits/rejected": -2.966728687286377,
937
+ "logps/chosen": -0.9035038948059082,
938
+ "logps/rejected": -1.0690029859542847,
939
+ "loss": 0.5366,
940
+ "nll_loss": 0.47406935691833496,
941
+ "rewards/accuracies": 0.606249988079071,
942
+ "rewards/chosen": -0.04517520219087601,
943
+ "rewards/margins": 0.008274954743683338,
944
+ "rewards/rejected": -0.053450148552656174,
945
+ "step": 520
946
+ },
947
+ {
948
+ "epoch": 0.5558468799160986,
949
+ "grad_norm": 1.8707354612150964,
950
+ "learning_rate": 8.687444855261389e-06,
951
+ "log_odds_chosen": 0.4215427339076996,
952
+ "log_odds_ratio": -0.6489927172660828,
953
+ "logits/chosen": -3.0756938457489014,
954
+ "logits/rejected": -3.0923542976379395,
955
+ "logps/chosen": -0.8253329992294312,
956
+ "logps/rejected": -1.1108949184417725,
957
+ "loss": 0.5365,
958
+ "nll_loss": 0.45042163133621216,
959
+ "rewards/accuracies": 0.53125,
960
+ "rewards/chosen": -0.04126664996147156,
961
+ "rewards/margins": 0.014278100803494453,
962
+ "rewards/rejected": -0.05554475262761116,
963
+ "step": 530
964
+ },
965
+ {
966
+ "epoch": 0.5663345568956476,
967
+ "grad_norm": 1.922705947748225,
968
+ "learning_rate": 8.606629658238705e-06,
969
+ "log_odds_chosen": 0.1879667341709137,
970
+ "log_odds_ratio": -0.6903280019760132,
971
+ "logits/chosen": -2.975130796432495,
972
+ "logits/rejected": -3.0028696060180664,
973
+ "logps/chosen": -0.8695458173751831,
974
+ "logps/rejected": -0.9805169105529785,
975
+ "loss": 0.5535,
976
+ "nll_loss": 0.5275255441665649,
977
+ "rewards/accuracies": 0.574999988079071,
978
+ "rewards/chosen": -0.04347729682922363,
979
+ "rewards/margins": 0.005548550747334957,
980
+ "rewards/rejected": -0.049025844782590866,
981
+ "step": 540
982
+ },
983
+ {
984
+ "epoch": 0.5768222338751966,
985
+ "grad_norm": 1.9089385183272836,
986
+ "learning_rate": 8.528028654224417e-06,
987
+ "log_odds_chosen": 0.42722567915916443,
988
+ "log_odds_ratio": -0.6043616533279419,
989
+ "logits/chosen": -2.9973807334899902,
990
+ "logits/rejected": -3.0049965381622314,
991
+ "logps/chosen": -0.8592002987861633,
992
+ "logps/rejected": -1.1192405223846436,
993
+ "loss": 0.537,
994
+ "nll_loss": 0.5372708439826965,
995
+ "rewards/accuracies": 0.6312500238418579,
996
+ "rewards/chosen": -0.042960021644830704,
997
+ "rewards/margins": 0.013002010062336922,
998
+ "rewards/rejected": -0.05596202611923218,
999
+ "step": 550
1000
+ },
1001
+ {
1002
+ "epoch": 0.5873099108547457,
1003
+ "grad_norm": 1.9519454661958895,
1004
+ "learning_rate": 8.451542547285167e-06,
1005
+ "log_odds_chosen": 0.23686861991882324,
1006
+ "log_odds_ratio": -0.679013192653656,
1007
+ "logits/chosen": -3.0309016704559326,
1008
+ "logits/rejected": -3.0620574951171875,
1009
+ "logps/chosen": -0.8845365643501282,
1010
+ "logps/rejected": -1.0314432382583618,
1011
+ "loss": 0.5215,
1012
+ "nll_loss": 0.5018130540847778,
1013
+ "rewards/accuracies": 0.6000000238418579,
1014
+ "rewards/chosen": -0.04422682151198387,
1015
+ "rewards/margins": 0.0073453388176858425,
1016
+ "rewards/rejected": -0.05157216265797615,
1017
+ "step": 560
1018
+ },
1019
+ {
1020
+ "epoch": 0.5977975878342947,
1021
+ "grad_norm": 1.902474576616517,
1022
+ "learning_rate": 8.37707816583391e-06,
1023
+ "log_odds_chosen": 0.157462477684021,
1024
+ "log_odds_ratio": -0.7165660858154297,
1025
+ "logits/chosen": -2.971592903137207,
1026
+ "logits/rejected": -2.9932913780212402,
1027
+ "logps/chosen": -0.8898121118545532,
1028
+ "logps/rejected": -0.9948716163635254,
1029
+ "loss": 0.5041,
1030
+ "nll_loss": 0.5276492834091187,
1031
+ "rewards/accuracies": 0.5,
1032
+ "rewards/chosen": -0.044490598142147064,
1033
+ "rewards/margins": 0.005252980627119541,
1034
+ "rewards/rejected": -0.04974358528852463,
1035
+ "step": 570
1036
+ },
1037
+ {
1038
+ "epoch": 0.6082852648138437,
1039
+ "grad_norm": 1.9526588876095308,
1040
+ "learning_rate": 8.304547985373997e-06,
1041
+ "log_odds_chosen": 0.27767136693000793,
1042
+ "log_odds_ratio": -0.6578360199928284,
1043
+ "logits/chosen": -3.0485613346099854,
1044
+ "logits/rejected": -3.061281204223633,
1045
+ "logps/chosen": -0.8733240962028503,
1046
+ "logps/rejected": -1.0594861507415771,
1047
+ "loss": 0.5456,
1048
+ "nll_loss": 0.48286086320877075,
1049
+ "rewards/accuracies": 0.5687500238418579,
1050
+ "rewards/chosen": -0.043666206300258636,
1051
+ "rewards/margins": 0.009308096952736378,
1052
+ "rewards/rejected": -0.05297430604696274,
1053
+ "step": 580
1054
+ },
1055
+ {
1056
+ "epoch": 0.6187729417933928,
1057
+ "grad_norm": 1.963515177379308,
1058
+ "learning_rate": 8.233869695926184e-06,
1059
+ "log_odds_chosen": 0.32016056776046753,
1060
+ "log_odds_ratio": -0.6649240255355835,
1061
+ "logits/chosen": -3.0834898948669434,
1062
+ "logits/rejected": -3.123967409133911,
1063
+ "logps/chosen": -0.8281318545341492,
1064
+ "logps/rejected": -1.021436095237732,
1065
+ "loss": 0.5124,
1066
+ "nll_loss": 0.5498961210250854,
1067
+ "rewards/accuracies": 0.612500011920929,
1068
+ "rewards/chosen": -0.04140659421682358,
1069
+ "rewards/margins": 0.009665210731327534,
1070
+ "rewards/rejected": -0.05107180029153824,
1071
+ "step": 590
1072
+ },
1073
+ {
1074
+ "epoch": 0.6292606187729418,
1075
+ "grad_norm": 2.1416673571833584,
1076
+ "learning_rate": 8.164965809277262e-06,
1077
+ "log_odds_chosen": 0.3141978085041046,
1078
+ "log_odds_ratio": -0.6486893892288208,
1079
+ "logits/chosen": -3.1147074699401855,
1080
+ "logits/rejected": -3.11454176902771,
1081
+ "logps/chosen": -0.8215556144714355,
1082
+ "logps/rejected": -1.009476661682129,
1083
+ "loss": 0.5144,
1084
+ "nll_loss": 0.4836875796318054,
1085
+ "rewards/accuracies": 0.6000000238418579,
1086
+ "rewards/chosen": -0.04107777774333954,
1087
+ "rewards/margins": 0.009396053850650787,
1088
+ "rewards/rejected": -0.05047383904457092,
1089
+ "step": 600
1090
+ },
1091
+ {
1092
+ "epoch": 0.6397482957524908,
1093
+ "grad_norm": 2.03894912155955,
1094
+ "learning_rate": 8.097763301789162e-06,
1095
+ "log_odds_chosen": 0.1958848237991333,
1096
+ "log_odds_ratio": -0.6933802366256714,
1097
+ "logits/chosen": -3.016098737716675,
1098
+ "logits/rejected": -3.046642780303955,
1099
+ "logps/chosen": -0.8733209371566772,
1100
+ "logps/rejected": -0.9883171916007996,
1101
+ "loss": 0.526,
1102
+ "nll_loss": 0.4880569875240326,
1103
+ "rewards/accuracies": 0.6000000238418579,
1104
+ "rewards/chosen": -0.0436660535633564,
1105
+ "rewards/margins": 0.005749809555709362,
1106
+ "rewards/rejected": -0.049415864050388336,
1107
+ "step": 610
1108
+ },
1109
+ {
1110
+ "epoch": 0.6502359727320398,
1111
+ "grad_norm": 2.068974001178546,
1112
+ "learning_rate": 8.03219328902499e-06,
1113
+ "log_odds_chosen": 0.17991718649864197,
1114
+ "log_odds_ratio": -0.7055822610855103,
1115
+ "logits/chosen": -3.045403003692627,
1116
+ "logits/rejected": -3.0644798278808594,
1117
+ "logps/chosen": -0.8806620836257935,
1118
+ "logps/rejected": -1.0145095586776733,
1119
+ "loss": 0.5295,
1120
+ "nll_loss": 0.5151625275611877,
1121
+ "rewards/accuracies": 0.5375000238418579,
1122
+ "rewards/chosen": -0.04403311014175415,
1123
+ "rewards/margins": 0.006692370865494013,
1124
+ "rewards/rejected": -0.05072547867894173,
1125
+ "step": 620
1126
+ },
1127
+ {
1128
+ "epoch": 0.6607236497115889,
1129
+ "grad_norm": 1.9705491215328443,
1130
+ "learning_rate": 7.968190728895958e-06,
1131
+ "log_odds_chosen": 0.23948292434215546,
1132
+ "log_odds_ratio": -0.6947344541549683,
1133
+ "logits/chosen": -3.016519546508789,
1134
+ "logits/rejected": -3.042133331298828,
1135
+ "logps/chosen": -0.8557758331298828,
1136
+ "logps/rejected": -1.0029237270355225,
1137
+ "loss": 0.5331,
1138
+ "nll_loss": 0.5245988368988037,
1139
+ "rewards/accuracies": 0.53125,
1140
+ "rewards/chosen": -0.0427887924015522,
1141
+ "rewards/margins": 0.007357400842010975,
1142
+ "rewards/rejected": -0.0501461923122406,
1143
+ "step": 630
1144
+ },
1145
+ {
1146
+ "epoch": 0.6712113266911379,
1147
+ "grad_norm": 2.664256522681278,
1148
+ "learning_rate": 7.905694150420949e-06,
1149
+ "log_odds_chosen": 0.3717094659805298,
1150
+ "log_odds_ratio": -0.6480633020401001,
1151
+ "logits/chosen": -3.0543761253356934,
1152
+ "logits/rejected": -3.0751733779907227,
1153
+ "logps/chosen": -0.8645519018173218,
1154
+ "logps/rejected": -1.102386713027954,
1155
+ "loss": 0.5149,
1156
+ "nll_loss": 0.46133953332901,
1157
+ "rewards/accuracies": 0.574999988079071,
1158
+ "rewards/chosen": -0.04322759807109833,
1159
+ "rewards/margins": 0.011891739442944527,
1160
+ "rewards/rejected": -0.0551193431019783,
1161
+ "step": 640
1162
+ },
1163
+ {
1164
+ "epoch": 0.6816990036706869,
1165
+ "grad_norm": 1.878621524799117,
1166
+ "learning_rate": 7.844645405527363e-06,
1167
+ "log_odds_chosen": 0.1861819326877594,
1168
+ "log_odds_ratio": -0.7022497057914734,
1169
+ "logits/chosen": -3.0863146781921387,
1170
+ "logits/rejected": -3.113098621368408,
1171
+ "logps/chosen": -0.8403372764587402,
1172
+ "logps/rejected": -0.9548438191413879,
1173
+ "loss": 0.5336,
1174
+ "nll_loss": 0.5122831463813782,
1175
+ "rewards/accuracies": 0.59375,
1176
+ "rewards/chosen": -0.04201686754822731,
1177
+ "rewards/margins": 0.0057253288105130196,
1178
+ "rewards/rejected": -0.047742195427417755,
1179
+ "step": 650
1180
+ },
1181
+ {
1182
+ "epoch": 0.6921866806502359,
1183
+ "grad_norm": 1.8977100039056058,
1184
+ "learning_rate": 7.78498944161523e-06,
1185
+ "log_odds_chosen": 0.2854728400707245,
1186
+ "log_odds_ratio": -0.6552462577819824,
1187
+ "logits/chosen": -3.052263021469116,
1188
+ "logits/rejected": -3.0898962020874023,
1189
+ "logps/chosen": -0.8826674222946167,
1190
+ "logps/rejected": -1.0711818933486938,
1191
+ "loss": 0.5304,
1192
+ "nll_loss": 0.4874996542930603,
1193
+ "rewards/accuracies": 0.574999988079071,
1194
+ "rewards/chosen": -0.044133372604846954,
1195
+ "rewards/margins": 0.009425725787878036,
1196
+ "rewards/rejected": -0.05355909466743469,
1197
+ "step": 660
1198
+ },
1199
+ {
1200
+ "epoch": 0.702674357629785,
1201
+ "grad_norm": 1.8195731091765575,
1202
+ "learning_rate": 7.726674092862559e-06,
1203
+ "log_odds_chosen": 0.4364054203033447,
1204
+ "log_odds_ratio": -0.6321254968643188,
1205
+ "logits/chosen": -2.9931445121765137,
1206
+ "logits/rejected": -3.025317907333374,
1207
+ "logps/chosen": -0.8416171073913574,
1208
+ "logps/rejected": -1.1292223930358887,
1209
+ "loss": 0.5237,
1210
+ "nll_loss": 0.46936100721359253,
1211
+ "rewards/accuracies": 0.643750011920929,
1212
+ "rewards/chosen": -0.04208085685968399,
1213
+ "rewards/margins": 0.014380265958607197,
1214
+ "rewards/rejected": -0.05646112561225891,
1215
+ "step": 670
1216
+ },
1217
+ {
1218
+ "epoch": 0.713162034609334,
1219
+ "grad_norm": 2.0599075037830192,
1220
+ "learning_rate": 7.669649888473705e-06,
1221
+ "log_odds_chosen": 0.31395241618156433,
1222
+ "log_odds_ratio": -0.650139570236206,
1223
+ "logits/chosen": -2.9855525493621826,
1224
+ "logits/rejected": -2.9897267818450928,
1225
+ "logps/chosen": -0.8750125169754028,
1226
+ "logps/rejected": -1.0669299364089966,
1227
+ "loss": 0.5075,
1228
+ "nll_loss": 0.4943002760410309,
1229
+ "rewards/accuracies": 0.6312500238418579,
1230
+ "rewards/chosen": -0.04375062882900238,
1231
+ "rewards/margins": 0.009595867246389389,
1232
+ "rewards/rejected": -0.05334649235010147,
1233
+ "step": 680
1234
+ },
1235
+ {
1236
+ "epoch": 0.723649711588883,
1237
+ "grad_norm": 1.8347271674417223,
1238
+ "learning_rate": 7.61386987626881e-06,
1239
+ "log_odds_chosen": 0.18291696906089783,
1240
+ "log_odds_ratio": -0.7239105701446533,
1241
+ "logits/chosen": -2.97595477104187,
1242
+ "logits/rejected": -2.991725444793701,
1243
+ "logps/chosen": -0.8641953468322754,
1244
+ "logps/rejected": -0.9991108179092407,
1245
+ "loss": 0.5304,
1246
+ "nll_loss": 0.5499680638313293,
1247
+ "rewards/accuracies": 0.550000011920929,
1248
+ "rewards/chosen": -0.04320976510643959,
1249
+ "rewards/margins": 0.006745772901922464,
1250
+ "rewards/rejected": -0.04995553940534592,
1251
+ "step": 690
1252
+ },
1253
+ {
1254
+ "epoch": 0.7341373885684321,
1255
+ "grad_norm": 2.2852704943230915,
1256
+ "learning_rate": 7.559289460184545e-06,
1257
+ "log_odds_chosen": 0.3105728030204773,
1258
+ "log_odds_ratio": -0.6319602727890015,
1259
+ "logits/chosen": -2.985989809036255,
1260
+ "logits/rejected": -3.0209579467773438,
1261
+ "logps/chosen": -0.8320032358169556,
1262
+ "logps/rejected": -1.0303562879562378,
1263
+ "loss": 0.5296,
1264
+ "nll_loss": 0.5422422885894775,
1265
+ "rewards/accuracies": 0.6187499761581421,
1266
+ "rewards/chosen": -0.04160016402602196,
1267
+ "rewards/margins": 0.009917653165757656,
1268
+ "rewards/rejected": -0.05151782184839249,
1269
+ "step": 700
1270
+ },
1271
+ {
1272
+ "epoch": 0.7446250655479811,
1273
+ "grad_norm": 1.9768197452256755,
1274
+ "learning_rate": 7.505866250408016e-06,
1275
+ "log_odds_chosen": 0.2948063015937805,
1276
+ "log_odds_ratio": -0.6451742649078369,
1277
+ "logits/chosen": -3.1170597076416016,
1278
+ "logits/rejected": -3.136089324951172,
1279
+ "logps/chosen": -0.8415013551712036,
1280
+ "logps/rejected": -1.0454984903335571,
1281
+ "loss": 0.5237,
1282
+ "nll_loss": 0.47949719429016113,
1283
+ "rewards/accuracies": 0.6000000238418579,
1284
+ "rewards/chosen": -0.04207506403326988,
1285
+ "rewards/margins": 0.01019985694438219,
1286
+ "rewards/rejected": -0.0522749237716198,
1287
+ "step": 710
1288
+ },
1289
+ {
1290
+ "epoch": 0.7551127425275301,
1291
+ "grad_norm": 1.905599119477425,
1292
+ "learning_rate": 7.4535599249993e-06,
1293
+ "log_odds_chosen": 0.40306347608566284,
1294
+ "log_odds_ratio": -0.6352882385253906,
1295
+ "logits/chosen": -3.064483642578125,
1296
+ "logits/rejected": -3.087808847427368,
1297
+ "logps/chosen": -0.7972971200942993,
1298
+ "logps/rejected": -1.046507477760315,
1299
+ "loss": 0.5304,
1300
+ "nll_loss": 0.4636651873588562,
1301
+ "rewards/accuracies": 0.625,
1302
+ "rewards/chosen": -0.039864856749773026,
1303
+ "rewards/margins": 0.012460513040423393,
1304
+ "rewards/rejected": -0.05232536792755127,
1305
+ "step": 720
1306
+ },
1307
+ {
1308
+ "epoch": 0.7656004195070791,
1309
+ "grad_norm": 2.19124615484763,
1310
+ "learning_rate": 7.402332101976053e-06,
1311
+ "log_odds_chosen": 0.12367966026067734,
1312
+ "log_odds_ratio": -0.7226089239120483,
1313
+ "logits/chosen": -3.0835583209991455,
1314
+ "logits/rejected": -3.0826332569122314,
1315
+ "logps/chosen": -0.8365408778190613,
1316
+ "logps/rejected": -0.9029885530471802,
1317
+ "loss": 0.5374,
1318
+ "nll_loss": 0.5031268000602722,
1319
+ "rewards/accuracies": 0.5375000238418579,
1320
+ "rewards/chosen": -0.04182704538106918,
1321
+ "rewards/margins": 0.0033223754726350307,
1322
+ "rewards/rejected": -0.04514942690730095,
1323
+ "step": 730
1324
+ },
1325
+ {
1326
+ "epoch": 0.7760880964866282,
1327
+ "grad_norm": 2.0835998895674837,
1328
+ "learning_rate": 7.352146220938079e-06,
1329
+ "log_odds_chosen": 0.33691075444221497,
1330
+ "log_odds_ratio": -0.6264201402664185,
1331
+ "logits/chosen": -3.1278512477874756,
1332
+ "logits/rejected": -3.139995574951172,
1333
+ "logps/chosen": -0.8067742586135864,
1334
+ "logps/rejected": -1.0221493244171143,
1335
+ "loss": 0.5312,
1336
+ "nll_loss": 0.4790155291557312,
1337
+ "rewards/accuracies": 0.668749988079071,
1338
+ "rewards/chosen": -0.040338706225156784,
1339
+ "rewards/margins": 0.010768752545118332,
1340
+ "rewards/rejected": -0.051107458770275116,
1341
+ "step": 740
1342
+ },
1343
+ {
1344
+ "epoch": 0.7865757734661772,
1345
+ "grad_norm": 1.9667031119071154,
1346
+ "learning_rate": 7.3029674334022146e-06,
1347
+ "log_odds_chosen": 0.23670358955860138,
1348
+ "log_odds_ratio": -0.6752098202705383,
1349
+ "logits/chosen": -3.1056113243103027,
1350
+ "logits/rejected": -3.1298460960388184,
1351
+ "logps/chosen": -0.8614869117736816,
1352
+ "logps/rejected": -0.9949930310249329,
1353
+ "loss": 0.5426,
1354
+ "nll_loss": 0.4975660443305969,
1355
+ "rewards/accuracies": 0.6000000238418579,
1356
+ "rewards/chosen": -0.0430743470788002,
1357
+ "rewards/margins": 0.006675302051007748,
1358
+ "rewards/rejected": -0.049749650061130524,
1359
+ "step": 750
1360
+ },
1361
+ {
1362
+ "epoch": 0.7970634504457262,
1363
+ "grad_norm": 1.8638714551633075,
1364
+ "learning_rate": 7.254762501100117e-06,
1365
+ "log_odds_chosen": 0.2394195795059204,
1366
+ "log_odds_ratio": -0.6686865091323853,
1367
+ "logits/chosen": -3.092322826385498,
1368
+ "logits/rejected": -3.0998446941375732,
1369
+ "logps/chosen": -0.8189753293991089,
1370
+ "logps/rejected": -0.9735254049301147,
1371
+ "loss": 0.5115,
1372
+ "nll_loss": 0.4049908220767975,
1373
+ "rewards/accuracies": 0.5687500238418579,
1374
+ "rewards/chosen": -0.0409487709403038,
1375
+ "rewards/margins": 0.007727508433163166,
1376
+ "rewards/rejected": -0.048676274716854095,
1377
+ "step": 760
1378
+ },
1379
+ {
1380
+ "epoch": 0.8075511274252754,
1381
+ "grad_norm": 2.098087236150393,
1382
+ "learning_rate": 7.207499701564472e-06,
1383
+ "log_odds_chosen": 0.21572642028331757,
1384
+ "log_odds_ratio": -0.7029857635498047,
1385
+ "logits/chosen": -3.0059127807617188,
1386
+ "logits/rejected": -3.0258781909942627,
1387
+ "logps/chosen": -0.8941653370857239,
1388
+ "logps/rejected": -1.0438942909240723,
1389
+ "loss": 0.5343,
1390
+ "nll_loss": 0.5011810064315796,
1391
+ "rewards/accuracies": 0.48750001192092896,
1392
+ "rewards/chosen": -0.044708263128995895,
1393
+ "rewards/margins": 0.007486448623239994,
1394
+ "rewards/rejected": -0.05219471454620361,
1395
+ "step": 770
1396
+ },
1397
+ {
1398
+ "epoch": 0.8180388044048243,
1399
+ "grad_norm": 1.908201970451478,
1400
+ "learning_rate": 7.1611487403943295e-06,
1401
+ "log_odds_chosen": 0.22588184475898743,
1402
+ "log_odds_ratio": -0.6703106164932251,
1403
+ "logits/chosen": -3.0057101249694824,
1404
+ "logits/rejected": -3.0319108963012695,
1405
+ "logps/chosen": -0.8802768588066101,
1406
+ "logps/rejected": -0.997613787651062,
1407
+ "loss": 0.5466,
1408
+ "nll_loss": 0.5490036606788635,
1409
+ "rewards/accuracies": 0.612500011920929,
1410
+ "rewards/chosen": -0.0440138503909111,
1411
+ "rewards/margins": 0.005866837687790394,
1412
+ "rewards/rejected": -0.04988069087266922,
1413
+ "step": 780
1414
+ },
1415
+ {
1416
+ "epoch": 0.8285264813843733,
1417
+ "grad_norm": 1.8452821315553456,
1418
+ "learning_rate": 7.115680669648201e-06,
1419
+ "log_odds_chosen": 0.32251420617103577,
1420
+ "log_odds_ratio": -0.6489396691322327,
1421
+ "logits/chosen": -2.991415500640869,
1422
+ "logits/rejected": -3.0075478553771973,
1423
+ "logps/chosen": -0.8143788576126099,
1424
+ "logps/rejected": -1.0171436071395874,
1425
+ "loss": 0.5052,
1426
+ "nll_loss": 0.4423222541809082,
1427
+ "rewards/accuracies": 0.5874999761581421,
1428
+ "rewards/chosen": -0.04071894288063049,
1429
+ "rewards/margins": 0.010138243436813354,
1430
+ "rewards/rejected": -0.05085718631744385,
1431
+ "step": 790
1432
+ },
1433
+ {
1434
+ "epoch": 0.8390141583639223,
1435
+ "grad_norm": 2.099723593564682,
1436
+ "learning_rate": 7.0710678118654756e-06,
1437
+ "log_odds_chosen": 0.4498319625854492,
1438
+ "log_odds_ratio": -0.5986544489860535,
1439
+ "logits/chosen": -2.9999208450317383,
1440
+ "logits/rejected": -2.9963490962982178,
1441
+ "logps/chosen": -0.782555341720581,
1442
+ "logps/rejected": -1.068285584449768,
1443
+ "loss": 0.5173,
1444
+ "nll_loss": 0.4201901853084564,
1445
+ "rewards/accuracies": 0.65625,
1446
+ "rewards/chosen": -0.03912776708602905,
1447
+ "rewards/margins": 0.014286505989730358,
1448
+ "rewards/rejected": -0.053414274007081985,
1449
+ "step": 800
1450
+ },
1451
+ {
1452
+ "epoch": 0.8495018353434715,
1453
+ "grad_norm": 1.9010573028789273,
1454
+ "learning_rate": 7.027283689263066e-06,
1455
+ "log_odds_chosen": 0.34422335028648376,
1456
+ "log_odds_ratio": -0.6322020292282104,
1457
+ "logits/chosen": -3.0011842250823975,
1458
+ "logits/rejected": -2.9966137409210205,
1459
+ "logps/chosen": -0.8086786270141602,
1460
+ "logps/rejected": -1.0155996084213257,
1461
+ "loss": 0.5132,
1462
+ "nll_loss": 0.4740920066833496,
1463
+ "rewards/accuracies": 0.59375,
1464
+ "rewards/chosen": -0.040433935821056366,
1465
+ "rewards/margins": 0.010346042923629284,
1466
+ "rewards/rejected": -0.05077998712658882,
1467
+ "step": 810
1468
+ },
1469
+ {
1470
+ "epoch": 0.8599895123230205,
1471
+ "grad_norm": 2.3144073315770353,
1472
+ "learning_rate": 6.984302957695783e-06,
1473
+ "log_odds_chosen": 0.29515784978866577,
1474
+ "log_odds_ratio": -0.6521409749984741,
1475
+ "logits/chosen": -2.943692445755005,
1476
+ "logits/rejected": -2.9414219856262207,
1477
+ "logps/chosen": -0.8414862751960754,
1478
+ "logps/rejected": -1.0143965482711792,
1479
+ "loss": 0.504,
1480
+ "nll_loss": 0.4271189570426941,
1481
+ "rewards/accuracies": 0.6000000238418579,
1482
+ "rewards/chosen": -0.04207431524991989,
1483
+ "rewards/margins": 0.008645516820251942,
1484
+ "rewards/rejected": -0.05071982741355896,
1485
+ "step": 820
1486
+ },
1487
+ {
1488
+ "epoch": 0.8704771893025695,
1489
+ "grad_norm": 2.371001107698096,
1490
+ "learning_rate": 6.942101345006233e-06,
1491
+ "log_odds_chosen": 0.2455742061138153,
1492
+ "log_odds_ratio": -0.7013689279556274,
1493
+ "logits/chosen": -2.933568239212036,
1494
+ "logits/rejected": -2.977832794189453,
1495
+ "logps/chosen": -0.8553229570388794,
1496
+ "logps/rejected": -1.0332233905792236,
1497
+ "loss": 0.5251,
1498
+ "nll_loss": 0.46586036682128906,
1499
+ "rewards/accuracies": 0.53125,
1500
+ "rewards/chosen": -0.04276614636182785,
1501
+ "rewards/margins": 0.008895025588572025,
1502
+ "rewards/rejected": -0.0516611710190773,
1503
+ "step": 830
1504
+ },
1505
+ {
1506
+ "epoch": 0.8809648662821186,
1507
+ "grad_norm": 1.977587507180873,
1508
+ "learning_rate": 6.900655593423542e-06,
1509
+ "log_odds_chosen": 0.19387319684028625,
1510
+ "log_odds_ratio": -0.6939007639884949,
1511
+ "logits/chosen": -2.9483094215393066,
1512
+ "logits/rejected": -2.966421365737915,
1513
+ "logps/chosen": -0.8696029782295227,
1514
+ "logps/rejected": -1.0034617185592651,
1515
+ "loss": 0.5136,
1516
+ "nll_loss": 0.48451894521713257,
1517
+ "rewards/accuracies": 0.543749988079071,
1518
+ "rewards/chosen": -0.043480150401592255,
1519
+ "rewards/margins": 0.006692938506603241,
1520
+ "rewards/rejected": -0.050173092633485794,
1521
+ "step": 840
1522
+ },
1523
+ {
1524
+ "epoch": 0.8914525432616676,
1525
+ "grad_norm": 2.0931872980265527,
1526
+ "learning_rate": 6.859943405700353e-06,
1527
+ "log_odds_chosen": 0.27469760179519653,
1528
+ "log_odds_ratio": -0.6496983170509338,
1529
+ "logits/chosen": -2.882544994354248,
1530
+ "logits/rejected": -2.907102584838867,
1531
+ "logps/chosen": -0.8309645652770996,
1532
+ "logps/rejected": -0.9983605146408081,
1533
+ "loss": 0.5054,
1534
+ "nll_loss": 0.4892002046108246,
1535
+ "rewards/accuracies": 0.59375,
1536
+ "rewards/chosen": -0.0415482297539711,
1537
+ "rewards/margins": 0.008369805291295052,
1538
+ "rewards/rejected": -0.049918033182621,
1539
+ "step": 850
1540
+ },
1541
+ {
1542
+ "epoch": 0.9019402202412166,
1543
+ "grad_norm": 1.9059523373512675,
1544
+ "learning_rate": 6.819943394704736e-06,
1545
+ "log_odds_chosen": 0.2372780740261078,
1546
+ "log_odds_ratio": -0.6811105012893677,
1547
+ "logits/chosen": -2.9579243659973145,
1548
+ "logits/rejected": -2.9706907272338867,
1549
+ "logps/chosen": -0.8282278180122375,
1550
+ "logps/rejected": -0.982342541217804,
1551
+ "loss": 0.5277,
1552
+ "nll_loss": 0.4725598692893982,
1553
+ "rewards/accuracies": 0.543749988079071,
1554
+ "rewards/chosen": -0.0414113886654377,
1555
+ "rewards/margins": 0.007705743424594402,
1556
+ "rewards/rejected": -0.049117133021354675,
1557
+ "step": 860
1558
+ },
1559
+ {
1560
+ "epoch": 0.9124278972207656,
1561
+ "grad_norm": 1.892543797666968,
1562
+ "learning_rate": 6.780635036208105e-06,
1563
+ "log_odds_chosen": 0.287548691034317,
1564
+ "log_odds_ratio": -0.6644268035888672,
1565
+ "logits/chosen": -3.0049710273742676,
1566
+ "logits/rejected": -3.0431902408599854,
1567
+ "logps/chosen": -0.8620280027389526,
1568
+ "logps/rejected": -1.0551369190216064,
1569
+ "loss": 0.4935,
1570
+ "nll_loss": 0.4828346371650696,
1571
+ "rewards/accuracies": 0.5625,
1572
+ "rewards/chosen": -0.04310140386223793,
1573
+ "rewards/margins": 0.009655444882810116,
1574
+ "rewards/rejected": -0.05275684595108032,
1575
+ "step": 870
1576
+ },
1577
+ {
1578
+ "epoch": 0.9229155742003147,
1579
+ "grad_norm": 1.6128728864363475,
1580
+ "learning_rate": 6.741998624632421e-06,
1581
+ "log_odds_chosen": 0.2844703197479248,
1582
+ "log_odds_ratio": -0.6617631316184998,
1583
+ "logits/chosen": -3.044353723526001,
1584
+ "logits/rejected": -3.0480034351348877,
1585
+ "logps/chosen": -0.808245837688446,
1586
+ "logps/rejected": -0.990073561668396,
1587
+ "loss": 0.4881,
1588
+ "nll_loss": 0.43747878074645996,
1589
+ "rewards/accuracies": 0.606249988079071,
1590
+ "rewards/chosen": -0.0404122956097126,
1591
+ "rewards/margins": 0.009091392159461975,
1592
+ "rewards/rejected": -0.04950368404388428,
1593
+ "step": 880
1594
+ },
1595
+ {
1596
+ "epoch": 0.9334032511798637,
1597
+ "grad_norm": 2.329046484142618,
1598
+ "learning_rate": 6.70401523153991e-06,
1599
+ "log_odds_chosen": 0.32051050662994385,
1600
+ "log_odds_ratio": -0.6461818218231201,
1601
+ "logits/chosen": -3.0071539878845215,
1602
+ "logits/rejected": -3.0232186317443848,
1603
+ "logps/chosen": -0.8105939030647278,
1604
+ "logps/rejected": -0.993729293346405,
1605
+ "loss": 0.4935,
1606
+ "nll_loss": 0.46434158086776733,
1607
+ "rewards/accuracies": 0.5874999761581421,
1608
+ "rewards/chosen": -0.04052969440817833,
1609
+ "rewards/margins": 0.00915677472949028,
1610
+ "rewards/rejected": -0.04968646913766861,
1611
+ "step": 890
1612
+ },
1613
+ {
1614
+ "epoch": 0.9438909281594127,
1615
+ "grad_norm": 2.0086740635642073,
1616
+ "learning_rate": 6.666666666666667e-06,
1617
+ "log_odds_chosen": 0.2798821032047272,
1618
+ "log_odds_ratio": -0.664302408695221,
1619
+ "logits/chosen": -2.9259209632873535,
1620
+ "logits/rejected": -2.9381814002990723,
1621
+ "logps/chosen": -0.7818757891654968,
1622
+ "logps/rejected": -0.9571603536605835,
1623
+ "loss": 0.5239,
1624
+ "nll_loss": 0.4661863446235657,
1625
+ "rewards/accuracies": 0.581250011920929,
1626
+ "rewards/chosen": -0.03909378498792648,
1627
+ "rewards/margins": 0.008764232508838177,
1628
+ "rewards/rejected": -0.04785802215337753,
1629
+ "step": 900
1630
+ },
1631
+ {
1632
+ "epoch": 0.9543786051389617,
1633
+ "grad_norm": 2.068822950454407,
1634
+ "learning_rate": 6.629935441317959e-06,
1635
+ "log_odds_chosen": 0.479647159576416,
1636
+ "log_odds_ratio": -0.6314842700958252,
1637
+ "logits/chosen": -2.974902629852295,
1638
+ "logits/rejected": -2.9787256717681885,
1639
+ "logps/chosen": -0.8285977244377136,
1640
+ "logps/rejected": -1.1534996032714844,
1641
+ "loss": 0.5142,
1642
+ "nll_loss": 0.46572408080101013,
1643
+ "rewards/accuracies": 0.612500011920929,
1644
+ "rewards/chosen": -0.04142988473176956,
1645
+ "rewards/margins": 0.016245096921920776,
1646
+ "rewards/rejected": -0.05767498165369034,
1647
+ "step": 910
1648
+ },
1649
+ {
1650
+ "epoch": 0.9648662821185108,
1651
+ "grad_norm": 1.9606527520032064,
1652
+ "learning_rate": 6.593804733957872e-06,
1653
+ "log_odds_chosen": 0.3219223618507385,
1654
+ "log_odds_ratio": -0.649006187915802,
1655
+ "logits/chosen": -2.895038604736328,
1656
+ "logits/rejected": -2.9138269424438477,
1657
+ "logps/chosen": -0.7895429134368896,
1658
+ "logps/rejected": -0.9961126446723938,
1659
+ "loss": 0.4837,
1660
+ "nll_loss": 0.43109196424484253,
1661
+ "rewards/accuracies": 0.6187499761581421,
1662
+ "rewards/chosen": -0.0394771471619606,
1663
+ "rewards/margins": 0.010328484699130058,
1664
+ "rewards/rejected": -0.04980562627315521,
1665
+ "step": 920
1666
+ },
1667
+ {
1668
+ "epoch": 0.9753539590980598,
1669
+ "grad_norm": 2.2191050074705405,
1670
+ "learning_rate": 6.55825835783953e-06,
1671
+ "log_odds_chosen": 0.21952304244041443,
1672
+ "log_odds_ratio": -0.6805615425109863,
1673
+ "logits/chosen": -2.8973617553710938,
1674
+ "logits/rejected": -2.900251865386963,
1675
+ "logps/chosen": -0.8730388879776001,
1676
+ "logps/rejected": -1.0255097150802612,
1677
+ "loss": 0.5135,
1678
+ "nll_loss": 0.5237925052642822,
1679
+ "rewards/accuracies": 0.581250011920929,
1680
+ "rewards/chosen": -0.04365193843841553,
1681
+ "rewards/margins": 0.007623549550771713,
1682
+ "rewards/rejected": -0.05127548426389694,
1683
+ "step": 930
1684
+ },
1685
+ {
1686
+ "epoch": 0.9858416360776088,
1687
+ "grad_norm": 1.9816052115352747,
1688
+ "learning_rate": 6.523280730534423e-06,
1689
+ "log_odds_chosen": 0.2554723024368286,
1690
+ "log_odds_ratio": -0.6887288689613342,
1691
+ "logits/chosen": -2.93623685836792,
1692
+ "logits/rejected": -2.9283607006073,
1693
+ "logps/chosen": -0.7786284685134888,
1694
+ "logps/rejected": -0.9273189306259155,
1695
+ "loss": 0.5095,
1696
+ "nll_loss": 0.4773116111755371,
1697
+ "rewards/accuracies": 0.5874999761581421,
1698
+ "rewards/chosen": -0.03893141821026802,
1699
+ "rewards/margins": 0.007434530649334192,
1700
+ "rewards/rejected": -0.046365950256586075,
1701
+ "step": 940
1702
+ },
1703
+ {
1704
+ "epoch": 0.9963293130571579,
1705
+ "grad_norm": 2.074452011854083,
1706
+ "learning_rate": 6.488856845230502e-06,
1707
+ "log_odds_chosen": 0.2605803310871124,
1708
+ "log_odds_ratio": -0.6914502382278442,
1709
+ "logits/chosen": -2.9090209007263184,
1710
+ "logits/rejected": -2.9163012504577637,
1711
+ "logps/chosen": -0.8585780262947083,
1712
+ "logps/rejected": -1.0175925493240356,
1713
+ "loss": 0.5383,
1714
+ "nll_loss": 0.503527045249939,
1715
+ "rewards/accuracies": 0.5625,
1716
+ "rewards/chosen": -0.04292890429496765,
1717
+ "rewards/margins": 0.007950720377266407,
1718
+ "rewards/rejected": -0.050879620015621185,
1719
+ "step": 950
1720
+ },
1721
+ {
1722
+ "epoch": 0.9994756161510225,
1723
+ "step": 953,
1724
+ "total_flos": 0.0,
1725
+ "train_loss": 0.56347276506494,
1726
+ "train_runtime": 19079.6454,
1727
+ "train_samples_per_second": 3.197,
1728
+ "train_steps_per_second": 0.05
1729
+ }
1730
+ ],
1731
+ "logging_steps": 10,
1732
+ "max_steps": 953,
1733
+ "num_input_tokens_seen": 0,
1734
+ "num_train_epochs": 1,
1735
+ "save_steps": 500,
1736
+ "stateful_callbacks": {
1737
+ "TrainerControl": {
1738
+ "args": {
1739
+ "should_epoch_stop": false,
1740
+ "should_evaluate": false,
1741
+ "should_log": false,
1742
+ "should_save": false,
1743
+ "should_training_stop": false
1744
+ },
1745
+ "attributes": {}
1746
+ }
1747
+ },
1748
+ "total_flos": 0.0,
1749
+ "train_batch_size": 8,
1750
+ "trial_name": null,
1751
+ "trial_params": null
1752
+ }
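
Note: the per-step records above are ORPO training metrics logged every 10 steps (see "logging_steps": 10 at the end of the state). How the columns relate to one another is not documented in this commit, so the following Python sketch treats its interpretation as an assumption inferred from the numbers themselves: "rewards/*" appears to be the mean completion log-probability "logps/*" scaled by a constant beta of 0.05 (TRL's ORPOTrainer logs rewards as beta * logps), and "rewards/margins" is the chosen-minus-rejected gap.

# Sketch only: relations inferred from the logged values, not from any config
# file in this commit; beta = 0.05 is an assumption that reproduces the
# "rewards/*" columns.
import math

entry = {  # step 280 from the log history above
    "logps/chosen": -0.9355181455612183,
    "logps/rejected": -1.0729036331176758,
    "rewards/chosen": -0.04677591472864151,
    "rewards/rejected": -0.05364518240094185,
    "rewards/margins": 0.006869266740977764,
}

beta = 0.05  # assumed scaling factor
assert math.isclose(beta * entry["logps/chosen"], entry["rewards/chosen"], rel_tol=1e-5)
assert math.isclose(entry["rewards/chosen"] - entry["rewards/rejected"],
                    entry["rewards/margins"], rel_tol=1e-4)

def log_odds(logp):
    # log(p / (1 - p)) for a mean log-probability logp
    return logp - math.log1p(-math.exp(logp))

# "log_odds_chosen" averages per-sample log-odds differences, so recomputing
# it from these batch-mean logps is only approximate (~0.217 vs. logged 0.229).
print(log_odds(entry["logps/chosen"]) - log_odds(entry["logps/rejected"]))
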
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0bf035813fd07976aa0220cc580abeb9837a63f23c3331acf22c16aa3d9e2647
+ size 6648
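
Since the full log history ships with the model as JSON, the curves can be replotted offline. A minimal sketch, assuming the file diffed above is the Trainer's standard trainer_state.json (its filename header falls outside this excerpt) and that matplotlib is installed:

import json
import matplotlib.pyplot as plt

with open("trainer_state.json") as f:  # assumed filename, see note above
    state = json.load(f)

# Keep per-step rows; the final summary row lacks the reward columns.
rows = [r for r in state["log_history"] if "rewards/margins" in r]

plt.plot([r["step"] for r in rows], [r["rewards/margins"] for r in rows])
plt.xlabel("step")
plt.ylabel("rewards/margins")
plt.title("ORPO reward margin (chosen - rejected)")
plt.savefig("orpo_margins.png")
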