aaabiao committed
Commit 6644817
1 Parent(s): 473a001

Add files using large-upload tool

adapter_config.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "/ML-A100/team/mm/zhangge/models/neo_pt2.6_phase2/sft_ckpts/neo_7B_sft_v0_1_plus/checkpoint-225",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 128,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "up_proj",
+ "v_proj",
+ "down_proj",
+ "gate_proj",
+ "o_proj",
+ "k_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
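The adapter above applies rank-128 LoRA (alpha 128, so a scale of 1.0) to every attention and MLP projection of the base model. As a rough usage sketch, not part of the commit, assuming the standard transformers/peft APIs and access to the base checkpoint recorded in base_model_name_or_path, the weights in adapter_model.safetensors would typically be attached like this:

import torch
from transformers import AutoModelForCausalLM
from peft import PeftModel

# Base SFT checkpoint named in adapter_config.json; substitute a copy you can reach.
base = AutoModelForCausalLM.from_pretrained(
    "/ML-A100/team/mm/zhangge/models/neo_pt2.6_phase2/sft_ckpts/neo_7B_sft_v0_1_plus/checkpoint-225",
    torch_dtype=torch.bfloat16,
)
# "path/to/this/repo" is a placeholder for wherever these adapter files are stored.
model = PeftModel.from_pretrained(base, "path/to/this/repo")
model.eval()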
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cc064700e949be371f981af015bd5ce2fb366d19dc9d008fe404deaec70680ec
+ size 770757112
added_tokens.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "<|CLS|>": 64000,
+ "<|EOD|>": 64002,
+ "<|MASK|>": 64003,
+ "<|PAD|>": 64004,
+ "<|SEP|>": 64001
+ }
all_results.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "epoch": 1.0,
+ "eval_logits/chosen": -3.411515712738037,
+ "eval_logits/rejected": -3.456860065460205,
+ "eval_logps/chosen": -564.323974609375,
+ "eval_logps/rejected": -567.8529052734375,
+ "eval_loss": 0.7979298830032349,
+ "eval_rewards/accuracies": 0.46875,
+ "eval_rewards/chosen": 4.5177483558654785,
+ "eval_rewards/diff": -0.33996284008026123,
+ "eval_rewards/diff_abs": 1.2063032388687134,
+ "eval_rewards/rejected": 4.610641002655029,
+ "eval_rewards/student_margin": -0.09289252758026123,
+ "eval_rewards/teacher_margin": 0.2470703125,
+ "eval_runtime": 26.855,
+ "eval_samples": 1543,
+ "eval_samples_per_second": 57.457,
+ "eval_steps_per_second": 0.149,
+ "train_loss": 0.54411713648699,
+ "train_runtime": 5965.6032,
+ "train_samples": 160261,
+ "train_samples_per_second": 26.864,
+ "train_steps_per_second": 0.14
+ }
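The reward fields above are related by simple arithmetic, which a quick check (not part of the commit) makes explicit; the relations hold only approximately here because each reported value is a mean over evaluation batches:

chosen, rejected = 4.5177483558654785, 4.610641002655029
teacher_margin = 0.2470703125

student_margin = chosen - rejected        # ~ -0.0929, matches eval_rewards/student_margin
diff = student_margin - teacher_margin    # ~ -0.3400, matches eval_rewards/diff
print(student_margin, diff)
# eval_rewards/diff_abs appears to be averaged from per-batch absolute values,
# so it is not simply abs(diff).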
config.json ADDED
@@ -0,0 +1,28 @@
+ {
+ "_name_or_path": "/ML-A100/team/mm/zhangge/models/neo_pt2.6_phase2/sft_ckpts/neo_7B_sft_v0_1_plus/checkpoint-225",
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "hidden_act": "silu",
+ "hidden_size": 3072,
+ "initializer_range": 0.02,
+ "intermediate_size": 24576,
+ "max_position_embeddings": 8192,
+ "model_type": "llama",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 16,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 10000.0,
+ "tie_word_embeddings": false,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.39.0.dev0",
+ "use_cache": true,
+ "vocab_size": 64256
+ }
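For orientation, the architecture fields above essentially fix the size of the base LlamaForCausalLM; a back-of-the-envelope estimate (not part of the commit, ignoring biases and norm weights) lands at roughly 7.8B parameters, consistent with the neo_7B naming:

# Rough dense parameter count implied by config.json.
vocab, hidden, inter, layers = 64256, 3072, 24576, 28

embeddings = 2 * vocab * hidden      # input embeddings + untied lm_head (tie_word_embeddings: false)
attention  = 4 * hidden * hidden     # q/k/v/o projections (16 kv heads = 16 heads, so no GQA shrinkage)
mlp        = 3 * hidden * inter      # gate/up/down projections
total      = embeddings + layers * (attention + mlp)
print(f"{total / 1e9:.2f}B parameters")   # ~7.79B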
eval_results.json ADDED
@@ -0,0 +1,19 @@
+ {
+ "epoch": 1.0,
+ "eval_logits/chosen": -3.411515712738037,
+ "eval_logits/rejected": -3.456860065460205,
+ "eval_logps/chosen": -564.323974609375,
+ "eval_logps/rejected": -567.8529052734375,
+ "eval_loss": 0.7979298830032349,
+ "eval_rewards/accuracies": 0.46875,
+ "eval_rewards/chosen": 4.5177483558654785,
+ "eval_rewards/diff": -0.33996284008026123,
+ "eval_rewards/diff_abs": 1.2063032388687134,
+ "eval_rewards/rejected": 4.610641002655029,
+ "eval_rewards/student_margin": -0.09289252758026123,
+ "eval_rewards/teacher_margin": 0.2470703125,
+ "eval_runtime": 26.855,
+ "eval_samples": 1543,
+ "eval_samples_per_second": 57.457,
+ "eval_steps_per_second": 0.149
+ }
runs/May20_13-38-06_t-20240520213126-vc7c4-worker-0/events.out.tfevents.1716213772.t-20240520213126-vc7c4-worker-0.6614.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:690f45ad235acf5dd05898832f8d02cd7d529fa2ced6a20c05a8d85c7d51ef89
+ size 79234
runs/May20_13-38-06_t-20240520213126-vc7c4-worker-0/events.out.tfevents.1716219766.t-20240520213126-vc7c4-worker-0.6614.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:51de31136fb0ff439c7b368ac779dde0792323f1d29b36ca14e5a7173ed9bd26
+ size 1017
special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
+ {
+ "additional_special_tokens": [
+ "<|CLS|>",
+ "<|SEP|>",
+ "<|EOD|>",
+ "<|MASK|>",
+ "<|PAD|>"
+ ],
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true
+ },
+ "pad_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true
+ },
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true
+ }
+ }
tokenization_hkgpt.py ADDED
@@ -0,0 +1,253 @@
+ # Copyright 2024 HKGAI Inc. All Rights Reserved.
+
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+ #
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+ # and OPT implementations in this library. It has been modified from its
+ # original forms to accommodate minor architectural differences compared
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import os
+ from shutil import copyfile
+ from typing import Any, Dict, List, Optional, Tuple
+
+ import sentencepiece as spm
+
+ from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
+ from transformers.utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+ VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
+
+ PRETRAINED_VOCAB_FILES_MAP = {
+     "vocab_file": {},
+     "tokenizer_file": {},
+ }
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {}
+
+
+ class HKGPTTokenizer(PreTrainedTokenizer):
+     """
+     Construct a HKGPT tokenizer. Based on byte-level Byte-Pair-Encoding.
+
+     Args:
+         vocab_file (`str`):
+             Path to the vocabulary file.
+     """
+
+     vocab_files_names = VOCAB_FILES_NAMES
+     pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+     max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+     model_input_names = ["input_ids", "attention_mask"]
+
+     def __init__(
+         self,
+         vocab_file,
+         unk_token="<unk>",
+         bos_token="<s>",
+         eos_token="</s>",
+         pad_token=None,
+         sp_model_kwargs: Optional[Dict[str, Any]] = None,
+         add_bos_token=True,
+         add_eos_token=False,
+         clean_up_tokenization_spaces=False,
+         **kwargs,
+     ):
+         self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+         bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
+         eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
+         unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
+         pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
+
+         self.vocab_file = vocab_file
+         self.add_bos_token = add_bos_token
+         self.add_eos_token = add_eos_token
+         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+         self.sp_model.Load(vocab_file)
+
+         super().__init__(
+             bos_token=bos_token,
+             eos_token=eos_token,
+             unk_token=unk_token,
+             pad_token=pad_token,
+             add_bos_token=add_bos_token,
+             add_eos_token=add_eos_token,
+             sp_model_kwargs=self.sp_model_kwargs,
+             clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+             **kwargs,
+         )
+
+     def __getstate__(self):
+         state = self.__dict__.copy()
+         state["sp_model"] = None
+         return state
+
+     def __setstate__(self, d):
+         self.__dict__ = d
+         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+         self.sp_model.Load(self.vocab_file)
+
+     @property
+     def vocab_size(self):
+         """Returns vocab size"""
+         return self.sp_model.get_piece_size()
+
+     def get_vocab(self):
+         """Returns vocab as a dict"""
+         vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+         vocab.update(self.added_tokens_encoder)
+         return vocab
+
+     def _tokenize(self, text):
+         """Returns a tokenized string."""
+         return self.sp_model.encode(text, out_type=str)
+
+     def _convert_token_to_id(self, token):
+         """Converts a token (str) in an id using the vocab."""
+         return self.sp_model.piece_to_id(token)
+
+     def _convert_id_to_token(self, index):
+         """Converts an index (integer) in a token (str) using the vocab."""
+         token = self.sp_model.IdToPiece(index)
+         return token
+
+     def convert_tokens_to_string(self, tokens):
+         """Converts a sequence of tokens (string) in a single string."""
+         current_sub_tokens = []
+         out_string = ""
+         prev_is_special = False
+         for i, token in enumerate(tokens):
+             # make sure that special tokens are not decoded using sentencepiece model
+             if token in self.all_special_tokens:
+                 if not prev_is_special and i != 0:
+                     out_string += " "
+                 out_string += self.sp_model.decode(current_sub_tokens) + token
+                 prev_is_special = True
+                 current_sub_tokens = []
+             else:
+                 current_sub_tokens.append(token)
+                 prev_is_special = False
+         out_string += self.sp_model.decode(current_sub_tokens)
+         return out_string
+
+     def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
+         """
+         Save the vocabulary and special tokens file to a directory.
+
+         Args:
+             save_directory (`str`):
+                 The directory in which to save the vocabulary.
+
+         Returns:
+             `Tuple(str)`: Paths to the files saved.
+         """
+         if not os.path.isdir(save_directory):
+             logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+             return
+         out_vocab_file = os.path.join(
+             save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+         )
+
+         if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+             copyfile(self.vocab_file, out_vocab_file)
+         elif not os.path.isfile(self.vocab_file):
+             with open(out_vocab_file, "wb") as fi:
+                 content_spiece_model = self.sp_model.serialized_model_proto()
+                 fi.write(content_spiece_model)
+
+         return (out_vocab_file,)
+
+     def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+         bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+         eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+         output = bos_token_id + token_ids_0 + eos_token_id
+
+         if token_ids_1 is not None:
+             output = output + bos_token_id + token_ids_1 + eos_token_id
+
+         return output
+
+     def get_special_tokens_mask(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+     ) -> List[int]:
+         """
+         Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+         special tokens using the tokenizer `prepare_for_model` method.
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+             already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                 Whether or not the token list is already formatted with special tokens for the model.
+
+         Returns:
+             `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+         """
+         if already_has_special_tokens:
+             return super().get_special_tokens_mask(
+                 token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+             )
+
+         bos_token_id = [1] if self.add_bos_token else []
+         eos_token_id = [1] if self.add_eos_token else []
+
+         if token_ids_1 is None:
+             return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
+         return (
+             bos_token_id
+             + ([0] * len(token_ids_0))
+             + eos_token_id
+             + bos_token_id
+             + ([0] * len(token_ids_1))
+             + eos_token_id
+         )
+
+     def create_token_type_ids_from_sequences(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+     ) -> List[int]:
+         """
+         Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
+         sequence pair mask has the following format:
+
+         ```
+         0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+         | first sequence | second sequence |
+         ```
+
+         if token_ids_1 is None, only returns the first portion of the mask (0s).
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of ids.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+
+         Returns:
+             `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+         """
+         bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+         eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+         output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
+
+         if token_ids_1 is not None:
+             output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
+
+         return output
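A minimal usage sketch for the tokenizer above (not part of the commit, assuming the tokenizer.model file shipped in this repository is on disk); since HKGPTTokenizer is a plain SentencePiece wrapper, it can be instantiated directly from the vocab file:

from tokenization_hkgpt import HKGPTTokenizer

tok = HKGPTTokenizer(vocab_file="tokenizer.model")   # path to the tokenizer.model added below
enc = tok("Hello world")                             # BOS is prepended because add_bos_token=True
print(enc["input_ids"])
print(tok.convert_ids_to_tokens(enc["input_ids"]))
print(tok.decode(enc["input_ids"]))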
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f6a2447b0e5664cabb2481587597102d82f42f0ccb7ef22e1c2d95494a8b03c5
+ size 1002561
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 1.0,
+ "train_loss": 0.54411713648699,
+ "train_runtime": 5965.6032,
+ "train_samples": 160261,
+ "train_samples_per_second": 26.864,
+ "train_steps_per_second": 0.14
+ }
trainer_state.json ADDED
@@ -0,0 +1,1542 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 1.0,
5
+ "eval_steps": 800000000,
6
+ "global_step": 835,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.0,
13
+ "grad_norm": 14.9375,
14
+ "learning_rate": 5.952380952380953e-08,
15
+ "logits/chosen": -3.4845848083496094,
16
+ "logits/rejected": -3.85036301612854,
17
+ "logps/chosen": -306.50885009765625,
18
+ "logps/rejected": -197.74395751953125,
19
+ "loss": 0.6931,
20
+ "rewards/accuracies": 0.0,
21
+ "rewards/chosen": 0.0,
22
+ "rewards/diff": -0.625,
23
+ "rewards/diff_abs": 0.625,
24
+ "rewards/rejected": 0.0,
25
+ "rewards/student_margin": 0.0,
26
+ "rewards/teacher_margin": 0.625,
27
+ "step": 1
28
+ },
29
+ {
30
+ "epoch": 0.01,
31
+ "grad_norm": 15.1875,
32
+ "learning_rate": 5.952380952380953e-07,
33
+ "logits/chosen": -3.4539499282836914,
34
+ "logits/rejected": -3.5230212211608887,
35
+ "logps/chosen": -201.3124237060547,
36
+ "logps/rejected": -183.91929626464844,
37
+ "loss": 0.7251,
38
+ "rewards/accuracies": 0.5185185670852661,
39
+ "rewards/chosen": 0.06644736230373383,
40
+ "rewards/diff": -0.6283153891563416,
41
+ "rewards/diff_abs": 0.7078281044960022,
42
+ "rewards/rejected": 0.013049829751253128,
43
+ "rewards/student_margin": 0.0533975288271904,
44
+ "rewards/teacher_margin": 0.6817129254341125,
45
+ "step": 10
46
+ },
47
+ {
48
+ "epoch": 0.02,
49
+ "grad_norm": 15.0,
50
+ "learning_rate": 1.1904761904761906e-06,
51
+ "logits/chosen": -3.593590259552002,
52
+ "logits/rejected": -3.5751953125,
53
+ "logps/chosen": -218.2281951904297,
54
+ "logps/rejected": -209.72158813476562,
55
+ "loss": 0.7314,
56
+ "rewards/accuracies": 0.36666667461395264,
57
+ "rewards/chosen": -0.14473959803581238,
58
+ "rewards/diff": -1.088902235031128,
59
+ "rewards/diff_abs": 1.216476321220398,
60
+ "rewards/rejected": -0.044899843633174896,
61
+ "rewards/student_margin": -0.09983976185321808,
62
+ "rewards/teacher_margin": 0.9890626072883606,
63
+ "step": 20
64
+ },
65
+ {
66
+ "epoch": 0.04,
67
+ "grad_norm": 12.875,
68
+ "learning_rate": 1.7857142857142859e-06,
69
+ "logits/chosen": -3.489861249923706,
70
+ "logits/rejected": -3.60286283493042,
71
+ "logps/chosen": -259.5788269042969,
72
+ "logps/rejected": -200.3897705078125,
73
+ "loss": 0.7006,
74
+ "rewards/accuracies": 0.5,
75
+ "rewards/chosen": 0.16129140555858612,
76
+ "rewards/diff": -0.9457392692565918,
77
+ "rewards/diff_abs": 0.9774287343025208,
78
+ "rewards/rejected": 0.19505144655704498,
79
+ "rewards/student_margin": -0.03376004844903946,
80
+ "rewards/teacher_margin": 0.911979079246521,
81
+ "step": 30
82
+ },
83
+ {
84
+ "epoch": 0.05,
85
+ "grad_norm": 13.125,
86
+ "learning_rate": 2.380952380952381e-06,
87
+ "logits/chosen": -3.4493842124938965,
88
+ "logits/rejected": -3.5313167572021484,
89
+ "logps/chosen": -296.2957458496094,
90
+ "logps/rejected": -205.90768432617188,
91
+ "loss": 0.6915,
92
+ "rewards/accuracies": 0.6333333253860474,
93
+ "rewards/chosen": 0.4081878662109375,
94
+ "rewards/diff": -0.8339607119560242,
95
+ "rewards/diff_abs": 0.9871258735656738,
96
+ "rewards/rejected": 0.23329439759254456,
97
+ "rewards/student_margin": 0.17489352822303772,
98
+ "rewards/teacher_margin": 1.0088541507720947,
99
+ "step": 40
100
+ },
101
+ {
102
+ "epoch": 0.06,
103
+ "grad_norm": 12.1875,
104
+ "learning_rate": 2.9761904761904763e-06,
105
+ "logits/chosen": -3.627382755279541,
106
+ "logits/rejected": -3.624690294265747,
107
+ "logps/chosen": -232.656494140625,
108
+ "logps/rejected": -218.9987335205078,
109
+ "loss": 0.6477,
110
+ "rewards/accuracies": 0.5333333611488342,
111
+ "rewards/chosen": 0.6538265943527222,
112
+ "rewards/diff": -1.1430647373199463,
113
+ "rewards/diff_abs": 1.2350889444351196,
114
+ "rewards/rejected": 0.550537109375,
115
+ "rewards/student_margin": 0.10328948497772217,
116
+ "rewards/teacher_margin": 1.2463542222976685,
117
+ "step": 50
118
+ },
119
+ {
120
+ "epoch": 0.07,
121
+ "grad_norm": 12.1875,
122
+ "learning_rate": 3.5714285714285718e-06,
123
+ "logits/chosen": -3.5310890674591064,
124
+ "logits/rejected": -3.5235633850097656,
125
+ "logps/chosen": -278.9076232910156,
126
+ "logps/rejected": -228.38461303710938,
127
+ "loss": 0.6531,
128
+ "rewards/accuracies": 0.6666666269302368,
129
+ "rewards/chosen": 1.3353300094604492,
130
+ "rewards/diff": -0.48476704955101013,
131
+ "rewards/diff_abs": 0.8795832395553589,
132
+ "rewards/rejected": 0.9247845411300659,
133
+ "rewards/student_margin": 0.4105454385280609,
134
+ "rewards/teacher_margin": 0.895312488079071,
135
+ "step": 60
136
+ },
137
+ {
138
+ "epoch": 0.08,
139
+ "grad_norm": 12.0625,
140
+ "learning_rate": 4.166666666666667e-06,
141
+ "logits/chosen": -3.580937147140503,
142
+ "logits/rejected": -3.5811400413513184,
143
+ "logps/chosen": -300.9478454589844,
144
+ "logps/rejected": -296.41937255859375,
145
+ "loss": 0.6277,
146
+ "rewards/accuracies": 0.699999988079071,
147
+ "rewards/chosen": 1.321845293045044,
148
+ "rewards/diff": -0.41078656911849976,
149
+ "rewards/diff_abs": 0.8033410906791687,
150
+ "rewards/rejected": 0.977423369884491,
151
+ "rewards/student_margin": 0.34442177414894104,
152
+ "rewards/teacher_margin": 0.7552083730697632,
153
+ "step": 70
154
+ },
155
+ {
156
+ "epoch": 0.1,
157
+ "grad_norm": 11.875,
158
+ "learning_rate": 4.761904761904762e-06,
159
+ "logits/chosen": -3.363053560256958,
160
+ "logits/rejected": -3.429394483566284,
161
+ "logps/chosen": -307.85614013671875,
162
+ "logps/rejected": -194.5691680908203,
163
+ "loss": 0.6022,
164
+ "rewards/accuracies": 0.7666666507720947,
165
+ "rewards/chosen": 1.5754692554473877,
166
+ "rewards/diff": -0.049827940762043,
167
+ "rewards/diff_abs": 1.0208818912506104,
168
+ "rewards/rejected": 0.6659221649169922,
169
+ "rewards/student_margin": 0.9095471501350403,
170
+ "rewards/teacher_margin": 0.9593750238418579,
171
+ "step": 80
172
+ },
173
+ {
174
+ "epoch": 0.11,
175
+ "grad_norm": 10.625,
176
+ "learning_rate": 4.9992125742993825e-06,
177
+ "logits/chosen": -3.5306942462921143,
178
+ "logits/rejected": -3.4903416633605957,
179
+ "logps/chosen": -306.61328125,
180
+ "logps/rejected": -260.5257873535156,
181
+ "loss": 0.6025,
182
+ "rewards/accuracies": 0.6999999284744263,
183
+ "rewards/chosen": 1.7541630268096924,
184
+ "rewards/diff": -0.37054818868637085,
185
+ "rewards/diff_abs": 1.008590817451477,
186
+ "rewards/rejected": 1.359086275100708,
187
+ "rewards/student_margin": 0.39507681131362915,
188
+ "rewards/teacher_margin": 0.765625,
189
+ "step": 90
190
+ },
191
+ {
192
+ "epoch": 0.12,
193
+ "grad_norm": 11.6875,
194
+ "learning_rate": 4.994402324561469e-06,
195
+ "logits/chosen": -3.487095594406128,
196
+ "logits/rejected": -3.4807047843933105,
197
+ "logps/chosen": -291.501953125,
198
+ "logps/rejected": -213.4379425048828,
199
+ "loss": 0.6059,
200
+ "rewards/accuracies": 0.8333333134651184,
201
+ "rewards/chosen": 1.6134860515594482,
202
+ "rewards/diff": -0.03169644996523857,
203
+ "rewards/diff_abs": 0.517871618270874,
204
+ "rewards/rejected": 0.8097659349441528,
205
+ "rewards/student_margin": 0.8037201762199402,
206
+ "rewards/teacher_margin": 0.8354166746139526,
207
+ "step": 100
208
+ },
209
+ {
210
+ "epoch": 0.13,
211
+ "grad_norm": 12.0625,
212
+ "learning_rate": 4.985227689958313e-06,
213
+ "logits/chosen": -3.4644827842712402,
214
+ "logits/rejected": -3.5029213428497314,
215
+ "logps/chosen": -310.9401550292969,
216
+ "logps/rejected": -203.2042999267578,
217
+ "loss": 0.5783,
218
+ "rewards/accuracies": 0.7000000476837158,
219
+ "rewards/chosen": 1.3005656003952026,
220
+ "rewards/diff": -0.4949645400047302,
221
+ "rewards/diff_abs": 0.8325679898262024,
222
+ "rewards/rejected": 0.9288633465766907,
223
+ "rewards/student_margin": 0.3717021346092224,
224
+ "rewards/teacher_margin": 0.8666666746139526,
225
+ "step": 110
226
+ },
227
+ {
228
+ "epoch": 0.14,
229
+ "grad_norm": 11.375,
230
+ "learning_rate": 4.97170472308737e-06,
231
+ "logits/chosen": -3.5512795448303223,
232
+ "logits/rejected": -3.5486133098602295,
233
+ "logps/chosen": -240.02197265625,
234
+ "logps/rejected": -220.6559600830078,
235
+ "loss": 0.6029,
236
+ "rewards/accuracies": 0.5666666626930237,
237
+ "rewards/chosen": 1.3574118614196777,
238
+ "rewards/diff": -0.24382737278938293,
239
+ "rewards/diff_abs": 1.0172048807144165,
240
+ "rewards/rejected": 0.7210308909416199,
241
+ "rewards/student_margin": 0.6363809704780579,
242
+ "rewards/teacher_margin": 0.8802083134651184,
243
+ "step": 120
244
+ },
245
+ {
246
+ "epoch": 0.16,
247
+ "grad_norm": 10.8125,
248
+ "learning_rate": 4.953857084699501e-06,
249
+ "logits/chosen": -3.4069736003875732,
250
+ "logits/rejected": -3.45965313911438,
251
+ "logps/chosen": -239.0903778076172,
252
+ "logps/rejected": -190.62875366210938,
253
+ "loss": 0.6033,
254
+ "rewards/accuracies": 0.7333333492279053,
255
+ "rewards/chosen": 1.439429521560669,
256
+ "rewards/diff": -0.09966392815113068,
257
+ "rewards/diff_abs": 0.7962394952774048,
258
+ "rewards/rejected": 0.7729476690292358,
259
+ "rewards/student_margin": 0.6664819121360779,
260
+ "rewards/teacher_margin": 0.7661458253860474,
261
+ "step": 130
262
+ },
263
+ {
264
+ "epoch": 0.17,
265
+ "grad_norm": 11.9375,
266
+ "learning_rate": 4.931716002300424e-06,
267
+ "logits/chosen": -3.446927547454834,
268
+ "logits/rejected": -3.4422965049743652,
269
+ "logps/chosen": -305.3811950683594,
270
+ "logps/rejected": -268.9550476074219,
271
+ "loss": 0.5658,
272
+ "rewards/accuracies": 0.7999999523162842,
273
+ "rewards/chosen": 1.7481582164764404,
274
+ "rewards/diff": 0.060656942427158356,
275
+ "rewards/diff_abs": 0.8949319124221802,
276
+ "rewards/rejected": 0.8583346605300903,
277
+ "rewards/student_margin": 0.8898237347602844,
278
+ "rewards/teacher_margin": 0.8291667103767395,
279
+ "step": 140
280
+ },
281
+ {
282
+ "epoch": 0.18,
283
+ "grad_norm": 10.6875,
284
+ "learning_rate": 4.905320215512843e-06,
285
+ "logits/chosen": -3.3709404468536377,
286
+ "logits/rejected": -3.4576239585876465,
287
+ "logps/chosen": -273.4628601074219,
288
+ "logps/rejected": -242.08724975585938,
289
+ "loss": 0.5915,
290
+ "rewards/accuracies": 0.6333333253860474,
291
+ "rewards/chosen": 0.9606747627258301,
292
+ "rewards/diff": -0.34713083505630493,
293
+ "rewards/diff_abs": 0.9387839436531067,
294
+ "rewards/rejected": 0.5515555143356323,
295
+ "rewards/student_margin": 0.409119188785553,
296
+ "rewards/teacher_margin": 0.7562500238418579,
297
+ "step": 150
298
+ },
299
+ {
300
+ "epoch": 0.19,
301
+ "grad_norm": 11.6875,
302
+ "learning_rate": 4.874715908294827e-06,
303
+ "logits/chosen": -3.4495322704315186,
304
+ "logits/rejected": -3.4219632148742676,
305
+ "logps/chosen": -236.69869995117188,
306
+ "logps/rejected": -200.24969482421875,
307
+ "loss": 0.567,
308
+ "rewards/accuracies": 0.7333332896232605,
309
+ "rewards/chosen": 1.1124448776245117,
310
+ "rewards/diff": -0.28646618127822876,
311
+ "rewards/diff_abs": 0.8947150111198425,
312
+ "rewards/rejected": 0.5187025666236877,
313
+ "rewards/student_margin": 0.5937421917915344,
314
+ "rewards/teacher_margin": 0.8802083730697632,
315
+ "step": 160
316
+ },
317
+ {
318
+ "epoch": 0.2,
319
+ "grad_norm": 12.3125,
320
+ "learning_rate": 4.839956628133049e-06,
321
+ "logits/chosen": -3.4103050231933594,
322
+ "logits/rejected": -3.464110851287842,
323
+ "logps/chosen": -237.78280639648438,
324
+ "logps/rejected": -208.2376708984375,
325
+ "loss": 0.5312,
326
+ "rewards/accuracies": 0.8666666150093079,
327
+ "rewards/chosen": 1.213085651397705,
328
+ "rewards/diff": -0.21489715576171875,
329
+ "rewards/diff_abs": 1.0237081050872803,
330
+ "rewards/rejected": 0.386316180229187,
331
+ "rewards/student_margin": 0.8267695307731628,
332
+ "rewards/teacher_margin": 1.0416667461395264,
333
+ "step": 170
334
+ },
335
+ {
336
+ "epoch": 0.22,
337
+ "grad_norm": 11.25,
338
+ "learning_rate": 4.801103192352272e-06,
339
+ "logits/chosen": -3.5754635334014893,
340
+ "logits/rejected": -3.633957624435425,
341
+ "logps/chosen": -344.4823303222656,
342
+ "logps/rejected": -243.0480499267578,
343
+ "loss": 0.5386,
344
+ "rewards/accuracies": 0.6666666269302368,
345
+ "rewards/chosen": 1.8415803909301758,
346
+ "rewards/diff": -0.09891305863857269,
347
+ "rewards/diff_abs": 1.243789553642273,
348
+ "rewards/rejected": 0.9873684048652649,
349
+ "rewards/student_margin": 0.8542119860649109,
350
+ "rewards/teacher_margin": 0.9531251192092896,
351
+ "step": 180
352
+ },
353
+ {
354
+ "epoch": 0.23,
355
+ "grad_norm": 11.25,
356
+ "learning_rate": 4.758223581705006e-06,
357
+ "logits/chosen": -3.512629747390747,
358
+ "logits/rejected": -3.5428214073181152,
359
+ "logps/chosen": -243.7911376953125,
360
+ "logps/rejected": -196.57791137695312,
361
+ "loss": 0.564,
362
+ "rewards/accuracies": 0.6333333253860474,
363
+ "rewards/chosen": 1.4310871362686157,
364
+ "rewards/diff": -0.1522880345582962,
365
+ "rewards/diff_abs": 1.0001410245895386,
366
+ "rewards/rejected": 0.8344168663024902,
367
+ "rewards/student_margin": 0.5966703295707703,
368
+ "rewards/teacher_margin": 0.7489583492279053,
369
+ "step": 190
370
+ },
371
+ {
372
+ "epoch": 0.24,
373
+ "grad_norm": 12.75,
374
+ "learning_rate": 4.711392821427515e-06,
375
+ "logits/chosen": -3.6087615489959717,
376
+ "logits/rejected": -3.622082233428955,
377
+ "logps/chosen": -233.5066680908203,
378
+ "logps/rejected": -160.3419647216797,
379
+ "loss": 0.5557,
380
+ "rewards/accuracies": 0.7333332896232605,
381
+ "rewards/chosen": 1.0799269676208496,
382
+ "rewards/diff": -0.27179113030433655,
383
+ "rewards/diff_abs": 0.9324356913566589,
384
+ "rewards/rejected": 0.15380141139030457,
385
+ "rewards/student_margin": 0.9261256456375122,
386
+ "rewards/teacher_margin": 1.1979167461395264,
387
+ "step": 200
388
+ },
389
+ {
390
+ "epoch": 0.25,
391
+ "grad_norm": 10.3125,
392
+ "learning_rate": 4.6606928499702905e-06,
393
+ "logits/chosen": -3.5973472595214844,
394
+ "logits/rejected": -3.656515598297119,
395
+ "logps/chosen": -237.35546875,
396
+ "logps/rejected": -227.3077392578125,
397
+ "loss": 0.544,
398
+ "rewards/accuracies": 0.699999988079071,
399
+ "rewards/chosen": 1.372194766998291,
400
+ "rewards/diff": -0.45501255989074707,
401
+ "rewards/diff_abs": 1.0200004577636719,
402
+ "rewards/rejected": 0.9282490611076355,
403
+ "rewards/student_margin": 0.4439457952976227,
404
+ "rewards/teacher_margin": 0.8989583849906921,
405
+ "step": 210
406
+ },
407
+ {
408
+ "epoch": 0.26,
409
+ "grad_norm": 10.5,
410
+ "learning_rate": 4.606212375632682e-06,
411
+ "logits/chosen": -3.341809034347534,
412
+ "logits/rejected": -3.4072697162628174,
413
+ "logps/chosen": -242.65316772460938,
414
+ "logps/rejected": -186.21214294433594,
415
+ "loss": 0.5484,
416
+ "rewards/accuracies": 0.7999999523162842,
417
+ "rewards/chosen": 1.5010632276535034,
418
+ "rewards/diff": 0.02491099201142788,
419
+ "rewards/diff_abs": 1.0156395435333252,
420
+ "rewards/rejected": 0.4521939158439636,
421
+ "rewards/student_margin": 1.048869252204895,
422
+ "rewards/teacher_margin": 1.023958444595337,
423
+ "step": 220
424
+ },
425
+ {
426
+ "epoch": 0.28,
427
+ "grad_norm": 12.9375,
428
+ "learning_rate": 4.5480467213524935e-06,
429
+ "logits/chosen": -3.4449222087860107,
430
+ "logits/rejected": -3.4908764362335205,
431
+ "logps/chosen": -260.27532958984375,
432
+ "logps/rejected": -249.1790313720703,
433
+ "loss": 0.5548,
434
+ "rewards/accuracies": 0.6333333253860474,
435
+ "rewards/chosen": 1.5822021961212158,
436
+ "rewards/diff": -0.133940190076828,
437
+ "rewards/diff_abs": 0.9377338290214539,
438
+ "rewards/rejected": 0.8869755864143372,
439
+ "rewards/student_margin": 0.6952265501022339,
440
+ "rewards/teacher_margin": 0.8291667699813843,
441
+ "step": 230
442
+ },
443
+ {
444
+ "epoch": 0.29,
445
+ "grad_norm": 10.75,
446
+ "learning_rate": 4.4862976579221605e-06,
447
+ "logits/chosen": -3.4081084728240967,
448
+ "logits/rejected": -3.435927152633667,
449
+ "logps/chosen": -305.90277099609375,
450
+ "logps/rejected": -222.0186767578125,
451
+ "loss": 0.5421,
452
+ "rewards/accuracies": 0.6999999284744263,
453
+ "rewards/chosen": 1.8166110515594482,
454
+ "rewards/diff": -0.12463061511516571,
455
+ "rewards/diff_abs": 1.1340343952178955,
456
+ "rewards/rejected": 0.781866729259491,
457
+ "rewards/student_margin": 1.034744381904602,
458
+ "rewards/teacher_margin": 1.1593749523162842,
459
+ "step": 240
460
+ },
461
+ {
462
+ "epoch": 0.3,
463
+ "grad_norm": 10.3125,
464
+ "learning_rate": 4.421073225923276e-06,
465
+ "logits/chosen": -3.4236435890197754,
466
+ "logits/rejected": -3.5582706928253174,
467
+ "logps/chosen": -304.5841064453125,
468
+ "logps/rejected": -224.82040405273438,
469
+ "loss": 0.5406,
470
+ "rewards/accuracies": 0.699999988079071,
471
+ "rewards/chosen": 1.731606125831604,
472
+ "rewards/diff": 0.09823840111494064,
473
+ "rewards/diff_abs": 1.1416826248168945,
474
+ "rewards/rejected": 0.6896177530288696,
475
+ "rewards/student_margin": 1.0419883728027344,
476
+ "rewards/teacher_margin": 0.9437500238418579,
477
+ "step": 250
478
+ },
479
+ {
480
+ "epoch": 0.31,
481
+ "grad_norm": 12.8125,
482
+ "learning_rate": 4.3524875466910634e-06,
483
+ "logits/chosen": -3.3874142169952393,
484
+ "logits/rejected": -3.38875150680542,
485
+ "logps/chosen": -248.728271484375,
486
+ "logps/rejected": -241.2711181640625,
487
+ "loss": 0.5522,
488
+ "rewards/accuracies": 0.6000000238418579,
489
+ "rewards/chosen": 1.1090809106826782,
490
+ "rewards/diff": -0.14672747254371643,
491
+ "rewards/diff_abs": 0.8784782290458679,
492
+ "rewards/rejected": 0.7037249803543091,
493
+ "rewards/student_margin": 0.405355840921402,
494
+ "rewards/teacher_margin": 0.5520833730697632,
495
+ "step": 260
496
+ },
497
+ {
498
+ "epoch": 0.32,
499
+ "grad_norm": 10.5,
500
+ "learning_rate": 4.280660622639513e-06,
501
+ "logits/chosen": -3.518489122390747,
502
+ "logits/rejected": -3.5266849994659424,
503
+ "logps/chosen": -238.49270629882812,
504
+ "logps/rejected": -191.0264129638672,
505
+ "loss": 0.5309,
506
+ "rewards/accuracies": 0.8666666150093079,
507
+ "rewards/chosen": 1.5766558647155762,
508
+ "rewards/diff": 0.2415703982114792,
509
+ "rewards/diff_abs": 0.9685176014900208,
510
+ "rewards/rejected": 0.47050219774246216,
511
+ "rewards/student_margin": 1.1061537265777588,
512
+ "rewards/teacher_margin": 0.8645833730697632,
513
+ "step": 270
514
+ },
515
+ {
516
+ "epoch": 0.34,
517
+ "grad_norm": 10.375,
518
+ "learning_rate": 4.205718127296574e-06,
519
+ "logits/chosen": -3.5537657737731934,
520
+ "logits/rejected": -3.529198169708252,
521
+ "logps/chosen": -241.38253784179688,
522
+ "logps/rejected": -211.21163940429688,
523
+ "loss": 0.5324,
524
+ "rewards/accuracies": 0.8333333730697632,
525
+ "rewards/chosen": 1.566563367843628,
526
+ "rewards/diff": -0.13755005598068237,
527
+ "rewards/diff_abs": 1.2915265560150146,
528
+ "rewards/rejected": 0.8463010787963867,
529
+ "rewards/student_margin": 0.7202624678611755,
530
+ "rewards/teacher_margin": 0.8578125238418579,
531
+ "step": 280
532
+ },
533
+ {
534
+ "epoch": 0.35,
535
+ "grad_norm": 11.6875,
536
+ "learning_rate": 4.127791185416747e-06,
537
+ "logits/chosen": -3.4216790199279785,
538
+ "logits/rejected": -3.4342334270477295,
539
+ "logps/chosen": -219.7965087890625,
540
+ "logps/rejected": -173.47998046875,
541
+ "loss": 0.5566,
542
+ "rewards/accuracies": 0.6666666865348816,
543
+ "rewards/chosen": 1.3700611591339111,
544
+ "rewards/diff": -0.2189801037311554,
545
+ "rewards/diff_abs": 1.011496901512146,
546
+ "rewards/rejected": 0.6609162092208862,
547
+ "rewards/student_margin": 0.7091449499130249,
548
+ "rewards/teacher_margin": 0.9281250834465027,
549
+ "step": 290
550
+ },
551
+ {
552
+ "epoch": 0.36,
553
+ "grad_norm": 9.25,
554
+ "learning_rate": 4.047016143555834e-06,
555
+ "logits/chosen": -3.4285099506378174,
556
+ "logits/rejected": -3.44201397895813,
557
+ "logps/chosen": -247.718994140625,
558
+ "logps/rejected": -208.1968231201172,
559
+ "loss": 0.5411,
560
+ "rewards/accuracies": 0.7333332896232605,
561
+ "rewards/chosen": 1.704085350036621,
562
+ "rewards/diff": 0.11051769554615021,
563
+ "rewards/diff_abs": 0.9950829744338989,
564
+ "rewards/rejected": 0.6805468201637268,
565
+ "rewards/student_margin": 1.023538589477539,
566
+ "rewards/teacher_margin": 0.91302090883255,
567
+ "step": 300
568
+ },
569
+ {
570
+ "epoch": 0.37,
571
+ "grad_norm": 11.375,
572
+ "learning_rate": 3.9635343315092374e-06,
573
+ "logits/chosen": -3.350679874420166,
574
+ "logits/rejected": -3.487694263458252,
575
+ "logps/chosen": -243.7193603515625,
576
+ "logps/rejected": -210.34561157226562,
577
+ "loss": 0.558,
578
+ "rewards/accuracies": 0.73333340883255,
579
+ "rewards/chosen": 1.3691414594650269,
580
+ "rewards/diff": -0.047634802758693695,
581
+ "rewards/diff_abs": 1.2383002042770386,
582
+ "rewards/rejected": 0.43865126371383667,
583
+ "rewards/student_margin": 0.9304901957511902,
584
+ "rewards/teacher_margin": 0.9781249165534973,
585
+ "step": 310
586
+ },
587
+ {
588
+ "epoch": 0.38,
589
+ "grad_norm": 11.0,
590
+ "learning_rate": 3.877491815031241e-06,
591
+ "logits/chosen": -3.520355701446533,
592
+ "logits/rejected": -3.64158296585083,
593
+ "logps/chosen": -258.4951171875,
594
+ "logps/rejected": -180.27655029296875,
595
+ "loss": 0.528,
596
+ "rewards/accuracies": 0.8666666150093079,
597
+ "rewards/chosen": 1.5588480234146118,
598
+ "rewards/diff": 0.2940705418586731,
599
+ "rewards/diff_abs": 0.8084346055984497,
600
+ "rewards/rejected": 0.40748587250709534,
601
+ "rewards/student_margin": 1.1513621807098389,
602
+ "rewards/teacher_margin": 0.8572916984558105,
603
+ "step": 320
604
+ },
605
+ {
606
+ "epoch": 0.4,
607
+ "grad_norm": 11.625,
608
+ "learning_rate": 3.789039140267903e-06,
609
+ "logits/chosen": -3.6287574768066406,
610
+ "logits/rejected": -3.6443278789520264,
611
+ "logps/chosen": -239.03488159179688,
612
+ "logps/rejected": -204.2160186767578,
613
+ "loss": 0.5197,
614
+ "rewards/accuracies": 0.7666666507720947,
615
+ "rewards/chosen": 1.4275104999542236,
616
+ "rewards/diff": 0.07205963134765625,
617
+ "rewards/diff_abs": 1.0453150272369385,
618
+ "rewards/rejected": 0.3346175253391266,
619
+ "rewards/student_margin": 1.0928928852081299,
620
+ "rewards/teacher_margin": 1.0208333730697632,
621
+ "step": 330
622
+ },
623
+ {
624
+ "epoch": 0.41,
625
+ "grad_norm": 11.0625,
626
+ "learning_rate": 3.6983310703507475e-06,
627
+ "logits/chosen": -3.4879977703094482,
628
+ "logits/rejected": -3.631270170211792,
629
+ "logps/chosen": -316.2113342285156,
630
+ "logps/rejected": -292.9886474609375,
631
+ "loss": 0.5119,
632
+ "rewards/accuracies": 0.7666667103767395,
633
+ "rewards/chosen": 1.9592492580413818,
634
+ "rewards/diff": 0.0617034025490284,
635
+ "rewards/diff_abs": 1.0368849039077759,
636
+ "rewards/rejected": 1.1829627752304077,
637
+ "rewards/student_margin": 0.7762867212295532,
638
+ "rewards/teacher_margin": 0.7145833969116211,
639
+ "step": 340
640
+ },
641
+ {
642
+ "epoch": 0.42,
643
+ "grad_norm": 11.625,
644
+ "learning_rate": 3.6055263146121062e-06,
645
+ "logits/chosen": -3.4843573570251465,
646
+ "logits/rejected": -3.5558838844299316,
647
+ "logps/chosen": -243.1865234375,
648
+ "logps/rejected": -191.44906616210938,
649
+ "loss": 0.5281,
650
+ "rewards/accuracies": 0.8333333730697632,
651
+ "rewards/chosen": 1.671415090560913,
652
+ "rewards/diff": 0.22389063239097595,
653
+ "rewards/diff_abs": 1.1865875720977783,
654
+ "rewards/rejected": 0.6318994760513306,
655
+ "rewards/student_margin": 1.0395156145095825,
656
+ "rewards/teacher_margin": 0.815625011920929,
657
+ "step": 350
658
+ },
659
+ {
660
+ "epoch": 0.43,
661
+ "grad_norm": 11.75,
662
+ "learning_rate": 3.5107872508959144e-06,
663
+ "logits/chosen": -3.551055908203125,
664
+ "logits/rejected": -3.672009229660034,
665
+ "logps/chosen": -303.6122741699219,
666
+ "logps/rejected": -230.38363647460938,
667
+ "loss": 0.5345,
668
+ "rewards/accuracies": 0.7666666507720947,
669
+ "rewards/chosen": 1.3571428060531616,
670
+ "rewards/diff": 0.1063896045088768,
671
+ "rewards/diff_abs": 1.227370023727417,
672
+ "rewards/rejected": 0.45752400159835815,
673
+ "rewards/student_margin": 0.8996188044548035,
674
+ "rewards/teacher_margin": 0.7932292222976685,
675
+ "step": 360
676
+ },
677
+ {
678
+ "epoch": 0.44,
679
+ "grad_norm": 10.875,
680
+ "learning_rate": 3.414279641449809e-06,
681
+ "logits/chosen": -3.435415744781494,
682
+ "logits/rejected": -3.4730231761932373,
683
+ "logps/chosen": -295.2155456542969,
684
+ "logps/rejected": -237.608642578125,
685
+ "loss": 0.5138,
686
+ "rewards/accuracies": 0.7333332896232605,
687
+ "rewards/chosen": 1.6991815567016602,
688
+ "rewards/diff": -0.06112980842590332,
689
+ "rewards/diff_abs": 1.0851820707321167,
690
+ "rewards/rejected": 0.8478114008903503,
691
+ "rewards/student_margin": 0.8513702154159546,
692
+ "rewards/teacher_margin": 0.9125000238418579,
693
+ "step": 370
694
+ },
695
+ {
696
+ "epoch": 0.46,
697
+ "grad_norm": 10.25,
698
+ "learning_rate": 3.3161723428956356e-06,
699
+ "logits/chosen": -3.3455491065979004,
700
+ "logits/rejected": -3.498779296875,
701
+ "logps/chosen": -304.9415283203125,
702
+ "logps/rejected": -242.94873046875,
703
+ "loss": 0.5174,
704
+ "rewards/accuracies": 0.800000011920929,
705
+ "rewards/chosen": 1.997698426246643,
706
+ "rewards/diff": 0.04048812389373779,
707
+ "rewards/diff_abs": 1.1324741840362549,
708
+ "rewards/rejected": 0.8811686635017395,
709
+ "rewards/student_margin": 1.1165297031402588,
710
+ "rewards/teacher_margin": 1.0760416984558105,
711
+ "step": 380
712
+ },
713
+ {
714
+ "epoch": 0.47,
715
+ "grad_norm": 10.625,
716
+ "learning_rate": 3.216637010785813e-06,
717
+ "logits/chosen": -3.564321994781494,
718
+ "logits/rejected": -3.5550827980041504,
719
+ "logps/chosen": -323.22161865234375,
720
+ "logps/rejected": -285.3416442871094,
721
+ "loss": 0.5179,
722
+ "rewards/accuracies": 0.76666659116745,
723
+ "rewards/chosen": 2.0031332969665527,
724
+ "rewards/diff": 0.2937852442264557,
725
+ "rewards/diff_abs": 1.2637544870376587,
726
+ "rewards/rejected": 0.8124731183052063,
727
+ "rewards/student_margin": 1.1906602382659912,
728
+ "rewards/teacher_margin": 0.8968750238418579,
729
+ "step": 390
730
+ },
731
+ {
732
+ "epoch": 0.48,
733
+ "grad_norm": 12.9375,
734
+ "learning_rate": 3.115847799262494e-06,
735
+ "logits/chosen": -3.467402696609497,
736
+ "logits/rejected": -3.590373992919922,
737
+ "logps/chosen": -257.94512939453125,
738
+ "logps/rejected": -220.92965698242188,
739
+ "loss": 0.5129,
740
+ "rewards/accuracies": 0.8333333730697632,
741
+ "rewards/chosen": 1.5651861429214478,
742
+ "rewards/diff": 0.25105172395706177,
743
+ "rewards/diff_abs": 0.9998427629470825,
744
+ "rewards/rejected": 0.43444690108299255,
745
+ "rewards/student_margin": 1.1307392120361328,
746
+ "rewards/teacher_margin": 0.879687488079071,
747
+ "step": 400
748
+ },
749
+ {
750
+ "epoch": 0.49,
751
+ "grad_norm": 10.25,
752
+ "learning_rate": 3.0139810563450094e-06,
753
+ "logits/chosen": -3.6093788146972656,
754
+ "logits/rejected": -3.6794228553771973,
755
+ "logps/chosen": -293.86090087890625,
756
+ "logps/rejected": -235.68692016601562,
757
+ "loss": 0.516,
758
+ "rewards/accuracies": 0.7666666507720947,
759
+ "rewards/chosen": 1.6038920879364014,
760
+ "rewards/diff": 0.06115199252963066,
761
+ "rewards/diff_abs": 0.9691001772880554,
762
+ "rewards/rejected": 0.7916983366012573,
763
+ "rewards/student_margin": 0.8121936917304993,
764
+ "rewards/teacher_margin": 0.7510417103767395,
765
+ "step": 410
766
+ },
767
+ {
768
+ "epoch": 0.5,
769
+ "grad_norm": 9.8125,
770
+ "learning_rate": 2.911215015378752e-06,
771
+ "logits/chosen": -3.5684292316436768,
772
+ "logits/rejected": -3.6296639442443848,
773
+ "logps/chosen": -225.4886016845703,
774
+ "logps/rejected": -186.40719604492188,
775
+ "loss": 0.5008,
776
+ "rewards/accuracies": 0.7666667103767395,
777
+ "rewards/chosen": 1.4102319478988647,
778
+ "rewards/diff": 0.22086882591247559,
779
+ "rewards/diff_abs": 1.04868483543396,
780
+ "rewards/rejected": 0.43415483832359314,
781
+ "rewards/student_margin": 0.9760771989822388,
782
+ "rewards/teacher_margin": 0.7552083730697632,
783
+ "step": 420
784
+ },
785
+ {
786
+ "epoch": 0.51,
787
+ "grad_norm": 10.875,
788
+ "learning_rate": 2.8077294831853547e-06,
789
+ "logits/chosen": -3.450024127960205,
790
+ "logits/rejected": -3.508530378341675,
791
+ "logps/chosen": -287.51263427734375,
792
+ "logps/rejected": -215.53939819335938,
793
+ "loss": 0.5224,
794
+ "rewards/accuracies": 0.6333333253860474,
795
+ "rewards/chosen": 1.462050199508667,
796
+ "rewards/diff": -0.2909145951271057,
797
+ "rewards/diff_abs": 1.0944832563400269,
798
+ "rewards/rejected": 0.7868188619613647,
799
+ "rewards/student_margin": 0.6752313375473022,
800
+ "rewards/teacher_margin": 0.9661458134651184,
801
+ "step": 430
802
+ },
803
+ {
804
+ "epoch": 0.53,
805
+ "grad_norm": 11.0625,
806
+ "learning_rate": 2.703705525459806e-06,
807
+ "logits/chosen": -3.5202553272247314,
808
+ "logits/rejected": -3.5470759868621826,
809
+ "logps/chosen": -221.18173217773438,
810
+ "logps/rejected": -204.56344604492188,
811
+ "loss": 0.5345,
812
+ "rewards/accuracies": 0.8333333134651184,
813
+ "rewards/chosen": 1.5964858531951904,
814
+ "rewards/diff": 0.1700429618358612,
815
+ "rewards/diff_abs": 0.6282828450202942,
816
+ "rewards/rejected": 0.6587344408035278,
817
+ "rewards/student_margin": 0.9377514123916626,
818
+ "rewards/teacher_margin": 0.767708420753479,
819
+ "step": 440
820
+ },
821
+ {
822
+ "epoch": 0.54,
823
+ "grad_norm": 11.5,
824
+ "learning_rate": 2.599325149964946e-06,
825
+ "logits/chosen": -3.427098512649536,
826
+ "logits/rejected": -3.5964770317077637,
827
+ "logps/chosen": -338.41900634765625,
828
+ "logps/rejected": -305.21978759765625,
829
+ "loss": 0.5261,
830
+ "rewards/accuracies": 0.5666666626930237,
831
+ "rewards/chosen": 1.980444312095642,
832
+ "rewards/diff": -0.2440481185913086,
833
+ "rewards/diff_abs": 0.9519003033638,
834
+ "rewards/rejected": 1.4953259229660034,
835
+ "rewards/student_margin": 0.485118567943573,
836
+ "rewards/teacher_margin": 0.7291667461395264,
837
+ "step": 450
838
+ },
839
+ {
840
+ "epoch": 0.55,
841
+ "grad_norm": 10.875,
842
+ "learning_rate": 2.4947709880776607e-06,
843
+ "logits/chosen": -3.465344190597534,
844
+ "logits/rejected": -3.593451738357544,
845
+ "logps/chosen": -249.97262573242188,
846
+ "logps/rejected": -215.36184692382812,
847
+ "loss": 0.5113,
848
+ "rewards/accuracies": 0.7333333492279053,
849
+ "rewards/chosen": 1.3433548212051392,
850
+ "rewards/diff": 0.33395522832870483,
851
+ "rewards/diff_abs": 1.4617677927017212,
852
+ "rewards/rejected": 0.1708579957485199,
853
+ "rewards/student_margin": 1.172497034072876,
854
+ "rewards/teacher_margin": 0.8385416865348816,
855
+ "step": 460
856
+ },
857
+ {
858
+ "epoch": 0.56,
859
+ "grad_norm": 10.5,
860
+ "learning_rate": 2.3902259752439462e-06,
861
+ "logits/chosen": -3.506533145904541,
862
+ "logits/rejected": -3.5754833221435547,
863
+ "logps/chosen": -280.00299072265625,
864
+ "logps/rejected": -243.15451049804688,
865
+ "loss": 0.5074,
866
+ "rewards/accuracies": 0.699999988079071,
867
+ "rewards/chosen": 1.4691600799560547,
868
+ "rewards/diff": -0.009477054700255394,
869
+ "rewards/diff_abs": 1.2979676723480225,
870
+ "rewards/rejected": 0.6395747661590576,
871
+ "rewards/student_margin": 0.8295854330062866,
872
+ "rewards/teacher_margin": 0.839062511920929,
873
+ "step": 470
874
+ },
875
+ {
876
+ "epoch": 0.57,
877
+ "grad_norm": 11.125,
878
+ "learning_rate": 2.2858730309019594e-06,
879
+ "logits/chosen": -3.401517868041992,
880
+ "logits/rejected": -3.449411392211914,
881
+ "logps/chosen": -333.2916564941406,
882
+ "logps/rejected": -242.51858520507812,
883
+ "loss": 0.5146,
884
+ "rewards/accuracies": 0.699999988079071,
885
+ "rewards/chosen": 2.0638175010681152,
886
+ "rewards/diff": 0.23633404076099396,
887
+ "rewards/diff_abs": 1.1042144298553467,
888
+ "rewards/rejected": 0.9806085824966431,
889
+ "rewards/student_margin": 1.0832091569900513,
890
+ "rewards/teacher_margin": 0.846875011920929,
891
+ "step": 480
892
+ },
893
+ {
894
+ "epoch": 0.59,
895
+ "grad_norm": 11.25,
896
+ "learning_rate": 2.181894738433076e-06,
897
+ "logits/chosen": -3.5467307567596436,
898
+ "logits/rejected": -3.588332414627075,
899
+ "logps/chosen": -248.4571990966797,
900
+ "logps/rejected": -221.55154418945312,
901
+ "loss": 0.5411,
902
+ "rewards/accuracies": 0.7333332896232605,
903
+ "rewards/chosen": 1.626908540725708,
904
+ "rewards/diff": -0.0030008137691766024,
905
+ "rewards/diff_abs": 1.0150421857833862,
906
+ "rewards/rejected": 0.7426697015762329,
907
+ "rewards/student_margin": 0.8842388391494751,
908
+ "rewards/teacher_margin": 0.8872395753860474,
909
+ "step": 490
910
+ },
911
+ {
912
+ "epoch": 0.6,
913
+ "grad_norm": 10.625,
914
+ "learning_rate": 2.078473025700937e-06,
915
+ "logits/chosen": -3.5422046184539795,
916
+ "logits/rejected": -3.618915557861328,
917
+ "logps/chosen": -197.5839385986328,
918
+ "logps/rejected": -168.53799438476562,
919
+ "loss": 0.5448,
920
+ "rewards/accuracies": 0.7000000476837158,
921
+ "rewards/chosen": 0.9767719507217407,
922
+ "rewards/diff": 0.020980846136808395,
923
+ "rewards/diff_abs": 1.2862763404846191,
924
+ "rewards/rejected": 0.22818705439567566,
925
+ "rewards/student_margin": 0.748585045337677,
926
+ "rewards/teacher_margin": 0.7276042103767395,
927
+ "step": 500
928
+ },
929
+ {
930
+ "epoch": 0.61,
931
+ "grad_norm": 11.6875,
932
+ "learning_rate": 1.975788846737431e-06,
933
+ "logits/chosen": -3.4971141815185547,
934
+ "logits/rejected": -3.526686191558838,
935
+ "logps/chosen": -224.8160400390625,
936
+ "logps/rejected": -224.65371704101562,
937
+ "loss": 0.523,
938
+ "rewards/accuracies": 0.7333333492279053,
939
+ "rewards/chosen": 1.1530336141586304,
940
+ "rewards/diff": -0.030303645879030228,
941
+ "rewards/diff_abs": 1.0633232593536377,
942
+ "rewards/rejected": 0.4067746698856354,
943
+ "rewards/student_margin": 0.7462589144706726,
944
+ "rewards/teacher_margin": 0.7765625715255737,
945
+ "step": 510
946
+ },
947
+ {
948
+ "epoch": 0.62,
949
+ "grad_norm": 10.1875,
950
+ "learning_rate": 1.8740218651325714e-06,
951
+ "logits/chosen": -3.4748759269714355,
952
+ "logits/rejected": -3.4663357734680176,
953
+ "logps/chosen": -258.1708679199219,
954
+ "logps/rejected": -236.91549682617188,
955
+ "loss": 0.5224,
956
+ "rewards/accuracies": 0.7666666507720947,
957
+ "rewards/chosen": 1.9041097164154053,
958
+ "rewards/diff": 0.3186507225036621,
959
+ "rewards/diff_abs": 1.0768160820007324,
960
+ "rewards/rejected": 0.7318129539489746,
961
+ "rewards/student_margin": 1.1722967624664307,
962
+ "rewards/teacher_margin": 0.853645920753479,
963
+ "step": 520
964
+ },
965
+ {
966
+ "epoch": 0.63,
967
+ "grad_norm": 11.1875,
968
+ "learning_rate": 1.7733501396822178e-06,
969
+ "logits/chosen": -3.5963053703308105,
970
+ "logits/rejected": -3.566746234893799,
971
+ "logps/chosen": -200.7073211669922,
972
+ "logps/rejected": -181.52761840820312,
973
+ "loss": 0.5364,
974
+ "rewards/accuracies": 0.6666666269302368,
975
+ "rewards/chosen": 1.2579277753829956,
976
+ "rewards/diff": -0.2456444799900055,
977
+ "rewards/diff_abs": 1.066627025604248,
978
+ "rewards/rejected": 0.5113847851753235,
979
+ "rewards/student_margin": 0.7465430498123169,
980
+ "rewards/teacher_margin": 0.9921875,
981
+ "step": 530
982
+ },
983
+ {
984
+ "epoch": 0.65,
985
+ "grad_norm": 10.8125,
986
+ "learning_rate": 1.6739498128436563e-06,
987
+ "logits/chosen": -3.5266900062561035,
988
+ "logits/rejected": -3.5792396068573,
989
+ "logps/chosen": -277.3493957519531,
990
+ "logps/rejected": -250.41488647460938,
991
+ "loss": 0.51,
992
+ "rewards/accuracies": 0.800000011920929,
993
+ "rewards/chosen": 1.8075897693634033,
994
+ "rewards/diff": 0.4050876498222351,
995
+ "rewards/diff_abs": 1.2073490619659424,
996
+ "rewards/rejected": 0.4259396195411682,
997
+ "rewards/student_margin": 1.3816502094268799,
998
+ "rewards/teacher_margin": 0.9765625,
999
+ "step": 540
1000
+ },
1001
+ {
1002
+ "epoch": 0.66,
1003
+ "grad_norm": 10.3125,
1004
+ "learning_rate": 1.5759948025441535e-06,
1005
+ "logits/chosen": -3.3835601806640625,
1006
+ "logits/rejected": -3.446404218673706,
1007
+ "logps/chosen": -268.1842041015625,
1008
+ "logps/rejected": -229.45700073242188,
1009
+ "loss": 0.5225,
1010
+ "rewards/accuracies": 0.800000011920929,
1011
+ "rewards/chosen": 1.4978923797607422,
1012
+ "rewards/diff": 0.04186774417757988,
1013
+ "rewards/diff_abs": 1.2465837001800537,
1014
+ "rewards/rejected": 0.4945663511753082,
1015
+ "rewards/student_margin": 1.003326177597046,
1016
+ "rewards/teacher_margin": 0.9614583849906921,
1017
+ "step": 550
1018
+ },
1019
+ {
1020
+ "epoch": 0.67,
1021
+ "grad_norm": 10.0,
1022
+ "learning_rate": 1.479656497881698e-06,
1023
+ "logits/chosen": -3.572722911834717,
1024
+ "logits/rejected": -3.628993511199951,
1025
+ "logps/chosen": -231.67037963867188,
1026
+ "logps/rejected": -189.6853790283203,
1027
+ "loss": 0.4966,
1028
+ "rewards/accuracies": 0.6333333253860474,
1029
+ "rewards/chosen": 1.3352339267730713,
1030
+ "rewards/diff": -0.23537194728851318,
1031
+ "rewards/diff_abs": 1.2142590284347534,
1032
+ "rewards/rejected": 0.7659183740615845,
1033
+ "rewards/student_margin": 0.5693155527114868,
1034
+ "rewards/teacher_margin": 0.8046875,
1035
+ "step": 560
1036
+ },
1037
+ {
1038
+ "epoch": 0.68,
1039
+ "grad_norm": 10.9375,
1040
+ "learning_rate": 1.3851034592503648e-06,
1041
+ "logits/chosen": -3.4025959968566895,
1042
+ "logits/rejected": -3.5293147563934326,
1043
+ "logps/chosen": -274.0171203613281,
1044
+ "logps/rejected": -199.73716735839844,
1045
+ "loss": 0.5341,
1046
+ "rewards/accuracies": 0.7333332300186157,
1047
+ "rewards/chosen": 1.475367784500122,
1048
+ "rewards/diff": 0.08657832443714142,
1049
+ "rewards/diff_abs": 0.861635684967041,
1050
+ "rewards/rejected": 0.5617061257362366,
1051
+ "rewards/student_margin": 0.9136616587638855,
1052
+ "rewards/teacher_margin": 0.82708340883255,
1053
+ "step": 570
1054
+ },
1055
+ {
1056
+ "epoch": 0.69,
1057
+ "grad_norm": 11.75,
1058
+ "learning_rate": 1.2925011234149859e-06,
1059
+ "logits/chosen": -3.494055986404419,
1060
+ "logits/rejected": -3.6171557903289795,
1061
+ "logps/chosen": -205.4471435546875,
1062
+ "logps/rejected": -157.2217559814453,
1063
+ "loss": 0.5149,
1064
+ "rewards/accuracies": 0.699999988079071,
1065
+ "rewards/chosen": 1.3053886890411377,
1066
+ "rewards/diff": 0.1055004820227623,
1067
+ "rewards/diff_abs": 1.2906330823898315,
1068
+ "rewards/rejected": 0.30822157859802246,
1069
+ "rewards/student_margin": 0.9971672296524048,
1070
+ "rewards/teacher_margin": 0.8916667699813843,
1071
+ "step": 580
1072
+ },
1073
+ {
1074
+ "epoch": 0.71,
1075
+ "grad_norm": 10.9375,
1076
+ "learning_rate": 1.2020115140511436e-06,
1077
+ "logits/chosen": -3.38506817817688,
1078
+ "logits/rejected": -3.3986282348632812,
1079
+ "logps/chosen": -287.0667419433594,
1080
+ "logps/rejected": -257.8066711425781,
1081
+ "loss": 0.5156,
1082
+ "rewards/accuracies": 0.8333333134651184,
1083
+ "rewards/chosen": 1.4152857065200806,
1084
+ "rewards/diff": 0.06253819167613983,
1085
+ "rewards/diff_abs": 0.9119114875793457,
1086
+ "rewards/rejected": 0.5600391626358032,
1087
+ "rewards/student_margin": 0.8552465438842773,
1088
+ "rewards/teacher_margin": 0.7927082777023315,
1089
+ "step": 590
1090
+ },
1091
+ {
1092
+ "epoch": 0.72,
1093
+ "grad_norm": 10.375,
1094
+ "learning_rate": 1.11379295825695e-06,
1095
+ "logits/chosen": -3.4194672107696533,
1096
+ "logits/rejected": -3.4630534648895264,
1097
+ "logps/chosen": -275.80841064453125,
1098
+ "logps/rejected": -247.9615478515625,
1099
+ "loss": 0.5304,
1100
+ "rewards/accuracies": 0.5666666626930237,
1101
+ "rewards/chosen": 1.539294958114624,
1102
+ "rewards/diff": -0.13074719905853271,
1103
+ "rewards/diff_abs": 0.9170882105827332,
1104
+ "rewards/rejected": 0.9658753275871277,
1105
+ "rewards/student_margin": 0.5734195113182068,
1106
+ "rewards/teacher_margin": 0.7041667699813843,
1107
+ "step": 600
1108
+ },
1109
+ {
1110
+ "epoch": 0.73,
1111
+ "grad_norm": 10.875,
1112
+ "learning_rate": 1.0279998095326188e-06,
1113
+ "logits/chosen": -3.5342392921447754,
1114
+ "logits/rejected": -3.6398627758026123,
1115
+ "logps/chosen": -282.4989013671875,
1116
+ "logps/rejected": -232.01602172851562,
1117
+ "loss": 0.5212,
1118
+ "rewards/accuracies": 0.7666666507720947,
1119
+ "rewards/chosen": 1.5444934368133545,
1120
+ "rewards/diff": 0.07039527595043182,
1121
+ "rewards/diff_abs": 0.9651015996932983,
1122
+ "rewards/rejected": 0.6813898682594299,
1123
+ "rewards/student_margin": 0.8631036877632141,
1124
+ "rewards/teacher_margin": 0.7927082777023315,
1125
+ "step": 610
1126
+ },
1127
+ {
1128
+ "epoch": 0.74,
1129
+ "grad_norm": 10.375,
1130
+ "learning_rate": 9.447821777125376e-07,
1131
+ "logits/chosen": -3.4949746131896973,
1132
+ "logits/rejected": -3.4841065406799316,
1133
+ "logps/chosen": -235.8585968017578,
1134
+ "logps/rejected": -223.1814422607422,
1135
+ "loss": 0.516,
1136
+ "rewards/accuracies": 0.7333332896232605,
1137
+ "rewards/chosen": 1.090267539024353,
1138
+ "rewards/diff": -0.26063305139541626,
1139
+ "rewards/diff_abs": 1.1585631370544434,
1140
+ "rewards/rejected": 0.42069220542907715,
1141
+ "rewards/student_margin": 0.6695753335952759,
1142
+ "rewards/teacher_margin": 0.9302083849906921,
1143
+ "step": 620
1144
+ },
1145
+ {
1146
+ "epoch": 0.75,
1147
+ "grad_norm": 13.0625,
1148
+ "learning_rate": 8.642856663223537e-07,
1149
+ "logits/chosen": -3.6274445056915283,
1150
+ "logits/rejected": -3.7008399963378906,
1151
+ "logps/chosen": -279.4967346191406,
1152
+ "logps/rejected": -193.52825927734375,
1153
+ "loss": 0.5387,
1154
+ "rewards/accuracies": 0.8333331942558289,
1155
+ "rewards/chosen": 1.603075623512268,
1156
+ "rewards/diff": 0.050136499106884,
1157
+ "rewards/diff_abs": 0.9624601602554321,
1158
+ "rewards/rejected": 0.5263765454292297,
1159
+ "rewards/student_margin": 1.0766990184783936,
1160
+ "rewards/teacher_margin": 1.0265624523162842,
1161
+ "step": 630
1162
+ },
1163
+ {
1164
+ "epoch": 0.77,
1165
+ "grad_norm": 9.0625,
1166
+ "learning_rate": 7.866511178206202e-07,
1167
+ "logits/chosen": -3.556497097015381,
1168
+ "logits/rejected": -3.509038209915161,
1169
+ "logps/chosen": -290.5392150878906,
1170
+ "logps/rejected": -260.15875244140625,
1171
+ "loss": 0.5064,
1172
+ "rewards/accuracies": 0.5666666626930237,
1173
+ "rewards/chosen": 1.7650150060653687,
1174
+ "rewards/diff": -0.24563904106616974,
1175
+ "rewards/diff_abs": 1.3142454624176025,
1176
+ "rewards/rejected": 1.0887789726257324,
1177
+ "rewards/student_margin": 0.6762360334396362,
1178
+ "rewards/teacher_margin": 0.921875,
1179
+ "step": 640
1180
+ },
1181
+ {
1182
+ "epoch": 0.78,
1183
+ "grad_norm": 11.375,
1184
+ "learning_rate": 7.120143671707535e-07,
1185
+ "logits/chosen": -3.6382040977478027,
1186
+ "logits/rejected": -3.5810635089874268,
1187
+ "logps/chosen": -239.7833709716797,
1188
+ "logps/rejected": -191.7135772705078,
1189
+ "loss": 0.5104,
1190
+ "rewards/accuracies": 0.699999988079071,
1191
+ "rewards/chosen": 1.411780595779419,
1192
+ "rewards/diff": 0.06282065808773041,
1193
+ "rewards/diff_abs": 0.9871824383735657,
1194
+ "rewards/rejected": 0.6317722797393799,
1195
+ "rewards/student_margin": 0.7800081968307495,
1196
+ "rewards/teacher_margin": 0.7171874642372131,
1197
+ "step": 650
1198
+ },
1199
+ {
1200
+ "epoch": 0.79,
1201
+ "grad_norm": 10.0,
1202
+ "learning_rate": 6.405060041744557e-07,
1203
+ "logits/chosen": -3.4055404663085938,
1204
+ "logits/rejected": -3.4413161277770996,
1205
+ "logps/chosen": -315.9834899902344,
1206
+ "logps/rejected": -280.46771240234375,
1207
+ "loss": 0.5225,
1208
+ "rewards/accuracies": 0.699999988079071,
1209
+ "rewards/chosen": 1.993194580078125,
1210
+ "rewards/diff": 0.17093998193740845,
1211
+ "rewards/diff_abs": 1.2821754217147827,
1212
+ "rewards/rejected": 0.9248586893081665,
1213
+ "rewards/student_margin": 1.068335771560669,
1214
+ "rewards/teacher_margin": 0.8973957896232605,
1215
+ "step": 660
1216
+ },
1217
+ {
1218
+ "epoch": 0.8,
1219
+ "grad_norm": 11.375,
1220
+ "learning_rate": 5.72251144982447e-07,
1221
+ "logits/chosen": -3.526531219482422,
1222
+ "logits/rejected": -3.4491629600524902,
1223
+ "logps/chosen": -256.53570556640625,
1224
+ "logps/rejected": -279.9180603027344,
1225
+ "loss": 0.4906,
1226
+ "rewards/accuracies": 0.699999988079071,
1227
+ "rewards/chosen": 1.8302761316299438,
1228
+ "rewards/diff": 0.47802895307540894,
1229
+ "rewards/diff_abs": 1.4019181728363037,
1230
+ "rewards/rejected": 0.6298513412475586,
1231
+ "rewards/student_margin": 1.2004249095916748,
1232
+ "rewards/teacher_margin": 0.7223958969116211,
1233
+ "step": 670
1234
+ },
1235
+ {
1236
+ "epoch": 0.81,
1237
+ "grad_norm": 11.0,
1238
+ "learning_rate": 5.07369213182295e-07,
1239
+ "logits/chosen": -3.4488792419433594,
1240
+ "logits/rejected": -3.5185768604278564,
1241
+ "logps/chosen": -257.1033630371094,
1242
+ "logps/rejected": -192.66726684570312,
1243
+ "loss": 0.5175,
1244
+ "rewards/accuracies": 0.6999999284744263,
1245
+ "rewards/chosen": 1.1292977333068848,
1246
+ "rewards/diff": 0.002992980182170868,
1247
+ "rewards/diff_abs": 1.323104977607727,
1248
+ "rewards/rejected": 0.12526309490203857,
1249
+ "rewards/student_margin": 1.0040346384048462,
1250
+ "rewards/teacher_margin": 1.0010416507720947,
1251
+ "step": 680
1252
+ },
1253
+ {
1254
+ "epoch": 0.83,
1255
+ "grad_norm": 9.4375,
1256
+ "learning_rate": 4.4597373084635717e-07,
1257
+ "logits/chosen": -3.419471263885498,
1258
+ "logits/rejected": -3.40906023979187,
1259
+ "logps/chosen": -296.2270812988281,
1260
+ "logps/rejected": -242.5465850830078,
1261
+ "loss": 0.508,
1262
+ "rewards/accuracies": 0.6333333253860474,
1263
+ "rewards/chosen": 1.3991469144821167,
1264
+ "rewards/diff": -0.30492842197418213,
1265
+ "rewards/diff_abs": 1.2077829837799072,
1266
+ "rewards/rejected": 0.8592837452888489,
1267
+ "rewards/student_margin": 0.5398632884025574,
1268
+ "rewards/teacher_margin": 0.8447917103767395,
1269
+ "step": 690
1270
+ },
1271
+ {
1272
+ "epoch": 0.84,
1273
+ "grad_norm": 10.8125,
1274
+ "learning_rate": 3.88172119905435e-07,
1275
+ "logits/chosen": -3.573878526687622,
1276
+ "logits/rejected": -3.4745190143585205,
1277
+ "logps/chosen": -265.7789001464844,
1278
+ "logps/rejected": -231.770263671875,
1279
+ "loss": 0.5098,
1280
+ "rewards/accuracies": 0.699999988079071,
1281
+ "rewards/chosen": 1.3960720300674438,
1282
+ "rewards/diff": 0.12062199413776398,
1283
+ "rewards/diff_abs": 0.9348724484443665,
1284
+ "rewards/rejected": 0.4093042314052582,
1285
+ "rewards/student_margin": 0.9867678880691528,
1286
+ "rewards/teacher_margin": 0.86614590883255,
1287
+ "step": 700
1288
+ },
1289
+ {
1290
+ "epoch": 0.85,
1291
+ "grad_norm": 8.875,
1292
+ "learning_rate": 3.3406551419567584e-07,
1293
+ "logits/chosen": -3.4966206550598145,
1294
+ "logits/rejected": -3.4546685218811035,
1295
+ "logps/chosen": -286.70538330078125,
1296
+ "logps/rejected": -290.0686950683594,
1297
+ "loss": 0.4928,
1298
+ "rewards/accuracies": 0.6999999284744263,
1299
+ "rewards/chosen": 1.7340021133422852,
1300
+ "rewards/diff": 0.5937216281890869,
1301
+ "rewards/diff_abs": 1.2892600297927856,
1302
+ "rewards/rejected": 0.528822124004364,
1303
+ "rewards/student_margin": 1.2051799297332764,
1304
+ "rewards/teacher_margin": 0.6114583611488342,
1305
+ "step": 710
1306
+ },
1307
+ {
1308
+ "epoch": 0.86,
1309
+ "grad_norm": 10.0,
1310
+ "learning_rate": 2.837485825075728e-07,
1311
+ "logits/chosen": -3.5864462852478027,
1312
+ "logits/rejected": -3.6643550395965576,
1313
+ "logps/chosen": -302.582763671875,
1314
+ "logps/rejected": -229.8857879638672,
1315
+ "loss": 0.523,
1316
+ "rewards/accuracies": 0.6333333253860474,
1317
+ "rewards/chosen": 1.3810118436813354,
1318
+ "rewards/diff": -0.328029602766037,
1319
+ "rewards/diff_abs": 1.2970329523086548,
1320
+ "rewards/rejected": 0.7757080793380737,
1321
+ "rewards/student_margin": 0.6053037643432617,
1322
+ "rewards/teacher_margin": 0.9333332777023315,
1323
+ "step": 720
1324
+ },
1325
+ {
1326
+ "epoch": 0.87,
1327
+ "grad_norm": 10.25,
1328
+ "learning_rate": 2.37309362946673e-07,
1329
+ "logits/chosen": -3.469447612762451,
1330
+ "logits/rejected": -3.529064655303955,
1331
+ "logps/chosen": -201.64187622070312,
1332
+ "logps/rejected": -166.51071166992188,
1333
+ "loss": 0.5148,
1334
+ "rewards/accuracies": 0.8333333134651184,
1335
+ "rewards/chosen": 0.9876018762588501,
1336
+ "rewards/diff": 0.09399458020925522,
1337
+ "rewards/diff_abs": 0.9936901330947876,
1338
+ "rewards/rejected": 0.07589896023273468,
1339
+ "rewards/student_margin": 0.9117029309272766,
1340
+ "rewards/teacher_margin": 0.8177083134651184,
1341
+ "step": 730
1342
+ },
1343
+ {
1344
+ "epoch": 0.89,
1345
+ "grad_norm": 9.375,
1346
+ "learning_rate": 1.948291088958032e-07,
1347
+ "logits/chosen": -3.3895657062530518,
1348
+ "logits/rejected": -3.42724347114563,
1349
+ "logps/chosen": -260.0352783203125,
1350
+ "logps/rejected": -211.0215606689453,
1351
+ "loss": 0.5147,
1352
+ "rewards/accuracies": 0.6666666269302368,
1353
+ "rewards/chosen": 1.2599786520004272,
1354
+ "rewards/diff": -0.1080915778875351,
1355
+ "rewards/diff_abs": 1.3488976955413818,
1356
+ "rewards/rejected": 0.6868201494216919,
1357
+ "rewards/student_margin": 0.5731583833694458,
1358
+ "rewards/teacher_margin": 0.6812499761581421,
1359
+ "step": 740
1360
+ },
1361
+ {
1362
+ "epoch": 0.9,
1363
+ "grad_norm": 11.8125,
1364
+ "learning_rate": 1.5638214684833923e-07,
1365
+ "logits/chosen": -3.3913490772247314,
1366
+ "logits/rejected": -3.495671510696411,
1367
+ "logps/chosen": -283.8644714355469,
1368
+ "logps/rejected": -207.0258026123047,
1369
+ "loss": 0.5143,
1370
+ "rewards/accuracies": 0.800000011920929,
1371
+ "rewards/chosen": 1.7908437252044678,
1372
+ "rewards/diff": 0.14334459602832794,
1373
+ "rewards/diff_abs": 1.1933469772338867,
1374
+ "rewards/rejected": 0.6808325052261353,
1375
+ "rewards/student_margin": 1.1100112199783325,
1376
+ "rewards/teacher_margin": 0.9666666984558105,
1377
+ "step": 750
1378
+ },
1379
+ {
1380
+ "epoch": 0.91,
1381
+ "grad_norm": 12.25,
1382
+ "learning_rate": 1.220357463612501e-07,
1383
+ "logits/chosen": -3.5331833362579346,
1384
+ "logits/rejected": -3.496367931365967,
1385
+ "logps/chosen": -264.0143127441406,
1386
+ "logps/rejected": -205.7065887451172,
1387
+ "loss": 0.5444,
1388
+ "rewards/accuracies": 0.800000011920929,
1389
+ "rewards/chosen": 1.7263545989990234,
1390
+ "rewards/diff": 0.29247918725013733,
1391
+ "rewards/diff_abs": 0.9457036852836609,
1392
+ "rewards/rejected": 0.7515836358070374,
1393
+ "rewards/student_margin": 0.9747709035873413,
1394
+ "rewards/teacher_margin": 0.6822917461395264,
1395
+ "step": 760
1396
+ },
1397
+ {
1398
+ "epoch": 0.92,
1399
+ "grad_norm": 10.6875,
1400
+ "learning_rate": 9.185000235546443e-08,
1401
+ "logits/chosen": -3.5394463539123535,
1402
+ "logits/rejected": -3.528214931488037,
1403
+ "logps/chosen": -222.8568572998047,
1404
+ "logps/rejected": -199.4870147705078,
1405
+ "loss": 0.5187,
1406
+ "rewards/accuracies": 0.5666666626930237,
1407
+ "rewards/chosen": 1.2547296285629272,
1408
+ "rewards/diff": -0.4252438545227051,
1409
+ "rewards/diff_abs": 0.9407827258110046,
1410
+ "rewards/rejected": 0.9872652292251587,
1411
+ "rewards/student_margin": 0.2674644887447357,
1412
+ "rewards/teacher_margin": 0.6927083730697632,
1413
+ "step": 770
1414
+ },
1415
+ {
1416
+ "epoch": 0.93,
1417
+ "grad_norm": 11.5625,
1418
+ "learning_rate": 6.587772996949876e-08,
1419
+ "logits/chosen": -3.472136974334717,
1420
+ "logits/rejected": -3.594128370285034,
1421
+ "logps/chosen": -274.9361877441406,
1422
+ "logps/rejected": -187.9529266357422,
1423
+ "loss": 0.5248,
1424
+ "rewards/accuracies": 0.7666667103767395,
1425
+ "rewards/chosen": 1.5798580646514893,
1426
+ "rewards/diff": 0.20931819081306458,
1427
+ "rewards/diff_abs": 0.9370753169059753,
1428
+ "rewards/rejected": 0.4444982409477234,
1429
+ "rewards/student_margin": 1.1353598833084106,
1430
+ "rewards/teacher_margin": 0.9260417819023132,
1431
+ "step": 780
1432
+ },
1433
+ {
1434
+ "epoch": 0.95,
1435
+ "grad_norm": 11.375,
1436
+ "learning_rate": 4.416437215030628e-08,
1437
+ "logits/chosen": -3.366868257522583,
1438
+ "logits/rejected": -3.4336013793945312,
1439
+ "logps/chosen": -232.9638214111328,
1440
+ "logps/rejected": -209.1346893310547,
1441
+ "loss": 0.5262,
1442
+ "rewards/accuracies": 0.7333332896232605,
1443
+ "rewards/chosen": 1.4733285903930664,
1444
+ "rewards/diff": -0.1160399541258812,
1445
+ "rewards/diff_abs": 1.4289867877960205,
1446
+ "rewards/rejected": 0.5935351252555847,
1447
+ "rewards/student_margin": 0.8797934651374817,
1448
+ "rewards/teacher_margin": 0.9958333969116211,
1449
+ "step": 790
1450
+ },
1451
+ {
1452
+ "epoch": 0.96,
1453
+ "grad_norm": 11.3125,
1454
+ "learning_rate": 2.6747920143047056e-08,
1455
+ "logits/chosen": -3.585693836212158,
1456
+ "logits/rejected": -3.666484832763672,
1457
+ "logps/chosen": -243.569091796875,
1458
+ "logps/rejected": -184.44293212890625,
1459
+ "loss": 0.5029,
1460
+ "rewards/accuracies": 0.76666659116745,
1461
+ "rewards/chosen": 1.4393314123153687,
1462
+ "rewards/diff": 0.24042055010795593,
1463
+ "rewards/diff_abs": 1.126199722290039,
1464
+ "rewards/rejected": 0.07599426060914993,
1465
+ "rewards/student_margin": 1.3633372783660889,
1466
+ "rewards/teacher_margin": 1.1229166984558105,
1467
+ "step": 800
1468
+ },
1469
+ {
1470
+ "epoch": 0.97,
1471
+ "grad_norm": 12.0,
1472
+ "learning_rate": 1.3658847018884758e-08,
1473
+ "logits/chosen": -3.3958117961883545,
1474
+ "logits/rejected": -3.488321304321289,
1475
+ "logps/chosen": -304.1349792480469,
1476
+ "logps/rejected": -259.19927978515625,
1477
+ "loss": 0.5219,
1478
+ "rewards/accuracies": 0.6000000238418579,
1479
+ "rewards/chosen": 1.6452767848968506,
1480
+ "rewards/diff": -0.22879931330680847,
1481
+ "rewards/diff_abs": 1.1713745594024658,
1482
+ "rewards/rejected": 1.1782429218292236,
1483
+ "rewards/student_margin": 0.4670340418815613,
1484
+ "rewards/teacher_margin": 0.6958333849906921,
1485
+ "step": 810
1486
+ },
1487
+ {
1488
+ "epoch": 0.98,
1489
+ "grad_norm": 9.6875,
1490
+ "learning_rate": 4.920054357119841e-09,
1491
+ "logits/chosen": -3.4455044269561768,
1492
+ "logits/rejected": -3.4982807636260986,
1493
+ "logps/chosen": -252.8186798095703,
1494
+ "logps/rejected": -198.8025665283203,
1495
+ "loss": 0.5123,
1496
+ "rewards/accuracies": 0.800000011920929,
1497
+ "rewards/chosen": 1.7279932498931885,
1498
+ "rewards/diff": 0.12250219285488129,
1499
+ "rewards/diff_abs": 0.8293051719665527,
1500
+ "rewards/rejected": 0.713824450969696,
1501
+ "rewards/student_margin": 1.0141689777374268,
1502
+ "rewards/teacher_margin": 0.8916667699813843,
1503
+ "step": 820
1504
+ },
1505
+ {
1506
+ "epoch": 0.99,
1507
+ "grad_norm": 10.875,
1508
+ "learning_rate": 5.468321749468875e-10,
1509
+ "logits/chosen": -3.456815242767334,
1510
+ "logits/rejected": -3.5720372200012207,
1511
+ "logps/chosen": -234.10720825195312,
1512
+ "logps/rejected": -200.9365692138672,
1513
+ "loss": 0.5071,
1514
+ "rewards/accuracies": 0.699999988079071,
1515
+ "rewards/chosen": 0.9176043272018433,
1516
+ "rewards/diff": -0.2213120013475418,
1517
+ "rewards/diff_abs": 0.8564842343330383,
1518
+ "rewards/rejected": 0.30974966287612915,
1519
+ "rewards/student_margin": 0.6078547239303589,
1520
+ "rewards/teacher_margin": 0.8291667103767395,
1521
+ "step": 830
1522
+ },
1523
+ {
1524
+ "epoch": 1.0,
1525
+ "step": 835,
1526
+ "total_flos": 0.0,
1527
+ "train_loss": 0.54411713648699,
1528
+ "train_runtime": 5965.6032,
1529
+ "train_samples_per_second": 26.864,
1530
+ "train_steps_per_second": 0.14
1531
+ }
1532
+ ],
1533
+ "logging_steps": 10,
1534
+ "max_steps": 835,
1535
+ "num_input_tokens_seen": 0,
1536
+ "num_train_epochs": 1,
1537
+ "save_steps": 1000000000000000000000000000000000,
1538
+ "total_flos": 0.0,
1539
+ "train_batch_size": 3,
1540
+ "trial_name": null,
1541
+ "trial_params": null
1542
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2a6770c7dd90a1a65bf69073d2cba72eaa9cd4b306fc155687832cbc6046a635
3
+ size 5240