Go4miii committed on
Commit
ffe002c
1 Parent(s): 30fe73d

commit from root

Browse files
Baichuan-13B-Chat-lora-Consulting/README.md ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ library_name: peft
3
+ ---
4
+ ## Training procedure
5
+
6
+ ### Framework versions
7
+
8
+
9
+ - PEFT 0.5.0
Baichuan-13B-Chat-lora-Consulting/adapter_config.json ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "auto_mapping": null,
3
+ "base_model_name_or_path": "baichuan-inc/Baichuan-13B-Chat",
4
+ "bias": "none",
5
+ "fan_in_fan_out": false,
6
+ "inference_mode": true,
7
+ "init_lora_weights": true,
8
+ "layers_pattern": null,
9
+ "layers_to_transform": null,
10
+ "lora_alpha": 32.0,
11
+ "lora_dropout": 0.1,
12
+ "modules_to_save": null,
13
+ "peft_type": "LORA",
14
+ "r": 8,
15
+ "revision": null,
16
+ "target_modules": [
17
+ "W_pack"
18
+ ],
19
+ "task_type": "CAUSAL_LM"
20
+ }
Baichuan-13B-Chat-lora-Consulting/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:25e792f17d845ba58c7d966070b66e38bc368f6bd33794bfa668856df922d3db
3
+ size 26243422
Baichuan-13B-Chat-lora-Consulting/all_results.json ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 2.0,
3
+ "eval_loss": 1.0848859548568726,
4
+ "eval_runtime": 23.9261,
5
+ "eval_samples_per_second": 26.457,
6
+ "eval_steps_per_second": 3.344,
7
+ "train_loss": 1.1088850573849165,
8
+ "train_runtime": 12573.3474,
9
+ "train_samples_per_second": 9.954,
10
+ "train_steps_per_second": 0.155
11
+ }
Baichuan-13B-Chat-lora-Consulting/eval_results.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 2.0,
3
+ "eval_loss": 1.0848859548568726,
4
+ "eval_runtime": 23.9261,
5
+ "eval_samples_per_second": 26.457,
6
+ "eval_steps_per_second": 3.344
7
+ }
Baichuan-13B-Chat-lora-Consulting/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": true,
6
+ "rstrip": false,
7
+ "single_word": true
8
+ },
9
+ "eos_token": {
10
+ "content": "</s>",
11
+ "lstrip": false,
12
+ "normalized": true,
13
+ "rstrip": false,
14
+ "single_word": true
15
+ },
16
+ "pad_token": {
17
+ "content": "<unk>",
18
+ "lstrip": false,
19
+ "normalized": true,
20
+ "rstrip": false,
21
+ "single_word": true
22
+ },
23
+ "unk_token": {
24
+ "content": "<unk>",
25
+ "lstrip": false,
26
+ "normalized": true,
27
+ "rstrip": false,
28
+ "single_word": true
29
+ }
30
+ }
Baichuan-13B-Chat-lora-Consulting/tokenization_baichuan.py ADDED
@@ -0,0 +1,232 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2023, Baichuan Intelligent Technology. All rights reserved.
2
+
3
+ import os
4
+ from shutil import copyfile
5
+ from typing import Any, Dict, List, Optional, Tuple
6
+
7
+ import sentencepiece as spm
8
+ from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
9
+ from transformers.utils import logging
10
+
11
+
12
+ logger = logging.get_logger(__name__)
13
+
14
+ VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
15
+
16
+ PRETRAINED_VOCAB_FILES_MAP = {
17
+ "vocab_file": {},
18
+ "tokenizer_file": {},
19
+ }
20
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {}
21
+
22
+
23
+ class BaichuanTokenizer(PreTrainedTokenizer):
24
+ """
25
+ Construct a Baichuan tokenizer. Based on byte-level Byte-Pair-Encoding.
26
+
27
+ Args:
28
+ vocab_file (`str`):
29
+ Path to the vocabulary file.
30
+ """
31
+
32
+ vocab_files_names = VOCAB_FILES_NAMES
33
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
34
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
35
+ model_input_names = ["input_ids", "attention_mask"]
36
+
37
+ def __init__(
38
+ self,
39
+ vocab_file,
40
+ unk_token="<unk>",
41
+ bos_token="<s>",
42
+ eos_token="</s>",
43
+ pad_token=None,
44
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
45
+ add_bos_token=True,
46
+ add_eos_token=False,
47
+ clean_up_tokenization_spaces=False,
48
+ **kwargs,
49
+ ):
50
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
51
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
52
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
53
+ unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
54
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
55
+ super().__init__(
56
+ bos_token=bos_token,
57
+ eos_token=eos_token,
58
+ unk_token=unk_token,
59
+ pad_token=pad_token,
60
+ add_bos_token=add_bos_token,
61
+ add_eos_token=add_eos_token,
62
+ sp_model_kwargs=self.sp_model_kwargs,
63
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
64
+ **kwargs,
65
+ )
66
+ self.vocab_file = vocab_file
67
+ self.add_bos_token = add_bos_token
68
+ self.add_eos_token = add_eos_token
69
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
70
+ self.sp_model.Load(vocab_file)
71
+
72
+ def __getstate__(self):
73
+ state = self.__dict__.copy()
74
+ state["sp_model"] = None
75
+ return state
76
+
77
+ def __setstate__(self, d):
78
+ self.__dict__ = d
79
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
80
+ self.sp_model.Load(self.vocab_file)
81
+
82
+ @property
83
+ def vocab_size(self):
84
+ """Returns vocab size"""
85
+ return self.sp_model.get_piece_size()
86
+
87
+ def get_vocab(self):
88
+ """Returns vocab as a dict"""
89
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
90
+ vocab.update(self.added_tokens_encoder)
91
+ return vocab
92
+
93
+ def _tokenize(self, text):
94
+ """Returns a tokenized string."""
95
+ return self.sp_model.encode(text, out_type=str)
96
+
97
+ def _convert_token_to_id(self, token):
98
+ """Converts a token (str) in an id using the vocab."""
99
+ return self.sp_model.piece_to_id(token)
100
+
101
+ def _convert_id_to_token(self, index):
102
+ """Converts an index (integer) in a token (str) using the vocab."""
103
+ token = self.sp_model.IdToPiece(index)
104
+ return token
105
+
106
+ def convert_tokens_to_string(self, tokens):
107
+ """Converts a sequence of tokens (string) in a single string."""
108
+ current_sub_tokens = []
109
+ out_string = ""
110
+ prev_is_special = False
111
+ for i, token in enumerate(tokens):
112
+ # make sure that special tokens are not decoded using sentencepiece model
113
+ if token in self.all_special_tokens:
114
+ if not prev_is_special and i != 0:
115
+ out_string += " "
116
+ out_string += self.sp_model.decode(current_sub_tokens) + token
117
+ prev_is_special = True
118
+ current_sub_tokens = []
119
+ else:
120
+ current_sub_tokens.append(token)
121
+ prev_is_special = False
122
+ out_string += self.sp_model.decode(current_sub_tokens)
123
+ return out_string
124
+
125
+ def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
126
+ """
127
+ Save the vocabulary and special tokens file to a directory.
128
+
129
+ Args:
130
+ save_directory (`str`):
131
+ The directory in which to save the vocabulary.
132
+
133
+ Returns:
134
+ `Tuple(str)`: Paths to the files saved.
135
+ """
136
+ if not os.path.isdir(save_directory):
137
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
138
+ return
139
+ out_vocab_file = os.path.join(
140
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
141
+ )
142
+
143
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
144
+ copyfile(self.vocab_file, out_vocab_file)
145
+ elif not os.path.isfile(self.vocab_file):
146
+ with open(out_vocab_file, "wb") as fi:
147
+ content_spiece_model = self.sp_model.serialized_model_proto()
148
+ fi.write(content_spiece_model)
149
+
150
+ return (out_vocab_file,)
151
+
152
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
153
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
154
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
155
+
156
+ output = bos_token_id + token_ids_0 + eos_token_id
157
+
158
+ if token_ids_1 is not None:
159
+ output = output + bos_token_id + token_ids_1 + eos_token_id
160
+
161
+ return output
162
+
163
+ def get_special_tokens_mask(
164
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
165
+ ) -> List[int]:
166
+ """
167
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
168
+ special tokens using the tokenizer `prepare_for_model` method.
169
+
170
+ Args:
171
+ token_ids_0 (`List[int]`):
172
+ List of IDs.
173
+ token_ids_1 (`List[int]`, *optional*):
174
+ Optional second list of IDs for sequence pairs.
175
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
176
+ Whether or not the token list is already formatted with special tokens for the model.
177
+
178
+ Returns:
179
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
180
+ """
181
+ if already_has_special_tokens:
182
+ return super().get_special_tokens_mask(
183
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
184
+ )
185
+
186
+ bos_token_id = [1] if self.add_bos_token else []
187
+ eos_token_id = [1] if self.add_eos_token else []
188
+
189
+ if token_ids_1 is None:
190
+ return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
191
+ return (
192
+ bos_token_id
193
+ + ([0] * len(token_ids_0))
194
+ + eos_token_id
195
+ + bos_token_id
196
+ + ([0] * len(token_ids_1))
197
+ + eos_token_id
198
+ )
199
+
200
+ def create_token_type_ids_from_sequences(
201
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
202
+ ) -> List[int]:
203
+ """
204
+ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
205
+ sequence pair mask has the following format:
206
+
207
+ ```
208
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
209
+ | first sequence | second sequence |
210
+ ```
211
+
212
+ if token_ids_1 is None, only returns the first portion of the mask (0s).
213
+
214
+ Args:
215
+ token_ids_0 (`List[int]`):
216
+ List of ids.
217
+ token_ids_1 (`List[int]`, *optional*):
218
+ Optional second list of IDs for sequence pairs.
219
+
220
+ Returns:
221
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
222
+ """
223
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
224
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
225
+
226
+ output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
227
+
228
+ if token_ids_1 is not None:
229
+ output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
230
+
231
+ return output
232
+
Baichuan-13B-Chat-lora-Consulting/tokenizer.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f7d1ab69d25c74644af5c5e4dcd1cc6e96d33783dbd257b6bdea55b643c72813
3
+ size 1136765
Baichuan-13B-Chat-lora-Consulting/tokenizer_config.json ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": false,
3
+ "add_eos_token": false,
4
+ "auto_map": {
5
+ "AutoTokenizer": [
6
+ "tokenization_baichuan.BaichuanTokenizer",
7
+ null
8
+ ]
9
+ },
10
+ "bos_token": {
11
+ "__type": "AddedToken",
12
+ "content": "<s>",
13
+ "lstrip": false,
14
+ "normalized": true,
15
+ "rstrip": false,
16
+ "single_word": true
17
+ },
18
+ "clean_up_tokenization_spaces": false,
19
+ "eos_token": {
20
+ "__type": "AddedToken",
21
+ "content": "</s>",
22
+ "lstrip": false,
23
+ "normalized": true,
24
+ "rstrip": false,
25
+ "single_word": true
26
+ },
27
+ "model_max_length": 4096,
28
+ "pad_token": {
29
+ "__type": "AddedToken",
30
+ "content": "<unk>",
31
+ "lstrip": false,
32
+ "normalized": true,
33
+ "rstrip": false,
34
+ "single_word": true
35
+ },
36
+ "padding_side": "right",
37
+ "sp_model_kwargs": {},
38
+ "split_special_tokens": false,
39
+ "tokenizer_class": "BaichuanTokenizer",
40
+ "unk_token": {
41
+ "__type": "AddedToken",
42
+ "content": "<unk>",
43
+ "lstrip": false,
44
+ "normalized": true,
45
+ "rstrip": false,
46
+ "single_word": true
47
+ }
48
+ }
Baichuan-13B-Chat-lora-Consulting/train_results.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 2.0,
3
+ "train_loss": 1.1088850573849165,
4
+ "train_runtime": 12573.3474,
5
+ "train_samples_per_second": 9.954,
6
+ "train_steps_per_second": 0.155
7
+ }
Baichuan-13B-Chat-lora-Consulting/trainer_log.jsonl ADDED
@@ -0,0 +1,216 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {"current_steps": 10, "total_steps": 1954, "loss": 1.5804, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9996768893414955e-05, "epoch": 0.01, "percentage": 0.51, "elapsed_time": "0:00:59", "remaining_time": "3:11:23"}
2
+ {"current_steps": 20, "total_steps": 1954, "loss": 1.36, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.998707640886381e-05, "epoch": 0.02, "percentage": 1.02, "elapsed_time": "0:01:59", "remaining_time": "3:12:19"}
3
+ {"current_steps": 30, "total_steps": 1954, "loss": 1.2505, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.99709250517426e-05, "epoch": 0.03, "percentage": 1.54, "elapsed_time": "0:02:56", "remaining_time": "3:08:21"}
4
+ {"current_steps": 40, "total_steps": 1954, "loss": 1.2495, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.994831899699185e-05, "epoch": 0.04, "percentage": 2.05, "elapsed_time": "0:03:53", "remaining_time": "3:05:58"}
5
+ {"current_steps": 50, "total_steps": 1954, "loss": 1.2084, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9919264088017345e-05, "epoch": 0.05, "percentage": 2.56, "elapsed_time": "0:04:55", "remaining_time": "3:07:20"}
6
+ {"current_steps": 60, "total_steps": 1954, "loss": 1.1923, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.98837678351797e-05, "epoch": 0.06, "percentage": 3.07, "elapsed_time": "0:05:56", "remaining_time": "3:07:18"}
7
+ {"current_steps": 70, "total_steps": 1954, "loss": 1.1929, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.984183941385301e-05, "epoch": 0.07, "percentage": 3.58, "elapsed_time": "0:06:56", "remaining_time": "3:06:52"}
8
+ {"current_steps": 80, "total_steps": 1954, "loss": 1.1736, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.979348966205315e-05, "epoch": 0.08, "percentage": 4.09, "elapsed_time": "0:08:00", "remaining_time": "3:07:36"}
9
+ {"current_steps": 90, "total_steps": 1954, "loss": 1.1626, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9738731077636225e-05, "epoch": 0.09, "percentage": 4.61, "elapsed_time": "0:08:57", "remaining_time": "3:05:35"}
10
+ {"current_steps": 100, "total_steps": 1954, "loss": 1.1779, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9677577815068056e-05, "epoch": 0.1, "percentage": 5.12, "elapsed_time": "0:09:56", "remaining_time": "3:04:25"}
11
+ {"current_steps": 100, "total_steps": 1954, "loss": null, "eval_loss": 1.1614866256713867, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 0.1, "percentage": 5.12, "elapsed_time": "0:09:56", "remaining_time": "3:04:25"}
12
+ {"current_steps": 110, "total_steps": 1954, "loss": 1.1527, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9610045681765385e-05, "epoch": 0.11, "percentage": 5.63, "elapsed_time": "0:11:23", "remaining_time": "3:10:58"}
13
+ {"current_steps": 120, "total_steps": 1954, "loss": 1.1638, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.953615213400987e-05, "epoch": 0.12, "percentage": 6.14, "elapsed_time": "0:12:29", "remaining_time": "3:11:00"}
14
+ {"current_steps": 130, "total_steps": 1954, "loss": 1.155, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.945591627243581e-05, "epoch": 0.13, "percentage": 6.65, "elapsed_time": "0:13:26", "remaining_time": "3:08:29"}
15
+ {"current_steps": 140, "total_steps": 1954, "loss": 1.1672, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9369358837092853e-05, "epoch": 0.14, "percentage": 7.16, "elapsed_time": "0:14:25", "remaining_time": "3:06:49"}
16
+ {"current_steps": 150, "total_steps": 1954, "loss": 1.1674, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.927650220208495e-05, "epoch": 0.15, "percentage": 7.68, "elapsed_time": "0:15:27", "remaining_time": "3:05:55"}
17
+ {"current_steps": 160, "total_steps": 1954, "loss": 1.1511, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.917737036978689e-05, "epoch": 0.16, "percentage": 8.19, "elapsed_time": "0:16:32", "remaining_time": "3:05:28"}
18
+ {"current_steps": 170, "total_steps": 1954, "loss": 1.1497, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.907198896463996e-05, "epoch": 0.17, "percentage": 8.7, "elapsed_time": "0:17:33", "remaining_time": "3:04:12"}
19
+ {"current_steps": 180, "total_steps": 1954, "loss": 1.1271, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.8960385226528335e-05, "epoch": 0.18, "percentage": 9.21, "elapsed_time": "0:18:37", "remaining_time": "3:03:35"}
20
+ {"current_steps": 190, "total_steps": 1954, "loss": 1.1888, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.8842588003737854e-05, "epoch": 0.19, "percentage": 9.72, "elapsed_time": "0:19:35", "remaining_time": "3:01:49"}
21
+ {"current_steps": 200, "total_steps": 1954, "loss": 1.1494, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.87186277454991e-05, "epoch": 0.2, "percentage": 10.24, "elapsed_time": "0:20:39", "remaining_time": "3:01:11"}
22
+ {"current_steps": 200, "total_steps": 1954, "loss": null, "eval_loss": 1.1362522840499878, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 0.2, "percentage": 10.24, "elapsed_time": "0:20:39", "remaining_time": "3:01:11"}
23
+ {"current_steps": 210, "total_steps": 1954, "loss": 1.1425, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.858853649411662e-05, "epoch": 0.21, "percentage": 10.75, "elapsed_time": "0:22:06", "remaining_time": "3:03:34"}
24
+ {"current_steps": 220, "total_steps": 1954, "loss": 1.1591, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.845234787668632e-05, "epoch": 0.23, "percentage": 11.26, "elapsed_time": "0:23:09", "remaining_time": "3:02:29"}
25
+ {"current_steps": 230, "total_steps": 1954, "loss": 1.118, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.831009709640329e-05, "epoch": 0.24, "percentage": 11.77, "elapsed_time": "0:24:11", "remaining_time": "3:01:21"}
26
+ {"current_steps": 240, "total_steps": 1954, "loss": 1.1469, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.8161820923462165e-05, "epoch": 0.25, "percentage": 12.28, "elapsed_time": "0:25:11", "remaining_time": "2:59:52"}
27
+ {"current_steps": 250, "total_steps": 1954, "loss": 1.1128, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.800755768555244e-05, "epoch": 0.26, "percentage": 12.79, "elapsed_time": "0:26:09", "remaining_time": "2:58:15"}
28
+ {"current_steps": 260, "total_steps": 1954, "loss": 1.152, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.784734725795123e-05, "epoch": 0.27, "percentage": 13.31, "elapsed_time": "0:27:11", "remaining_time": "2:57:12"}
29
+ {"current_steps": 270, "total_steps": 1954, "loss": 1.125, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.768123105321596e-05, "epoch": 0.28, "percentage": 13.82, "elapsed_time": "0:28:13", "remaining_time": "2:56:02"}
30
+ {"current_steps": 280, "total_steps": 1954, "loss": 1.123, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.7509252010479645e-05, "epoch": 0.29, "percentage": 14.33, "elapsed_time": "0:29:19", "remaining_time": "2:55:22"}
31
+ {"current_steps": 290, "total_steps": 1954, "loss": 1.1294, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.7331454584351686e-05, "epoch": 0.3, "percentage": 14.84, "elapsed_time": "0:30:19", "remaining_time": "2:54:02"}
32
+ {"current_steps": 300, "total_steps": 1954, "loss": 1.1299, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.714788473342685e-05, "epoch": 0.31, "percentage": 15.35, "elapsed_time": "0:31:20", "remaining_time": "2:52:48"}
33
+ {"current_steps": 300, "total_steps": 1954, "loss": null, "eval_loss": 1.1234092712402344, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 0.31, "percentage": 15.35, "elapsed_time": "0:31:20", "remaining_time": "2:52:48"}
34
+ {"current_steps": 310, "total_steps": 1954, "loss": 1.1195, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.695858990840544e-05, "epoch": 0.32, "percentage": 15.86, "elapsed_time": "0:32:49", "remaining_time": "2:54:03"}
35
+ {"current_steps": 320, "total_steps": 1954, "loss": 1.1233, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.6763619039827936e-05, "epoch": 0.33, "percentage": 16.38, "elapsed_time": "0:33:55", "remaining_time": "2:53:15"}
36
+ {"current_steps": 330, "total_steps": 1954, "loss": 1.1281, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.6563022525426905e-05, "epoch": 0.34, "percentage": 16.89, "elapsed_time": "0:34:56", "remaining_time": "2:51:55"}
37
+ {"current_steps": 340, "total_steps": 1954, "loss": 1.0921, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.6356852217099856e-05, "epoch": 0.35, "percentage": 17.4, "elapsed_time": "0:35:52", "remaining_time": "2:50:20"}
38
+ {"current_steps": 350, "total_steps": 1954, "loss": 1.1106, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.614516140750604e-05, "epoch": 0.36, "percentage": 17.91, "elapsed_time": "0:36:53", "remaining_time": "2:49:04"}
39
+ {"current_steps": 360, "total_steps": 1954, "loss": 1.1103, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.592800481629097e-05, "epoch": 0.37, "percentage": 18.42, "elapsed_time": "0:37:54", "remaining_time": "2:47:49"}
40
+ {"current_steps": 370, "total_steps": 1954, "loss": 1.1162, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.570543857594201e-05, "epoch": 0.38, "percentage": 18.94, "elapsed_time": "0:38:58", "remaining_time": "2:46:52"}
41
+ {"current_steps": 380, "total_steps": 1954, "loss": 1.1341, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.547752021727873e-05, "epoch": 0.39, "percentage": 19.45, "elapsed_time": "0:40:08", "remaining_time": "2:46:17"}
42
+ {"current_steps": 390, "total_steps": 1954, "loss": 1.1098, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.52443086545819e-05, "epoch": 0.4, "percentage": 19.96, "elapsed_time": "0:41:14", "remaining_time": "2:45:24"}
43
+ {"current_steps": 400, "total_steps": 1954, "loss": 1.1109, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.5005864170364784e-05, "epoch": 0.41, "percentage": 20.47, "elapsed_time": "0:42:12", "remaining_time": "2:43:58"}
44
+ {"current_steps": 400, "total_steps": 1954, "loss": null, "eval_loss": 1.115441083908081, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 0.41, "percentage": 20.47, "elapsed_time": "0:42:12", "remaining_time": "2:43:58"}
45
+ {"current_steps": 410, "total_steps": 1954, "loss": 1.1114, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.476224839979084e-05, "epoch": 0.42, "percentage": 20.98, "elapsed_time": "0:43:36", "remaining_time": "2:44:13"}
46
+ {"current_steps": 420, "total_steps": 1954, "loss": 1.132, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.4513524314741714e-05, "epoch": 0.43, "percentage": 21.49, "elapsed_time": "0:44:37", "remaining_time": "2:43:00"}
47
+ {"current_steps": 430, "total_steps": 1954, "loss": 1.1282, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.425975620753973e-05, "epoch": 0.44, "percentage": 22.01, "elapsed_time": "0:45:40", "remaining_time": "2:41:53"}
48
+ {"current_steps": 440, "total_steps": 1954, "loss": 1.1135, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.4001009674329054e-05, "epoch": 0.45, "percentage": 22.52, "elapsed_time": "0:46:42", "remaining_time": "2:40:43"}
49
+ {"current_steps": 450, "total_steps": 1954, "loss": 1.1366, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.373735159811988e-05, "epoch": 0.46, "percentage": 23.03, "elapsed_time": "0:47:39", "remaining_time": "2:39:17"}
50
+ {"current_steps": 460, "total_steps": 1954, "loss": 1.1052, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.3468850131499917e-05, "epoch": 0.47, "percentage": 23.54, "elapsed_time": "0:48:43", "remaining_time": "2:38:13"}
51
+ {"current_steps": 470, "total_steps": 1954, "loss": 1.1148, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.31955746790177e-05, "epoch": 0.48, "percentage": 24.05, "elapsed_time": "0:49:47", "remaining_time": "2:37:14"}
52
+ {"current_steps": 480, "total_steps": 1954, "loss": 1.1329, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.291759587924237e-05, "epoch": 0.49, "percentage": 24.56, "elapsed_time": "0:50:54", "remaining_time": "2:36:20"}
53
+ {"current_steps": 490, "total_steps": 1954, "loss": 1.1064, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.263498558650434e-05, "epoch": 0.5, "percentage": 25.08, "elapsed_time": "0:51:54", "remaining_time": "2:35:06"}
54
+ {"current_steps": 500, "total_steps": 1954, "loss": 1.12, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.234781685232187e-05, "epoch": 0.51, "percentage": 25.59, "elapsed_time": "0:52:58", "remaining_time": "2:34:03"}
55
+ {"current_steps": 500, "total_steps": 1954, "loss": null, "eval_loss": 1.108825922012329, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 0.51, "percentage": 25.59, "elapsed_time": "0:52:58", "remaining_time": "2:34:03"}
56
+ {"current_steps": 510, "total_steps": 1954, "loss": 1.0966, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.205616390651796e-05, "epoch": 0.52, "percentage": 26.1, "elapsed_time": "0:54:27", "remaining_time": "2:34:12"}
57
+ {"current_steps": 520, "total_steps": 1954, "loss": 1.1145, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.1760102138032956e-05, "epoch": 0.53, "percentage": 26.61, "elapsed_time": "0:55:31", "remaining_time": "2:33:07"}
58
+ {"current_steps": 530, "total_steps": 1954, "loss": 1.1018, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.145970807543721e-05, "epoch": 0.54, "percentage": 27.12, "elapsed_time": "0:56:36", "remaining_time": "2:32:04"}
59
+ {"current_steps": 540, "total_steps": 1954, "loss": 1.1063, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.115505936714943e-05, "epoch": 0.55, "percentage": 27.64, "elapsed_time": "0:57:45", "remaining_time": "2:31:14"}
60
+ {"current_steps": 550, "total_steps": 1954, "loss": 1.1037, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.084623476136541e-05, "epoch": 0.56, "percentage": 28.15, "elapsed_time": "0:58:44", "remaining_time": "2:29:58"}
61
+ {"current_steps": 560, "total_steps": 1954, "loss": 1.0981, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.053331408570254e-05, "epoch": 0.57, "percentage": 28.66, "elapsed_time": "0:59:55", "remaining_time": "2:29:11"}
62
+ {"current_steps": 570, "total_steps": 1954, "loss": 1.1018, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.021637822656529e-05, "epoch": 0.58, "percentage": 29.17, "elapsed_time": "1:00:54", "remaining_time": "2:27:54"}
63
+ {"current_steps": 580, "total_steps": 1954, "loss": 1.1154, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.9895509108236956e-05, "epoch": 0.59, "percentage": 29.68, "elapsed_time": "1:01:56", "remaining_time": "2:26:43"}
64
+ {"current_steps": 590, "total_steps": 1954, "loss": 1.1498, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.957078967170325e-05, "epoch": 0.6, "percentage": 30.19, "elapsed_time": "1:02:58", "remaining_time": "2:25:36"}
65
+ {"current_steps": 600, "total_steps": 1954, "loss": 1.1071, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.9242303853212944e-05, "epoch": 0.61, "percentage": 30.71, "elapsed_time": "1:04:00", "remaining_time": "2:24:26"}
66
+ {"current_steps": 600, "total_steps": 1954, "loss": null, "eval_loss": 1.103700041770935, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 0.61, "percentage": 30.71, "elapsed_time": "1:04:00", "remaining_time": "2:24:26"}
67
+ {"current_steps": 610, "total_steps": 1954, "loss": 1.1107, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.891013656258133e-05, "epoch": 0.62, "percentage": 31.22, "elapsed_time": "1:05:27", "remaining_time": "2:24:12"}
68
+ {"current_steps": 620, "total_steps": 1954, "loss": 1.0919, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.857437366124202e-05, "epoch": 0.63, "percentage": 31.73, "elapsed_time": "1:06:30", "remaining_time": "2:23:06"}
69
+ {"current_steps": 630, "total_steps": 1954, "loss": 1.1019, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.823510194005273e-05, "epoch": 0.64, "percentage": 32.24, "elapsed_time": "1:07:31", "remaining_time": "2:21:55"}
70
+ {"current_steps": 640, "total_steps": 1954, "loss": 1.1217, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.789240909686087e-05, "epoch": 0.65, "percentage": 32.75, "elapsed_time": "1:08:31", "remaining_time": "2:20:40"}
71
+ {"current_steps": 650, "total_steps": 1954, "loss": 1.1171, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.754638371383461e-05, "epoch": 0.66, "percentage": 33.27, "elapsed_time": "1:09:34", "remaining_time": "2:19:35"}
72
+ {"current_steps": 660, "total_steps": 1954, "loss": 1.0918, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.719711523456545e-05, "epoch": 0.68, "percentage": 33.78, "elapsed_time": "1:10:36", "remaining_time": "2:18:26"}
73
+ {"current_steps": 670, "total_steps": 1954, "loss": 1.1195, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.684469394094805e-05, "epoch": 0.69, "percentage": 34.29, "elapsed_time": "1:11:38", "remaining_time": "2:17:17"}
74
+ {"current_steps": 680, "total_steps": 1954, "loss": 1.1083, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.648921092984342e-05, "epoch": 0.7, "percentage": 34.8, "elapsed_time": "1:12:40", "remaining_time": "2:16:09"}
75
+ {"current_steps": 690, "total_steps": 1954, "loss": 1.0735, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.6130758089531404e-05, "epoch": 0.71, "percentage": 35.31, "elapsed_time": "1:13:40", "remaining_time": "2:14:57"}
76
+ {"current_steps": 700, "total_steps": 1954, "loss": 1.1161, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.576942807595861e-05, "epoch": 0.72, "percentage": 35.82, "elapsed_time": "1:14:42", "remaining_time": "2:13:50"}
77
+ {"current_steps": 700, "total_steps": 1954, "loss": null, "eval_loss": 1.1002016067504883, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 0.72, "percentage": 35.82, "elapsed_time": "1:14:42", "remaining_time": "2:13:50"}
78
+ {"current_steps": 710, "total_steps": 1954, "loss": 1.0867, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.540531428878795e-05, "epoch": 0.73, "percentage": 36.34, "elapsed_time": "1:16:07", "remaining_time": "2:13:22"}
79
+ {"current_steps": 720, "total_steps": 1954, "loss": 1.105, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.5038510847255846e-05, "epoch": 0.74, "percentage": 36.85, "elapsed_time": "1:17:08", "remaining_time": "2:12:13"}
80
+ {"current_steps": 730, "total_steps": 1954, "loss": 1.1156, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.466911256584355e-05, "epoch": 0.75, "percentage": 37.36, "elapsed_time": "1:18:14", "remaining_time": "2:11:11"}
81
+ {"current_steps": 740, "total_steps": 1954, "loss": 1.099, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.42972149297686e-05, "epoch": 0.76, "percentage": 37.87, "elapsed_time": "1:19:12", "remaining_time": "2:09:56"}
82
+ {"current_steps": 750, "total_steps": 1954, "loss": 1.0994, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.3922914070303076e-05, "epoch": 0.77, "percentage": 38.38, "elapsed_time": "1:20:17", "remaining_time": "2:08:54"}
83
+ {"current_steps": 760, "total_steps": 1954, "loss": 1.1177, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.354630673992473e-05, "epoch": 0.78, "percentage": 38.89, "elapsed_time": "1:21:16", "remaining_time": "2:07:41"}
84
+ {"current_steps": 770, "total_steps": 1954, "loss": 1.1095, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.316749028730757e-05, "epoch": 0.79, "percentage": 39.41, "elapsed_time": "1:22:15", "remaining_time": "2:06:28"}
85
+ {"current_steps": 780, "total_steps": 1954, "loss": 1.1083, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.278656263215836e-05, "epoch": 0.8, "percentage": 39.92, "elapsed_time": "1:23:20", "remaining_time": "2:05:27"}
86
+ {"current_steps": 790, "total_steps": 1954, "loss": 1.0836, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.24036222399055e-05, "epoch": 0.81, "percentage": 40.43, "elapsed_time": "1:24:18", "remaining_time": "2:04:12"}
87
+ {"current_steps": 800, "total_steps": 1954, "loss": 1.0982, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.2018768096246834e-05, "epoch": 0.82, "percentage": 40.94, "elapsed_time": "1:25:17", "remaining_time": "2:03:01"}
88
+ {"current_steps": 800, "total_steps": 1954, "loss": null, "eval_loss": 1.0972065925598145, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 0.82, "percentage": 40.94, "elapsed_time": "1:25:17", "remaining_time": "2:03:01"}
89
+ {"current_steps": 810, "total_steps": 1954, "loss": 1.1041, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.1632099681562996e-05, "epoch": 0.83, "percentage": 41.45, "elapsed_time": "1:26:42", "remaining_time": "2:02:28"}
90
+ {"current_steps": 820, "total_steps": 1954, "loss": 1.0974, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.1243716945202864e-05, "epoch": 0.84, "percentage": 41.97, "elapsed_time": "1:27:46", "remaining_time": "2:01:22"}
91
+ {"current_steps": 830, "total_steps": 1954, "loss": 1.0945, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.085372027964777e-05, "epoch": 0.85, "percentage": 42.48, "elapsed_time": "1:28:44", "remaining_time": "2:00:10"}
92
+ {"current_steps": 840, "total_steps": 1954, "loss": 1.1045, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.0462210494561283e-05, "epoch": 0.86, "percentage": 42.99, "elapsed_time": "1:29:46", "remaining_time": "1:59:03"}
93
+ {"current_steps": 850, "total_steps": 1954, "loss": 1.0842, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.0069288790730966e-05, "epoch": 0.87, "percentage": 43.5, "elapsed_time": "1:30:51", "remaining_time": "1:58:00"}
94
+ {"current_steps": 860, "total_steps": 1954, "loss": 1.097, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.9675056733909196e-05, "epoch": 0.88, "percentage": 44.01, "elapsed_time": "1:31:47", "remaining_time": "1:56:46"}
95
+ {"current_steps": 870, "total_steps": 1954, "loss": 1.1034, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.9279616228559542e-05, "epoch": 0.89, "percentage": 44.52, "elapsed_time": "1:32:55", "remaining_time": "1:55:46"}
96
+ {"current_steps": 880, "total_steps": 1954, "loss": 1.1033, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.8883069491515696e-05, "epoch": 0.9, "percentage": 45.04, "elapsed_time": "1:34:04", "remaining_time": "1:54:49"}
97
+ {"current_steps": 890, "total_steps": 1954, "loss": 1.0894, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.8485519025559503e-05, "epoch": 0.91, "percentage": 45.55, "elapsed_time": "1:35:03", "remaining_time": "1:53:38"}
98
+ {"current_steps": 900, "total_steps": 1954, "loss": 1.1062, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.8087067592925252e-05, "epoch": 0.92, "percentage": 46.06, "elapsed_time": "1:36:02", "remaining_time": "1:52:28"}
99
+ {"current_steps": 900, "total_steps": 1954, "loss": null, "eval_loss": 1.0946753025054932, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 0.92, "percentage": 46.06, "elapsed_time": "1:36:02", "remaining_time": "1:52:28"}
100
+ {"current_steps": 910, "total_steps": 1954, "loss": 1.1243, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.7687818188736757e-05, "epoch": 0.93, "percentage": 46.57, "elapsed_time": "1:37:26", "remaining_time": "1:51:46"}
101
+ {"current_steps": 920, "total_steps": 1954, "loss": 1.0975, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.7287874014384346e-05, "epoch": 0.94, "percentage": 47.08, "elapsed_time": "1:38:29", "remaining_time": "1:50:41"}
102
+ {"current_steps": 930, "total_steps": 1954, "loss": 1.0885, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.6887338450848448e-05, "epoch": 0.95, "percentage": 47.59, "elapsed_time": "1:39:33", "remaining_time": "1:49:37"}
103
+ {"current_steps": 940, "total_steps": 1954, "loss": 1.0938, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.64863150319768e-05, "epoch": 0.96, "percentage": 48.11, "elapsed_time": "1:40:32", "remaining_time": "1:48:27"}
104
+ {"current_steps": 950, "total_steps": 1954, "loss": 1.1014, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.6084907417722176e-05, "epoch": 0.97, "percentage": 48.62, "elapsed_time": "1:41:35", "remaining_time": "1:47:22"}
105
+ {"current_steps": 960, "total_steps": 1954, "loss": 1.105, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.5683219367347432e-05, "epoch": 0.98, "percentage": 49.13, "elapsed_time": "1:42:35", "remaining_time": "1:46:13"}
106
+ {"current_steps": 970, "total_steps": 1954, "loss": 1.0993, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.528135471260493e-05, "epoch": 0.99, "percentage": 49.64, "elapsed_time": "1:43:37", "remaining_time": "1:45:07"}
107
+ {"current_steps": 980, "total_steps": 1954, "loss": 1.0867, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.4879417330897267e-05, "epoch": 1.0, "percentage": 50.15, "elapsed_time": "1:44:45", "remaining_time": "1:44:06"}
108
+ {"current_steps": 990, "total_steps": 1954, "loss": 1.059, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.4477511118426093e-05, "epoch": 1.01, "percentage": 50.67, "elapsed_time": "1:45:47", "remaining_time": "1:43:00"}
109
+ {"current_steps": 1000, "total_steps": 1954, "loss": 1.0744, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.4075739963336182e-05, "epoch": 1.02, "percentage": 51.18, "elapsed_time": "1:46:51", "remaining_time": "1:41:56"}
110
+ {"current_steps": 1000, "total_steps": 1954, "loss": null, "eval_loss": 1.0925548076629639, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 1.02, "percentage": 51.18, "elapsed_time": "1:46:51", "remaining_time": "1:41:56"}
111
+ {"current_steps": 1010, "total_steps": 1954, "loss": 1.096, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.367420771886154e-05, "epoch": 1.03, "percentage": 51.69, "elapsed_time": "1:48:14", "remaining_time": "1:41:10"}
112
+ {"current_steps": 1020, "total_steps": 1954, "loss": 1.0873, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.3273018176480492e-05, "epoch": 1.04, "percentage": 52.2, "elapsed_time": "1:49:18", "remaining_time": "1:40:05"}
113
+ {"current_steps": 1030, "total_steps": 1954, "loss": 1.097, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.2872275039086823e-05, "epoch": 1.05, "percentage": 52.71, "elapsed_time": "1:50:17", "remaining_time": "1:38:56"}
114
+ {"current_steps": 1040, "total_steps": 1954, "loss": 1.0801, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.2472081894183744e-05, "epoch": 1.06, "percentage": 53.22, "elapsed_time": "1:51:16", "remaining_time": "1:37:47"}
115
+ {"current_steps": 1050, "total_steps": 1954, "loss": 1.1006, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.207254218710772e-05, "epoch": 1.07, "percentage": 53.74, "elapsed_time": "1:52:26", "remaining_time": "1:36:48"}
116
+ {"current_steps": 1060, "total_steps": 1954, "loss": 1.0811, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.1673759194289033e-05, "epoch": 1.08, "percentage": 54.25, "elapsed_time": "1:53:30", "remaining_time": "1:35:44"}
117
+ {"current_steps": 1070, "total_steps": 1954, "loss": 1.0969, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.1275835996556e-05, "epoch": 1.09, "percentage": 54.76, "elapsed_time": "1:54:34", "remaining_time": "1:34:39"}
118
+ {"current_steps": 1080, "total_steps": 1954, "loss": 1.0888, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.08788754524898e-05, "epoch": 1.1, "percentage": 55.27, "elapsed_time": "1:55:33", "remaining_time": "1:33:30"}
119
+ {"current_steps": 1090, "total_steps": 1954, "loss": 1.09, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.0482980171836648e-05, "epoch": 1.11, "percentage": 55.78, "elapsed_time": "1:56:40", "remaining_time": "1:32:29"}
120
+ {"current_steps": 1100, "total_steps": 1954, "loss": 1.0867, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.008825248898443e-05, "epoch": 1.13, "percentage": 56.29, "elapsed_time": "1:57:43", "remaining_time": "1:31:23"}
121
+ {"current_steps": 1100, "total_steps": 1954, "loss": null, "eval_loss": 1.0904080867767334, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 1.13, "percentage": 56.29, "elapsed_time": "1:57:43", "remaining_time": "1:31:23"}
122
+ {"current_steps": 1110, "total_steps": 1954, "loss": 1.0925, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.969479443651036e-05, "epoch": 1.14, "percentage": 56.81, "elapsed_time": "1:59:09", "remaining_time": "1:30:36"}
123
+ {"current_steps": 1120, "total_steps": 1954, "loss": 1.0827, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.930270771880679e-05, "epoch": 1.15, "percentage": 57.32, "elapsed_time": "2:00:10", "remaining_time": "1:29:29"}
124
+ {"current_steps": 1130, "total_steps": 1954, "loss": 1.0976, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.8912093685791748e-05, "epoch": 1.16, "percentage": 57.83, "elapsed_time": "2:01:11", "remaining_time": "1:28:22"}
125
+ {"current_steps": 1140, "total_steps": 1954, "loss": 1.0746, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.8523053306711203e-05, "epoch": 1.17, "percentage": 58.34, "elapsed_time": "2:02:01", "remaining_time": "1:27:07"}
126
+ {"current_steps": 1150, "total_steps": 1954, "loss": 1.0922, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.813568714403957e-05, "epoch": 1.18, "percentage": 58.85, "elapsed_time": "2:03:09", "remaining_time": "1:26:06"}
127
+ {"current_steps": 1160, "total_steps": 1954, "loss": 1.121, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.7750095327485605e-05, "epoch": 1.19, "percentage": 59.37, "elapsed_time": "2:04:10", "remaining_time": "1:25:00"}
128
+ {"current_steps": 1170, "total_steps": 1954, "loss": 1.0915, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.7366377528109895e-05, "epoch": 1.2, "percentage": 59.88, "elapsed_time": "2:05:11", "remaining_time": "1:23:53"}
129
+ {"current_steps": 1180, "total_steps": 1954, "loss": 1.0866, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.6984632932561124e-05, "epoch": 1.21, "percentage": 60.39, "elapsed_time": "2:06:10", "remaining_time": "1:22:45"}
130
+ {"current_steps": 1190, "total_steps": 1954, "loss": 1.0822, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.6604960217437398e-05, "epoch": 1.22, "percentage": 60.9, "elapsed_time": "2:07:08", "remaining_time": "1:21:37"}
131
+ {"current_steps": 1200, "total_steps": 1954, "loss": 1.0809, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.6227457523779532e-05, "epoch": 1.23, "percentage": 61.41, "elapsed_time": "2:08:13", "remaining_time": "1:20:34"}
132
+ {"current_steps": 1200, "total_steps": 1954, "loss": null, "eval_loss": 1.088823914527893, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 1.23, "percentage": 61.41, "elapsed_time": "2:08:13", "remaining_time": "1:20:34"}
133
+ {"current_steps": 1210, "total_steps": 1954, "loss": 1.0903, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.5852222431702658e-05, "epoch": 1.24, "percentage": 61.92, "elapsed_time": "2:09:39", "remaining_time": "1:19:43"}
134
+ {"current_steps": 1220, "total_steps": 1954, "loss": 1.088, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.547935193517295e-05, "epoch": 1.25, "percentage": 62.44, "elapsed_time": "2:10:46", "remaining_time": "1:18:40"}
135
+ {"current_steps": 1230, "total_steps": 1954, "loss": 1.0988, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.510894241693573e-05, "epoch": 1.26, "percentage": 62.95, "elapsed_time": "2:11:44", "remaining_time": "1:17:32"}
136
+ {"current_steps": 1240, "total_steps": 1954, "loss": 1.0935, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.4741089623601678e-05, "epoch": 1.27, "percentage": 63.46, "elapsed_time": "2:12:44", "remaining_time": "1:16:26"}
137
+ {"current_steps": 1250, "total_steps": 1954, "loss": 1.0812, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.4375888640897438e-05, "epoch": 1.28, "percentage": 63.97, "elapsed_time": "2:13:45", "remaining_time": "1:15:19"}
138
+ {"current_steps": 1260, "total_steps": 1954, "loss": 1.1036, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.4013433869087006e-05, "epoch": 1.29, "percentage": 64.48, "elapsed_time": "2:14:45", "remaining_time": "1:14:13"}
139
+ {"current_steps": 1270, "total_steps": 1954, "loss": 1.0917, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.3653818998570378e-05, "epoch": 1.3, "percentage": 64.99, "elapsed_time": "2:15:51", "remaining_time": "1:13:10"}
140
+ {"current_steps": 1280, "total_steps": 1954, "loss": 1.0697, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.329713698566567e-05, "epoch": 1.31, "percentage": 65.51, "elapsed_time": "2:16:52", "remaining_time": "1:12:04"}
141
+ {"current_steps": 1290, "total_steps": 1954, "loss": 1.092, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.2943480028580917e-05, "epoch": 1.32, "percentage": 66.02, "elapsed_time": "2:17:52", "remaining_time": "1:10:57"}
142
+ {"current_steps": 1300, "total_steps": 1954, "loss": 1.0994, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.2592939543581947e-05, "epoch": 1.33, "percentage": 66.53, "elapsed_time": "2:18:53", "remaining_time": "1:09:52"}
143
+ {"current_steps": 1300, "total_steps": 1954, "loss": null, "eval_loss": 1.0873268842697144, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 1.33, "percentage": 66.53, "elapsed_time": "2:18:53", "remaining_time": "1:09:52"}
144
+ {"current_steps": 1310, "total_steps": 1954, "loss": 1.0737, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.2245606141362318e-05, "epoch": 1.34, "percentage": 67.04, "elapsed_time": "2:20:15", "remaining_time": "1:08:57"}
145
+ {"current_steps": 1320, "total_steps": 1954, "loss": 1.0718, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.1901569603621487e-05, "epoch": 1.35, "percentage": 67.55, "elapsed_time": "2:21:24", "remaining_time": "1:07:55"}
146
+ {"current_steps": 1330, "total_steps": 1954, "loss": 1.0992, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.1560918859857247e-05, "epoch": 1.36, "percentage": 68.07, "elapsed_time": "2:22:22", "remaining_time": "1:06:47"}
147
+ {"current_steps": 1340, "total_steps": 1954, "loss": 1.0757, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.1223741964378518e-05, "epoch": 1.37, "percentage": 68.58, "elapsed_time": "2:23:25", "remaining_time": "1:05:43"}
148
+ {"current_steps": 1350, "total_steps": 1954, "loss": 1.0916, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.0890126073544276e-05, "epoch": 1.38, "percentage": 69.09, "elapsed_time": "2:24:28", "remaining_time": "1:04:38"}
149
+ {"current_steps": 1360, "total_steps": 1954, "loss": 1.1017, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.0560157423234681e-05, "epoch": 1.39, "percentage": 69.6, "elapsed_time": "2:25:28", "remaining_time": "1:03:32"}
150
+ {"current_steps": 1370, "total_steps": 1954, "loss": 1.0897, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.0233921306560029e-05, "epoch": 1.4, "percentage": 70.11, "elapsed_time": "2:26:33", "remaining_time": "1:02:28"}
151
+ {"current_steps": 1380, "total_steps": 1954, "loss": 1.1047, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.911502051813499e-06, "epoch": 1.41, "percentage": 70.62, "elapsed_time": "2:27:35", "remaining_time": "1:01:23"}
152
+ {"current_steps": 1390, "total_steps": 1954, "loss": 1.0877, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.592983000673272e-06, "epoch": 1.42, "percentage": 71.14, "elapsed_time": "2:28:42", "remaining_time": "1:00:20"}
153
+ {"current_steps": 1400, "total_steps": 1954, "loss": 1.0622, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.278446486659642e-06, "epoch": 1.43, "percentage": 71.65, "elapsed_time": "2:29:46", "remaining_time": "0:59:16"}
154
+ {"current_steps": 1400, "total_steps": 1954, "loss": null, "eval_loss": 1.0861936807632446, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 1.43, "percentage": 71.65, "elapsed_time": "2:29:46", "remaining_time": "0:59:16"}
155
+ {"current_steps": 1410, "total_steps": 1954, "loss": 1.0791, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.967973813852743e-06, "epoch": 1.44, "percentage": 72.16, "elapsed_time": "2:31:11", "remaining_time": "0:58:19"}
156
+ {"current_steps": 1420, "total_steps": 1954, "loss": 1.0952, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.66164523587637e-06, "epoch": 1.45, "percentage": 72.67, "elapsed_time": "2:32:16", "remaining_time": "0:57:15"}
157
+ {"current_steps": 1430, "total_steps": 1954, "loss": 1.0998, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.359539935153368e-06, "epoch": 1.46, "percentage": 73.18, "elapsed_time": "2:33:17", "remaining_time": "0:56:10"}
158
+ {"current_steps": 1440, "total_steps": 1954, "loss": 1.0714, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.061736002437862e-06, "epoch": 1.47, "percentage": 73.69, "elapsed_time": "2:34:15", "remaining_time": "0:55:03"}
159
+ {"current_steps": 1450, "total_steps": 1954, "loss": 1.0934, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.768310416629703e-06, "epoch": 1.48, "percentage": 74.21, "elapsed_time": "2:35:15", "remaining_time": "0:53:58"}
160
+ {"current_steps": 1460, "total_steps": 1954, "loss": 1.1074, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.479339024876283e-06, "epoch": 1.49, "percentage": 74.72, "elapsed_time": "2:36:10", "remaining_time": "0:52:50"}
161
+ {"current_steps": 1470, "total_steps": 1954, "loss": 1.0993, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.1948965229669425e-06, "epoch": 1.5, "percentage": 75.23, "elapsed_time": "2:37:10", "remaining_time": "0:51:45"}
162
+ {"current_steps": 1480, "total_steps": 1954, "loss": 1.0962, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.915056436024964e-06, "epoch": 1.51, "percentage": 75.74, "elapsed_time": "2:38:14", "remaining_time": "0:50:40"}
163
+ {"current_steps": 1490, "total_steps": 1954, "loss": 1.077, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.63989109950216e-06, "epoch": 1.52, "percentage": 76.25, "elapsed_time": "2:39:21", "remaining_time": "0:49:37"}
164
+ {"current_steps": 1500, "total_steps": 1954, "loss": 1.1079, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.3694716404810065e-06, "epoch": 1.53, "percentage": 76.77, "elapsed_time": "2:40:29", "remaining_time": "0:48:34"}
165
+ {"current_steps": 1500, "total_steps": 1954, "loss": null, "eval_loss": 1.0856590270996094, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 1.53, "percentage": 76.77, "elapsed_time": "2:40:29", "remaining_time": "0:48:34"}
166
+ {"current_steps": 1510, "total_steps": 1954, "loss": 1.0622, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.10386795928907e-06, "epoch": 1.54, "percentage": 77.28, "elapsed_time": "2:42:00", "remaining_time": "0:47:38"}
167
+ {"current_steps": 1520, "total_steps": 1954, "loss": 1.1085, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.843148711430621e-06, "epoch": 1.55, "percentage": 77.79, "elapsed_time": "2:43:04", "remaining_time": "0:46:33"}
168
+ {"current_steps": 1530, "total_steps": 1954, "loss": 1.0871, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.5873812898399546e-06, "epoch": 1.56, "percentage": 78.3, "elapsed_time": "2:44:04", "remaining_time": "0:45:28"}
169
+ {"current_steps": 1540, "total_steps": 1954, "loss": 1.1106, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.336631807461076e-06, "epoch": 1.58, "percentage": 78.81, "elapsed_time": "2:45:07", "remaining_time": "0:44:23"}
170
+ {"current_steps": 1550, "total_steps": 1954, "loss": 1.0841, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.090965080158278e-06, "epoch": 1.59, "percentage": 79.32, "elapsed_time": "2:46:12", "remaining_time": "0:43:19"}
171
+ {"current_steps": 1560, "total_steps": 1954, "loss": 1.1075, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.850444609961988e-06, "epoch": 1.6, "percentage": 79.84, "elapsed_time": "2:47:15", "remaining_time": "0:42:14"}
172
+ {"current_steps": 1570, "total_steps": 1954, "loss": 1.0983, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.615132568654215e-06, "epoch": 1.61, "percentage": 80.35, "elapsed_time": "2:48:16", "remaining_time": "0:41:09"}
173
+ {"current_steps": 1580, "total_steps": 1954, "loss": 1.0859, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.385089781697863e-06, "epoch": 1.62, "percentage": 80.86, "elapsed_time": "2:49:22", "remaining_time": "0:40:05"}
174
+ {"current_steps": 1590, "total_steps": 1954, "loss": 1.0759, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.160375712514019e-06, "epoch": 1.63, "percentage": 81.37, "elapsed_time": "2:50:22", "remaining_time": "0:39:00"}
175
+ {"current_steps": 1600, "total_steps": 1954, "loss": 1.0612, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.941048447111387e-06, "epoch": 1.64, "percentage": 81.88, "elapsed_time": "2:51:27", "remaining_time": "0:37:56"}
176
+ {"current_steps": 1600, "total_steps": 1954, "loss": null, "eval_loss": 1.085319995880127, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 1.64, "percentage": 81.88, "elapsed_time": "2:51:27", "remaining_time": "0:37:56"}
177
+ {"current_steps": 1610, "total_steps": 1954, "loss": 1.0806, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.727164679071696e-06, "epoch": 1.65, "percentage": 82.4, "elapsed_time": "2:52:49", "remaining_time": "0:36:55"}
178
+ {"current_steps": 1620, "total_steps": 1954, "loss": 1.0861, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.5187796948950384e-06, "epoch": 1.66, "percentage": 82.91, "elapsed_time": "2:53:54", "remaining_time": "0:35:51"}
179
+ {"current_steps": 1630, "total_steps": 1954, "loss": 1.0712, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.315947359708993e-06, "epoch": 1.67, "percentage": 83.42, "elapsed_time": "2:54:52", "remaining_time": "0:34:45"}
180
+ {"current_steps": 1640, "total_steps": 1954, "loss": 1.086, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.118720103345063e-06, "epoch": 1.68, "percentage": 83.93, "elapsed_time": "2:55:54", "remaining_time": "0:33:40"}
181
+ {"current_steps": 1650, "total_steps": 1954, "loss": 1.1087, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.9271489067861953e-06, "epoch": 1.69, "percentage": 84.44, "elapsed_time": "2:56:56", "remaining_time": "0:32:35"}
182
+ {"current_steps": 1660, "total_steps": 1954, "loss": 1.1036, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.7412832889887664e-06, "epoch": 1.7, "percentage": 84.95, "elapsed_time": "2:57:57", "remaining_time": "0:31:31"}
183
+ {"current_steps": 1670, "total_steps": 1954, "loss": 1.0712, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.5611712940825065e-06, "epoch": 1.71, "percentage": 85.47, "elapsed_time": "2:58:56", "remaining_time": "0:30:25"}
184
+ {"current_steps": 1680, "total_steps": 1954, "loss": 1.0695, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.3868594789516336e-06, "epoch": 1.72, "percentage": 85.98, "elapsed_time": "2:59:52", "remaining_time": "0:29:20"}
185
+ {"current_steps": 1690, "total_steps": 1954, "loss": 1.0858, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.2183929012004527e-06, "epoch": 1.73, "percentage": 86.49, "elapsed_time": "3:00:55", "remaining_time": "0:28:15"}
186
+ {"current_steps": 1700, "total_steps": 1954, "loss": 1.0839, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.055815107506451e-06, "epoch": 1.74, "percentage": 87.0, "elapsed_time": "3:02:03", "remaining_time": "0:27:12"}
187
+ {"current_steps": 1700, "total_steps": 1954, "loss": null, "eval_loss": 1.0849649906158447, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 1.74, "percentage": 87.0, "elapsed_time": "3:02:03", "remaining_time": "0:27:12"}
188
+ {"current_steps": 1710, "total_steps": 1954, "loss": 1.0925, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.899168122364023e-06, "epoch": 1.75, "percentage": 87.51, "elapsed_time": "3:03:30", "remaining_time": "0:26:11"}
189
+ {"current_steps": 1720, "total_steps": 1954, "loss": 1.0888, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.7484924372215744e-06, "epoch": 1.76, "percentage": 88.02, "elapsed_time": "3:04:36", "remaining_time": "0:25:06"}
190
+ {"current_steps": 1730, "total_steps": 1954, "loss": 1.0667, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.6038270000149903e-06, "epoch": 1.77, "percentage": 88.54, "elapsed_time": "3:05:44", "remaining_time": "0:24:03"}
191
+ {"current_steps": 1740, "total_steps": 1954, "loss": 1.0893, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.4652092051000155e-06, "epoch": 1.78, "percentage": 89.05, "elapsed_time": "3:06:44", "remaining_time": "0:22:58"}
192
+ {"current_steps": 1750, "total_steps": 1954, "loss": 1.061, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.3326748835862463e-06, "epoch": 1.79, "percentage": 89.56, "elapsed_time": "3:07:46", "remaining_time": "0:21:53"}
193
+ {"current_steps": 1760, "total_steps": 1954, "loss": 1.1043, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.2062582940751965e-06, "epoch": 1.8, "percentage": 90.07, "elapsed_time": "3:08:48", "remaining_time": "0:20:48"}
194
+ {"current_steps": 1770, "total_steps": 1954, "loss": 1.0908, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.085992113804854e-06, "epoch": 1.81, "percentage": 90.58, "elapsed_time": "3:09:48", "remaining_time": "0:19:43"}
195
+ {"current_steps": 1780, "total_steps": 1954, "loss": 1.1128, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.719074302029813e-07, "epoch": 1.82, "percentage": 91.1, "elapsed_time": "3:10:46", "remaining_time": "0:18:38"}
196
+ {"current_steps": 1790, "total_steps": 1954, "loss": 1.082, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.640337328513743e-07, "epoch": 1.83, "percentage": 91.61, "elapsed_time": "3:11:46", "remaining_time": "0:17:34"}
197
+ {"current_steps": 1800, "total_steps": 1954, "loss": 1.0702, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.623989058631459e-07, "epoch": 1.84, "percentage": 92.12, "elapsed_time": "3:12:45", "remaining_time": "0:16:29"}
198
+ {"current_steps": 1800, "total_steps": 1954, "loss": null, "eval_loss": 1.0849045515060425, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 1.84, "percentage": 92.12, "elapsed_time": "3:12:45", "remaining_time": "0:16:29"}
199
+ {"current_steps": 1810, "total_steps": 1954, "loss": 1.1089, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.67029220674989e-07, "epoch": 1.85, "percentage": 92.63, "elapsed_time": "3:14:11", "remaining_time": "0:15:26"}
200
+ {"current_steps": 1820, "total_steps": 1954, "loss": 1.0929, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.779493292563304e-07, "epoch": 1.86, "percentage": 93.14, "elapsed_time": "3:15:10", "remaining_time": "0:14:22"}
201
+ {"current_steps": 1830, "total_steps": 1954, "loss": 1.0648, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.951822577370785e-07, "epoch": 1.87, "percentage": 93.65, "elapsed_time": "3:16:09", "remaining_time": "0:13:17"}
202
+ {"current_steps": 1840, "total_steps": 1954, "loss": 1.0654, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.1874940045561194e-07, "epoch": 1.88, "percentage": 94.17, "elapsed_time": "3:17:11", "remaining_time": "0:12:13"}
203
+ {"current_steps": 1850, "total_steps": 1954, "loss": 1.0822, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.4867051442860896e-07, "epoch": 1.89, "percentage": 94.68, "elapsed_time": "3:18:15", "remaining_time": "0:11:08"}
204
+ {"current_steps": 1860, "total_steps": 1954, "loss": 1.0873, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.8496371424407975e-07, "epoch": 1.9, "percentage": 95.19, "elapsed_time": "3:19:17", "remaining_time": "0:10:04"}
205
+ {"current_steps": 1870, "total_steps": 1954, "loss": 1.0725, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.2764546737895076e-07, "epoch": 1.91, "percentage": 95.7, "elapsed_time": "3:20:26", "remaining_time": "0:09:00"}
206
+ {"current_steps": 1880, "total_steps": 1954, "loss": 1.0772, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.7673058994241432e-07, "epoch": 1.92, "percentage": 96.21, "elapsed_time": "3:21:27", "remaining_time": "0:07:55"}
207
+ {"current_steps": 1890, "total_steps": 1954, "loss": 1.07, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.3223224284613366e-07, "epoch": 1.93, "percentage": 96.72, "elapsed_time": "3:22:26", "remaining_time": "0:06:51"}
208
+ {"current_steps": 1900, "total_steps": 1954, "loss": 1.0886, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.416192840228932e-08, "epoch": 1.94, "percentage": 97.24, "elapsed_time": "3:23:28", "remaining_time": "0:05:46"}
209
+ {"current_steps": 1900, "total_steps": 1954, "loss": null, "eval_loss": 1.0848859548568726, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 1.94, "percentage": 97.24, "elapsed_time": "3:23:28", "remaining_time": "0:05:46"}
210
+ {"current_steps": 1910, "total_steps": 1954, "loss": 1.0917, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.252948735037678e-08, "epoch": 1.95, "percentage": 97.75, "elapsed_time": "3:24:51", "remaining_time": "0:04:43"}
211
+ {"current_steps": 1920, "total_steps": 1954, "loss": 1.1042, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.734309631348854e-08, "epoch": 1.96, "percentage": 98.26, "elapsed_time": "3:25:57", "remaining_time": "0:03:38"}
212
+ {"current_steps": 1930, "total_steps": 1954, "loss": 1.0908, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.8609265684738086e-08, "epoch": 1.97, "percentage": 98.77, "elapsed_time": "3:26:55", "remaining_time": "0:02:34"}
213
+ {"current_steps": 1940, "total_steps": 1954, "loss": 1.0769, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.332837944400538e-09, "epoch": 1.98, "percentage": 99.28, "elapsed_time": "3:28:00", "remaining_time": "0:01:30"}
214
+ {"current_steps": 1950, "total_steps": 1954, "loss": 1.078, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.16986408199971e-10, "epoch": 1.99, "percentage": 99.8, "elapsed_time": "3:29:03", "remaining_time": "0:00:25"}
215
+ {"current_steps": 1954, "total_steps": 1954, "loss": null, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 2.0, "percentage": 100.0, "elapsed_time": "3:29:32", "remaining_time": "0:00:00"}
216
+ {"current_steps": 80, "total_steps": 80, "loss": null, "eval_loss": 1.0848859548568726, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 2.0, "percentage": 100.0, "elapsed_time": "3:29:56", "remaining_time": "0:00:00"}
Baichuan-13B-Chat-lora-Consulting/trainer_state.json ADDED
@@ -0,0 +1,1347 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": 1.0848859548568726,
3
+ "best_model_checkpoint": "output/Baichuan-13B-Chat_lora_wqs/checkpoint-1900",
4
+ "epoch": 1.998465865507543,
5
+ "global_step": 1954,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 0.01,
12
+ "learning_rate": 4.9996768893414955e-05,
13
+ "loss": 1.5804,
14
+ "step": 10
15
+ },
16
+ {
17
+ "epoch": 0.02,
18
+ "learning_rate": 4.998707640886381e-05,
19
+ "loss": 1.36,
20
+ "step": 20
21
+ },
22
+ {
23
+ "epoch": 0.03,
24
+ "learning_rate": 4.99709250517426e-05,
25
+ "loss": 1.2505,
26
+ "step": 30
27
+ },
28
+ {
29
+ "epoch": 0.04,
30
+ "learning_rate": 4.994831899699185e-05,
31
+ "loss": 1.2495,
32
+ "step": 40
33
+ },
34
+ {
35
+ "epoch": 0.05,
36
+ "learning_rate": 4.9919264088017345e-05,
37
+ "loss": 1.2084,
38
+ "step": 50
39
+ },
40
+ {
41
+ "epoch": 0.06,
42
+ "learning_rate": 4.98837678351797e-05,
43
+ "loss": 1.1923,
44
+ "step": 60
45
+ },
46
+ {
47
+ "epoch": 0.07,
48
+ "learning_rate": 4.984183941385301e-05,
49
+ "loss": 1.1929,
50
+ "step": 70
51
+ },
52
+ {
53
+ "epoch": 0.08,
54
+ "learning_rate": 4.979348966205315e-05,
55
+ "loss": 1.1736,
56
+ "step": 80
57
+ },
58
+ {
59
+ "epoch": 0.09,
60
+ "learning_rate": 4.9738731077636225e-05,
61
+ "loss": 1.1626,
62
+ "step": 90
63
+ },
64
+ {
65
+ "epoch": 0.1,
66
+ "learning_rate": 4.9677577815068056e-05,
67
+ "loss": 1.1779,
68
+ "step": 100
69
+ },
70
+ {
71
+ "epoch": 0.1,
72
+ "eval_loss": 1.1614866256713867,
73
+ "eval_runtime": 26.6319,
74
+ "eval_samples_per_second": 23.768,
75
+ "eval_steps_per_second": 3.004,
76
+ "step": 100
77
+ },
78
+ {
79
+ "epoch": 0.11,
80
+ "learning_rate": 4.9610045681765385e-05,
81
+ "loss": 1.1527,
82
+ "step": 110
83
+ },
84
+ {
85
+ "epoch": 0.12,
86
+ "learning_rate": 4.953615213400987e-05,
87
+ "loss": 1.1638,
88
+ "step": 120
89
+ },
90
+ {
91
+ "epoch": 0.13,
92
+ "learning_rate": 4.945591627243581e-05,
93
+ "loss": 1.155,
94
+ "step": 130
95
+ },
96
+ {
97
+ "epoch": 0.14,
98
+ "learning_rate": 4.9369358837092853e-05,
99
+ "loss": 1.1672,
100
+ "step": 140
101
+ },
102
+ {
103
+ "epoch": 0.15,
104
+ "learning_rate": 4.927650220208495e-05,
105
+ "loss": 1.1674,
106
+ "step": 150
107
+ },
108
+ {
109
+ "epoch": 0.16,
110
+ "learning_rate": 4.917737036978689e-05,
111
+ "loss": 1.1511,
112
+ "step": 160
113
+ },
114
+ {
115
+ "epoch": 0.17,
116
+ "learning_rate": 4.907198896463996e-05,
117
+ "loss": 1.1497,
118
+ "step": 170
119
+ },
120
+ {
121
+ "epoch": 0.18,
122
+ "learning_rate": 4.8960385226528335e-05,
123
+ "loss": 1.1271,
124
+ "step": 180
125
+ },
126
+ {
127
+ "epoch": 0.19,
128
+ "learning_rate": 4.8842588003737854e-05,
129
+ "loss": 1.1888,
130
+ "step": 190
131
+ },
132
+ {
133
+ "epoch": 0.2,
134
+ "learning_rate": 4.87186277454991e-05,
135
+ "loss": 1.1494,
136
+ "step": 200
137
+ },
138
+ {
139
+ "epoch": 0.2,
140
+ "eval_loss": 1.1362522840499878,
141
+ "eval_runtime": 23.9991,
142
+ "eval_samples_per_second": 26.376,
143
+ "eval_steps_per_second": 3.333,
144
+ "step": 200
145
+ },
146
+ {
147
+ "epoch": 0.21,
148
+ "learning_rate": 4.858853649411662e-05,
149
+ "loss": 1.1425,
150
+ "step": 210
151
+ },
152
+ {
153
+ "epoch": 0.23,
154
+ "learning_rate": 4.845234787668632e-05,
155
+ "loss": 1.1591,
156
+ "step": 220
157
+ },
158
+ {
159
+ "epoch": 0.24,
160
+ "learning_rate": 4.831009709640329e-05,
161
+ "loss": 1.118,
162
+ "step": 230
163
+ },
164
+ {
165
+ "epoch": 0.25,
166
+ "learning_rate": 4.8161820923462165e-05,
167
+ "loss": 1.1469,
168
+ "step": 240
169
+ },
170
+ {
171
+ "epoch": 0.26,
172
+ "learning_rate": 4.800755768555244e-05,
173
+ "loss": 1.1128,
174
+ "step": 250
175
+ },
176
+ {
177
+ "epoch": 0.27,
178
+ "learning_rate": 4.784734725795123e-05,
179
+ "loss": 1.152,
180
+ "step": 260
181
+ },
182
+ {
183
+ "epoch": 0.28,
184
+ "learning_rate": 4.768123105321596e-05,
185
+ "loss": 1.125,
186
+ "step": 270
187
+ },
188
+ {
189
+ "epoch": 0.29,
190
+ "learning_rate": 4.7509252010479645e-05,
191
+ "loss": 1.123,
192
+ "step": 280
193
+ },
194
+ {
195
+ "epoch": 0.3,
196
+ "learning_rate": 4.7331454584351686e-05,
197
+ "loss": 1.1294,
198
+ "step": 290
199
+ },
200
+ {
201
+ "epoch": 0.31,
202
+ "learning_rate": 4.714788473342685e-05,
203
+ "loss": 1.1299,
204
+ "step": 300
205
+ },
206
+ {
207
+ "epoch": 0.31,
208
+ "eval_loss": 1.1234092712402344,
209
+ "eval_runtime": 23.8841,
210
+ "eval_samples_per_second": 26.503,
211
+ "eval_steps_per_second": 3.35,
212
+ "step": 300
213
+ },
214
+ {
215
+ "epoch": 0.32,
216
+ "learning_rate": 4.695858990840544e-05,
217
+ "loss": 1.1195,
218
+ "step": 310
219
+ },
220
+ {
221
+ "epoch": 0.33,
222
+ "learning_rate": 4.6763619039827936e-05,
223
+ "loss": 1.1233,
224
+ "step": 320
225
+ },
226
+ {
227
+ "epoch": 0.34,
228
+ "learning_rate": 4.6563022525426905e-05,
229
+ "loss": 1.1281,
230
+ "step": 330
231
+ },
232
+ {
233
+ "epoch": 0.35,
234
+ "learning_rate": 4.6356852217099856e-05,
235
+ "loss": 1.0921,
236
+ "step": 340
237
+ },
238
+ {
239
+ "epoch": 0.36,
240
+ "learning_rate": 4.614516140750604e-05,
241
+ "loss": 1.1106,
242
+ "step": 350
243
+ },
244
+ {
245
+ "epoch": 0.37,
246
+ "learning_rate": 4.592800481629097e-05,
247
+ "loss": 1.1103,
248
+ "step": 360
249
+ },
250
+ {
251
+ "epoch": 0.38,
252
+ "learning_rate": 4.570543857594201e-05,
253
+ "loss": 1.1162,
254
+ "step": 370
255
+ },
256
+ {
257
+ "epoch": 0.39,
258
+ "learning_rate": 4.547752021727873e-05,
259
+ "loss": 1.1341,
260
+ "step": 380
261
+ },
262
+ {
263
+ "epoch": 0.4,
264
+ "learning_rate": 4.52443086545819e-05,
265
+ "loss": 1.1098,
266
+ "step": 390
267
+ },
268
+ {
269
+ "epoch": 0.41,
270
+ "learning_rate": 4.5005864170364784e-05,
271
+ "loss": 1.1109,
272
+ "step": 400
273
+ },
274
+ {
275
+ "epoch": 0.41,
276
+ "eval_loss": 1.115441083908081,
277
+ "eval_runtime": 23.9395,
278
+ "eval_samples_per_second": 26.442,
279
+ "eval_steps_per_second": 3.342,
280
+ "step": 400
281
+ },
282
+ {
283
+ "epoch": 0.42,
284
+ "learning_rate": 4.476224839979084e-05,
285
+ "loss": 1.1114,
286
+ "step": 410
287
+ },
288
+ {
289
+ "epoch": 0.43,
290
+ "learning_rate": 4.4513524314741714e-05,
291
+ "loss": 1.132,
292
+ "step": 420
293
+ },
294
+ {
295
+ "epoch": 0.44,
296
+ "learning_rate": 4.425975620753973e-05,
297
+ "loss": 1.1282,
298
+ "step": 430
299
+ },
300
+ {
301
+ "epoch": 0.45,
302
+ "learning_rate": 4.4001009674329054e-05,
303
+ "loss": 1.1135,
304
+ "step": 440
305
+ },
306
+ {
307
+ "epoch": 0.46,
308
+ "learning_rate": 4.373735159811988e-05,
309
+ "loss": 1.1366,
310
+ "step": 450
311
+ },
312
+ {
313
+ "epoch": 0.47,
314
+ "learning_rate": 4.3468850131499917e-05,
315
+ "loss": 1.1052,
316
+ "step": 460
317
+ },
318
+ {
319
+ "epoch": 0.48,
320
+ "learning_rate": 4.31955746790177e-05,
321
+ "loss": 1.1148,
322
+ "step": 470
323
+ },
324
+ {
325
+ "epoch": 0.49,
326
+ "learning_rate": 4.291759587924237e-05,
327
+ "loss": 1.1329,
328
+ "step": 480
329
+ },
330
+ {
331
+ "epoch": 0.5,
332
+ "learning_rate": 4.263498558650434e-05,
333
+ "loss": 1.1064,
334
+ "step": 490
335
+ },
336
+ {
337
+ "epoch": 0.51,
338
+ "learning_rate": 4.234781685232187e-05,
339
+ "loss": 1.12,
340
+ "step": 500
341
+ },
342
+ {
343
+ "epoch": 0.51,
344
+ "eval_loss": 1.108825922012329,
345
+ "eval_runtime": 23.967,
346
+ "eval_samples_per_second": 26.411,
347
+ "eval_steps_per_second": 3.338,
348
+ "step": 500
349
+ },
350
+ {
351
+ "epoch": 0.52,
352
+ "learning_rate": 4.205616390651796e-05,
353
+ "loss": 1.0966,
354
+ "step": 510
355
+ },
356
+ {
357
+ "epoch": 0.53,
358
+ "learning_rate": 4.1760102138032956e-05,
359
+ "loss": 1.1145,
360
+ "step": 520
361
+ },
362
+ {
363
+ "epoch": 0.54,
364
+ "learning_rate": 4.145970807543721e-05,
365
+ "loss": 1.1018,
366
+ "step": 530
367
+ },
368
+ {
369
+ "epoch": 0.55,
370
+ "learning_rate": 4.115505936714943e-05,
371
+ "loss": 1.1063,
372
+ "step": 540
373
+ },
374
+ {
375
+ "epoch": 0.56,
376
+ "learning_rate": 4.084623476136541e-05,
377
+ "loss": 1.1037,
378
+ "step": 550
379
+ },
380
+ {
381
+ "epoch": 0.57,
382
+ "learning_rate": 4.053331408570254e-05,
383
+ "loss": 1.0981,
384
+ "step": 560
385
+ },
386
+ {
387
+ "epoch": 0.58,
388
+ "learning_rate": 4.021637822656529e-05,
389
+ "loss": 1.1018,
390
+ "step": 570
391
+ },
392
+ {
393
+ "epoch": 0.59,
394
+ "learning_rate": 3.9895509108236956e-05,
395
+ "loss": 1.1154,
396
+ "step": 580
397
+ },
398
+ {
399
+ "epoch": 0.6,
400
+ "learning_rate": 3.957078967170325e-05,
401
+ "loss": 1.1498,
402
+ "step": 590
403
+ },
404
+ {
405
+ "epoch": 0.61,
406
+ "learning_rate": 3.9242303853212944e-05,
407
+ "loss": 1.1071,
408
+ "step": 600
409
+ },
410
+ {
411
+ "epoch": 0.61,
412
+ "eval_loss": 1.103700041770935,
413
+ "eval_runtime": 23.9888,
414
+ "eval_samples_per_second": 26.387,
415
+ "eval_steps_per_second": 3.335,
416
+ "step": 600
417
+ },
418
+ {
419
+ "epoch": 0.62,
420
+ "learning_rate": 3.891013656258133e-05,
421
+ "loss": 1.1107,
422
+ "step": 610
423
+ },
424
+ {
425
+ "epoch": 0.63,
426
+ "learning_rate": 3.857437366124202e-05,
427
+ "loss": 1.0919,
428
+ "step": 620
429
+ },
430
+ {
431
+ "epoch": 0.64,
432
+ "learning_rate": 3.823510194005273e-05,
433
+ "loss": 1.1019,
434
+ "step": 630
435
+ },
436
+ {
437
+ "epoch": 0.65,
438
+ "learning_rate": 3.789240909686087e-05,
439
+ "loss": 1.1217,
440
+ "step": 640
441
+ },
442
+ {
443
+ "epoch": 0.66,
444
+ "learning_rate": 3.754638371383461e-05,
445
+ "loss": 1.1171,
446
+ "step": 650
447
+ },
448
+ {
449
+ "epoch": 0.68,
450
+ "learning_rate": 3.719711523456545e-05,
451
+ "loss": 1.0918,
452
+ "step": 660
453
+ },
454
+ {
455
+ "epoch": 0.69,
456
+ "learning_rate": 3.684469394094805e-05,
457
+ "loss": 1.1195,
458
+ "step": 670
459
+ },
460
+ {
461
+ "epoch": 0.7,
462
+ "learning_rate": 3.648921092984342e-05,
463
+ "loss": 1.1083,
464
+ "step": 680
465
+ },
466
+ {
467
+ "epoch": 0.71,
468
+ "learning_rate": 3.6130758089531404e-05,
469
+ "loss": 1.0735,
470
+ "step": 690
471
+ },
472
+ {
473
+ "epoch": 0.72,
474
+ "learning_rate": 3.576942807595861e-05,
475
+ "loss": 1.1161,
476
+ "step": 700
477
+ },
478
+ {
479
+ "epoch": 0.72,
480
+ "eval_loss": 1.1002016067504883,
481
+ "eval_runtime": 24.0418,
482
+ "eval_samples_per_second": 26.329,
483
+ "eval_steps_per_second": 3.328,
484
+ "step": 700
485
+ },
486
+ {
487
+ "epoch": 0.73,
488
+ "learning_rate": 3.540531428878795e-05,
489
+ "loss": 1.0867,
490
+ "step": 710
491
+ },
492
+ {
493
+ "epoch": 0.74,
494
+ "learning_rate": 3.5038510847255846e-05,
495
+ "loss": 1.105,
496
+ "step": 720
497
+ },
498
+ {
499
+ "epoch": 0.75,
500
+ "learning_rate": 3.466911256584355e-05,
501
+ "loss": 1.1156,
502
+ "step": 730
503
+ },
504
+ {
505
+ "epoch": 0.76,
506
+ "learning_rate": 3.42972149297686e-05,
507
+ "loss": 1.099,
508
+ "step": 740
509
+ },
510
+ {
511
+ "epoch": 0.77,
512
+ "learning_rate": 3.3922914070303076e-05,
513
+ "loss": 1.0994,
514
+ "step": 750
515
+ },
516
+ {
517
+ "epoch": 0.78,
518
+ "learning_rate": 3.354630673992473e-05,
519
+ "loss": 1.1177,
520
+ "step": 760
521
+ },
522
+ {
523
+ "epoch": 0.79,
524
+ "learning_rate": 3.316749028730757e-05,
525
+ "loss": 1.1095,
526
+ "step": 770
527
+ },
528
+ {
529
+ "epoch": 0.8,
530
+ "learning_rate": 3.278656263215836e-05,
531
+ "loss": 1.1083,
532
+ "step": 780
533
+ },
534
+ {
535
+ "epoch": 0.81,
536
+ "learning_rate": 3.24036222399055e-05,
537
+ "loss": 1.0836,
538
+ "step": 790
539
+ },
540
+ {
541
+ "epoch": 0.82,
542
+ "learning_rate": 3.2018768096246834e-05,
543
+ "loss": 1.0982,
544
+ "step": 800
545
+ },
546
+ {
547
+ "epoch": 0.82,
548
+ "eval_loss": 1.0972065925598145,
549
+ "eval_runtime": 24.0261,
550
+ "eval_samples_per_second": 26.346,
551
+ "eval_steps_per_second": 3.33,
552
+ "step": 800
553
+ },
554
+ {
555
+ "epoch": 0.83,
556
+ "learning_rate": 3.1632099681562996e-05,
557
+ "loss": 1.1041,
558
+ "step": 810
559
+ },
560
+ {
561
+ "epoch": 0.84,
562
+ "learning_rate": 3.1243716945202864e-05,
563
+ "loss": 1.0974,
564
+ "step": 820
565
+ },
566
+ {
567
+ "epoch": 0.85,
568
+ "learning_rate": 3.085372027964777e-05,
569
+ "loss": 1.0945,
570
+ "step": 830
571
+ },
572
+ {
573
+ "epoch": 0.86,
574
+ "learning_rate": 3.0462210494561283e-05,
575
+ "loss": 1.1045,
576
+ "step": 840
577
+ },
578
+ {
579
+ "epoch": 0.87,
580
+ "learning_rate": 3.0069288790730966e-05,
581
+ "loss": 1.0842,
582
+ "step": 850
583
+ },
584
+ {
585
+ "epoch": 0.88,
586
+ "learning_rate": 2.9675056733909196e-05,
587
+ "loss": 1.097,
588
+ "step": 860
589
+ },
590
+ {
591
+ "epoch": 0.89,
592
+ "learning_rate": 2.9279616228559542e-05,
593
+ "loss": 1.1034,
594
+ "step": 870
595
+ },
596
+ {
597
+ "epoch": 0.9,
598
+ "learning_rate": 2.8883069491515696e-05,
599
+ "loss": 1.1033,
600
+ "step": 880
601
+ },
602
+ {
603
+ "epoch": 0.91,
604
+ "learning_rate": 2.8485519025559503e-05,
605
+ "loss": 1.0894,
606
+ "step": 890
607
+ },
608
+ {
609
+ "epoch": 0.92,
610
+ "learning_rate": 2.8087067592925252e-05,
611
+ "loss": 1.1062,
612
+ "step": 900
613
+ },
614
+ {
615
+ "epoch": 0.92,
616
+ "eval_loss": 1.0946753025054932,
617
+ "eval_runtime": 23.9793,
618
+ "eval_samples_per_second": 26.398,
619
+ "eval_steps_per_second": 3.336,
620
+ "step": 900
621
+ },
622
+ {
623
+ "epoch": 0.93,
624
+ "learning_rate": 2.7687818188736757e-05,
625
+ "loss": 1.1243,
626
+ "step": 910
627
+ },
628
+ {
629
+ "epoch": 0.94,
630
+ "learning_rate": 2.7287874014384346e-05,
631
+ "loss": 1.0975,
632
+ "step": 920
633
+ },
634
+ {
635
+ "epoch": 0.95,
636
+ "learning_rate": 2.6887338450848448e-05,
637
+ "loss": 1.0885,
638
+ "step": 930
639
+ },
640
+ {
641
+ "epoch": 0.96,
642
+ "learning_rate": 2.64863150319768e-05,
643
+ "loss": 1.0938,
644
+ "step": 940
645
+ },
646
+ {
647
+ "epoch": 0.97,
648
+ "learning_rate": 2.6084907417722176e-05,
649
+ "loss": 1.1014,
650
+ "step": 950
651
+ },
652
+ {
653
+ "epoch": 0.98,
654
+ "learning_rate": 2.5683219367347432e-05,
655
+ "loss": 1.105,
656
+ "step": 960
657
+ },
658
+ {
659
+ "epoch": 0.99,
660
+ "learning_rate": 2.528135471260493e-05,
661
+ "loss": 1.0993,
662
+ "step": 970
663
+ },
664
+ {
665
+ "epoch": 1.0,
666
+ "learning_rate": 2.4879417330897267e-05,
667
+ "loss": 1.0867,
668
+ "step": 980
669
+ },
670
+ {
671
+ "epoch": 1.01,
672
+ "learning_rate": 2.4477511118426093e-05,
673
+ "loss": 1.059,
674
+ "step": 990
675
+ },
676
+ {
677
+ "epoch": 1.02,
678
+ "learning_rate": 2.4075739963336182e-05,
679
+ "loss": 1.0744,
680
+ "step": 1000
681
+ },
682
+ {
683
+ "epoch": 1.02,
684
+ "eval_loss": 1.0925548076629639,
685
+ "eval_runtime": 23.9765,
686
+ "eval_samples_per_second": 26.401,
687
+ "eval_steps_per_second": 3.337,
688
+ "step": 1000
689
+ },
690
+ {
691
+ "epoch": 1.03,
692
+ "learning_rate": 2.367420771886154e-05,
693
+ "loss": 1.096,
694
+ "step": 1010
695
+ },
696
+ {
697
+ "epoch": 1.04,
698
+ "learning_rate": 2.3273018176480492e-05,
699
+ "loss": 1.0873,
700
+ "step": 1020
701
+ },
702
+ {
703
+ "epoch": 1.05,
704
+ "learning_rate": 2.2872275039086823e-05,
705
+ "loss": 1.097,
706
+ "step": 1030
707
+ },
708
+ {
709
+ "epoch": 1.06,
710
+ "learning_rate": 2.2472081894183744e-05,
711
+ "loss": 1.0801,
712
+ "step": 1040
713
+ },
714
+ {
715
+ "epoch": 1.07,
716
+ "learning_rate": 2.207254218710772e-05,
717
+ "loss": 1.1006,
718
+ "step": 1050
719
+ },
720
+ {
721
+ "epoch": 1.08,
722
+ "learning_rate": 2.1673759194289033e-05,
723
+ "loss": 1.0811,
724
+ "step": 1060
725
+ },
726
+ {
727
+ "epoch": 1.09,
728
+ "learning_rate": 2.1275835996556e-05,
729
+ "loss": 1.0969,
730
+ "step": 1070
731
+ },
732
+ {
733
+ "epoch": 1.1,
734
+ "learning_rate": 2.08788754524898e-05,
735
+ "loss": 1.0888,
736
+ "step": 1080
737
+ },
738
+ {
739
+ "epoch": 1.11,
740
+ "learning_rate": 2.0482980171836648e-05,
741
+ "loss": 1.09,
742
+ "step": 1090
743
+ },
744
+ {
745
+ "epoch": 1.13,
746
+ "learning_rate": 2.008825248898443e-05,
747
+ "loss": 1.0867,
748
+ "step": 1100
749
+ },
750
+ {
751
+ "epoch": 1.13,
752
+ "eval_loss": 1.0904080867767334,
753
+ "eval_runtime": 23.9806,
754
+ "eval_samples_per_second": 26.396,
755
+ "eval_steps_per_second": 3.336,
756
+ "step": 1100
757
+ },
758
+ {
759
+ "epoch": 1.14,
760
+ "learning_rate": 1.969479443651036e-05,
761
+ "loss": 1.0925,
762
+ "step": 1110
763
+ },
764
+ {
765
+ "epoch": 1.15,
766
+ "learning_rate": 1.930270771880679e-05,
767
+ "loss": 1.0827,
768
+ "step": 1120
769
+ },
770
+ {
771
+ "epoch": 1.16,
772
+ "learning_rate": 1.8912093685791748e-05,
773
+ "loss": 1.0976,
774
+ "step": 1130
775
+ },
776
+ {
777
+ "epoch": 1.17,
778
+ "learning_rate": 1.8523053306711203e-05,
779
+ "loss": 1.0746,
780
+ "step": 1140
781
+ },
782
+ {
783
+ "epoch": 1.18,
784
+ "learning_rate": 1.813568714403957e-05,
785
+ "loss": 1.0922,
786
+ "step": 1150
787
+ },
788
+ {
789
+ "epoch": 1.19,
790
+ "learning_rate": 1.7750095327485605e-05,
791
+ "loss": 1.121,
792
+ "step": 1160
793
+ },
794
+ {
795
+ "epoch": 1.2,
796
+ "learning_rate": 1.7366377528109895e-05,
797
+ "loss": 1.0915,
798
+ "step": 1170
799
+ },
800
+ {
801
+ "epoch": 1.21,
802
+ "learning_rate": 1.6984632932561124e-05,
803
+ "loss": 1.0866,
804
+ "step": 1180
805
+ },
806
+ {
807
+ "epoch": 1.22,
808
+ "learning_rate": 1.6604960217437398e-05,
809
+ "loss": 1.0822,
810
+ "step": 1190
811
+ },
812
+ {
813
+ "epoch": 1.23,
814
+ "learning_rate": 1.6227457523779532e-05,
815
+ "loss": 1.0809,
816
+ "step": 1200
817
+ },
818
+ {
819
+ "epoch": 1.23,
820
+ "eval_loss": 1.088823914527893,
821
+ "eval_runtime": 23.9277,
822
+ "eval_samples_per_second": 26.455,
823
+ "eval_steps_per_second": 3.343,
824
+ "step": 1200
825
+ },
826
+ {
827
+ "epoch": 1.24,
828
+ "learning_rate": 1.5852222431702658e-05,
829
+ "loss": 1.0903,
830
+ "step": 1210
831
+ },
832
+ {
833
+ "epoch": 1.25,
834
+ "learning_rate": 1.547935193517295e-05,
835
+ "loss": 1.088,
836
+ "step": 1220
837
+ },
838
+ {
839
+ "epoch": 1.26,
840
+ "learning_rate": 1.510894241693573e-05,
841
+ "loss": 1.0988,
842
+ "step": 1230
843
+ },
844
+ {
845
+ "epoch": 1.27,
846
+ "learning_rate": 1.4741089623601678e-05,
847
+ "loss": 1.0935,
848
+ "step": 1240
849
+ },
850
+ {
851
+ "epoch": 1.28,
852
+ "learning_rate": 1.4375888640897438e-05,
853
+ "loss": 1.0812,
854
+ "step": 1250
855
+ },
856
+ {
857
+ "epoch": 1.29,
858
+ "learning_rate": 1.4013433869087006e-05,
859
+ "loss": 1.1036,
860
+ "step": 1260
861
+ },
862
+ {
863
+ "epoch": 1.3,
864
+ "learning_rate": 1.3653818998570378e-05,
865
+ "loss": 1.0917,
866
+ "step": 1270
867
+ },
868
+ {
869
+ "epoch": 1.31,
870
+ "learning_rate": 1.329713698566567e-05,
871
+ "loss": 1.0697,
872
+ "step": 1280
873
+ },
874
+ {
875
+ "epoch": 1.32,
876
+ "learning_rate": 1.2943480028580917e-05,
877
+ "loss": 1.092,
878
+ "step": 1290
879
+ },
880
+ {
881
+ "epoch": 1.33,
882
+ "learning_rate": 1.2592939543581947e-05,
883
+ "loss": 1.0994,
884
+ "step": 1300
885
+ },
886
+ {
887
+ "epoch": 1.33,
888
+ "eval_loss": 1.0873268842697144,
889
+ "eval_runtime": 23.9478,
890
+ "eval_samples_per_second": 26.433,
891
+ "eval_steps_per_second": 3.341,
892
+ "step": 1300
893
+ },
894
+ {
895
+ "epoch": 1.34,
896
+ "learning_rate": 1.2245606141362318e-05,
897
+ "loss": 1.0737,
898
+ "step": 1310
899
+ },
900
+ {
901
+ "epoch": 1.35,
902
+ "learning_rate": 1.1901569603621487e-05,
903
+ "loss": 1.0718,
904
+ "step": 1320
905
+ },
906
+ {
907
+ "epoch": 1.36,
908
+ "learning_rate": 1.1560918859857247e-05,
909
+ "loss": 1.0992,
910
+ "step": 1330
911
+ },
912
+ {
913
+ "epoch": 1.37,
914
+ "learning_rate": 1.1223741964378518e-05,
915
+ "loss": 1.0757,
916
+ "step": 1340
917
+ },
918
+ {
919
+ "epoch": 1.38,
920
+ "learning_rate": 1.0890126073544276e-05,
921
+ "loss": 1.0916,
922
+ "step": 1350
923
+ },
924
+ {
925
+ "epoch": 1.39,
926
+ "learning_rate": 1.0560157423234681e-05,
927
+ "loss": 1.1017,
928
+ "step": 1360
929
+ },
930
+ {
931
+ "epoch": 1.4,
932
+ "learning_rate": 1.0233921306560029e-05,
933
+ "loss": 1.0897,
934
+ "step": 1370
935
+ },
936
+ {
937
+ "epoch": 1.41,
938
+ "learning_rate": 9.911502051813499e-06,
939
+ "loss": 1.1047,
940
+ "step": 1380
941
+ },
942
+ {
943
+ "epoch": 1.42,
944
+ "learning_rate": 9.592983000673272e-06,
945
+ "loss": 1.0877,
946
+ "step": 1390
947
+ },
948
+ {
949
+ "epoch": 1.43,
950
+ "learning_rate": 9.278446486659642e-06,
951
+ "loss": 1.0622,
952
+ "step": 1400
953
+ },
954
+ {
955
+ "epoch": 1.43,
956
+ "eval_loss": 1.0861936807632446,
957
+ "eval_runtime": 23.9135,
958
+ "eval_samples_per_second": 26.47,
959
+ "eval_steps_per_second": 3.345,
960
+ "step": 1400
961
+ },
962
+ {
963
+ "epoch": 1.44,
964
+ "learning_rate": 8.967973813852743e-06,
965
+ "loss": 1.0791,
966
+ "step": 1410
967
+ },
968
+ {
969
+ "epoch": 1.45,
970
+ "learning_rate": 8.66164523587637e-06,
971
+ "loss": 1.0952,
972
+ "step": 1420
973
+ },
974
+ {
975
+ "epoch": 1.46,
976
+ "learning_rate": 8.359539935153368e-06,
977
+ "loss": 1.0998,
978
+ "step": 1430
979
+ },
980
+ {
981
+ "epoch": 1.47,
982
+ "learning_rate": 8.061736002437862e-06,
983
+ "loss": 1.0714,
984
+ "step": 1440
985
+ },
986
+ {
987
+ "epoch": 1.48,
988
+ "learning_rate": 7.768310416629703e-06,
989
+ "loss": 1.0934,
990
+ "step": 1450
991
+ },
992
+ {
993
+ "epoch": 1.49,
994
+ "learning_rate": 7.479339024876283e-06,
995
+ "loss": 1.1074,
996
+ "step": 1460
997
+ },
998
+ {
999
+ "epoch": 1.5,
1000
+ "learning_rate": 7.1948965229669425e-06,
1001
+ "loss": 1.0993,
1002
+ "step": 1470
1003
+ },
1004
+ {
1005
+ "epoch": 1.51,
1006
+ "learning_rate": 6.915056436024964e-06,
1007
+ "loss": 1.0962,
1008
+ "step": 1480
1009
+ },
1010
+ {
1011
+ "epoch": 1.52,
1012
+ "learning_rate": 6.63989109950216e-06,
1013
+ "loss": 1.077,
1014
+ "step": 1490
1015
+ },
1016
+ {
1017
+ "epoch": 1.53,
1018
+ "learning_rate": 6.3694716404810065e-06,
1019
+ "loss": 1.1079,
1020
+ "step": 1500
1021
+ },
1022
+ {
1023
+ "epoch": 1.53,
1024
+ "eval_loss": 1.0856590270996094,
1025
+ "eval_runtime": 23.9592,
1026
+ "eval_samples_per_second": 26.42,
1027
+ "eval_steps_per_second": 3.339,
1028
+ "step": 1500
1029
+ },
1030
+ {
1031
+ "epoch": 1.54,
1032
+ "learning_rate": 6.10386795928907e-06,
1033
+ "loss": 1.0622,
1034
+ "step": 1510
1035
+ },
1036
+ {
1037
+ "epoch": 1.55,
1038
+ "learning_rate": 5.843148711430621e-06,
1039
+ "loss": 1.1085,
1040
+ "step": 1520
1041
+ },
1042
+ {
1043
+ "epoch": 1.56,
1044
+ "learning_rate": 5.5873812898399546e-06,
1045
+ "loss": 1.0871,
1046
+ "step": 1530
1047
+ },
1048
+ {
1049
+ "epoch": 1.58,
1050
+ "learning_rate": 5.336631807461076e-06,
1051
+ "loss": 1.1106,
1052
+ "step": 1540
1053
+ },
1054
+ {
1055
+ "epoch": 1.59,
1056
+ "learning_rate": 5.090965080158278e-06,
1057
+ "loss": 1.0841,
1058
+ "step": 1550
1059
+ },
1060
+ {
1061
+ "epoch": 1.6,
1062
+ "learning_rate": 4.850444609961988e-06,
1063
+ "loss": 1.1075,
1064
+ "step": 1560
1065
+ },
1066
+ {
1067
+ "epoch": 1.61,
1068
+ "learning_rate": 4.615132568654215e-06,
1069
+ "loss": 1.0983,
1070
+ "step": 1570
1071
+ },
1072
+ {
1073
+ "epoch": 1.62,
1074
+ "learning_rate": 4.385089781697863e-06,
1075
+ "loss": 1.0859,
1076
+ "step": 1580
1077
+ },
1078
+ {
1079
+ "epoch": 1.63,
1080
+ "learning_rate": 4.160375712514019e-06,
1081
+ "loss": 1.0759,
1082
+ "step": 1590
1083
+ },
1084
+ {
1085
+ "epoch": 1.64,
1086
+ "learning_rate": 3.941048447111387e-06,
1087
+ "loss": 1.0612,
1088
+ "step": 1600
1089
+ },
1090
+ {
1091
+ "epoch": 1.64,
1092
+ "eval_loss": 1.085319995880127,
1093
+ "eval_runtime": 23.9411,
1094
+ "eval_samples_per_second": 26.44,
1095
+ "eval_steps_per_second": 3.342,
1096
+ "step": 1600
1097
+ },
1098
+ {
1099
+ "epoch": 1.65,
1100
+ "learning_rate": 3.727164679071696e-06,
1101
+ "loss": 1.0806,
1102
+ "step": 1610
1103
+ },
1104
+ {
1105
+ "epoch": 1.66,
1106
+ "learning_rate": 3.5187796948950384e-06,
1107
+ "loss": 1.0861,
1108
+ "step": 1620
1109
+ },
1110
+ {
1111
+ "epoch": 1.67,
1112
+ "learning_rate": 3.315947359708993e-06,
1113
+ "loss": 1.0712,
1114
+ "step": 1630
1115
+ },
1116
+ {
1117
+ "epoch": 1.68,
1118
+ "learning_rate": 3.118720103345063e-06,
1119
+ "loss": 1.086,
1120
+ "step": 1640
1121
+ },
1122
+ {
1123
+ "epoch": 1.69,
1124
+ "learning_rate": 2.9271489067861953e-06,
1125
+ "loss": 1.1087,
1126
+ "step": 1650
1127
+ },
1128
+ {
1129
+ "epoch": 1.7,
1130
+ "learning_rate": 2.7412832889887664e-06,
1131
+ "loss": 1.1036,
1132
+ "step": 1660
1133
+ },
1134
+ {
1135
+ "epoch": 1.71,
1136
+ "learning_rate": 2.5611712940825065e-06,
1137
+ "loss": 1.0712,
1138
+ "step": 1670
1139
+ },
1140
+ {
1141
+ "epoch": 1.72,
1142
+ "learning_rate": 2.3868594789516336e-06,
1143
+ "loss": 1.0695,
1144
+ "step": 1680
1145
+ },
1146
+ {
1147
+ "epoch": 1.73,
1148
+ "learning_rate": 2.2183929012004527e-06,
1149
+ "loss": 1.0858,
1150
+ "step": 1690
1151
+ },
1152
+ {
1153
+ "epoch": 1.74,
1154
+ "learning_rate": 2.055815107506451e-06,
1155
+ "loss": 1.0839,
1156
+ "step": 1700
1157
+ },
1158
+ {
1159
+ "epoch": 1.74,
1160
+ "eval_loss": 1.0849649906158447,
1161
+ "eval_runtime": 23.9223,
1162
+ "eval_samples_per_second": 26.461,
1163
+ "eval_steps_per_second": 3.344,
1164
+ "step": 1700
1165
+ },
1166
+ {
1167
+ "epoch": 1.75,
1168
+ "learning_rate": 1.899168122364023e-06,
1169
+ "loss": 1.0925,
1170
+ "step": 1710
1171
+ },
1172
+ {
1173
+ "epoch": 1.76,
1174
+ "learning_rate": 1.7484924372215744e-06,
1175
+ "loss": 1.0888,
1176
+ "step": 1720
1177
+ },
1178
+ {
1179
+ "epoch": 1.77,
1180
+ "learning_rate": 1.6038270000149903e-06,
1181
+ "loss": 1.0667,
1182
+ "step": 1730
1183
+ },
1184
+ {
1185
+ "epoch": 1.78,
1186
+ "learning_rate": 1.4652092051000155e-06,
1187
+ "loss": 1.0893,
1188
+ "step": 1740
1189
+ },
1190
+ {
1191
+ "epoch": 1.79,
1192
+ "learning_rate": 1.3326748835862463e-06,
1193
+ "loss": 1.061,
1194
+ "step": 1750
1195
+ },
1196
+ {
1197
+ "epoch": 1.8,
1198
+ "learning_rate": 1.2062582940751965e-06,
1199
+ "loss": 1.1043,
1200
+ "step": 1760
1201
+ },
1202
+ {
1203
+ "epoch": 1.81,
1204
+ "learning_rate": 1.085992113804854e-06,
1205
+ "loss": 1.0908,
1206
+ "step": 1770
1207
+ },
1208
+ {
1209
+ "epoch": 1.82,
1210
+ "learning_rate": 9.719074302029813e-07,
1211
+ "loss": 1.1128,
1212
+ "step": 1780
1213
+ },
1214
+ {
1215
+ "epoch": 1.83,
1216
+ "learning_rate": 8.640337328513743e-07,
1217
+ "loss": 1.082,
1218
+ "step": 1790
1219
+ },
1220
+ {
1221
+ "epoch": 1.84,
1222
+ "learning_rate": 7.623989058631459e-07,
1223
+ "loss": 1.0702,
1224
+ "step": 1800
1225
+ },
1226
+ {
1227
+ "epoch": 1.84,
1228
+ "eval_loss": 1.0849045515060425,
1229
+ "eval_runtime": 23.9602,
1230
+ "eval_samples_per_second": 26.419,
1231
+ "eval_steps_per_second": 3.339,
1232
+ "step": 1800
1233
+ },
1234
+ {
1235
+ "epoch": 1.85,
1236
+ "learning_rate": 6.67029220674989e-07,
1237
+ "loss": 1.1089,
1238
+ "step": 1810
1239
+ },
1240
+ {
1241
+ "epoch": 1.86,
1242
+ "learning_rate": 5.779493292563304e-07,
1243
+ "loss": 1.0929,
1244
+ "step": 1820
1245
+ },
1246
+ {
1247
+ "epoch": 1.87,
1248
+ "learning_rate": 4.951822577370785e-07,
1249
+ "loss": 1.0648,
1250
+ "step": 1830
1251
+ },
1252
+ {
1253
+ "epoch": 1.88,
1254
+ "learning_rate": 4.1874940045561194e-07,
1255
+ "loss": 1.0654,
1256
+ "step": 1840
1257
+ },
1258
+ {
1259
+ "epoch": 1.89,
1260
+ "learning_rate": 3.4867051442860896e-07,
1261
+ "loss": 1.0822,
1262
+ "step": 1850
1263
+ },
1264
+ {
1265
+ "epoch": 1.9,
1266
+ "learning_rate": 2.8496371424407975e-07,
1267
+ "loss": 1.0873,
1268
+ "step": 1860
1269
+ },
1270
+ {
1271
+ "epoch": 1.91,
1272
+ "learning_rate": 2.2764546737895076e-07,
1273
+ "loss": 1.0725,
1274
+ "step": 1870
1275
+ },
1276
+ {
1277
+ "epoch": 1.92,
1278
+ "learning_rate": 1.7673058994241432e-07,
1279
+ "loss": 1.0772,
1280
+ "step": 1880
1281
+ },
1282
+ {
1283
+ "epoch": 1.93,
1284
+ "learning_rate": 1.3223224284613366e-07,
1285
+ "loss": 1.07,
1286
+ "step": 1890
1287
+ },
1288
+ {
1289
+ "epoch": 1.94,
1290
+ "learning_rate": 9.416192840228932e-08,
1291
+ "loss": 1.0886,
1292
+ "step": 1900
1293
+ },
1294
+ {
1295
+ "epoch": 1.94,
1296
+ "eval_loss": 1.0848859548568726,
1297
+ "eval_runtime": 23.9384,
1298
+ "eval_samples_per_second": 26.443,
1299
+ "eval_steps_per_second": 3.342,
1300
+ "step": 1900
1301
+ },
1302
+ {
1303
+ "epoch": 1.95,
1304
+ "learning_rate": 6.252948735037678e-08,
1305
+ "loss": 1.0917,
1306
+ "step": 1910
1307
+ },
1308
+ {
1309
+ "epoch": 1.96,
1310
+ "learning_rate": 3.734309631348854e-08,
1311
+ "loss": 1.1042,
1312
+ "step": 1920
1313
+ },
1314
+ {
1315
+ "epoch": 1.97,
1316
+ "learning_rate": 1.8609265684738086e-08,
1317
+ "loss": 1.0908,
1318
+ "step": 1930
1319
+ },
1320
+ {
1321
+ "epoch": 1.98,
1322
+ "learning_rate": 6.332837944400538e-09,
1323
+ "loss": 1.0769,
1324
+ "step": 1940
1325
+ },
1326
+ {
1327
+ "epoch": 1.99,
1328
+ "learning_rate": 5.16986408199971e-10,
1329
+ "loss": 1.078,
1330
+ "step": 1950
1331
+ },
1332
+ {
1333
+ "epoch": 2.0,
1334
+ "step": 1954,
1335
+ "total_flos": 4.405931905703215e+18,
1336
+ "train_loss": 1.1088850573849165,
1337
+ "train_runtime": 12573.3474,
1338
+ "train_samples_per_second": 9.954,
1339
+ "train_steps_per_second": 0.155
1340
+ }
1341
+ ],
1342
+ "max_steps": 1954,
1343
+ "num_train_epochs": 2,
1344
+ "total_flos": 4.405931905703215e+18,
1345
+ "trial_name": null,
1346
+ "trial_params": null
1347
+ }
Baichuan-13B-Chat-lora-Consulting/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:85acc7804e8cca26406eba54215ae0ac6e6af395c0067e8c0c097cc36e163d81
3
+ size 4600
Baichuan-13B-Chat-lora-Consulting/training_eval_loss.png ADDED
Baichuan-13B-Chat-lora-Consulting/training_loss.png ADDED