aoc01936 committed on
Commit 3d5fd80
1 Parent(s): 98fcb76

Upload 15 files

README.md ADDED
@@ -0,0 +1,62 @@
+ ---
+ license: other
+ library_name: peft
+ tags:
+ - llama-factory
+ - lora
+ - generated_from_trainer
+ base_model: qwen/Qwen-72B-Chat
+ model-index:
+ - name: path_to_sft_checkpoint_eevalplusceval
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # path_to_sft_checkpoint_eevalplusceval
+
+ This model is a LoRA fine-tuned version of [qwen/Qwen-72B-Chat](https://huggingface.co/Qwen/Qwen-72B-Chat) on the e_eval dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 1
+ - eval_batch_size: 8
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 4
+ - gradient_accumulation_steps: 8
+ - total_train_batch_size: 32
+ - total_eval_batch_size: 32
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - num_epochs: 3.0
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - PEFT 0.8.2
+ - Transformers 4.37.2
+ - Pytorch 2.1.2+cu121
+ - Datasets 2.15.0
+ - Tokenizers 0.15.1
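
Since this upload contains only the LoRA adapter weights (`adapter_model.safetensors`, about 42 MB) rather than a merged model, they have to be applied on top of the base checkpoint at inference time. Below is a minimal, hedged sketch of how that could be done with the PEFT and Transformers versions listed above; the local adapter path is a placeholder, not something shipped in this commit.

```python
# Illustrative only: load the base Qwen-72B-Chat model and attach this LoRA adapter.
# "path_to_sft_checkpoint_eevalplusceval" is a placeholder for wherever these files live.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "Qwen/Qwen-72B-Chat"                          # base model named in the card
adapter_dir = "path_to_sft_checkpoint_eevalplusceval"   # assumed local adapter directory

tokenizer = AutoTokenizer.from_pretrained(base_id, trust_remote_code=True)
base_model = AutoModelForCausalLM.from_pretrained(
    base_id,
    device_map="auto",        # shard the 72B weights across available GPUs
    trust_remote_code=True,   # Qwen ships custom modeling/tokenization code
)
# Applies adapter_config.json + adapter_model.safetensors from this repo.
model = PeftModel.from_pretrained(base_model, adapter_dir)
model.eval()
```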
adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "qwen/Qwen-72B-Chat",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0.0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "c_attn"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_rslora": false
+ }
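
For reference, the adapter above corresponds roughly to the following `peft.LoraConfig`. This is a hedged reconstruction from `adapter_config.json`, not the actual training code (LLaMA-Factory builds this config internally from its CLI arguments).

```python
# Approximate LoraConfig equivalent to adapter_config.json above (illustrative).
from peft import LoraConfig, TaskType

lora_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,   # "task_type": "CAUSAL_LM"
    r=8,                            # "r": 8
    lora_alpha=16,                  # "lora_alpha": 16
    lora_dropout=0.0,               # "lora_dropout": 0.0
    bias="none",                    # "bias": "none"
    target_modules=["c_attn"],      # Qwen's fused query/key/value projection
)
```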
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0a810d0575fcf03c415b79b0cc868c28d4e48b0108f57a1d82ab8fa3cba8ec8c
+ size 41963856
all_results.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "epoch": 3.0,
+   "train_loss": 0.5275618712107341,
+   "train_runtime": 2086.9604,
+   "train_samples_per_second": 2.069,
+   "train_steps_per_second": 0.065
+ }
configuration.json ADDED
@@ -0,0 +1 @@
+ {"framework":"Pytorch","task":"text-generation"}
qwen.json ADDED
The diff for this file is too large to render. See raw diff
 
qwen.tiktoken ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<|im_end|>"
+ }
tokenization_qwen.py ADDED
@@ -0,0 +1,276 @@
+ # Copyright (c) Alibaba Cloud.
+ #
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
+
+ """Tokenization classes for QWen."""
+
+ import base64
+ import logging
+ import os
+ import unicodedata
+ from typing import Collection, Dict, List, Set, Tuple, Union
+
+ import tiktoken
+ from transformers import PreTrainedTokenizer, AddedToken
+
+ logger = logging.getLogger(__name__)
+
+
+ VOCAB_FILES_NAMES = {"vocab_file": "qwen.tiktoken"}
+
+ PAT_STR = r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"""
+ ENDOFTEXT = "<|endoftext|>"
+ IMSTART = "<|im_start|>"
+ IMEND = "<|im_end|>"
+ # as the default behavior is changed to allow special tokens in
+ # regular texts, the surface forms of special tokens need to be
+ # as different as possible to minimize the impact
+ EXTRAS = tuple((f"<|extra_{i}|>" for i in range(205)))
+ # changed to use actual index to avoid misconfiguration with vocabulary expansion
+ SPECIAL_START_ID = 151643
+ SPECIAL_TOKENS = tuple(
+     enumerate(
+         (
+             (
+                 ENDOFTEXT,
+                 IMSTART,
+                 IMEND,
+             )
+             + EXTRAS
+         ),
+         start=SPECIAL_START_ID,
+     )
+ )
+ SPECIAL_TOKENS_SET = set(t for i, t in SPECIAL_TOKENS)
+
+
+ def _load_tiktoken_bpe(tiktoken_bpe_file: str) -> Dict[bytes, int]:
+     with open(tiktoken_bpe_file, "rb") as f:
+         contents = f.read()
+     return {
+         base64.b64decode(token): int(rank)
+         for token, rank in (line.split() for line in contents.splitlines() if line)
+     }
+
+
+ class QWenTokenizer(PreTrainedTokenizer):
+     """QWen tokenizer."""
+
+     vocab_files_names = VOCAB_FILES_NAMES
+
+     def __init__(
+         self,
+         vocab_file,
+         errors="replace",
+         extra_vocab_file=None,
+         **kwargs,
+     ):
+         super().__init__(**kwargs)
+
+         # how to handle errors in decoding UTF-8 byte sequences
+         # use ignore if you are in streaming inference
+         self.errors = errors
+
+         self.mergeable_ranks = _load_tiktoken_bpe(vocab_file)  # type: Dict[bytes, int]
+         self.special_tokens = {
+             token: index
+             for index, token in SPECIAL_TOKENS
+         }
+
+         # try to load extra vocab from file
+         if extra_vocab_file is not None:
+             used_ids = set(self.mergeable_ranks.values()) | set(self.special_tokens.values())
+             extra_mergeable_ranks = _load_tiktoken_bpe(extra_vocab_file)
+             for token, index in extra_mergeable_ranks.items():
+                 if token in self.mergeable_ranks:
+                     logger.info(f"extra token {token} exists, skipping")
+                     continue
+                 if index in used_ids:
+                     logger.info(f"the index {index} for extra token {token} exists, skipping")
+                     continue
+                 self.mergeable_ranks[token] = index
+             # the index may be sparse after this, but don't worry, tiktoken.Encoding will handle this
+
+         enc = tiktoken.Encoding(
+             "Qwen",
+             pat_str=PAT_STR,
+             mergeable_ranks=self.mergeable_ranks,
+             special_tokens=self.special_tokens,
+         )
+         assert (
+             len(self.mergeable_ranks) + len(self.special_tokens) == enc.n_vocab
+         ), f"{len(self.mergeable_ranks) + len(self.special_tokens)} != {enc.n_vocab} in encoding"
+
+         self.decoder = {
+             v: k for k, v in self.mergeable_ranks.items()
+         }  # type: dict[int, bytes|str]
+         self.decoder.update({v: k for k, v in self.special_tokens.items()})
+
+         self.tokenizer = enc  # type: tiktoken.Encoding
+
+         self.eod_id = self.tokenizer.eot_token
+         self.im_start_id = self.special_tokens[IMSTART]
+         self.im_end_id = self.special_tokens[IMEND]
+
+     def __getstate__(self):
+         # for pickle lovers
+         state = self.__dict__.copy()
+         del state["tokenizer"]
+         return state
+
+     def __setstate__(self, state):
+         # tokenizer is not python native; don't pass it; rebuild it
+         self.__dict__.update(state)
+         enc = tiktoken.Encoding(
+             "Qwen",
+             pat_str=PAT_STR,
+             mergeable_ranks=self.mergeable_ranks,
+             special_tokens=self.special_tokens,
+         )
+         self.tokenizer = enc
+
+     def __len__(self) -> int:
+         return self.tokenizer.n_vocab
+
+     def get_vocab(self) -> Dict[bytes, int]:
+         return self.mergeable_ranks
+
+     def convert_tokens_to_ids(
+         self, tokens: Union[bytes, str, List[Union[bytes, str]]]
+     ) -> List[int]:
+         ids = []
+         if isinstance(tokens, (str, bytes)):
+             if tokens in self.special_tokens:
+                 return self.special_tokens[tokens]
+             else:
+                 return self.mergeable_ranks.get(tokens)
+         for token in tokens:
+             if token in self.special_tokens:
+                 ids.append(self.special_tokens[token])
+             else:
+                 ids.append(self.mergeable_ranks.get(token))
+         return ids
+
+     def _add_tokens(
+         self,
+         new_tokens: Union[List[str], List[AddedToken]],
+         special_tokens: bool = False,
+     ) -> int:
+         if not special_tokens and new_tokens:
+             raise ValueError("Adding regular tokens is not supported")
+         for token in new_tokens:
+             surface_form = token.content if isinstance(token, AddedToken) else token
+             if surface_form not in SPECIAL_TOKENS_SET:
+                 raise ValueError("Adding unknown special tokens is not supported")
+         return 0
+
+     def save_vocabulary(self, save_directory: str, **kwargs) -> Tuple[str]:
+         """
+         Save only the vocabulary of the tokenizer (vocabulary).
+
+         Returns:
+             `Tuple(str)`: Paths to the files saved.
+         """
+         file_path = os.path.join(save_directory, "qwen.tiktoken")
+         with open(file_path, "w", encoding="utf8") as w:
+             for k, v in self.mergeable_ranks.items():
+                 line = base64.b64encode(k).decode("utf8") + " " + str(v) + "\n"
+                 w.write(line)
+         return (file_path,)
+
+     def tokenize(
+         self,
+         text: str,
+         allowed_special: Union[Set, str] = "all",
+         disallowed_special: Union[Collection, str] = (),
+         **kwargs,
+     ) -> List[Union[bytes, str]]:
+         """
+         Converts a string into a sequence of tokens.
+
+         Args:
+             text (`str`):
+                 The sequence to be encoded.
+             allowed_special (`Literal["all"]` or `set`):
+                 The surface forms of the tokens to be encoded as special tokens in regular texts.
+                 Defaults to "all".
+             disallowed_special (`Literal["all"]` or `Collection`):
+                 The surface forms of the tokens that should not be in regular texts and trigger errors.
+                 Defaults to an empty tuple.
+
+             kwargs (additional keyword arguments, *optional*):
+                 Will be passed to the underlying model specific encode method.
+
+         Returns:
+             `List[bytes|str]`: The list of tokens.
+         """
+         tokens = []
+         text = unicodedata.normalize("NFC", text)
+
+         # this implementation takes a detour: text -> token id -> token surface forms
+         for t in self.tokenizer.encode(
+             text, allowed_special=allowed_special, disallowed_special=disallowed_special
+         ):
+             tokens.append(self.decoder[t])
+         return tokens
+
+     def convert_tokens_to_string(self, tokens: List[Union[bytes, str]]) -> str:
+         """
+         Converts a sequence of tokens into a single string.
+         """
+         text = ""
+         temp = b""
+         for t in tokens:
+             if isinstance(t, str):
+                 if temp:
+                     text += temp.decode("utf-8", errors=self.errors)
+                     temp = b""
+                 text += t
+             elif isinstance(t, bytes):
+                 temp += t
+             else:
+                 raise TypeError("token should only be of type bytes or str")
+         if temp:
+             text += temp.decode("utf-8", errors=self.errors)
+         return text
+
+     @property
+     def vocab_size(self):
+         return self.tokenizer.n_vocab
+
+     def _convert_id_to_token(self, index: int) -> Union[bytes, str]:
+         """Converts an id to a token, special tokens included"""
+         if index in self.decoder:
+             return self.decoder[index]
+         raise ValueError("unknown ids")
+
+     def _convert_token_to_id(self, token: Union[bytes, str]) -> int:
+         """Converts a token to an id using the vocab, special tokens included"""
+         if token in self.special_tokens:
+             return self.special_tokens[token]
+         if token in self.mergeable_ranks:
+             return self.mergeable_ranks[token]
+         raise ValueError("unknown token")
+
+     def _tokenize(self, text: str, **kwargs):
+         """
+         Converts a string into a sequence of tokens (string), using the tokenizer. Split in words for word-based
+         vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces).
+
+         Does NOT take care of added tokens.
+         """
+         raise NotImplementedError
+
+     def _decode(
+         self,
+         token_ids: Union[int, List[int]],
+         skip_special_tokens: bool = False,
+         errors: str = None,
+         **kwargs,
+     ) -> str:
+         if isinstance(token_ids, int):
+             token_ids = [token_ids]
+         if skip_special_tokens:
+             token_ids = [i for i in token_ids if i < self.eod_id]
+         return self.tokenizer.decode(token_ids, errors=errors or self.errors)
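
The special tokens defined above (`<|im_start|>`, `<|im_end|>`, `<|endoftext|>`) are the ChatML delimiters Qwen-Chat models are trained with. As a rough illustration of the prompt layout (the exact system prompt and assembly logic are assumptions; Qwen's own chat helpers normally build this):

```python
# Hedged sketch of a ChatML-style prompt built from the special tokens above.
IM_START, IM_END = "<|im_start|>", "<|im_end|>"

def chatml_prompt(user_message: str, system: str = "You are a helpful assistant.") -> str:
    """Assemble a single-turn ChatML prompt ending with an open assistant block."""
    return (
        f"{IM_START}system\n{system}{IM_END}\n"
        f"{IM_START}user\n{user_message}{IM_END}\n"
        f"{IM_START}assistant\n"
    )

print(chatml_prompt("Summarize what a LoRA adapter is."))
```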
tokenizer_config.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "added_tokens_decoder": {},
+   "auto_map": {
+     "AutoTokenizer": [
+       "tokenization_qwen.QWenTokenizer",
+       null
+     ]
+   },
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|im_end|>",
+   "model_max_length": 8192,
+   "pad_token": "<|im_end|>",
+   "padding_side": "right",
+   "split_special_tokens": false,
+   "tokenizer_class": "QWenTokenizer"
+ }
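
Because `auto_map` points `AutoTokenizer` at `tokenization_qwen.QWenTokenizer`, the tokenizer must be loaded with `trust_remote_code=True`. A minimal sketch (the directory name is a placeholder for wherever this checkpoint lives):

```python
# Load the custom tiktoken-based tokenizer shipped in this repo (illustrative).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "path_to_sft_checkpoint_eevalplusceval",  # placeholder checkpoint directory
    trust_remote_code=True,                   # resolves QWenTokenizer via auto_map
)

ids = tokenizer("Hello, Qwen!")["input_ids"]
print(ids)
print(tokenizer.decode(ids))                       # round-trips through the BPE ranks
print(tokenizer.im_start_id, tokenizer.im_end_id)  # ChatML special-token ids
```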
train_results.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "epoch": 3.0,
+   "train_loss": 0.5275618712107341,
+   "train_runtime": 2086.9604,
+   "train_samples_per_second": 2.069,
+   "train_steps_per_second": 0.065
+ }
trainer_log.jsonl ADDED
@@ -0,0 +1,14 @@
+ {"current_steps": 10, "total_steps": 135, "loss": 4.7333, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5e-05, "epoch": 0.22, "percentage": 7.41, "elapsed_time": "0:02:42", "remaining_time": "0:33:46"}
+ {"current_steps": 20, "total_steps": 135, "loss": 1.0511, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5e-05, "epoch": 0.44, "percentage": 14.81, "elapsed_time": "0:05:16", "remaining_time": "0:30:20"}
+ {"current_steps": 30, "total_steps": 135, "loss": 0.2765, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5e-05, "epoch": 0.67, "percentage": 22.22, "elapsed_time": "0:07:50", "remaining_time": "0:27:26"}
+ {"current_steps": 40, "total_steps": 135, "loss": 0.1875, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5e-05, "epoch": 0.89, "percentage": 29.63, "elapsed_time": "0:10:24", "remaining_time": "0:24:42"}
+ {"current_steps": 50, "total_steps": 135, "loss": 0.2089, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5e-05, "epoch": 1.11, "percentage": 37.04, "elapsed_time": "0:12:57", "remaining_time": "0:22:02"}
+ {"current_steps": 60, "total_steps": 135, "loss": 0.1893, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5e-05, "epoch": 1.33, "percentage": 44.44, "elapsed_time": "0:15:32", "remaining_time": "0:19:25"}
+ {"current_steps": 70, "total_steps": 135, "loss": 0.1092, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5e-05, "epoch": 1.56, "percentage": 51.85, "elapsed_time": "0:18:06", "remaining_time": "0:16:48"}
+ {"current_steps": 80, "total_steps": 135, "loss": 0.1111, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5e-05, "epoch": 1.78, "percentage": 59.26, "elapsed_time": "0:20:40", "remaining_time": "0:14:12"}
+ {"current_steps": 90, "total_steps": 135, "loss": 0.0726, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5e-05, "epoch": 2.0, "percentage": 66.67, "elapsed_time": "0:23:14", "remaining_time": "0:11:37"}
+ {"current_steps": 100, "total_steps": 135, "loss": 0.048, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5e-05, "epoch": 2.22, "percentage": 74.07, "elapsed_time": "0:25:48", "remaining_time": "0:09:01"}
+ {"current_steps": 110, "total_steps": 135, "loss": 0.029, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5e-05, "epoch": 2.44, "percentage": 81.48, "elapsed_time": "0:28:21", "remaining_time": "0:06:26"}
+ {"current_steps": 120, "total_steps": 135, "loss": 0.037, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5e-05, "epoch": 2.67, "percentage": 88.89, "elapsed_time": "0:30:55", "remaining_time": "0:03:51"}
+ {"current_steps": 130, "total_steps": 135, "loss": 0.0554, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5e-05, "epoch": 2.89, "percentage": 96.3, "elapsed_time": "0:33:30", "remaining_time": "0:01:17"}
+ {"current_steps": 135, "total_steps": 135, "loss": null, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 3.0, "percentage": 100.0, "elapsed_time": "0:34:46", "remaining_time": "0:00:00"}
trainer_state.json ADDED
@@ -0,0 +1,108 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 3.0,
+   "eval_steps": 500,
+   "global_step": 135,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.22,
+       "learning_rate": 5e-05,
+       "loss": 4.7333,
+       "step": 10
+     },
+     {
+       "epoch": 0.44,
+       "learning_rate": 5e-05,
+       "loss": 1.0511,
+       "step": 20
+     },
+     {
+       "epoch": 0.67,
+       "learning_rate": 5e-05,
+       "loss": 0.2765,
+       "step": 30
+     },
+     {
+       "epoch": 0.89,
+       "learning_rate": 5e-05,
+       "loss": 0.1875,
+       "step": 40
+     },
+     {
+       "epoch": 1.11,
+       "learning_rate": 5e-05,
+       "loss": 0.2089,
+       "step": 50
+     },
+     {
+       "epoch": 1.33,
+       "learning_rate": 5e-05,
+       "loss": 0.1893,
+       "step": 60
+     },
+     {
+       "epoch": 1.56,
+       "learning_rate": 5e-05,
+       "loss": 0.1092,
+       "step": 70
+     },
+     {
+       "epoch": 1.78,
+       "learning_rate": 5e-05,
+       "loss": 0.1111,
+       "step": 80
+     },
+     {
+       "epoch": 2.0,
+       "learning_rate": 5e-05,
+       "loss": 0.0726,
+       "step": 90
+     },
+     {
+       "epoch": 2.22,
+       "learning_rate": 5e-05,
+       "loss": 0.048,
+       "step": 100
+     },
+     {
+       "epoch": 2.44,
+       "learning_rate": 5e-05,
+       "loss": 0.029,
+       "step": 110
+     },
+     {
+       "epoch": 2.67,
+       "learning_rate": 5e-05,
+       "loss": 0.037,
+       "step": 120
+     },
+     {
+       "epoch": 2.89,
+       "learning_rate": 5e-05,
+       "loss": 0.0554,
+       "step": 130
+     },
+     {
+       "epoch": 3.0,
+       "step": 135,
+       "total_flos": 26945073709056.0,
+       "train_loss": 0.5275618712107341,
+       "train_runtime": 2086.9604,
+       "train_samples_per_second": 2.069,
+       "train_steps_per_second": 0.065
+     }
+   ],
+   "logging_steps": 10,
+   "max_steps": 135,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 1000,
+   "total_flos": 26945073709056.0,
+   "train_batch_size": 1,
+   "trial_name": null,
+   "trial_params": null
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f101cba683a42d4cf967d5baa893c0b73fc95dec41338e58bde1698e156605e6
+ size 7288
training_loss.png ADDED