LLMNick committed
Commit 4a696be (1 parent: 61dc66c)

Upload folder using huggingface_hub

chatglm3-6b/lora/sft-ruozhiba/README.md ADDED
@@ -0,0 +1,62 @@
+ ---
+ license: other
+ library_name: peft
+ tags:
+ - sft-tools
+ - lora
+ - generated_from_trainer
+ base_model: THUDM/chatglm3-6b
+ model-index:
+ - name: sft-ruozhiba
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # sft-ruozhiba
+
+ This model is a fine-tuned version of [THUDM/chatglm3-6b](https://huggingface.co/THUDM/chatglm3-6b) on the ruozhiba_identity and the ruozhiba datasets.
+ It achieves the following results on the evaluation set:
+ - Loss: 1.5318
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 2
+ - eval_batch_size: 1
+ - seed: 42
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 8
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_steps: 0.1
+ - num_epochs: 4.0
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - PEFT 0.11.1
+ - Transformers 4.39.3
+ - Pytorch 2.1.2
+ - Datasets 2.18.0
+ - Tokenizers 0.15.2
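
For context, the total_train_batch_size of 8 above is train_batch_size 2 × gradient_accumulation_steps 4. Below is a minimal usage sketch (not part of this commit) of how such a LoRA adapter is typically attached to the base model with PEFT; the local folder path and the remote-code chat() helper are assumptions of the sketch.

import torch
from transformers import AutoModel, AutoTokenizer
from peft import PeftModel

adapter_dir = "chatglm3-6b/lora/sft-ruozhiba"  # hypothetical local copy of this folder

# The tokenizer files (tokenizer.model, tokenization_chatglm.py) ship with the adapter.
tokenizer = AutoTokenizer.from_pretrained(adapter_dir, trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/chatglm3-6b", trust_remote_code=True,
                                  torch_dtype=torch.float16)
model = PeftModel.from_pretrained(model, adapter_dir)  # load the LoRA weights on top
model.eval()

# ChatGLM3's remote code exposes a chat() convenience method (assumed here).
response, history = model.chat(tokenizer, "你好", history=[])
print(response)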
chatglm3-6b/lora/sft-ruozhiba/adapter_config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "THUDM/chatglm3-6b",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0.0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "dense",
+     "dense_h_to_4h",
+     "query_key_value",
+     "dense_4h_to_h"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
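
For orientation, a hedged sketch (not a file in this commit) of the PEFT LoraConfig that the adapter_config.json above serializes; the field names map one-to-one:

from peft import LoraConfig

lora_config = LoraConfig(
    r=8,                 # low-rank dimension ("r" above)
    lora_alpha=16,       # effective scale = lora_alpha / r = 2.0
    lora_dropout=0.0,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["query_key_value", "dense", "dense_h_to_4h", "dense_4h_to_h"],
)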
chatglm3-6b/lora/sft-ruozhiba/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c0031bc5780e54f17ec724d3e59f4933f618c83c0237558d19e148b6abdeef58
+ size 59327904
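
The ~59 MB pointer target is consistent with the r=8 adapter above stored in fp32. A back-of-the-envelope check, assuming ChatGLM3-6B's published shapes (28 layers, hidden size 4096, fused QKV output 4608, gated FFN with inner size 13696); these dimensions are not stated anywhere in this commit:

r = 8
per_layer = r * (
    (4096 + 4608)         # query_key_value: A is r x in, B is out x r
    + (4096 + 4096)       # dense
    + (4096 + 2 * 13696)  # dense_h_to_4h (gated, so doubled output)
    + (13696 + 4096)      # dense_4h_to_h
)
params = 28 * per_layer  # ~14.8M trainable parameters
print(params * 4)        # fp32 bytes: 59293696, close to the 59327904-byte file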
chatglm3-6b/lora/sft-ruozhiba/all_results.json ADDED
@@ -0,0 +1,11 @@
+ {
+   "epoch": 3.98,
+   "eval_loss": 1.531842589378357,
+   "eval_runtime": 12.5881,
+   "eval_samples_per_second": 8.738,
+   "eval_steps_per_second": 8.738,
+   "train_loss": 1.5705121149782275,
+   "train_runtime": 3078.3762,
+   "train_samples_per_second": 1.276,
+   "train_steps_per_second": 0.159
+ }
chatglm3-6b/lora/sft-ruozhiba/eval_results.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "epoch": 3.98,
+   "eval_loss": 1.531842589378357,
+   "eval_runtime": 12.5881,
+   "eval_samples_per_second": 8.738,
+   "eval_steps_per_second": 8.738
+ }
chatglm3-6b/lora/sft-ruozhiba/runs/May19_12-24-27_5cfc7f17b165/events.out.tfevents.1716121557.5cfc7f17b165.181.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0919f761cf37c63cdc946cc2419fdbf68d006aeb3dfdeb5114aefef07cd3b9a6
+ size 16153
chatglm3-6b/lora/sft-ruozhiba/runs/May19_12-24-27_5cfc7f17b165/events.out.tfevents.1716124649.5cfc7f17b165.181.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2177598a657dab2333b20dbf8271f05d54f04e4c550f9bd7f703cd04f576b0f9
+ size 359
chatglm3-6b/lora/sft-ruozhiba/special_tokens_map.json ADDED
@@ -0,0 +1,18 @@
+ {
+   "additional_special_tokens": [
+     {
+       "content": "<|user|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<|observation|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     }
+   ]
+ }
chatglm3-6b/lora/sft-ruozhiba/tokenization_chatglm.py ADDED
@@ -0,0 +1,328 @@
+ import json
+ import os
+ import re
+ from typing import List, Optional, Union, Dict
+ from sentencepiece import SentencePieceProcessor
+ from transformers import PreTrainedTokenizer
+ from transformers.utils import logging, PaddingStrategy
+ from transformers.tokenization_utils_base import EncodedInput, BatchEncoding
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class SPTokenizer:
+     def __init__(self, model_path: str):
+         # reload tokenizer
+         assert os.path.isfile(model_path), model_path
+         self.sp_model = SentencePieceProcessor(model_file=model_path)
+
+         # BOS / EOS token IDs
+         self.n_words: int = self.sp_model.vocab_size()
+         self.bos_id: int = self.sp_model.bos_id()
+         self.eos_id: int = self.sp_model.eos_id()
+         self.pad_id: int = self.sp_model.unk_id()
+         assert self.sp_model.vocab_size() == self.sp_model.get_piece_size()
+
+         role_special_tokens = ["<|system|>", "<|user|>", "<|assistant|>", "<|observation|>"]
+         special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"] + role_special_tokens
+         self.special_tokens = {}
+         self.index_special_tokens = {}
+         for token in special_tokens:
+             self.special_tokens[token] = self.n_words
+             self.index_special_tokens[self.n_words] = token
+             self.n_words += 1
+         self.role_special_token_expression = "|".join([re.escape(token) for token in special_tokens])  # for apply_chat_template
+
+     def tokenize(self, s: str, encode_special_tokens=False):
+         if encode_special_tokens:
+             last_index = 0
+             t = []
+             for match in re.finditer(self.role_special_token_expression, s):
+                 if last_index < match.start():
+                     t.extend(self.sp_model.EncodeAsPieces(s[last_index:match.start()]))
+                 t.append(s[match.start():match.end()])
+                 last_index = match.end()
+             if last_index < len(s):
+                 t.extend(self.sp_model.EncodeAsPieces(s[last_index:]))
+             return t
+         else:
+             return self.sp_model.EncodeAsPieces(s)
+
+     def encode(self, s: str, bos: bool = False, eos: bool = False) -> List[int]:
+         assert type(s) is str
+         t = self.sp_model.encode(s)
+         if bos:
+             t = [self.bos_id] + t
+         if eos:
+             t = t + [self.eos_id]
+         return t
+
+     def decode(self, t: List[int]) -> str:
+         text, buffer = "", []
+         for token in t:
+             if token in self.index_special_tokens:
+                 if buffer:
+                     text += self.sp_model.decode(buffer)
+                     buffer = []
+                 text += self.index_special_tokens[token]
+             else:
+                 buffer.append(token)
+         if buffer:
+             text += self.sp_model.decode(buffer)
+         return text
+
+     def decode_tokens(self, tokens: List[str]) -> str:
+         text = self.sp_model.DecodePieces(tokens)
+         return text
+
+     def convert_token_to_id(self, token):
+         """Converts a token (str) to an id using the vocab."""
+         if token in self.special_tokens:
+             return self.special_tokens[token]
+         return self.sp_model.PieceToId(token)
+
+     def convert_id_to_token(self, index):
+         """Converts an index (integer) to a token (str) using the vocab."""
+         if index in self.index_special_tokens:
+             return self.index_special_tokens[index]
+         if index in [self.eos_id, self.bos_id, self.pad_id] or index < 0 or index > self.sp_model.vocab_size():
+             return ""
+         return self.sp_model.IdToPiece(index)
+
+
+ class ChatGLMTokenizer(PreTrainedTokenizer):
+
+     vocab_files_names = {"vocab_file": "tokenizer.model"}
+     model_input_names = ["input_ids", "attention_mask", "position_ids"]
+
+     def __init__(
+         self,
+         vocab_file,
+         padding_side="left",
+         clean_up_tokenization_spaces=False,
+         encode_special_tokens=False,
+         **kwargs
+     ):
+         self.name = "GLMTokenizer"
+         self.vocab_file = vocab_file
+         self.tokenizer = SPTokenizer(vocab_file)
+         self.special_tokens = {
+             "<bos>": self.tokenizer.bos_id,
+             "<eos>": self.tokenizer.eos_id,
+             "<unk>": self.tokenizer.pad_id,
+             "<pad>": self.tokenizer.pad_id
+         }
+         self.encode_special_tokens = encode_special_tokens
+
+         super().__init__(
+             padding_side=padding_side,
+             clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+             **kwargs
+         )
+
+     def get_command(self, token):
+         if token in self.special_tokens:
+             return self.special_tokens[token]
+         assert token in self.tokenizer.special_tokens, f"{token} is not a special token for {self.name}"
+         return self.tokenizer.special_tokens[token]
+
+     @property
+     def unk_token(self) -> str:
+         return self.tokenizer.sp_model.IdToPiece(self.get_command("<unk>"))
+
+     @property
+     def pad_token(self) -> str:
+         return self.tokenizer.sp_model.IdToPiece(self.get_command("<pad>"))
+
+     @property
+     def eos_token(self) -> str:
+         return self.tokenizer.sp_model.IdToPiece(self.get_command("<eos>"))
+
+     @property
+     def unk_token_id(self) -> int:
+         return self.get_command("<unk>")
+
+     @property
+     def pad_token_id(self) -> int:
+         return self.get_command("<pad>")
+
+     @property
+     def eos_token_id(self):
+         return self.get_command("<eos>")
+
+     @unk_token.setter
+     def unk_token(self, value):
+         logger.warning("Setting unk_token is not supported, use the default one.")
+
+     @pad_token.setter
+     def pad_token(self, value):
+         logger.warning("Setting pad_token is not supported, use the default one.")
+
+     @eos_token.setter
+     def eos_token(self, value):
+         logger.warning("Setting eos_token is not supported, use the default one.")
+
+     @property
+     def vocab_size(self):
+         return self.tokenizer.n_words
+
+     def get_vocab(self):
+         """Returns the vocab as a dict."""
+         vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)}
+         vocab.update(self.added_tokens_encoder)
+         return vocab
+
+     def _tokenize(self, text, **kwargs):
+         return self.tokenizer.tokenize(text, encode_special_tokens=self.encode_special_tokens)
+
+     def _convert_token_to_id(self, token):
+         """Converts a token (str) to an id using the vocab."""
+         return self.tokenizer.convert_token_to_id(token)
+
+     def _convert_id_to_token(self, index):
+         """Converts an index (integer) to a token (str) using the vocab."""
+         return self.tokenizer.convert_id_to_token(index)
+
+     def convert_tokens_to_string(self, tokens: List[str]) -> str:
+         return self.tokenizer.decode_tokens(tokens)
+
+     def save_vocabulary(self, save_directory, filename_prefix=None):
+         """
+         Save the vocabulary and special tokens file to a directory.
+
+         Args:
+             save_directory (`str`):
+                 The directory in which to save the vocabulary.
+             filename_prefix (`str`, *optional*):
+                 An optional prefix to add to the names of the saved files.
+
+         Returns:
+             `Tuple(str)`: Paths to the files saved.
+         """
+         if os.path.isdir(save_directory):
+             vocab_file = os.path.join(
+                 save_directory, self.vocab_files_names["vocab_file"]
+             )
+         else:
+             vocab_file = save_directory
+
+         with open(self.vocab_file, 'rb') as fin:
+             proto_str = fin.read()
+
+         with open(vocab_file, "wb") as writer:
+             writer.write(proto_str)
+
+         return (vocab_file,)
+
+     def get_prefix_tokens(self):
+         prefix_tokens = [self.get_command("[gMASK]"), self.get_command("sop")]
+         return prefix_tokens
+
+     def build_single_message(self, role, metadata, message):
+         assert role in ["system", "user", "assistant", "observation"], role
+         role_tokens = [self.get_command(f"<|{role}|>")] + self.tokenizer.encode(f"{metadata}\n")
+         message_tokens = self.tokenizer.encode(message)
+         tokens = role_tokens + message_tokens
+         return tokens
+
+     def build_chat_input(self, query, history=None, role="user"):
+         if history is None:
+             history = []
+         input_ids = []
+         for item in history:
+             content = item["content"]
+             if item["role"] == "system" and "tools" in item:
+                 content = content + "\n" + json.dumps(item["tools"], indent=4, ensure_ascii=False)
+             input_ids.extend(self.build_single_message(item["role"], item.get("metadata", ""), content))
+         input_ids.extend(self.build_single_message(role, "", query))
+         input_ids.extend([self.get_command("<|assistant|>")])
+         return self.batch_encode_plus([input_ids], return_tensors="pt", is_split_into_words=True)
+
+     def build_inputs_with_special_tokens(
+             self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+     ) -> List[int]:
+         """
+         Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+         adding special tokens. A BERT sequence has the following format:
+
+         - single sequence: `[CLS] X [SEP]`
+         - pair of sequences: `[CLS] A [SEP] B [SEP]`
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs to which the special tokens will be added.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+
+         Returns:
+             `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+         """
+         prefix_tokens = self.get_prefix_tokens()
+         token_ids_0 = prefix_tokens + token_ids_0
+         if token_ids_1 is not None:
+             token_ids_0 = token_ids_0 + token_ids_1 + [self.get_command("<eos>")]
+         return token_ids_0
+
+     def _pad(
+             self,
+             encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
+             max_length: Optional[int] = None,
+             padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+             pad_to_multiple_of: Optional[int] = None,
+             return_attention_mask: Optional[bool] = None,
+     ) -> dict:
+         """
+         Pad encoded inputs (on left/right and up to predefined length or max length in the batch).
+
+         Args:
+             encoded_inputs:
+                 Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
+             max_length: maximum length of the returned list and optionally padding length (see below).
+                 Will truncate by taking into account the special tokens.
+             padding_strategy: PaddingStrategy to use for padding.
+
+                 - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
+                 - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
+                 - PaddingStrategy.DO_NOT_PAD: Do not pad
+                 The tokenizer padding side is defined in self.padding_side:
+
+                     - 'left': pads on the left of the sequences
+                     - 'right': pads on the right of the sequences
+             pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
+                 This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
+                 `>= 7.5` (Volta).
+             return_attention_mask:
+                 (optional) Set to False to avoid returning attention mask (default: set to model specifics)
+         """
+         # Load from model defaults
+         assert self.padding_side == "left"
+
+         required_input = encoded_inputs[self.model_input_names[0]]
+         seq_length = len(required_input)
+
+         if padding_strategy == PaddingStrategy.LONGEST:
+             max_length = len(required_input)
+
+         if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
+             max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
+
+         needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
+
+         # Initialize attention mask if not present.
+         if "attention_mask" not in encoded_inputs:
+             encoded_inputs["attention_mask"] = [1] * seq_length
+
+         if "position_ids" not in encoded_inputs:
+             encoded_inputs["position_ids"] = list(range(seq_length))
+
+         if needs_to_be_padded:
+             difference = max_length - len(required_input)
+
+             if "attention_mask" in encoded_inputs:
+                 encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
+             if "position_ids" in encoded_inputs:
+                 encoded_inputs["position_ids"] = [0] * difference + encoded_inputs["position_ids"]
+             encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
+
+         return encoded_inputs
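
A hedged usage sketch for the tokenizer above (assumes a local copy of this folder): build_chat_input prepends the [gMASK]sop prefix, wraps each turn in its role token, and appends <|assistant|> so generation continues as the assistant.

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("chatglm3-6b/lora/sft-ruozhiba",
                                    trust_remote_code=True)

# One user turn; returns a BatchEncoding with input_ids / attention_mask / position_ids.
batch = tok.build_chat_input("你好", history=[], role="user")
print(batch["input_ids"].shape)           # (1, sequence_length)
print(tok.decode(batch["input_ids"][0]))  # shows the role-token framing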
chatglm3-6b/lora/sft-ruozhiba/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e7dc4c393423b76e4373e5157ddc34803a0189ba96b21ddbb40269d31468a6f2
+ size 1018370
chatglm3-6b/lora/sft-ruozhiba/tokenizer_config.json ADDED
@@ -0,0 +1,65 @@
+ {
+   "added_tokens_decoder": {
+     "64790": {
+       "content": "[gMASK]",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "64792": {
+       "content": "sop",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "64795": {
+       "content": "<|user|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "64796": {
+       "content": "<|assistant|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "64797": {
+       "content": "<|observation|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|user|>",
+     "<|observation|>"
+   ],
+   "auto_map": {
+     "AutoTokenizer": [
+       "tokenization_chatglm.ChatGLMTokenizer",
+       null
+     ]
+   },
+   "chat_template": "{% for message in messages %}{% if loop.first %}[gMASK]sop<|{{ message['role'] }}|>\n {{ message['content'] }}{% else %}<|{{ message['role'] }}|>\n {{ message['content'] }}{% endif %}{% endfor %}{% if add_generation_prompt %}<|assistant|>{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "do_lower_case": false,
+   "eos_token": "</s>",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<unk>",
+   "padding_side": "right",
+   "remove_space": false,
+   "split_special_tokens": false,
+   "tokenizer_class": "ChatGLMTokenizer",
+   "unk_token": "<unk>"
+ }
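
The chat_template above drives apply_chat_template; a hedged sketch (same assumed local path) showing the rendered prompt for a single user message:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("chatglm3-6b/lora/sft-ruozhiba",
                                    trust_remote_code=True)
messages = [{"role": "user", "content": "你好"}]
text = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)  # expected per the template: "[gMASK]sop<|user|>\n 你好<|assistant|>"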
chatglm3-6b/lora/sft-ruozhiba/train_results.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "epoch": 3.98,
+   "train_loss": 1.5705121149782275,
+   "train_runtime": 3078.3762,
+   "train_samples_per_second": 1.276,
+   "train_steps_per_second": 0.159
+ }
chatglm3-6b/lora/sft-ruozhiba/trainer_log.jsonl ADDED
@@ -0,0 +1,49 @@
+ {"current_steps": 10, "total_steps": 488, "loss": 2.5464, "learning_rate": 4.994922248529205e-05, "epoch": 0.08, "percentage": 2.05, "elapsed_time": "0:00:59", "remaining_time": "0:47:30"}
+ {"current_steps": 20, "total_steps": 488, "loss": 2.4707, "learning_rate": 4.9795044312909347e-05, "epoch": 0.16, "percentage": 4.1, "elapsed_time": "0:02:00", "remaining_time": "0:46:53"}
+ {"current_steps": 30, "total_steps": 488, "loss": 2.4142, "learning_rate": 4.9538099315089056e-05, "epoch": 0.24, "percentage": 6.15, "elapsed_time": "0:03:01", "remaining_time": "0:46:11"}
+ {"current_steps": 40, "total_steps": 488, "loss": 2.1245, "learning_rate": 4.9179452439382994e-05, "epoch": 0.33, "percentage": 8.2, "elapsed_time": "0:04:05", "remaining_time": "0:45:45"}
+ {"current_steps": 50, "total_steps": 488, "loss": 2.0647, "learning_rate": 4.872059015221584e-05, "epoch": 0.41, "percentage": 10.25, "elapsed_time": "0:05:08", "remaining_time": "0:45:02"}
+ {"current_steps": 60, "total_steps": 488, "loss": 1.9556, "learning_rate": 4.8163414277999356e-05, "epoch": 0.49, "percentage": 12.3, "elapsed_time": "0:06:13", "remaining_time": "0:44:24"}
+ {"current_steps": 70, "total_steps": 488, "loss": 1.8587, "learning_rate": 4.751023411673241e-05, "epoch": 0.57, "percentage": 14.34, "elapsed_time": "0:07:17", "remaining_time": "0:43:32"}
+ {"current_steps": 80, "total_steps": 488, "loss": 1.8275, "learning_rate": 4.6763756872756525e-05, "epoch": 0.65, "percentage": 16.39, "elapsed_time": "0:08:21", "remaining_time": "0:42:39"}
+ {"current_steps": 90, "total_steps": 488, "loss": 1.83, "learning_rate": 4.59270764343365e-05, "epoch": 0.73, "percentage": 18.44, "elapsed_time": "0:09:26", "remaining_time": "0:41:44"}
+ {"current_steps": 100, "total_steps": 488, "loss": 1.751, "learning_rate": 4.500366055057077e-05, "epoch": 0.81, "percentage": 20.49, "elapsed_time": "0:10:31", "remaining_time": "0:40:50"}
+ {"current_steps": 110, "total_steps": 488, "loss": 1.7397, "learning_rate": 4.3997336458778874e-05, "epoch": 0.9, "percentage": 22.54, "elapsed_time": "0:11:34", "remaining_time": "0:39:47"}
+ {"current_steps": 120, "total_steps": 488, "loss": 1.74, "learning_rate": 4.2912275021935244e-05, "epoch": 0.98, "percentage": 24.59, "elapsed_time": "0:12:39", "remaining_time": "0:38:48"}
+ {"current_steps": 130, "total_steps": 488, "loss": 1.6727, "learning_rate": 4.1752973441894504e-05, "epoch": 1.06, "percentage": 26.64, "elapsed_time": "0:13:42", "remaining_time": "0:37:44"}
+ {"current_steps": 140, "total_steps": 488, "loss": 1.5492, "learning_rate": 4.052423662005558e-05, "epoch": 1.14, "percentage": 28.69, "elapsed_time": "0:14:48", "remaining_time": "0:36:49"}
+ {"current_steps": 150, "total_steps": 488, "loss": 1.6558, "learning_rate": 3.923115724271841e-05, "epoch": 1.22, "percentage": 30.74, "elapsed_time": "0:15:49", "remaining_time": "0:35:39"}
+ {"current_steps": 160, "total_steps": 488, "loss": 1.5154, "learning_rate": 3.78790946736724e-05, "epoch": 1.3, "percentage": 32.79, "elapsed_time": "0:16:55", "remaining_time": "0:34:42"}
+ {"current_steps": 170, "total_steps": 488, "loss": 1.6066, "learning_rate": 3.647365274149962e-05, "epoch": 1.38, "percentage": 34.84, "elapsed_time": "0:17:58", "remaining_time": "0:33:36"}
+ {"current_steps": 180, "total_steps": 488, "loss": 1.6032, "learning_rate": 3.502065651365643e-05, "epoch": 1.47, "percentage": 36.89, "elapsed_time": "0:19:04", "remaining_time": "0:32:37"}
+ {"current_steps": 190, "total_steps": 488, "loss": 1.5316, "learning_rate": 3.3526128153597086e-05, "epoch": 1.55, "percentage": 38.93, "elapsed_time": "0:20:06", "remaining_time": "0:31:32"}
+ {"current_steps": 200, "total_steps": 488, "loss": 1.5697, "learning_rate": 3.1996261961003084e-05, "epoch": 1.63, "percentage": 40.98, "elapsed_time": "0:21:13", "remaining_time": "0:30:33"}
+ {"current_steps": 210, "total_steps": 488, "loss": 1.5249, "learning_rate": 3.043739869856768e-05, "epoch": 1.71, "percentage": 43.03, "elapsed_time": "0:22:15", "remaining_time": "0:29:27"}
+ {"current_steps": 220, "total_steps": 488, "loss": 1.5295, "learning_rate": 2.8855999311742328e-05, "epoch": 1.79, "percentage": 45.08, "elapsed_time": "0:23:19", "remaining_time": "0:28:24"}
+ {"current_steps": 230, "total_steps": 488, "loss": 1.5792, "learning_rate": 2.7258618150367328e-05, "epoch": 1.87, "percentage": 47.13, "elapsed_time": "0:24:20", "remaining_time": "0:27:17"}
+ {"current_steps": 240, "total_steps": 488, "loss": 1.4859, "learning_rate": 2.5651875803173912e-05, "epoch": 1.96, "percentage": 49.18, "elapsed_time": "0:25:23", "remaining_time": "0:26:14"}
+ {"current_steps": 250, "total_steps": 488, "loss": 1.5071, "learning_rate": 2.4042431657749117e-05, "epoch": 2.04, "percentage": 51.23, "elapsed_time": "0:26:24", "remaining_time": "0:25:08"}
+ {"current_steps": 260, "total_steps": 488, "loss": 1.3746, "learning_rate": 2.2436956299692906e-05, "epoch": 2.12, "percentage": 53.28, "elapsed_time": "0:27:27", "remaining_time": "0:24:04"}
+ {"current_steps": 270, "total_steps": 488, "loss": 1.5165, "learning_rate": 2.084210386536349e-05, "epoch": 2.2, "percentage": 55.33, "elapsed_time": "0:28:28", "remaining_time": "0:22:59"}
+ {"current_steps": 280, "total_steps": 488, "loss": 1.4035, "learning_rate": 1.926448446279894e-05, "epoch": 2.28, "percentage": 57.38, "elapsed_time": "0:29:30", "remaining_time": "0:21:55"}
+ {"current_steps": 290, "total_steps": 488, "loss": 1.4952, "learning_rate": 1.7710636775120946e-05, "epoch": 2.36, "percentage": 59.43, "elapsed_time": "0:30:33", "remaining_time": "0:20:52"}
+ {"current_steps": 300, "total_steps": 488, "loss": 1.3679, "learning_rate": 1.6187000959969926e-05, "epoch": 2.44, "percentage": 61.48, "elapsed_time": "0:31:34", "remaining_time": "0:19:47"}
+ {"current_steps": 310, "total_steps": 488, "loss": 1.348, "learning_rate": 1.469989195729396e-05, "epoch": 2.53, "percentage": 63.52, "elapsed_time": "0:32:32", "remaining_time": "0:18:40"}
+ {"current_steps": 320, "total_steps": 488, "loss": 1.4076, "learning_rate": 1.3255473316121486e-05, "epoch": 2.61, "percentage": 65.57, "elapsed_time": "0:33:36", "remaining_time": "0:17:38"}
+ {"current_steps": 330, "total_steps": 488, "loss": 1.37, "learning_rate": 1.1859731648796588e-05, "epoch": 2.69, "percentage": 67.62, "elapsed_time": "0:34:39", "remaining_time": "0:16:35"}
+ {"current_steps": 340, "total_steps": 488, "loss": 1.351, "learning_rate": 1.0518451818555322e-05, "epoch": 2.77, "percentage": 69.67, "elapsed_time": "0:35:37", "remaining_time": "0:15:30"}
+ {"current_steps": 350, "total_steps": 488, "loss": 1.3333, "learning_rate": 9.237192963281768e-06, "epoch": 2.85, "percentage": 71.72, "elapsed_time": "0:36:42", "remaining_time": "0:14:28"}
+ {"current_steps": 360, "total_steps": 488, "loss": 1.3649, "learning_rate": 8.021265454817112e-06, "epoch": 2.93, "percentage": 73.77, "elapsed_time": "0:37:42", "remaining_time": "0:13:24"}
+ {"current_steps": 370, "total_steps": 488, "loss": 1.3476, "learning_rate": 6.875708889317353e-06, "epoch": 3.01, "percentage": 75.82, "elapsed_time": "0:38:44", "remaining_time": "0:12:21"}
+ {"current_steps": 380, "total_steps": 488, "loss": 1.277, "learning_rate": 5.8052711998819395e-06, "epoch": 3.1, "percentage": 77.87, "elapsed_time": "0:39:41", "remaining_time": "0:11:16"}
+ {"current_steps": 390, "total_steps": 488, "loss": 1.3065, "learning_rate": 4.814388978024237e-06, "epoch": 3.18, "percentage": 79.92, "elapsed_time": "0:40:42", "remaining_time": "0:10:13"}
+ {"current_steps": 400, "total_steps": 488, "loss": 1.2941, "learning_rate": 3.907169085544424e-06, "epoch": 3.26, "percentage": 81.97, "elapsed_time": "0:41:41", "remaining_time": "0:09:10"}
+ {"current_steps": 410, "total_steps": 488, "loss": 1.33, "learning_rate": 3.0873716330173356e-06, "epoch": 3.34, "percentage": 84.02, "elapsed_time": "0:42:47", "remaining_time": "0:08:08"}
+ {"current_steps": 420, "total_steps": 488, "loss": 1.2729, "learning_rate": 2.3583943954432725e-06, "epoch": 3.42, "percentage": 86.07, "elapsed_time": "0:43:50", "remaining_time": "0:07:05"}
+ {"current_steps": 430, "total_steps": 488, "loss": 1.3031, "learning_rate": 1.7232587296537233e-06, "epoch": 3.5, "percentage": 88.11, "elapsed_time": "0:44:52", "remaining_time": "0:06:03"}
+ {"current_steps": 440, "total_steps": 488, "loss": 1.2636, "learning_rate": 1.1845970518392591e-06, "epoch": 3.58, "percentage": 90.16, "elapsed_time": "0:45:50", "remaining_time": "0:05:00"}
+ {"current_steps": 450, "total_steps": 488, "loss": 1.2348, "learning_rate": 7.446419271010113e-07, "epoch": 3.67, "percentage": 92.21, "elapsed_time": "0:46:51", "remaining_time": "0:03:57"}
+ {"current_steps": 460, "total_steps": 488, "loss": 1.3258, "learning_rate": 4.0521681624565434e-07, "epoch": 3.75, "percentage": 94.26, "elapsed_time": "0:48:02", "remaining_time": "0:02:55"}
+ {"current_steps": 470, "total_steps": 488, "loss": 1.3038, "learning_rate": 1.6772851817526414e-07, "epoch": 3.83, "percentage": 96.31, "elapsed_time": "0:49:07", "remaining_time": "0:01:52"}
+ {"current_steps": 480, "total_steps": 488, "loss": 1.342, "learning_rate": 3.3161339195697526e-08, "epoch": 3.91, "percentage": 98.36, "elapsed_time": "0:50:10", "remaining_time": "0:00:50"}
+ {"current_steps": 488, "total_steps": 488, "epoch": 3.98, "percentage": 100.0, "elapsed_time": "0:51:01", "remaining_time": "0:00:00"}
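
The JSONL above holds one record per logging step (every 10 steps). A minimal sketch for recovering the loss curve from it, presumably the same data behind training_loss.png:

import json

steps, losses = [], []
with open("chatglm3-6b/lora/sft-ruozhiba/trainer_log.jsonl") as f:
    for line in f:
        record = json.loads(line)
        if "loss" in record:          # the final summary record carries no loss
            steps.append(record["current_steps"])
            losses.append(record["loss"])

print(f"{len(steps)} points; loss {losses[0]:.4f} -> {losses[-1]:.4f}")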
chatglm3-6b/lora/sft-ruozhiba/trainer_state.json ADDED
@@ -0,0 +1,366 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 3.975560081466395,
+   "eval_steps": 500,
+   "global_step": 488,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.08,
+       "grad_norm": 0.917630672454834,
+       "learning_rate": 4.994922248529205e-05,
+       "loss": 2.5464,
+       "step": 10
+     },
+     {
+       "epoch": 0.16,
+       "grad_norm": 1.4127905368804932,
+       "learning_rate": 4.9795044312909347e-05,
+       "loss": 2.4707,
+       "step": 20
+     },
+     {
+       "epoch": 0.24,
+       "grad_norm": 1.5394383668899536,
+       "learning_rate": 4.9538099315089056e-05,
+       "loss": 2.4142,
+       "step": 30
+     },
+     {
+       "epoch": 0.33,
+       "grad_norm": 1.3929855823516846,
+       "learning_rate": 4.9179452439382994e-05,
+       "loss": 2.1245,
+       "step": 40
+     },
+     {
+       "epoch": 0.41,
+       "grad_norm": 1.5902705192565918,
+       "learning_rate": 4.872059015221584e-05,
+       "loss": 2.0647,
+       "step": 50
+     },
+     {
+       "epoch": 0.49,
+       "grad_norm": 1.7802547216415405,
+       "learning_rate": 4.8163414277999356e-05,
+       "loss": 1.9556,
+       "step": 60
+     },
+     {
+       "epoch": 0.57,
+       "grad_norm": 1.5567349195480347,
+       "learning_rate": 4.751023411673241e-05,
+       "loss": 1.8587,
+       "step": 70
+     },
+     {
+       "epoch": 0.65,
+       "grad_norm": 1.3391444683074951,
+       "learning_rate": 4.6763756872756525e-05,
+       "loss": 1.8275,
+       "step": 80
+     },
+     {
+       "epoch": 0.73,
+       "grad_norm": 1.4643337726593018,
+       "learning_rate": 4.59270764343365e-05,
+       "loss": 1.83,
+       "step": 90
+     },
+     {
+       "epoch": 0.81,
+       "grad_norm": 1.537935733795166,
+       "learning_rate": 4.500366055057077e-05,
+       "loss": 1.751,
+       "step": 100
+     },
+     {
+       "epoch": 0.9,
+       "grad_norm": 1.5327485799789429,
+       "learning_rate": 4.3997336458778874e-05,
+       "loss": 1.7397,
+       "step": 110
+     },
+     {
+       "epoch": 0.98,
+       "grad_norm": 1.2399487495422363,
+       "learning_rate": 4.2912275021935244e-05,
+       "loss": 1.74,
+       "step": 120
+     },
+     {
+       "epoch": 1.06,
+       "grad_norm": 1.7062827348709106,
+       "learning_rate": 4.1752973441894504e-05,
+       "loss": 1.6727,
+       "step": 130
+     },
+     {
+       "epoch": 1.14,
+       "grad_norm": 1.4249871969223022,
+       "learning_rate": 4.052423662005558e-05,
+       "loss": 1.5492,
+       "step": 140
+     },
+     {
+       "epoch": 1.22,
+       "grad_norm": 1.435344934463501,
+       "learning_rate": 3.923115724271841e-05,
+       "loss": 1.6558,
+       "step": 150
+     },
+     {
+       "epoch": 1.3,
+       "grad_norm": 1.6837048530578613,
+       "learning_rate": 3.78790946736724e-05,
+       "loss": 1.5154,
+       "step": 160
+     },
+     {
+       "epoch": 1.38,
+       "grad_norm": 1.6067469120025635,
+       "learning_rate": 3.647365274149962e-05,
+       "loss": 1.6066,
+       "step": 170
+     },
+     {
+       "epoch": 1.47,
+       "grad_norm": 1.7231266498565674,
+       "learning_rate": 3.502065651365643e-05,
+       "loss": 1.6032,
+       "step": 180
+     },
+     {
+       "epoch": 1.55,
+       "grad_norm": 2.0479469299316406,
+       "learning_rate": 3.3526128153597086e-05,
+       "loss": 1.5316,
+       "step": 190
+     },
+     {
+       "epoch": 1.63,
+       "grad_norm": 1.8524290323257446,
+       "learning_rate": 3.1996261961003084e-05,
+       "loss": 1.5697,
+       "step": 200
+     },
+     {
+       "epoch": 1.71,
+       "grad_norm": 1.7942742109298706,
+       "learning_rate": 3.043739869856768e-05,
+       "loss": 1.5249,
+       "step": 210
+     },
+     {
+       "epoch": 1.79,
+       "grad_norm": 1.4377387762069702,
+       "learning_rate": 2.8855999311742328e-05,
+       "loss": 1.5295,
+       "step": 220
+     },
+     {
+       "epoch": 1.87,
+       "grad_norm": 1.591834306716919,
+       "learning_rate": 2.7258618150367328e-05,
+       "loss": 1.5792,
+       "step": 230
+     },
+     {
+       "epoch": 1.96,
+       "grad_norm": 2.633655071258545,
+       "learning_rate": 2.5651875803173912e-05,
+       "loss": 1.4859,
+       "step": 240
+     },
+     {
+       "epoch": 2.04,
+       "grad_norm": 1.6626724004745483,
+       "learning_rate": 2.4042431657749117e-05,
+       "loss": 1.5071,
+       "step": 250
+     },
+     {
+       "epoch": 2.12,
+       "grad_norm": 1.819585919380188,
+       "learning_rate": 2.2436956299692906e-05,
+       "loss": 1.3746,
+       "step": 260
+     },
+     {
+       "epoch": 2.2,
+       "grad_norm": 1.9247617721557617,
+       "learning_rate": 2.084210386536349e-05,
+       "loss": 1.5165,
+       "step": 270
+     },
+     {
+       "epoch": 2.28,
+       "grad_norm": 2.381666898727417,
+       "learning_rate": 1.926448446279894e-05,
+       "loss": 1.4035,
+       "step": 280
+     },
+     {
+       "epoch": 2.36,
+       "grad_norm": 2.1338634490966797,
+       "learning_rate": 1.7710636775120946e-05,
+       "loss": 1.4952,
+       "step": 290
+     },
+     {
+       "epoch": 2.44,
+       "grad_norm": 1.8547377586364746,
+       "learning_rate": 1.6187000959969926e-05,
+       "loss": 1.3679,
+       "step": 300
+     },
+     {
+       "epoch": 2.53,
+       "grad_norm": 1.9622243642807007,
+       "learning_rate": 1.469989195729396e-05,
+       "loss": 1.348,
+       "step": 310
+     },
+     {
+       "epoch": 2.61,
+       "grad_norm": 2.2169859409332275,
+       "learning_rate": 1.3255473316121486e-05,
+       "loss": 1.4076,
+       "step": 320
+     },
+     {
+       "epoch": 2.69,
+       "grad_norm": 1.745296835899353,
+       "learning_rate": 1.1859731648796588e-05,
+       "loss": 1.37,
+       "step": 330
+     },
+     {
+       "epoch": 2.77,
+       "grad_norm": 2.050562858581543,
+       "learning_rate": 1.0518451818555322e-05,
+       "loss": 1.351,
+       "step": 340
+     },
+     {
+       "epoch": 2.85,
+       "grad_norm": 2.3423192501068115,
+       "learning_rate": 9.237192963281768e-06,
+       "loss": 1.3333,
+       "step": 350
+     },
+     {
+       "epoch": 2.93,
+       "grad_norm": 2.36187744140625,
+       "learning_rate": 8.021265454817112e-06,
+       "loss": 1.3649,
+       "step": 360
+     },
+     {
+       "epoch": 3.01,
+       "grad_norm": 2.3094940185546875,
+       "learning_rate": 6.875708889317353e-06,
+       "loss": 1.3476,
+       "step": 370
+     },
+     {
+       "epoch": 3.1,
+       "grad_norm": 2.6048502922058105,
+       "learning_rate": 5.8052711998819395e-06,
+       "loss": 1.277,
+       "step": 380
+     },
+     {
+       "epoch": 3.18,
+       "grad_norm": 2.370173215866089,
+       "learning_rate": 4.814388978024237e-06,
+       "loss": 1.3065,
+       "step": 390
+     },
+     {
+       "epoch": 3.26,
+       "grad_norm": 2.1737473011016846,
+       "learning_rate": 3.907169085544424e-06,
+       "loss": 1.2941,
+       "step": 400
+     },
+     {
+       "epoch": 3.34,
+       "grad_norm": 2.18640398979187,
+       "learning_rate": 3.0873716330173356e-06,
+       "loss": 1.33,
+       "step": 410
+     },
+     {
+       "epoch": 3.42,
+       "grad_norm": 3.3506879806518555,
+       "learning_rate": 2.3583943954432725e-06,
+       "loss": 1.2729,
+       "step": 420
+     },
+     {
+       "epoch": 3.5,
+       "grad_norm": 1.9210536479949951,
+       "learning_rate": 1.7232587296537233e-06,
+       "loss": 1.3031,
+       "step": 430
+     },
+     {
+       "epoch": 3.58,
+       "grad_norm": 1.9734101295471191,
+       "learning_rate": 1.1845970518392591e-06,
+       "loss": 1.2636,
+       "step": 440
+     },
+     {
+       "epoch": 3.67,
+       "grad_norm": 1.9752649068832397,
+       "learning_rate": 7.446419271010113e-07,
+       "loss": 1.2348,
+       "step": 450
+     },
+     {
+       "epoch": 3.75,
+       "grad_norm": 1.8863799571990967,
+       "learning_rate": 4.0521681624565434e-07,
+       "loss": 1.3258,
+       "step": 460
+     },
+     {
+       "epoch": 3.83,
+       "grad_norm": 2.084764242172241,
+       "learning_rate": 1.6772851817526414e-07,
+       "loss": 1.3038,
+       "step": 470
+     },
+     {
+       "epoch": 3.91,
+       "grad_norm": 2.2681760787963867,
+       "learning_rate": 3.3161339195697526e-08,
+       "loss": 1.342,
+       "step": 480
+     },
+     {
+       "epoch": 3.98,
+       "step": 488,
+       "total_flos": 1.368665285492736e+16,
+       "train_loss": 1.5705121149782275,
+       "train_runtime": 3078.3762,
+       "train_samples_per_second": 1.276,
+       "train_steps_per_second": 0.159
+     }
+   ],
+   "logging_steps": 10,
+   "max_steps": 488,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 4,
+   "save_steps": 500,
+   "total_flos": 1.368665285492736e+16,
+   "train_batch_size": 2,
+   "trial_name": null,
+   "trial_params": null
+ }
chatglm3-6b/lora/sft-ruozhiba/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:97c9784270ec1d666203a77dfabfe5755ec1550e3f548b9b7715cf302e9e918d
+ size 5112
chatglm3-6b/lora/sft-ruozhiba/training_loss.png ADDED