MAnfaal committed
Commit 1d621f2
Parent: 07963fe

Upload 14 files

README.md ADDED
@@ -0,0 +1,59 @@
+ ---
+ license: other
+ library_name: peft
+ tags:
+ - llama-factory
+ - lora
+ - generated_from_trainer
+ base_model: TinyLlama/TinyLlama-1.1B-Chat-v1.0
+ model-index:
+ - name: train_2024-05-05-10-57-50
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # train_2024-05-05-10-57-50
+
+ This model is a fine-tuned version of [TinyLlama/TinyLlama-1.1B-Chat-v1.0](https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0) on the Reseacrhed_data dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 2
+ - eval_batch_size: 8
+ - seed: 42
+ - gradient_accumulation_steps: 8
+ - total_train_batch_size: 16
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - num_epochs: 2.0
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - PEFT 0.10.0
+ - Transformers 4.40.1
+ - Pytorch 2.2.1+cu121
+ - Datasets 2.19.0
+ - Tokenizers 0.19.1
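
A minimal inference sketch consistent with the card above (PEFT 0.10.0, Transformers 4.40.1). The adapter repo id below is a placeholder, since the card does not state one; the prompt format follows the Human/Assistant chat template shipped in tokenizer_config.json.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
adapter_id = "MAnfaal/train_2024-05-05-10-57-50"  # placeholder repo id

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.float16)
model = PeftModel.from_pretrained(base, adapter_id)  # attach the LoRA adapter

prompt = "Human: What is LoRA fine-tuning?\nAssistant: "
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```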
adapter_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
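
The JSON above is the file PEFT serializes from its `LoraConfig`; a sketch of the equivalent config object (peft 0.10.0), for readers who want to reproduce the adapter setup:

```python
from peft import LoraConfig

lora_config = LoraConfig(
    r=8,                                  # "r" in the JSON: LoRA rank
    lora_alpha=16,                        # effective scaling = alpha / r = 2.0
    lora_dropout=0.0,
    target_modules=["q_proj", "v_proj"],  # only attention query/value projections
    bias="none",
    task_type="CAUSAL_LM",
)
```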
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dc828b5a7e19f979062b3f050cf3157cfee47b828bb8599721588c6f8f73e7b0
+ size 4517152
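
This is a Git LFS pointer rather than the weights themselves: it records only the blob's SHA-256 and byte size. A sketch of verifying a downloaded copy against the pointer (assumes the file sits in the current directory):

```python
import hashlib
import os

path = "adapter_model.safetensors"
digest = hashlib.sha256(open(path, "rb").read()).hexdigest()
print(digest == "dc828b5a7e19f979062b3f050cf3157cfee47b828bb8599721588c6f8f73e7b0")
print(os.path.getsize(path) == 4517152)
```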
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 1.9813084112149533,
+   "total_flos": 1.078522514767872e+16,
+   "train_loss": 2.9435044122192093,
+   "train_runtime": 1048.2079,
+   "train_samples_per_second": 1.631,
+   "train_steps_per_second": 0.101
+ }
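
The metrics above are mutually consistent with the run log: samples/second times runtime recovers the 855 training examples seen twice, and steps/second recovers the 106 optimization steps.

```python
runtime = 1048.2079
print(runtime * 1.631)  # ~1709.6 -> 855 examples x 2 epochs = 1710
print(runtime * 0.101)  # ~105.9  -> 106 total optimization steps
```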
running_log.txt ADDED
@@ -0,0 +1,227 @@
+ 05/05/2024 11:09:07 - INFO - transformers.tokenization_utils_base - loading file tokenizer.model from cache at /root/.cache/huggingface/hub/models--TinyLlama--TinyLlama-1.1B-Chat-v1.0/snapshots/fe8a4ea1ffedaf415f4da2f062534de366a451e6/tokenizer.model
+
+ 05/05/2024 11:09:07 - INFO - transformers.tokenization_utils_base - loading file tokenizer.json from cache at /root/.cache/huggingface/hub/models--TinyLlama--TinyLlama-1.1B-Chat-v1.0/snapshots/fe8a4ea1ffedaf415f4da2f062534de366a451e6/tokenizer.json
+
+ 05/05/2024 11:09:07 - INFO - transformers.tokenization_utils_base - loading file added_tokens.json from cache at None
+
+ 05/05/2024 11:09:07 - INFO - transformers.tokenization_utils_base - loading file special_tokens_map.json from cache at /root/.cache/huggingface/hub/models--TinyLlama--TinyLlama-1.1B-Chat-v1.0/snapshots/fe8a4ea1ffedaf415f4da2f062534de366a451e6/special_tokens_map.json
+
+ 05/05/2024 11:09:07 - INFO - transformers.tokenization_utils_base - loading file tokenizer_config.json from cache at /root/.cache/huggingface/hub/models--TinyLlama--TinyLlama-1.1B-Chat-v1.0/snapshots/fe8a4ea1ffedaf415f4da2f062534de366a451e6/tokenizer_config.json
+
+ 05/05/2024 11:09:07 - INFO - llmtuner.data.loader - Loading dataset Reseacrhed_data.json...
+
+ 05/05/2024 11:09:07 - WARNING - llmtuner.data.utils - Checksum failed: missing SHA-1 hash value in dataset_info.json.
+
+ 05/05/2024 11:09:09 - WARNING - transformers.tokenization_utils_base - Token indices sequence length is longer than the specified maximum sequence length for this model (2387 > 2048). Running this sequence through the model will result in indexing errors
+
+ 05/05/2024 11:09:09 - INFO - transformers.configuration_utils - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--TinyLlama--TinyLlama-1.1B-Chat-v1.0/snapshots/fe8a4ea1ffedaf415f4da2f062534de366a451e6/config.json
+
+ 05/05/2024 11:09:09 - INFO - transformers.configuration_utils - Model config LlamaConfig {
+   "_name_or_path": "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 2048,
+   "initializer_range": 0.02,
+   "intermediate_size": 5632,
+   "max_position_embeddings": 2048,
+   "model_type": "llama",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 22,
+   "num_key_value_heads": 4,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.40.1",
+   "use_cache": true,
+   "vocab_size": 32000
+ }
+
+
+ 05/05/2024 11:09:09 - INFO - llmtuner.model.utils.quantization - Quantizing model to 8 bit.
+
+ 05/05/2024 11:09:30 - INFO - transformers.modeling_utils - loading weights file model.safetensors from cache at /root/.cache/huggingface/hub/models--TinyLlama--TinyLlama-1.1B-Chat-v1.0/snapshots/fe8a4ea1ffedaf415f4da2f062534de366a451e6/model.safetensors
+
+ 05/05/2024 11:09:30 - INFO - transformers.modeling_utils - Instantiating LlamaForCausalLM model under default dtype torch.float16.
+
+ 05/05/2024 11:09:30 - INFO - transformers.generation.configuration_utils - Generate config GenerationConfig {
+   "bos_token_id": 1,
+   "eos_token_id": 2
+ }
+
+
+ 05/05/2024 11:09:34 - INFO - transformers.modeling_utils - All model checkpoint weights were used when initializing LlamaForCausalLM.
+
+
+ 05/05/2024 11:09:34 - INFO - transformers.modeling_utils - All the weights of LlamaForCausalLM were initialized from the model checkpoint at TinyLlama/TinyLlama-1.1B-Chat-v1.0.
+ If your task is similar to the task the model of the checkpoint was trained on, you can already use LlamaForCausalLM for predictions without further training.
+
+ 05/05/2024 11:09:35 - INFO - transformers.generation.configuration_utils - loading configuration file generation_config.json from cache at /root/.cache/huggingface/hub/models--TinyLlama--TinyLlama-1.1B-Chat-v1.0/snapshots/fe8a4ea1ffedaf415f4da2f062534de366a451e6/generation_config.json
+
+ 05/05/2024 11:09:35 - INFO - transformers.generation.configuration_utils - Generate config GenerationConfig {
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "max_length": 2048,
+   "pad_token_id": 0
+ }
+
+
+ 05/05/2024 11:09:35 - INFO - llmtuner.model.utils.checkpointing - Gradient checkpointing enabled.
+
+ 05/05/2024 11:09:35 - INFO - llmtuner.model.utils.attention - Using torch SDPA for faster training and inference.
+
+ 05/05/2024 11:09:35 - INFO - llmtuner.model.adapter - Fine-tuning method: LoRA
+
+ 05/05/2024 11:09:35 - INFO - llmtuner.model.loader - trainable params: 1126400 || all params: 1101174784 || trainable%: 0.1023
+
+ 05/05/2024 11:09:35 - INFO - transformers.trainer - Using auto half precision backend
+
+ 05/05/2024 11:09:35 - INFO - transformers.trainer - ***** Running training *****
+
+ 05/05/2024 11:09:35 - INFO - transformers.trainer - Num examples = 855
+
+ 05/05/2024 11:09:35 - INFO - transformers.trainer - Num Epochs = 2
+
+ 05/05/2024 11:09:35 - INFO - transformers.trainer - Instantaneous batch size per device = 2
+
+ 05/05/2024 11:09:35 - INFO - transformers.trainer - Total train batch size (w. parallel, distributed & accumulation) = 16
+
+ 05/05/2024 11:09:35 - INFO - transformers.trainer - Gradient Accumulation steps = 8
+
+ 05/05/2024 11:09:35 - INFO - transformers.trainer - Total optimization steps = 106
+
+ 05/05/2024 11:09:35 - INFO - transformers.trainer - Number of trainable parameters = 1,126,400
+
+ 05/05/2024 11:10:23 - INFO - llmtuner.extras.callbacks - {'loss': 2.9430, 'learning_rate': 1.9890e-05, 'epoch': 0.09}
+
+ 05/05/2024 11:11:11 - INFO - llmtuner.extras.callbacks - {'loss': 3.0203, 'learning_rate': 1.9564e-05, 'epoch': 0.19}
+
+ 05/05/2024 11:11:59 - INFO - llmtuner.extras.callbacks - {'loss': 2.8306, 'learning_rate': 1.9028e-05, 'epoch': 0.28}
+
+ 05/05/2024 11:12:48 - INFO - llmtuner.extras.callbacks - {'loss': 2.9983, 'learning_rate': 1.8294e-05, 'epoch': 0.37}
+
+ 05/05/2024 11:13:37 - INFO - llmtuner.extras.callbacks - {'loss': 3.0059, 'learning_rate': 1.7378e-05, 'epoch': 0.47}
+
+ 05/05/2024 11:14:26 - INFO - llmtuner.extras.callbacks - {'loss': 2.8450, 'learning_rate': 1.6301e-05, 'epoch': 0.56}
+
+ 05/05/2024 11:15:16 - INFO - llmtuner.extras.callbacks - {'loss': 2.9230, 'learning_rate': 1.5085e-05, 'epoch': 0.65}
+
+ 05/05/2024 11:16:05 - INFO - llmtuner.extras.callbacks - {'loss': 2.9725, 'learning_rate': 1.3758e-05, 'epoch': 0.75}
+
+ 05/05/2024 11:16:55 - INFO - llmtuner.extras.callbacks - {'loss': 2.9861, 'learning_rate': 1.2349e-05, 'epoch': 0.84}
+
+ 05/05/2024 11:17:45 - INFO - llmtuner.extras.callbacks - {'loss': 2.9897, 'learning_rate': 1.0888e-05, 'epoch': 0.93}
+
+ 05/05/2024 11:18:35 - INFO - llmtuner.extras.callbacks - {'loss': 2.9677, 'learning_rate': 9.4076e-06, 'epoch': 1.03}
+
+ 05/05/2024 11:19:24 - INFO - llmtuner.extras.callbacks - {'loss': 2.9620, 'learning_rate': 7.9402e-06, 'epoch': 1.12}
+
+ 05/05/2024 11:20:14 - INFO - llmtuner.extras.callbacks - {'loss': 2.9335, 'learning_rate': 6.5180e-06, 'epoch': 1.21}
+
+ 05/05/2024 11:21:04 - INFO - llmtuner.extras.callbacks - {'loss': 2.9682, 'learning_rate': 5.1721e-06, 'epoch': 1.31}
+
+ 05/05/2024 11:21:54 - INFO - llmtuner.extras.callbacks - {'loss': 2.9475, 'learning_rate': 3.9320e-06, 'epoch': 1.40}
+
+ 05/05/2024 11:22:43 - INFO - llmtuner.extras.callbacks - {'loss': 2.9373, 'learning_rate': 2.8249e-06, 'epoch': 1.50}
+
+ 05/05/2024 11:23:34 - INFO - llmtuner.extras.callbacks - {'loss': 2.9115, 'learning_rate': 1.8751e-06, 'epoch': 1.59}
+
+ 05/05/2024 11:24:24 - INFO - llmtuner.extras.callbacks - {'loss': 2.9237, 'learning_rate': 1.1034e-06, 'epoch': 1.68}
+
+ 05/05/2024 11:25:13 - INFO - llmtuner.extras.callbacks - {'loss': 2.8901, 'learning_rate': 5.2674e-07, 'epoch': 1.78}
+
+ 05/05/2024 11:26:03 - INFO - llmtuner.extras.callbacks - {'loss': 2.8905, 'learning_rate': 1.5769e-07, 'epoch': 1.87}
+
+ 05/05/2024 11:26:03 - INFO - transformers.trainer - Saving model checkpoint to saves/LLaMA-tiny/lora/train_2024-05-05-10-57-50/checkpoint-100
+
+ 05/05/2024 11:26:03 - INFO - transformers.configuration_utils - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--TinyLlama--TinyLlama-1.1B-Chat-v1.0/snapshots/fe8a4ea1ffedaf415f4da2f062534de366a451e6/config.json
+
+ 05/05/2024 11:26:03 - INFO - transformers.configuration_utils - Model config LlamaConfig {
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 2048,
+   "initializer_range": 0.02,
+   "intermediate_size": 5632,
+   "max_position_embeddings": 2048,
+   "model_type": "llama",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 22,
+   "num_key_value_heads": 4,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.40.1",
+   "use_cache": true,
+   "vocab_size": 32000
+ }
+
+
+ 05/05/2024 11:26:03 - INFO - transformers.tokenization_utils_base - tokenizer config file saved in saves/LLaMA-tiny/lora/train_2024-05-05-10-57-50/checkpoint-100/tokenizer_config.json
+
+ 05/05/2024 11:26:03 - INFO - transformers.tokenization_utils_base - Special tokens file saved in saves/LLaMA-tiny/lora/train_2024-05-05-10-57-50/checkpoint-100/special_tokens_map.json
+
+ 05/05/2024 11:26:53 - INFO - llmtuner.extras.callbacks - {'loss': 2.9548, 'learning_rate': 4.3916e-09, 'epoch': 1.96}
+
+ 05/05/2024 11:27:03 - INFO - transformers.trainer -
+
+ Training completed. Do not forget to share your model on huggingface.co/models =)
+
+
+
+ 05/05/2024 11:27:03 - INFO - transformers.trainer - Saving model checkpoint to saves/LLaMA-tiny/lora/train_2024-05-05-10-57-50
+
+ 05/05/2024 11:27:03 - INFO - transformers.configuration_utils - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--TinyLlama--TinyLlama-1.1B-Chat-v1.0/snapshots/fe8a4ea1ffedaf415f4da2f062534de366a451e6/config.json
+
+ 05/05/2024 11:27:03 - INFO - transformers.configuration_utils - Model config LlamaConfig {
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 2048,
+   "initializer_range": 0.02,
+   "intermediate_size": 5632,
+   "max_position_embeddings": 2048,
+   "model_type": "llama",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 22,
+   "num_key_value_heads": 4,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.40.1",
+   "use_cache": true,
+   "vocab_size": 32000
+ }
+
+
+ 05/05/2024 11:27:03 - INFO - transformers.tokenization_utils_base - tokenizer config file saved in saves/LLaMA-tiny/lora/train_2024-05-05-10-57-50/tokenizer_config.json
+
+ 05/05/2024 11:27:03 - INFO - transformers.tokenization_utils_base - Special tokens file saved in saves/LLaMA-tiny/lora/train_2024-05-05-10-57-50/special_tokens_map.json
+
+ 05/05/2024 11:27:03 - INFO - transformers.modelcard - Dropping the following result as it does not have all the necessary fields:
+ {'task': {'name': 'Causal Language Modeling', 'type': 'text-generation'}}
+
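
The trainable-parameter count logged above (1,126,400) checks out against the LoRA setup: rank-8 A/B matrices on q_proj and v_proj in each of TinyLlama's 22 layers, with hidden size 2048 and 4 key-value heads of dimension 64 (values taken from the LlamaConfig printed in the log).

```python
r, hidden, layers = 8, 2048, 22
head_dim = hidden // 32                  # 32 attention heads -> head_dim 64
q_proj = r * (hidden + hidden)           # q_proj maps 2048 -> 2048
v_proj = r * (hidden + 4 * head_dim)     # v_proj maps 2048 -> 256 (4 KV heads)
print((q_proj + v_proj) * layers)        # 1126400
```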
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ system_message + '\\n' }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ 'Human: ' + content + '\\nAssistant: ' }}{% elif message['role'] == 'assistant' %}{{ content + '</s>' + '\\n' }}{% endif %}{% endfor %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": false,
+   "model_max_length": 2048,
+   "pad_token": "</s>",
+   "padding_side": "right",
+   "sp_model_kwargs": {},
+   "split_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
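
The `chat_template` above renders conversations in a plain "Human:/Assistant:" format. A sketch of exercising it, assuming the tokenizer is loaded from a local checkout of this repo (the "." path is an assumption):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(".")  # directory holding these tokenizer files

messages = [
    {"role": "system", "content": "You are helpful."},
    {"role": "user", "content": "Hello!"},
]
print(tokenizer.apply_chat_template(messages, tokenize=False))
# You are helpful.
# Human: Hello!
# Assistant:
```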
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 1.9813084112149533,
+   "total_flos": 1.078522514767872e+16,
+   "train_loss": 2.9435044122192093,
+   "train_runtime": 1048.2079,
+   "train_samples_per_second": 1.631,
+   "train_steps_per_second": 0.101
+ }
trainer_config.yaml ADDED
@@ -0,0 +1,29 @@
+ cutoff_len: 1024
+ dataset: Reseacrhed_data
+ dataset_dir: data
+ do_train: true
+ finetuning_type: lora
+ flash_attn: auto
+ fp16: true
+ gradient_accumulation_steps: 8
+ learning_rate: 2.0e-05
+ logging_steps: 5
+ lora_alpha: 16
+ lora_dropout: 0
+ lora_rank: 8
+ lora_target: q_proj,v_proj
+ lr_scheduler_type: cosine
+ max_grad_norm: 1.0
+ max_samples: 10000
+ model_name_or_path: TinyLlama/TinyLlama-1.1B-Chat-v1.0
+ num_train_epochs: 2.0
+ optim: adamw_torch
+ output_dir: saves/LLaMA-tiny/lora/train_2024-05-05-10-57-50
+ packing: true
+ per_device_train_batch_size: 2
+ quantization_bit: 8
+ report_to: none
+ save_steps: 100
+ stage: pt
+ template: default
+ warmup_steps: 0
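
This is the LLaMA-Factory config the run was launched from. As a sanity check, the effective batch size it implies (2 per device x 8 accumulation steps = 16) matches the README's total_train_batch_size; a sketch, assuming PyYAML is installed:

```python
import yaml

with open("trainer_config.yaml") as f:
    cfg = yaml.safe_load(f)

effective_bs = cfg["per_device_train_batch_size"] * cfg["gradient_accumulation_steps"]
print(effective_bs)        # 16, matching the README
print(cfg["lora_target"])  # "q_proj,v_proj", matching adapter_config.json
```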
trainer_log.jsonl ADDED
@@ -0,0 +1,22 @@
+ {"current_steps": 5, "total_steps": 106, "loss": 2.943, "learning_rate": 1.9890401873221642e-05, "epoch": 0.09345794392523364, "percentage": 4.72, "elapsed_time": "0:00:47", "remaining_time": "0:16:06"}
+ {"current_steps": 10, "total_steps": 106, "loss": 3.0203, "learning_rate": 1.9564009842765225e-05, "epoch": 0.18691588785046728, "percentage": 9.43, "elapsed_time": "0:01:35", "remaining_time": "0:15:16"}
+ {"current_steps": 15, "total_steps": 106, "loss": 2.8306, "learning_rate": 1.9027978299657436e-05, "epoch": 0.2803738317757009, "percentage": 14.15, "elapsed_time": "0:02:23", "remaining_time": "0:14:32"}
+ {"current_steps": 20, "total_steps": 106, "loss": 2.9983, "learning_rate": 1.829405685450202e-05, "epoch": 0.37383177570093457, "percentage": 18.87, "elapsed_time": "0:03:13", "remaining_time": "0:13:49"}
+ {"current_steps": 25, "total_steps": 106, "loss": 3.0059, "learning_rate": 1.7378332790417275e-05, "epoch": 0.4672897196261682, "percentage": 23.58, "elapsed_time": "0:04:02", "remaining_time": "0:13:04"}
+ {"current_steps": 30, "total_steps": 106, "loss": 2.845, "learning_rate": 1.6300878435817115e-05, "epoch": 0.5607476635514018, "percentage": 28.3, "elapsed_time": "0:04:51", "remaining_time": "0:12:18"}
+ {"current_steps": 35, "total_steps": 106, "loss": 2.923, "learning_rate": 1.5085311186492206e-05, "epoch": 0.6542056074766355, "percentage": 33.02, "elapsed_time": "0:05:40", "remaining_time": "0:11:31"}
+ {"current_steps": 40, "total_steps": 106, "loss": 2.9725, "learning_rate": 1.3758275821142382e-05, "epoch": 0.7476635514018691, "percentage": 37.74, "elapsed_time": "0:06:30", "remaining_time": "0:10:44"}
+ {"current_steps": 45, "total_steps": 106, "loss": 2.9861, "learning_rate": 1.234886045780984e-05, "epoch": 0.8411214953271028, "percentage": 42.45, "elapsed_time": "0:07:20", "remaining_time": "0:09:56"}
+ {"current_steps": 50, "total_steps": 106, "loss": 2.9897, "learning_rate": 1.0887958953229349e-05, "epoch": 0.9345794392523364, "percentage": 47.17, "elapsed_time": "0:08:10", "remaining_time": "0:09:08"}
+ {"current_steps": 55, "total_steps": 106, "loss": 2.9677, "learning_rate": 9.407593721062858e-06, "epoch": 1.02803738317757, "percentage": 51.89, "elapsed_time": "0:08:59", "remaining_time": "0:08:20"}
+ {"current_steps": 60, "total_steps": 106, "loss": 2.962, "learning_rate": 7.940213812589018e-06, "epoch": 1.1214953271028036, "percentage": 56.6, "elapsed_time": "0:09:49", "remaining_time": "0:07:31"}
+ {"current_steps": 65, "total_steps": 106, "loss": 2.9335, "learning_rate": 6.517983645656014e-06, "epoch": 1.2149532710280373, "percentage": 61.32, "elapsed_time": "0:10:39", "remaining_time": "0:06:43"}
+ {"current_steps": 70, "total_steps": 106, "loss": 2.9682, "learning_rate": 5.172077972692553e-06, "epoch": 1.308411214953271, "percentage": 66.04, "elapsed_time": "0:11:29", "remaining_time": "0:05:54"}
+ {"current_steps": 75, "total_steps": 106, "loss": 2.9475, "learning_rate": 3.931998541814069e-06, "epoch": 1.4018691588785046, "percentage": 70.75, "elapsed_time": "0:12:18", "remaining_time": "0:05:05"}
+ {"current_steps": 80, "total_steps": 106, "loss": 2.9373, "learning_rate": 2.8249274295566863e-06, "epoch": 1.4953271028037383, "percentage": 75.47, "elapsed_time": "0:13:08", "remaining_time": "0:04:16"}
+ {"current_steps": 85, "total_steps": 106, "loss": 2.9115, "learning_rate": 1.875131219943187e-06, "epoch": 1.588785046728972, "percentage": 80.19, "elapsed_time": "0:13:58", "remaining_time": "0:03:27"}
+ {"current_steps": 90, "total_steps": 106, "loss": 2.9237, "learning_rate": 1.1034290900525279e-06, "epoch": 1.6822429906542056, "percentage": 84.91, "elapsed_time": "0:14:48", "remaining_time": "0:02:37"}
+ {"current_steps": 95, "total_steps": 106, "loss": 2.8901, "learning_rate": 5.267364614580861e-07, "epoch": 1.7757009345794392, "percentage": 89.62, "elapsed_time": "0:15:38", "remaining_time": "0:01:48"}
+ {"current_steps": 100, "total_steps": 106, "loss": 2.8905, "learning_rate": 1.5769422052403172e-07, "epoch": 1.8691588785046729, "percentage": 94.34, "elapsed_time": "0:16:28", "remaining_time": "0:00:59"}
+ {"current_steps": 105, "total_steps": 106, "loss": 2.9548, "learning_rate": 4.39163491205652e-09, "epoch": 1.9626168224299065, "percentage": 99.06, "elapsed_time": "0:17:18", "remaining_time": "0:00:09"}
+ {"current_steps": 106, "total_steps": 106, "epoch": 1.9813084112149533, "percentage": 100.0, "elapsed_time": "0:17:28", "remaining_time": "0:00:00"}
trainer_state.json ADDED
@@ -0,0 +1,177 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.9813084112149533,
+   "eval_steps": 500,
+   "global_step": 106,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.09345794392523364,
+       "grad_norm": 0.37823450565338135,
+       "learning_rate": 1.9890401873221642e-05,
+       "loss": 2.943,
+       "step": 5
+     },
+     {
+       "epoch": 0.18691588785046728,
+       "grad_norm": 0.3274012804031372,
+       "learning_rate": 1.9564009842765225e-05,
+       "loss": 3.0203,
+       "step": 10
+     },
+     {
+       "epoch": 0.2803738317757009,
+       "grad_norm": 0.2663849890232086,
+       "learning_rate": 1.9027978299657436e-05,
+       "loss": 2.8306,
+       "step": 15
+     },
+     {
+       "epoch": 0.37383177570093457,
+       "grad_norm": 0.2998671233654022,
+       "learning_rate": 1.829405685450202e-05,
+       "loss": 2.9983,
+       "step": 20
+     },
+     {
+       "epoch": 0.4672897196261682,
+       "grad_norm": 0.30643677711486816,
+       "learning_rate": 1.7378332790417275e-05,
+       "loss": 3.0059,
+       "step": 25
+     },
+     {
+       "epoch": 0.5607476635514018,
+       "grad_norm": 0.3167150318622589,
+       "learning_rate": 1.6300878435817115e-05,
+       "loss": 2.845,
+       "step": 30
+     },
+     {
+       "epoch": 0.6542056074766355,
+       "grad_norm": 0.29322898387908936,
+       "learning_rate": 1.5085311186492206e-05,
+       "loss": 2.923,
+       "step": 35
+     },
+     {
+       "epoch": 0.7476635514018691,
+       "grad_norm": 0.3546823263168335,
+       "learning_rate": 1.3758275821142382e-05,
+       "loss": 2.9725,
+       "step": 40
+     },
+     {
+       "epoch": 0.8411214953271028,
+       "grad_norm": 0.35268881916999817,
+       "learning_rate": 1.234886045780984e-05,
+       "loss": 2.9861,
+       "step": 45
+     },
+     {
+       "epoch": 0.9345794392523364,
+       "grad_norm": 0.3079037666320801,
+       "learning_rate": 1.0887958953229349e-05,
+       "loss": 2.9897,
+       "step": 50
+     },
+     {
+       "epoch": 1.02803738317757,
+       "grad_norm": 0.33415308594703674,
+       "learning_rate": 9.407593721062858e-06,
+       "loss": 2.9677,
+       "step": 55
+     },
+     {
+       "epoch": 1.1214953271028036,
+       "grad_norm": 0.3114282190799713,
+       "learning_rate": 7.940213812589018e-06,
+       "loss": 2.962,
+       "step": 60
+     },
+     {
+       "epoch": 1.2149532710280373,
+       "grad_norm": 0.3043849766254425,
+       "learning_rate": 6.517983645656014e-06,
+       "loss": 2.9335,
+       "step": 65
+     },
+     {
+       "epoch": 1.308411214953271,
+       "grad_norm": 0.35792258381843567,
+       "learning_rate": 5.172077972692553e-06,
+       "loss": 2.9682,
+       "step": 70
+     },
+     {
+       "epoch": 1.4018691588785046,
+       "grad_norm": 0.30202534794807434,
+       "learning_rate": 3.931998541814069e-06,
+       "loss": 2.9475,
+       "step": 75
+     },
+     {
+       "epoch": 1.4953271028037383,
+       "grad_norm": 0.40600359439849854,
+       "learning_rate": 2.8249274295566863e-06,
+       "loss": 2.9373,
+       "step": 80
+     },
+     {
+       "epoch": 1.588785046728972,
+       "grad_norm": 0.31094491481781006,
+       "learning_rate": 1.875131219943187e-06,
+       "loss": 2.9115,
+       "step": 85
+     },
+     {
+       "epoch": 1.6822429906542056,
+       "grad_norm": 0.3808990716934204,
+       "learning_rate": 1.1034290900525279e-06,
+       "loss": 2.9237,
+       "step": 90
+     },
+     {
+       "epoch": 1.7757009345794392,
+       "grad_norm": 0.33799558877944946,
+       "learning_rate": 5.267364614580861e-07,
+       "loss": 2.8901,
+       "step": 95
+     },
+     {
+       "epoch": 1.8691588785046729,
+       "grad_norm": 0.3236595690250397,
+       "learning_rate": 1.5769422052403172e-07,
+       "loss": 2.8905,
+       "step": 100
+     },
+     {
+       "epoch": 1.9626168224299065,
+       "grad_norm": 0.3427414000034332,
+       "learning_rate": 4.39163491205652e-09,
+       "loss": 2.9548,
+       "step": 105
+     },
+     {
+       "epoch": 1.9813084112149533,
+       "step": 106,
+       "total_flos": 1.078522514767872e+16,
+       "train_loss": 2.9435044122192093,
+       "train_runtime": 1048.2079,
+       "train_samples_per_second": 1.631,
+       "train_steps_per_second": 0.101
+     }
+   ],
+   "logging_steps": 5,
+   "max_steps": 106,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 2,
+   "save_steps": 100,
+   "total_flos": 1.078522514767872e+16,
+   "train_batch_size": 2,
+   "trial_name": null,
+   "trial_params": null
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7c8b10cfcdadcb4d9c13a2f7dc44a0e53f10d1b92bcbc2b09c784742ad8b6290
+ size 5176