juyongjiang committed
Commit 8397c55
Parent: 6ba6d64

upload model checkpoint

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,76 @@
- ---
- license: gemma
- ---
+ ---
+ library_name: peft
+ tags:
+ - alignment-handbook
+ - generated_from_trainer
+ datasets:
+ - llama-duo/synth_summarize_dataset_dedup
+ base_model: google/gemma-7b
+ model-index:
+ - name: gemma7b-summarize-claude3sonnet-8k
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # gemma7b-summarize-claude3sonnet-8k
+
+ This model is a fine-tuned version of [google/gemma-7b](https://huggingface.co/google/gemma-7b) on the llama-duo/synth_summarize_dataset_dedup dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 2.7259
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0002
+ - train_batch_size: 4
+ - eval_batch_size: 2
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 8
+ - gradient_accumulation_steps: 2
+ - total_train_batch_size: 64
+ - total_eval_batch_size: 16
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 10
+
+ ### Training results
+
+ | Training Loss | Epoch  | Step | Validation Loss |
+ |:-------------:|:------:|:----:|:---------------:|
+ | 18.539        | 0.9744 | 19   | 8.6238          |
+ | 11.8891       | 2.0    | 39   | 6.5199          |
+ | 2.3149        | 2.9744 | 58   | 3.2759          |
+ | 1.5266        | 4.0    | 78   | 2.8999          |
+ | 1.3332        | 4.9744 | 97   | 2.7966          |
+ | 1.2502        | 6.0    | 117  | 2.7460          |
+ | 1.2007        | 6.9744 | 136  | 2.7332          |
+ | 1.1904        | 8.0    | 156  | 2.7283          |
+ | 1.1866        | 8.9744 | 175  | 2.7323          |
+ | 1.1715        | 9.7436 | 190  | 2.7259          |
+
+
+ ### Framework versions
+
+ - PEFT 0.10.0
+ - Transformers 4.40.0
+ - Pytorch 2.1.2+cu121
+ - Datasets 2.18.0
+ - Tokenizers 0.19.1
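For anyone trying the checkpoint, a minimal loading sketch follows. It assumes the adapter repo id is `llama-duo/gemma7b-summarize-claude3sonnet-8k` (inferred from the model name and dataset owner above; adjust if the repo lives elsewhere); PEFT resolves the `google/gemma-7b` base from `base_model_name_or_path` in `adapter_config.json`.

```python
# Minimal sketch: load the LoRA adapter on top of google/gemma-7b.
# The repo id below is an assumption inferred from the model card.
import torch
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

adapter_id = "llama-duo/gemma7b-summarize-claude3sonnet-8k"  # assumed repo id

model = AutoPeftModelForCausalLM.from_pretrained(
    adapter_id,
    torch_dtype=torch.bfloat16,  # matches torch_dtype in config.json below
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(adapter_id)

inputs = tokenizer("Summarize: ...", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```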
adapter_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "google/gemma-7b",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
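The `adapter_config.json` above maps directly onto PEFT's `LoraConfig`. A sketch of the equivalent declaration in training code, with values copied from the file (keyword names are the PEFT 0.10.0 API):

```python
from peft import LoraConfig

# Values copied from adapter_config.json above; only the attention
# query/value projections carry trainable rank-8 LoRA updates.
lora_config = LoraConfig(
    r=8,
    lora_alpha=16,            # effective scaling alpha / r = 2.0
    lora_dropout=0.05,
    bias="none",
    target_modules=["q_proj", "v_proj"],
    task_type="CAUSAL_LM",
)
```

With rank 8 on q_proj/v_proj across Gemma-7b's 28 layers, this comes to roughly 3.2M adapter parameters, consistent with the ~6.4 MB `adapter_model.safetensors` recorded below.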
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0a0a9d19c029590a8cfac94ecb1d74f39b305ef1fabf65f22c32a60137a5e2a6
+ size 6437384
all_results.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "epoch": 9.743589743589745,
+   "eval_loss": 2.725883960723877,
+   "eval_runtime": 0.2382,
+   "eval_samples": 25,
+   "eval_samples_per_second": 41.979,
+   "eval_steps_per_second": 4.198,
+   "total_flos": 5.793437974192456e+17,
+   "train_loss": 5.009378814697266,
+   "train_runtime": 464.8218,
+   "train_samples": 7936,
+   "train_samples_per_second": 26.204,
+   "train_steps_per_second": 0.409
+ }
config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "_name_or_path": "google/gemma-7b",
+   "architectures": [
+     "GemmaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 2,
+   "eos_token_id": 1,
+   "head_dim": 256,
+   "hidden_act": "gelu",
+   "hidden_activation": null,
+   "hidden_size": 3072,
+   "initializer_range": 0.02,
+   "intermediate_size": 24576,
+   "max_position_embeddings": 8192,
+   "model_type": "gemma",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 28,
+   "num_key_value_heads": 16,
+   "pad_token_id": 0,
+   "quantization_config": {
+     "_load_in_4bit": true,
+     "_load_in_8bit": false,
+     "bnb_4bit_compute_dtype": "bfloat16",
+     "bnb_4bit_quant_storage": "uint8",
+     "bnb_4bit_quant_type": "nf4",
+     "bnb_4bit_use_double_quant": false,
+     "llm_int8_enable_fp32_cpu_offload": false,
+     "llm_int8_has_fp16_weight": false,
+     "llm_int8_skip_modules": null,
+     "llm_int8_threshold": 6.0,
+     "load_in_4bit": true,
+     "load_in_8bit": false,
+     "quant_method": "bitsandbytes"
+   },
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.40.0",
+   "use_cache": true,
+   "vocab_size": 256000
+ }
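The embedded `quantization_config` records that the base model was loaded in 4-bit NF4 with bfloat16 compute via bitsandbytes (QLoRA-style training). A sketch of the equivalent load call using the standard transformers API:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Mirrors the quantization_config block in config.json above.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=False,
)

base_model = AutoModelForCausalLM.from_pretrained(
    "google/gemma-7b",
    quantization_config=bnb_config,
    device_map="auto",
)
```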
eval_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 9.743589743589745,
+   "eval_loss": 2.725883960723877,
+   "eval_runtime": 0.2382,
+   "eval_samples": 25,
+   "eval_samples_per_second": 41.979,
+   "eval_steps_per_second": 4.198
+ }
runs/Jun13_05-30-34_gpu1-1/events.out.tfevents.1718227892.gpu1-1.431348.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:182c761e53f4ed40df1014ab3a341aeb48b33e3e941cf99bd4dda3f13957d9cc
+ size 16638
runs/Jun13_05-30-34_gpu1-1/events.out.tfevents.1718228358.gpu1-1.431348.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2f690885039e3c8ff95279ebfab56c393c720254593491dafe8d76ac773c465a
+ size 359
special_tokens_map.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "bos_token": {
+     "content": "<bos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<eos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:322a5f52ab5cab196761ab397a022d6fa3a2e1418585e532bb6efb2fedd2ae94
+ size 17477501
tokenizer_config.json ADDED
@@ -0,0 +1,70 @@
+ {
+   "add_bos_token": false,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<eos>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "<bos>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "106": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "107": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "bos_token": "<bos>",
+   "chat_template": "{% if messages[0]['role'] == 'user' or messages[0]['role'] == 'system' %}{{ bos_token }}{% endif %}{% for message in messages %}{{ '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% elif messages[-1]['role'] == 'assistant' %}{{ eos_token }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<eos>",
+   "legacy": null,
+   "model_max_length": 2048,
+   "pad_token": "<pad>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "GemmaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
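The `chat_template` above is the ChatML convention (`<|im_start|>role ... <|im_end|>`), wired to the added special tokens 106 and 107. A sketch of how it renders a prompt (repo id assumed as before; the user message is illustrative):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "llama-duo/gemma7b-summarize-claude3sonnet-8k"  # assumed repo id
)

messages = [{"role": "user", "content": "Summarize the following article: ..."}]
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
# Per the template string above, this renders as:
# <bos><|im_start|>user
# Summarize the following article: ...<|im_end|>
# <|im_start|>assistant
```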
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 9.743589743589745,
+   "total_flos": 5.793437974192456e+17,
+   "train_loss": 5.009378814697266,
+   "train_runtime": 464.8218,
+   "train_samples": 7936,
+   "train_samples_per_second": 26.204,
+   "train_steps_per_second": 0.409
+ }
trainer_state.json ADDED
@@ -0,0 +1,383 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 9.743589743589745,
+   "eval_steps": 500,
+   "global_step": 190,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.05128205128205128,
+       "grad_norm": 198.0,
+       "learning_rate": 1.0526315789473684e-05,
+       "loss": 36.097,
+       "step": 1
+     },
+     {
+       "epoch": 0.2564102564102564,
+       "grad_norm": 142.0,
+       "learning_rate": 5.2631578947368424e-05,
+       "loss": 33.9388,
+       "step": 5
+     },
+     {
+       "epoch": 0.5128205128205128,
+       "grad_norm": 25.625,
+       "learning_rate": 0.00010526315789473685,
+       "loss": 24.944,
+       "step": 10
+     },
+     {
+       "epoch": 0.7692307692307693,
+       "grad_norm": 14.9375,
+       "learning_rate": 0.00015789473684210527,
+       "loss": 18.539,
+       "step": 15
+     },
+     {
+       "epoch": 0.9743589743589743,
+       "eval_loss": 8.623817443847656,
+       "eval_runtime": 0.2547,
+       "eval_samples_per_second": 39.26,
+       "eval_steps_per_second": 3.926,
+       "step": 19
+     },
+     {
+       "epoch": 1.0256410256410255,
+       "grad_norm": 4.1875,
+       "learning_rate": 0.00019998312416333227,
+       "loss": 15.3835,
+       "step": 20
+     },
+     {
+       "epoch": 1.282051282051282,
+       "grad_norm": 3.28125,
+       "learning_rate": 0.00019939306773179497,
+       "loss": 14.2252,
+       "step": 25
+     },
+     {
+       "epoch": 1.5384615384615383,
+       "grad_norm": 6.4375,
+       "learning_rate": 0.0001979649067087574,
+       "loss": 13.4082,
+       "step": 30
+     },
+     {
+       "epoch": 1.7948717948717947,
+       "grad_norm": 9.9375,
+       "learning_rate": 0.00019571068366759143,
+       "loss": 11.8891,
+       "step": 35
+     },
+     {
+       "epoch": 2.0,
+       "eval_loss": 6.5198655128479,
+       "eval_runtime": 0.2367,
+       "eval_samples_per_second": 42.251,
+       "eval_steps_per_second": 4.225,
+       "step": 39
+     },
+     {
+       "epoch": 2.051282051282051,
+       "grad_norm": 16.375,
+       "learning_rate": 0.00019264940672148018,
+       "loss": 9.8637,
+       "step": 40
+     },
+     {
+       "epoch": 2.3076923076923075,
+       "grad_norm": 20.25,
+       "learning_rate": 0.00018880688924275378,
+       "loss": 6.7911,
+       "step": 45
+     },
+     {
+       "epoch": 2.564102564102564,
+       "grad_norm": 7.21875,
+       "learning_rate": 0.00018421553219875658,
+       "loss": 3.3014,
+       "step": 50
+     },
+     {
+       "epoch": 2.8205128205128203,
+       "grad_norm": 5.4375,
+       "learning_rate": 0.00017891405093963938,
+       "loss": 2.3149,
+       "step": 55
+     },
+     {
+       "epoch": 2.9743589743589745,
+       "eval_loss": 3.2759299278259277,
+       "eval_runtime": 0.2543,
+       "eval_samples_per_second": 39.323,
+       "eval_steps_per_second": 3.932,
+       "step": 58
+     },
+     {
+       "epoch": 3.076923076923077,
+       "grad_norm": 2.703125,
+       "learning_rate": 0.0001729471487418621,
+       "loss": 1.9629,
+       "step": 60
+     },
+     {
+       "epoch": 3.3333333333333335,
+       "grad_norm": 2.078125,
+       "learning_rate": 0.00016636513986016213,
+       "loss": 1.7292,
+       "step": 65
+     },
+     {
+       "epoch": 3.58974358974359,
+       "grad_norm": 0.9765625,
+       "learning_rate": 0.00015922352526649803,
+       "loss": 1.6224,
+       "step": 70
+     },
+     {
+       "epoch": 3.8461538461538463,
+       "grad_norm": 0.7109375,
+       "learning_rate": 0.00015158252465343242,
+       "loss": 1.5266,
+       "step": 75
+     },
+     {
+       "epoch": 4.0,
+       "eval_loss": 2.8999454975128174,
+       "eval_runtime": 0.2358,
+       "eval_samples_per_second": 42.402,
+       "eval_steps_per_second": 4.24,
+       "step": 78
+     },
+     {
+       "epoch": 4.102564102564102,
+       "grad_norm": 0.56640625,
+       "learning_rate": 0.00014350656864820733,
+       "loss": 1.4469,
+       "step": 80
+     },
+     {
+       "epoch": 4.358974358974359,
+       "grad_norm": 0.5625,
+       "learning_rate": 0.00013506375551927547,
+       "loss": 1.3937,
+       "step": 85
+     },
+     {
+       "epoch": 4.615384615384615,
+       "grad_norm": 0.703125,
+       "learning_rate": 0.00012632527695645993,
+       "loss": 1.3638,
+       "step": 90
+     },
+     {
+       "epoch": 4.871794871794872,
+       "grad_norm": 0.416015625,
+       "learning_rate": 0.00011736481776669306,
+       "loss": 1.3332,
+       "step": 95
+     },
+     {
+       "epoch": 4.9743589743589745,
+       "eval_loss": 2.796644926071167,
+       "eval_runtime": 0.2552,
+       "eval_samples_per_second": 39.191,
+       "eval_steps_per_second": 3.919,
+       "step": 97
+     },
+     {
+       "epoch": 5.128205128205128,
+       "grad_norm": 0.80859375,
+       "learning_rate": 0.00010825793454723325,
+       "loss": 1.3079,
+       "step": 100
+     },
+     {
+       "epoch": 5.384615384615385,
+       "grad_norm": 0.69140625,
+       "learning_rate": 9.908141857552737e-05,
+       "loss": 1.2787,
+       "step": 105
+     },
+     {
+       "epoch": 5.641025641025641,
+       "grad_norm": 0.482421875,
+       "learning_rate": 8.991264828797319e-05,
+       "loss": 1.2515,
+       "step": 110
+     },
+     {
+       "epoch": 5.897435897435898,
+       "grad_norm": 0.6171875,
+       "learning_rate": 8.082893680762619e-05,
+       "loss": 1.2502,
+       "step": 115
+     },
+     {
+       "epoch": 6.0,
+       "eval_loss": 2.7460193634033203,
+       "eval_runtime": 0.2367,
+       "eval_samples_per_second": 42.241,
+       "eval_steps_per_second": 4.224,
+       "step": 117
+     },
+     {
+       "epoch": 6.153846153846154,
+       "grad_norm": 0.466796875,
+       "learning_rate": 7.190688002264308e-05,
+       "loss": 1.2261,
+       "step": 120
+     },
+     {
+       "epoch": 6.410256410256411,
+       "grad_norm": 0.6640625,
+       "learning_rate": 6.322171071261071e-05,
+       "loss": 1.2127,
+       "step": 125
+     },
+     {
+       "epoch": 6.666666666666667,
+       "grad_norm": 0.5078125,
+       "learning_rate": 5.484666416891109e-05,
+       "loss": 1.2119,
+       "step": 130
+     },
+     {
+       "epoch": 6.923076923076923,
+       "grad_norm": 0.578125,
+       "learning_rate": 4.685236065835443e-05,
+       "loss": 1.2007,
+       "step": 135
+     },
+     {
+       "epoch": 6.9743589743589745,
+       "eval_loss": 2.733168840408325,
+       "eval_runtime": 0.2595,
+       "eval_samples_per_second": 38.536,
+       "eval_steps_per_second": 3.854,
+       "step": 136
+     },
+     {
+       "epoch": 7.17948717948718,
+       "grad_norm": 0.55859375,
+       "learning_rate": 3.9306209937284346e-05,
+       "loss": 1.1979,
+       "step": 140
+     },
+     {
+       "epoch": 7.435897435897436,
+       "grad_norm": 0.55859375,
+       "learning_rate": 3.227184283742591e-05,
+       "loss": 1.1863,
+       "step": 145
+     },
+     {
+       "epoch": 7.6923076923076925,
+       "grad_norm": 0.5859375,
+       "learning_rate": 2.5808574716471856e-05,
+       "loss": 1.1845,
+       "step": 150
+     },
+     {
+       "epoch": 7.948717948717949,
+       "grad_norm": 0.416015625,
+       "learning_rate": 1.9970905297711606e-05,
+       "loss": 1.1904,
+       "step": 155
+     },
+     {
+       "epoch": 8.0,
+       "eval_loss": 2.728332281112671,
+       "eval_runtime": 0.2381,
+       "eval_samples_per_second": 41.991,
+       "eval_steps_per_second": 4.199,
+       "step": 156
+     },
+     {
+       "epoch": 8.205128205128204,
+       "grad_norm": 0.44140625,
+       "learning_rate": 1.4808059116167305e-05,
+       "loss": 1.1728,
+       "step": 160
+     },
+     {
+       "epoch": 8.461538461538462,
+       "grad_norm": 0.53515625,
+       "learning_rate": 1.0363570446297999e-05,
+       "loss": 1.184,
+       "step": 165
+     },
+     {
+       "epoch": 8.717948717948717,
+       "grad_norm": 0.65234375,
+       "learning_rate": 6.674916211254289e-06,
+       "loss": 1.1746,
+       "step": 170
+     },
+     {
+       "epoch": 8.974358974358974,
+       "grad_norm": 0.392578125,
+       "learning_rate": 3.7731999690749585e-06,
+       "loss": 1.1866,
+       "step": 175
+     },
+     {
+       "epoch": 8.974358974358974,
+       "eval_loss": 2.7323360443115234,
+       "eval_runtime": 0.2585,
+       "eval_samples_per_second": 38.691,
+       "eval_steps_per_second": 3.869,
+       "step": 175
+     },
+     {
+       "epoch": 9.23076923076923,
+       "grad_norm": 0.396484375,
+       "learning_rate": 1.6828896405244988e-06,
+       "loss": 1.175,
+       "step": 180
+     },
+     {
+       "epoch": 9.487179487179487,
+       "grad_norm": 0.404296875,
+       "learning_rate": 4.216111901092501e-07,
+       "loss": 1.1839,
+       "step": 185
+     },
+     {
+       "epoch": 9.743589743589745,
+       "grad_norm": 0.4765625,
+       "learning_rate": 0.0,
+       "loss": 1.1715,
+       "step": 190
+     },
+     {
+       "epoch": 9.743589743589745,
+       "eval_loss": 2.725883960723877,
+       "eval_runtime": 0.2361,
+       "eval_samples_per_second": 42.355,
+       "eval_steps_per_second": 4.236,
+       "step": 190
+     },
+     {
+       "epoch": 9.743589743589745,
+       "step": 190,
+       "total_flos": 5.793437974192456e+17,
+       "train_loss": 5.009378814697266,
+       "train_runtime": 464.8218,
+       "train_samples_per_second": 26.204,
+       "train_steps_per_second": 0.409
+     }
+   ],
+   "logging_steps": 5,
+   "max_steps": 190,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 10,
+   "save_steps": 100,
+   "total_flos": 5.793437974192456e+17,
+   "train_batch_size": 4,
+   "trial_name": null,
+   "trial_params": null
+ }
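The logged learning rates are consistent with the cosine schedule and 0.1 warmup ratio from the README: with `max_steps` = 190, warmup spans the first 19 steps, so step 1 sits at 2e-4 / 19 ≈ 1.05e-5 and decay runs from the 2e-4 peak at step 19 down to zero at step 190. A sketch of that schedule for spot-checking the log (plain Python mirroring the linear-warmup/cosine-decay formula transformers uses):

```python
import math

MAX_STEPS = 190
WARMUP_STEPS = int(0.1 * MAX_STEPS)  # 19, from lr_scheduler_warmup_ratio: 0.1
PEAK_LR = 2e-4                       # learning_rate in the README

def lr_at(step: int) -> float:
    """Linear warmup to PEAK_LR, then cosine decay to zero."""
    if step < WARMUP_STEPS:
        return PEAK_LR * step / WARMUP_STEPS
    progress = (step - WARMUP_STEPS) / (MAX_STEPS - WARMUP_STEPS)
    return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))

print(lr_at(1))    # ~1.0526e-05, matches the step-1 entry in log_history
print(lr_at(20))   # ~1.99983e-04, matches the step-20 entry
print(lr_at(190))  # 0.0, matches the final step
```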
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e01f72ae3b992c68bbd4c6ba1aea246ff2bc948a4d250a8bd10fabf30345c59a
+ size 5176