Kat380 committed
Commit 096cbd5
1 Parent(s): 2947faf

upload model checkpoints

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
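This new rule is what `git lfs track "tokenizer.json"` would write: it routes the roughly 17 MB `tokenizer.json` added later in this commit through Git LFS, so the repository stores a small pointer file instead of the blob itself.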
README.md CHANGED
@@ -1,3 +1,76 @@
- ---
- license: gemma
- ---
+ ---
+ library_name: peft
+ tags:
+ - alignment-handbook
+ - generated_from_trainer
+ datasets:
+ - llama-duo/synth_summarize_dataset_dedup
+ base_model: google/gemma-7b
+ model-index:
+ - name: gemma7b-summarize-gemini1_5flash-8k
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # gemma7b-summarize-gemini1_5flash-8k
+
+ This model is a fine-tuned version of [google/gemma-7b](https://huggingface.co/google/gemma-7b) on the llama-duo/synth_summarize_dataset_dedup dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 2.8396
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0002
+ - train_batch_size: 4
+ - eval_batch_size: 2
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 8
+ - gradient_accumulation_steps: 2
+ - total_train_batch_size: 64
+ - total_eval_batch_size: 16
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 10
+
+ ### Training results
+
+ | Training Loss | Epoch  | Step | Validation Loss |
+ |:-------------:|:------:|:----:|:---------------:|
+ | 31.311        | 0.9630 | 13   | 11.0230         |
+ | 19.0651       | 2.0    | 27   | 7.4342          |
+ | 11.34         | 2.9630 | 40   | 6.8118          |
+ | 3.0136        | 4.0    | 54   | 3.6308          |
+ | 1.7786        | 4.9630 | 67   | 3.0973          |
+ | 1.4865        | 6.0    | 81   | 2.9241          |
+ | 1.4036        | 6.9630 | 94   | 2.8645          |
+ | 1.3424        | 8.0    | 108  | 2.8510         |
+ | 1.3298        | 8.9630 | 121  | 2.8410         |
+ | 1.3245        | 9.6296 | 130  | 2.8396         |
+
+ ### Framework versions
+
+ - PEFT 0.10.0
+ - Transformers 4.40.0
+ - Pytorch 2.1.2+cu121
+ - Datasets 2.18.0
+ - Tokenizers 0.19.1
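Two notes on the card above. First, the total_train_batch_size of 64 is simply train_batch_size × gradient_accumulation_steps × num_devices = 4 × 2 × 8. Second, this commit publishes a LoRA adapter rather than merged weights (see `adapter_config.json` and the ~6.4 MB `adapter_model.safetensors` below), so the adapter has to be attached to `google/gemma-7b` at load time. A minimal loading sketch, assuming the hypothetical repository id `llama-duo/gemma7b-summarize-gemini1_5flash-8k` (inferred from the model name) and the framework versions listed in the card:

```python
# Sketch only: the adapter repo id below is an assumption inferred from the
# model name; substitute the actual path of this repository.
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

adapter_id = "llama-duo/gemma7b-summarize-gemini1_5flash-8k"  # hypothetical

# AutoPeftModelForCausalLM reads adapter_config.json, downloads the base model
# named there (google/gemma-7b), and attaches the LoRA weights on top of it.
model = AutoPeftModelForCausalLM.from_pretrained(adapter_id, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(adapter_id)
```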
adapter_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "google/gemma-7b",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "v_proj",
+     "q_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
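For reference, the adapter file above corresponds to the following PEFT configuration; a sketch assuming the standard `LoraConfig` constructor from PEFT 0.10.0:

```python
from peft import LoraConfig

# Equivalent of adapter_config.json: rank-8 LoRA on the attention query and
# value projections only, with effective scale lora_alpha / r = 16 / 8 = 2.
lora_config = LoraConfig(
    r=8,
    lora_alpha=16,
    lora_dropout=0.05,
    target_modules=["q_proj", "v_proj"],
    bias="none",
    task_type="CAUSAL_LM",
)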
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6229fc9ac1efc88d558f0bd2f363a10205884dcaf1e97306d89c2c0e99209d92
+ size 6437384
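The 6,437,384-byte size is consistent with the LoRA shape declared above: rank-8 A/B matrices on q_proj and v_proj (each a 3072 → 4096 projection in Gemma-7B, per config.json below) across 28 layers give 8 × (3072 + 4096) × 2 modules × 28 layers ≈ 3.2 M parameters, about 6.42 MB at two bytes each, plus safetensors header overhead.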
all_results.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "epoch": 9.62962962962963,
+   "eval_loss": 2.839648723602295,
+   "eval_runtime": 0.2362,
+   "eval_samples": 25,
+   "eval_samples_per_second": 42.345,
+   "eval_steps_per_second": 4.234,
+   "total_flos": 3.9639312421093376e+17,
+   "train_loss": 8.499957752227782,
+   "train_runtime": 318.2463,
+   "train_samples": 7919,
+   "train_samples_per_second": 26.238,
+   "train_steps_per_second": 0.408
+ }
config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "_name_or_path": "google/gemma-7b",
+   "architectures": [
+     "GemmaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 2,
+   "eos_token_id": 1,
+   "head_dim": 256,
+   "hidden_act": "gelu",
+   "hidden_activation": null,
+   "hidden_size": 3072,
+   "initializer_range": 0.02,
+   "intermediate_size": 24576,
+   "max_position_embeddings": 8192,
+   "model_type": "gemma",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 28,
+   "num_key_value_heads": 16,
+   "pad_token_id": 0,
+   "quantization_config": {
+     "_load_in_4bit": true,
+     "_load_in_8bit": false,
+     "bnb_4bit_compute_dtype": "bfloat16",
+     "bnb_4bit_quant_storage": "uint8",
+     "bnb_4bit_quant_type": "nf4",
+     "bnb_4bit_use_double_quant": false,
+     "llm_int8_enable_fp32_cpu_offload": false,
+     "llm_int8_has_fp16_weight": false,
+     "llm_int8_skip_modules": null,
+     "llm_int8_threshold": 6.0,
+     "load_in_4bit": true,
+     "load_in_8bit": false,
+     "quant_method": "bitsandbytes"
+   },
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.40.0",
+   "use_cache": true,
+   "vocab_size": 256000
+ }
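The embedded `quantization_config` shows the base model was loaded in 4-bit NF4 with bfloat16 compute, i.e. QLoRA-style training. A sketch of the equivalent Transformers call (the bitsandbytes dependency is an assumption):

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Mirrors the quantization_config block in config.json above.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=False,
)

base_model = AutoModelForCausalLM.from_pretrained(
    "google/gemma-7b",
    quantization_config=bnb_config,
    device_map="auto",
)
```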
eval_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 9.62962962962963,
+   "eval_loss": 2.839648723602295,
+   "eval_runtime": 0.2362,
+   "eval_samples": 25,
+   "eval_samples_per_second": 42.345,
+   "eval_steps_per_second": 4.234
+ }
runs/Jun13_16-13-44_gpu1-1/events.out.tfevents.1718266897.gpu1-1.616100.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3812375f6d1b4bd411141a2061ec803a297339d2e6bdd8be0f29b0208838da75
+ size 14095
runs/Jun13_16-13-44_gpu1-1/events.out.tfevents.1718267216.gpu1-1.616100.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4a8bbc3525c5d122ee5fa7b35cccc4556ae19f06a3099ff3aad8c16b426ea77a
+ size 359
special_tokens_map.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "bos_token": {
+     "content": "<bos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<eos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:322a5f52ab5cab196761ab397a022d6fa3a2e1418585e532bb6efb2fedd2ae94
+ size 17477501
tokenizer_config.json ADDED
@@ -0,0 +1,70 @@
+ {
+   "add_bos_token": false,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<eos>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "<bos>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "106": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "107": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "bos_token": "<bos>",
+   "chat_template": "{% if messages[0]['role'] == 'user' or messages[0]['role'] == 'system' %}{{ bos_token }}{% endif %}{% for message in messages %}{{ '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% elif messages[-1]['role'] == 'assistant' %}{{ eos_token }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<eos>",
+   "legacy": null,
+   "model_max_length": 2048,
+   "pad_token": "<pad>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "GemmaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
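The `chat_template` above is ChatML-style, built around the `<|im_start|>`/`<|im_end|>` tokens registered at ids 106 and 107. A sketch of how it renders a prompt (repo id again hypothetical):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "llama-duo/gemma7b-summarize-gemini1_5flash-8k"  # hypothetical repo id
)

messages = [{"role": "user", "content": "Summarize: <document text>"}]
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
# Per the template, prompt is:
# "<bos><|im_start|>user\nSummarize: <document text><|im_end|>\n<|im_start|>assistant\n"
```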
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 9.62962962962963,
+   "total_flos": 3.9639312421093376e+17,
+   "train_loss": 8.499957752227782,
+   "train_runtime": 318.2463,
+   "train_samples": 7919,
+   "train_samples_per_second": 26.238,
+   "train_steps_per_second": 0.408
+ }
trainer_state.json ADDED
@@ -0,0 +1,299 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 9.62962962962963,
+   "eval_steps": 500,
+   "global_step": 130,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.07407407407407407,
+       "grad_norm": 178.0,
+       "learning_rate": 1.5384615384615387e-05,
+       "loss": 48.7098,
+       "step": 1
+     },
+     {
+       "epoch": 0.37037037037037035,
+       "grad_norm": 141.0,
+       "learning_rate": 7.692307692307693e-05,
+       "loss": 49.2488,
+       "step": 5
+     },
+     {
+       "epoch": 0.7407407407407407,
+       "grad_norm": 17.75,
+       "learning_rate": 0.00015384615384615385,
+       "loss": 31.311,
+       "step": 10
+     },
+     {
+       "epoch": 0.9629629629629629,
+       "eval_loss": 11.023000717163086,
+       "eval_runtime": 0.2736,
+       "eval_samples_per_second": 36.555,
+       "eval_steps_per_second": 3.655,
+       "step": 13
+     },
+     {
+       "epoch": 1.1111111111111112,
+       "grad_norm": 10.0,
+       "learning_rate": 0.00019985583705641418,
+       "loss": 23.1246,
+       "step": 15
+     },
+     {
+       "epoch": 1.4814814814814814,
+       "grad_norm": 4.46875,
+       "learning_rate": 0.00019823877374156647,
+       "loss": 20.1941,
+       "step": 20
+     },
+     {
+       "epoch": 1.8518518518518519,
+       "grad_norm": 6.0625,
+       "learning_rate": 0.00019485364419471454,
+       "loss": 19.0651,
+       "step": 25
+     },
+     {
+       "epoch": 2.0,
+       "eval_loss": 7.434217929840088,
+       "eval_runtime": 0.2352,
+       "eval_samples_per_second": 42.524,
+       "eval_steps_per_second": 4.252,
+       "step": 27
+     },
+     {
+       "epoch": 2.2222222222222223,
+       "grad_norm": 11.5,
+       "learning_rate": 0.0001897613727639014,
+       "loss": 17.6692,
+       "step": 30
+     },
+     {
+       "epoch": 2.5925925925925926,
+       "grad_norm": 18.625,
+       "learning_rate": 0.00018305360832480117,
+       "loss": 15.2716,
+       "step": 35
+     },
+     {
+       "epoch": 2.962962962962963,
+       "grad_norm": 25.75,
+       "learning_rate": 0.00017485107481711012,
+       "loss": 11.34,
+       "step": 40
+     },
+     {
+       "epoch": 2.962962962962963,
+       "eval_loss": 6.81179666519165,
+       "eval_runtime": 0.2444,
+       "eval_samples_per_second": 40.915,
+       "eval_steps_per_second": 4.091,
+       "step": 40
+     },
+     {
+       "epoch": 3.3333333333333335,
+       "grad_norm": 26.125,
+       "learning_rate": 0.0001653013984983585,
+       "loss": 6.3302,
+       "step": 45
+     },
+     {
+       "epoch": 3.7037037037037037,
+       "grad_norm": 7.375,
+       "learning_rate": 0.00015457645101945046,
+       "loss": 3.0136,
+       "step": 50
+     },
+     {
+       "epoch": 4.0,
+       "eval_loss": 3.630751848220825,
+       "eval_runtime": 0.2376,
+       "eval_samples_per_second": 42.085,
+       "eval_steps_per_second": 4.208,
+       "step": 54
+     },
+     {
+       "epoch": 4.074074074074074,
+       "grad_norm": 5.59375,
+       "learning_rate": 0.00014286925614030542,
+       "loss": 2.4006,
+       "step": 55
+     },
+     {
+       "epoch": 4.444444444444445,
+       "grad_norm": 2.859375,
+       "learning_rate": 0.0001303905157574247,
+       "loss": 2.0441,
+       "step": 60
+     },
+     {
+       "epoch": 4.814814814814815,
+       "grad_norm": 1.046875,
+       "learning_rate": 0.00011736481776669306,
+       "loss": 1.7786,
+       "step": 65
+     },
+     {
+       "epoch": 4.962962962962963,
+       "eval_loss": 3.097276210784912,
+       "eval_runtime": 0.2571,
+       "eval_samples_per_second": 38.893,
+       "eval_steps_per_second": 3.889,
+       "step": 67
+     },
+     {
+       "epoch": 5.185185185185185,
+       "grad_norm": 0.703125,
+       "learning_rate": 0.00010402659401094152,
+       "loss": 1.6515,
+       "step": 70
+     },
+     {
+       "epoch": 5.555555555555555,
+       "grad_norm": 0.69921875,
+       "learning_rate": 9.061590105968208e-05,
+       "loss": 1.5441,
+       "step": 75
+     },
+     {
+       "epoch": 5.925925925925926,
+       "grad_norm": 1.5,
+       "learning_rate": 7.73740997570278e-05,
+       "loss": 1.4865,
+       "step": 80
+     },
+     {
+       "epoch": 6.0,
+       "eval_loss": 2.9240853786468506,
+       "eval_runtime": 0.2376,
+       "eval_samples_per_second": 42.082,
+       "eval_steps_per_second": 4.208,
+       "step": 81
+     },
+     {
+       "epoch": 6.296296296296296,
+       "grad_norm": 1.2421875,
+       "learning_rate": 6.453951129574644e-05,
+       "loss": 1.448,
+       "step": 85
+     },
+     {
+       "epoch": 6.666666666666667,
+       "grad_norm": 0.5,
+       "learning_rate": 5.234312799786921e-05,
+       "loss": 1.4036,
+       "step": 90
+     },
+     {
+       "epoch": 6.962962962962963,
+       "eval_loss": 2.864516019821167,
+       "eval_runtime": 0.263,
+       "eval_samples_per_second": 38.017,
+       "eval_steps_per_second": 3.802,
+       "step": 94
+     },
+     {
+       "epoch": 7.037037037037037,
+       "grad_norm": 0.484375,
+       "learning_rate": 4.100445599768774e-05,
+       "loss": 1.3829,
+       "step": 95
+     },
+     {
+       "epoch": 7.407407407407407,
+       "grad_norm": 0.4765625,
+       "learning_rate": 3.072756464904006e-05,
+       "loss": 1.3704,
+       "step": 100
+     },
+     {
+       "epoch": 7.777777777777778,
+       "grad_norm": 0.51953125,
+       "learning_rate": 2.1697413758237784e-05,
+       "loss": 1.3424,
+       "step": 105
+     },
+     {
+       "epoch": 8.0,
+       "eval_loss": 2.851011276245117,
+       "eval_runtime": 0.2392,
+       "eval_samples_per_second": 41.815,
+       "eval_steps_per_second": 4.181,
+       "step": 108
+     },
+     {
+       "epoch": 8.148148148148149,
+       "grad_norm": 0.447265625,
+       "learning_rate": 1.4076524743778319e-05,
+       "loss": 1.3438,
+       "step": 110
+     },
+     {
+       "epoch": 8.518518518518519,
+       "grad_norm": 0.451171875,
+       "learning_rate": 8.002055634117578e-06,
+       "loss": 1.3448,
+       "step": 115
+     },
+     {
+       "epoch": 8.88888888888889,
+       "grad_norm": 0.54296875,
+       "learning_rate": 3.5833325466437694e-06,
+       "loss": 1.3298,
+       "step": 120
+     },
+     {
+       "epoch": 8.962962962962964,
+       "eval_loss": 2.8409743309020996,
+       "eval_runtime": 0.2622,
+       "eval_samples_per_second": 38.132,
+       "eval_steps_per_second": 3.813,
+       "step": 121
+     },
+     {
+       "epoch": 9.25925925925926,
+       "grad_norm": 0.451171875,
+       "learning_rate": 8.998820754091531e-07,
+       "loss": 1.3428,
+       "step": 125
+     },
+     {
+       "epoch": 9.62962962962963,
+       "grad_norm": 0.40625,
+       "learning_rate": 0.0,
+       "loss": 1.3245,
+       "step": 130
+     },
+     {
+       "epoch": 9.62962962962963,
+       "eval_loss": 2.839648723602295,
+       "eval_runtime": 0.2376,
+       "eval_samples_per_second": 42.081,
+       "eval_steps_per_second": 4.208,
+       "step": 130
+     },
+     {
+       "epoch": 9.62962962962963,
+       "step": 130,
+       "total_flos": 3.9639312421093376e+17,
+       "train_loss": 8.499957752227782,
+       "train_runtime": 318.2463,
+       "train_samples_per_second": 26.238,
+       "train_steps_per_second": 0.408
+     }
+   ],
+   "logging_steps": 5,
+   "max_steps": 130,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 10,
+   "save_steps": 100,
+   "total_flos": 3.9639312421093376e+17,
+   "train_batch_size": 4,
+   "trial_name": null,
+   "trial_params": null
+ }
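Since `trainer_state.json` retains the full `log_history`, the README's results table can be regenerated from it; a minimal sketch:

```python
import json

# Split log_history into training-loss and eval-loss series.
with open("trainer_state.json") as f:
    state = json.load(f)

train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print("final train loss:", train[-1])  # (130, 1.3245)
print("final eval loss:", evals[-1])   # (130, 2.839648723602295)
```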
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1d5dbae39d16366f390d08c76e743cee69ed721f47930e89c14ef648062bc188
+ size 5176