ryanyip7777 committed
Commit
99fcf3a
1 Parent(s): 54da9cd

Upload 10 files

README.md CHANGED
@@ -1,6 +1,53 @@
 ---
-datasets:
-- axiong/pmc_oa
-language:
-- en
----
+base_model: openai/clip-vit-large-patch14
+tags:
+- generated_from_trainer
+model-index:
+- name: clip-vit-l-14-pmc-finetuned
+  results: []
+---
+
+<!-- This model card has been generated automatically according to the information the Trainer had access to. You
+should probably proofread and complete it, then remove this comment. -->
+
+# clip-vit-l-14-pmc-finetuned
+
+This model is a fine-tuned version of [openai/clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) on the axiong/pmc_oa (PMC-OA) dataset.
+It achieves the following results on the evaluation set:
+- Loss: 1.0125
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 5e-05
+- train_batch_size: 16
+- eval_batch_size: 8
+- seed: 42
+- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+- lr_scheduler_type: linear
+- num_epochs: 10.0
+
+### Training results
+
+
+
+### Framework versions
+
+- Transformers 4.31.0
+- Pytorch 2.0.1
+- Datasets 2.14.4
+- Tokenizers 0.13.3
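
As a quick sanity check of the card above, here is a minimal usage sketch. The repo id is an assumption inferred from the committer and model name, and the image path and candidate captions are placeholders:

```python
# Minimal sketch: load the fine-tuned CLIP and score image-text pairs.
from PIL import Image
import torch
from transformers import CLIPModel, CLIPProcessor

repo_id = "ryanyip7777/clip-vit-l-14-pmc-finetuned"  # assumed hub path
model = CLIPModel.from_pretrained(repo_id)
processor = CLIPProcessor.from_pretrained(repo_id)

image = Image.open("figure.png")  # placeholder: e.g. a biomedical figure
texts = ["chest X-ray", "histopathology slide", "brain MRI"]

inputs = processor(text=texts, images=image, return_tensors="pt", padding=True)
with torch.no_grad():
    outputs = model(**inputs)
probs = outputs.logits_per_image.softmax(dim=-1)  # image-to-text probabilities
print(dict(zip(texts, probs[0].tolist())))
```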
all_results.json ADDED
@@ -0,0 +1,11 @@
+{
+    "epoch": 10.0,
+    "eval_loss": 1.0124750137329102,
+    "eval_runtime": 97.9114,
+    "eval_samples_per_second": 55.54,
+    "eval_steps_per_second": 6.945,
+    "train_loss": 0.30929526851019973,
+    "train_runtime": 55900.09,
+    "train_samples_per_second": 11.761,
+    "train_steps_per_second": 0.735
+}
config.json ADDED
@@ -0,0 +1,168 @@
+{
+    "_commit_hash": "8d052a0f05efbaefbc9e8786ba291cfdf93e5bff",
+    "_name_or_path": "openai/clip-vit-large-patch14",
+    "architectures": [
+        "CLIPModel"
+    ],
+    "initializer_factor": 1.0,
+    "logit_scale_init_value": 2.6592,
+    "model_type": "clip",
+    "projection_dim": 768,
+    "text_config": {
+        "_name_or_path": "",
+        "add_cross_attention": false,
+        "architectures": null,
+        "attention_dropout": 0.0,
+        "bad_words_ids": null,
+        "begin_suppress_tokens": null,
+        "bos_token_id": 49406,
+        "chunk_size_feed_forward": 0,
+        "cross_attention_hidden_size": null,
+        "decoder_start_token_id": null,
+        "diversity_penalty": 0.0,
+        "do_sample": false,
+        "dropout": 0.0,
+        "early_stopping": false,
+        "encoder_no_repeat_ngram_size": 0,
+        "eos_token_id": 49407,
+        "exponential_decay_length_penalty": null,
+        "finetuning_task": null,
+        "forced_bos_token_id": null,
+        "forced_eos_token_id": null,
+        "hidden_act": "quick_gelu",
+        "hidden_size": 768,
+        "id2label": {
+            "0": "LABEL_0",
+            "1": "LABEL_1"
+        },
+        "initializer_factor": 1.0,
+        "initializer_range": 0.02,
+        "intermediate_size": 3072,
+        "is_decoder": false,
+        "is_encoder_decoder": false,
+        "label2id": {
+            "LABEL_0": 0,
+            "LABEL_1": 1
+        },
+        "layer_norm_eps": 1e-05,
+        "length_penalty": 1.0,
+        "max_length": 20,
+        "max_position_embeddings": 77,
+        "min_length": 0,
+        "model_type": "clip_text_model",
+        "no_repeat_ngram_size": 0,
+        "num_attention_heads": 12,
+        "num_beam_groups": 1,
+        "num_beams": 1,
+        "num_hidden_layers": 12,
+        "num_return_sequences": 1,
+        "output_attentions": false,
+        "output_hidden_states": false,
+        "output_scores": false,
+        "pad_token_id": 1,
+        "prefix": null,
+        "problem_type": null,
+        "projection_dim": 768,
+        "pruned_heads": {},
+        "remove_invalid_values": false,
+        "repetition_penalty": 1.0,
+        "return_dict": true,
+        "return_dict_in_generate": false,
+        "sep_token_id": null,
+        "suppress_tokens": null,
+        "task_specific_params": null,
+        "temperature": 1.0,
+        "tf_legacy_loss": false,
+        "tie_encoder_decoder": false,
+        "tie_word_embeddings": true,
+        "tokenizer_class": null,
+        "top_k": 50,
+        "top_p": 1.0,
+        "torch_dtype": null,
+        "torchscript": false,
+        "transformers_version": "4.31.0",
+        "typical_p": 1.0,
+        "use_bfloat16": false,
+        "vocab_size": 49408
+    },
+    "torch_dtype": "float32",
+    "transformers_version": null,
+    "vision_config": {
+        "_name_or_path": "",
+        "add_cross_attention": false,
+        "architectures": null,
+        "attention_dropout": 0.0,
+        "bad_words_ids": null,
+        "begin_suppress_tokens": null,
+        "bos_token_id": null,
+        "chunk_size_feed_forward": 0,
+        "cross_attention_hidden_size": null,
+        "decoder_start_token_id": null,
+        "diversity_penalty": 0.0,
+        "do_sample": false,
+        "dropout": 0.0,
+        "early_stopping": false,
+        "encoder_no_repeat_ngram_size": 0,
+        "eos_token_id": null,
+        "exponential_decay_length_penalty": null,
+        "finetuning_task": null,
+        "forced_bos_token_id": null,
+        "forced_eos_token_id": null,
+        "hidden_act": "quick_gelu",
+        "hidden_size": 1024,
+        "id2label": {
+            "0": "LABEL_0",
+            "1": "LABEL_1"
+        },
+        "image_size": 224,
+        "initializer_factor": 1.0,
+        "initializer_range": 0.02,
+        "intermediate_size": 4096,
+        "is_decoder": false,
+        "is_encoder_decoder": false,
+        "label2id": {
+            "LABEL_0": 0,
+            "LABEL_1": 1
+        },
+        "layer_norm_eps": 1e-05,
+        "length_penalty": 1.0,
+        "max_length": 20,
+        "min_length": 0,
+        "model_type": "clip_vision_model",
+        "no_repeat_ngram_size": 0,
+        "num_attention_heads": 16,
+        "num_beam_groups": 1,
+        "num_beams": 1,
+        "num_channels": 3,
+        "num_hidden_layers": 24,
+        "num_return_sequences": 1,
+        "output_attentions": false,
+        "output_hidden_states": false,
+        "output_scores": false,
+        "pad_token_id": null,
+        "patch_size": 14,
+        "prefix": null,
+        "problem_type": null,
+        "projection_dim": 768,
+        "pruned_heads": {},
+        "remove_invalid_values": false,
+        "repetition_penalty": 1.0,
+        "return_dict": true,
+        "return_dict_in_generate": false,
+        "sep_token_id": null,
+        "suppress_tokens": null,
+        "task_specific_params": null,
+        "temperature": 1.0,
+        "tf_legacy_loss": false,
+        "tie_encoder_decoder": false,
+        "tie_word_embeddings": true,
+        "tokenizer_class": null,
+        "top_k": 50,
+        "top_p": 1.0,
+        "torch_dtype": null,
+        "torchscript": false,
+        "transformers_version": "4.31.0",
+        "typical_p": 1.0,
+        "use_bfloat16": false
+    }
+}
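
The nested configs above pin down the ViT-L/14 geometry: a 24-layer, 1024-wide vision tower over 14x14 patches of a 224px image, and a 12-layer, 768-wide text tower with a 77-token context, both projecting into a shared 768-dimensional space. A small sketch that reads the committed config.json and prints those numbers:

```python
# Sketch: inspect the committed configuration without loading any weights.
import json

with open("config.json") as f:
    cfg = json.load(f)

vision, text = cfg["vision_config"], cfg["text_config"]
print(vision["num_hidden_layers"], vision["hidden_size"], vision["patch_size"])   # 24 1024 14
print(text["num_hidden_layers"], text["hidden_size"], text["max_position_embeddings"])  # 12 768 77
print(cfg["projection_dim"])  # 768: shared image-text embedding size
```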
eval_results.json ADDED
@@ -0,0 +1,7 @@
+{
+    "epoch": 10.0,
+    "eval_loss": 1.0124750137329102,
+    "eval_runtime": 97.9114,
+    "eval_samples_per_second": 55.54,
+    "eval_steps_per_second": 6.945
+}
preprocessor_config.json ADDED
@@ -0,0 +1,19 @@
+{
+    "crop_size": 224,
+    "do_center_crop": true,
+    "do_normalize": true,
+    "do_resize": true,
+    "feature_extractor_type": "CLIPFeatureExtractor",
+    "image_mean": [
+        0.48145466,
+        0.4578275,
+        0.40821073
+    ],
+    "image_std": [
+        0.26862954,
+        0.26130258,
+        0.27577711
+    ],
+    "resample": 3,
+    "size": 224
+}
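
Here `resample: 3` is PIL's bicubic filter, and both `size` and `crop_size` are 224. A hedged torchvision equivalent of this preprocessing, for illustration only (the repo itself relies on CLIPFeatureExtractor/CLIPProcessor):

```python
# Sketch: the transform pipeline described by preprocessor_config.json,
# rewritten with torchvision for use outside transformers.
from torchvision import transforms

preprocess = transforms.Compose([
    transforms.Resize(224, interpolation=transforms.InterpolationMode.BICUBIC),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],
                         std=[0.26862954, 0.26130258, 0.27577711]),
])
```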
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,34 @@
+{
+    "unk_token": {
+        "content": "<|endoftext|>",
+        "single_word": false,
+        "lstrip": false,
+        "rstrip": false,
+        "normalized": true,
+        "__type": "AddedToken"
+    },
+    "bos_token": {
+        "content": "<|startoftext|>",
+        "single_word": false,
+        "lstrip": false,
+        "rstrip": false,
+        "normalized": true,
+        "__type": "AddedToken"
+    },
+    "eos_token": {
+        "content": "<|endoftext|>",
+        "single_word": false,
+        "lstrip": false,
+        "rstrip": false,
+        "normalized": true,
+        "__type": "AddedToken"
+    },
+    "pad_token": "<|endoftext|>",
+    "add_prefix_space": false,
+    "errors": "replace",
+    "do_lower_case": true,
+    "name_or_path": "openai/clip-vit-base-patch32",
+    "model_max_length": 77,
+    "special_tokens_map_file": "./special_tokens_map.json",
+    "tokenizer_class": "CLIPTokenizer"
+}
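
Note `model_max_length: 77`, matching the text tower's position embeddings. A sketch of loading the tokenizer from the committed files (the local directory path is an assumption; the fast tokenizer is built from tokenizer.json):

```python
# Sketch: tokenize a caption and confirm the 77-token context window.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(".")  # directory with these files
enc = tokenizer("a CT scan of the chest", padding="max_length",
                truncation=True, return_tensors="pt")
print(enc["input_ids"].shape)  # torch.Size([1, 77])
```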
train_results.json ADDED
@@ -0,0 +1,7 @@
+{
+    "epoch": 10.0,
+    "train_loss": 0.30929526851019973,
+    "train_runtime": 55900.09,
+    "train_samples_per_second": 11.761,
+    "train_steps_per_second": 0.735
+}
trainer_state.json ADDED
@@ -0,0 +1,517 @@
+{
+    "best_metric": null,
+    "best_model_checkpoint": null,
+    "epoch": 10.0,
+    "global_step": 41090,
+    "is_hyper_param_search": false,
+    "is_local_process_zero": true,
+    "is_world_process_zero": true,
+    "log_history": [
+        {
+            "epoch": 0.12,
+            "learning_rate": 4.9391579459722564e-05,
+            "loss": 1.7982,
+            "step": 500
+        },
+        {
+            "epoch": 0.24,
+            "learning_rate": 4.878315891944512e-05,
+            "loss": 1.2914,
+            "step": 1000
+        },
+        {
+            "epoch": 0.37,
+            "learning_rate": 4.817473837916768e-05,
+            "loss": 1.1456,
+            "step": 1500
+        },
+        {
+            "epoch": 0.49,
+            "learning_rate": 4.756631783889024e-05,
+            "loss": 1.0345,
+            "step": 2000
+        },
+        {
+            "epoch": 0.61,
+            "learning_rate": 4.6957897298612804e-05,
+            "loss": 0.9665,
+            "step": 2500
+        },
+        {
+            "epoch": 0.73,
+            "learning_rate": 4.6349476758335365e-05,
+            "loss": 0.9272,
+            "step": 3000
+        },
+        {
+            "epoch": 0.85,
+            "learning_rate": 4.574105621805793e-05,
+            "loss": 0.8571,
+            "step": 3500
+        },
+        {
+            "epoch": 0.97,
+            "learning_rate": 4.513263567778048e-05,
+            "loss": 0.8139,
+            "step": 4000
+        },
+        {
+            "epoch": 1.1,
+            "learning_rate": 4.4524215137503043e-05,
+            "loss": 0.6986,
+            "step": 4500
+        },
+        {
+            "epoch": 1.22,
+            "learning_rate": 4.3915794597225605e-05,
+            "loss": 0.6375,
+            "step": 5000
+        },
+        {
+            "epoch": 1.34,
+            "learning_rate": 4.330737405694817e-05,
+            "loss": 0.6407,
+            "step": 5500
+        },
+        {
+            "epoch": 1.46,
+            "learning_rate": 4.269895351667073e-05,
+            "loss": 0.6139,
+            "step": 6000
+        },
+        {
+            "epoch": 1.58,
+            "learning_rate": 4.209053297639329e-05,
+            "loss": 0.5973,
+            "step": 6500
+        },
+        {
+            "epoch": 1.7,
+            "learning_rate": 4.1482112436115845e-05,
+            "loss": 0.6094,
+            "step": 7000
+        },
+        {
+            "epoch": 1.83,
+            "learning_rate": 4.0873691895838406e-05,
+            "loss": 0.5824,
+            "step": 7500
+        },
+        {
+            "epoch": 1.95,
+            "learning_rate": 4.026527135556097e-05,
+            "loss": 0.5459,
+            "step": 8000
+        },
+        {
+            "epoch": 2.07,
+            "learning_rate": 3.965685081528352e-05,
+            "loss": 0.4764,
+            "step": 8500
+        },
+        {
+            "epoch": 2.19,
+            "learning_rate": 3.9048430275006084e-05,
+            "loss": 0.427,
+            "step": 9000
+        },
+        {
+            "epoch": 2.31,
+            "learning_rate": 3.8440009734728646e-05,
+            "loss": 0.4192,
+            "step": 9500
+        },
+        {
+            "epoch": 2.43,
+            "learning_rate": 3.783158919445121e-05,
+            "loss": 0.4278,
+            "step": 10000
+        },
+        {
+            "epoch": 2.56,
+            "learning_rate": 3.722316865417377e-05,
+            "loss": 0.4157,
+            "step": 10500
+        },
+        {
+            "epoch": 2.68,
+            "learning_rate": 3.6614748113896324e-05,
+            "loss": 0.4156,
+            "step": 11000
+        },
+        {
+            "epoch": 2.8,
+            "learning_rate": 3.6006327573618886e-05,
+            "loss": 0.4137,
+            "step": 11500
+        },
+        {
+            "epoch": 2.92,
+            "learning_rate": 3.539790703334145e-05,
+            "loss": 0.3731,
+            "step": 12000
+        },
+        {
+            "epoch": 3.04,
+            "learning_rate": 3.4789486493064e-05,
+            "loss": 0.3516,
+            "step": 12500
+        },
+        {
+            "epoch": 3.16,
+            "learning_rate": 3.418106595278657e-05,
+            "loss": 0.3098,
+            "step": 13000
+        },
+        {
+            "epoch": 3.29,
+            "learning_rate": 3.357264541250913e-05,
+            "loss": 0.3051,
+            "step": 13500
+        },
+        {
+            "epoch": 3.41,
+            "learning_rate": 3.296422487223169e-05,
+            "loss": 0.3029,
+            "step": 14000
+        },
+        {
+            "epoch": 3.53,
+            "learning_rate": 3.235580433195425e-05,
+            "loss": 0.3107,
+            "step": 14500
+        },
+        {
+            "epoch": 3.65,
+            "learning_rate": 3.174738379167681e-05,
+            "loss": 0.2969,
+            "step": 15000
+        },
+        {
+            "epoch": 3.77,
+            "learning_rate": 3.1138963251399365e-05,
+            "loss": 0.2886,
+            "step": 15500
+        },
+        {
+            "epoch": 3.89,
+            "learning_rate": 3.053054271112193e-05,
+            "loss": 0.2887,
+            "step": 16000
+        },
+        {
+            "epoch": 4.02,
+            "learning_rate": 2.9922122170844492e-05,
+            "loss": 0.2803,
+            "step": 16500
+        },
+        {
+            "epoch": 4.14,
+            "learning_rate": 2.9313701630567047e-05,
+            "loss": 0.2256,
+            "step": 17000
+        },
+        {
+            "epoch": 4.26,
+            "learning_rate": 2.8705281090289608e-05,
+            "loss": 0.2209,
+            "step": 17500
+        },
+        {
+            "epoch": 4.38,
+            "learning_rate": 2.8096860550012173e-05,
+            "loss": 0.2197,
+            "step": 18000
+        },
+        {
+            "epoch": 4.5,
+            "learning_rate": 2.7488440009734728e-05,
+            "loss": 0.2305,
+            "step": 18500
+        },
+        {
+            "epoch": 4.62,
+            "learning_rate": 2.688001946945729e-05,
+            "loss": 0.2252,
+            "step": 19000
+        },
+        {
+            "epoch": 4.75,
+            "learning_rate": 2.627159892917985e-05,
+            "loss": 0.207,
+            "step": 19500
+        },
+        {
+            "epoch": 4.87,
+            "learning_rate": 2.566317838890241e-05,
+            "loss": 0.2187,
+            "step": 20000
+        },
+        {
+            "epoch": 4.99,
+            "learning_rate": 2.505475784862497e-05,
+            "loss": 0.207,
+            "step": 20500
+        },
+        {
+            "epoch": 5.11,
+            "learning_rate": 2.444633730834753e-05,
+            "loss": 0.1685,
+            "step": 21000
+        },
+        {
+            "epoch": 5.23,
+            "learning_rate": 2.383791676807009e-05,
+            "loss": 0.1772,
+            "step": 21500
+        },
+        {
+            "epoch": 5.35,
+            "learning_rate": 2.3229496227792653e-05,
+            "loss": 0.1728,
+            "step": 22000
+        },
+        {
+            "epoch": 5.48,
+            "learning_rate": 2.262107568751521e-05,
+            "loss": 0.1678,
+            "step": 22500
+        },
+        {
+            "epoch": 5.6,
+            "learning_rate": 2.201265514723777e-05,
+            "loss": 0.1642,
+            "step": 23000
+        },
+        {
+            "epoch": 5.72,
+            "learning_rate": 2.1404234606960334e-05,
+            "loss": 0.1664,
+            "step": 23500
+        },
+        {
+            "epoch": 5.84,
+            "learning_rate": 2.0795814066682892e-05,
+            "loss": 0.1575,
+            "step": 24000
+        },
+        {
+            "epoch": 5.96,
+            "learning_rate": 2.018739352640545e-05,
+            "loss": 0.1599,
+            "step": 24500
+        },
+        {
+            "epoch": 6.08,
+            "learning_rate": 1.9578972986128012e-05,
+            "loss": 0.1249,
+            "step": 25000
+        },
+        {
+            "epoch": 6.21,
+            "learning_rate": 1.8970552445850574e-05,
+            "loss": 0.1266,
+            "step": 25500
+        },
+        {
+            "epoch": 6.33,
+            "learning_rate": 1.8362131905573132e-05,
+            "loss": 0.1304,
+            "step": 26000
+        },
+        {
+            "epoch": 6.45,
+            "learning_rate": 1.7753711365295694e-05,
+            "loss": 0.1161,
+            "step": 26500
+        },
+        {
+            "epoch": 6.57,
+            "learning_rate": 1.7145290825018255e-05,
+            "loss": 0.1253,
+            "step": 27000
+        },
+        {
+            "epoch": 6.69,
+            "learning_rate": 1.6536870284740814e-05,
+            "loss": 0.1215,
+            "step": 27500
+        },
+        {
+            "epoch": 6.81,
+            "learning_rate": 1.5928449744463375e-05,
+            "loss": 0.1158,
+            "step": 28000
+        },
+        {
+            "epoch": 6.94,
+            "learning_rate": 1.5320029204185933e-05,
+            "loss": 0.1219,
+            "step": 28500
+        },
+        {
+            "epoch": 7.06,
+            "learning_rate": 1.4711608663908493e-05,
+            "loss": 0.1039,
+            "step": 29000
+        },
+        {
+            "epoch": 7.18,
+            "learning_rate": 1.4103188123631053e-05,
+            "loss": 0.081,
+            "step": 29500
+        },
+        {
+            "epoch": 7.3,
+            "learning_rate": 1.3494767583353615e-05,
+            "loss": 0.0922,
+            "step": 30000
+        },
+        {
+            "epoch": 7.42,
+            "learning_rate": 1.2886347043076175e-05,
+            "loss": 0.0844,
+            "step": 30500
+        },
+        {
+            "epoch": 7.54,
+            "learning_rate": 1.2277926502798735e-05,
+            "loss": 0.0759,
+            "step": 31000
+        },
+        {
+            "epoch": 7.67,
+            "learning_rate": 1.1669505962521295e-05,
+            "loss": 0.0882,
+            "step": 31500
+        },
+        {
+            "epoch": 7.79,
+            "learning_rate": 1.1061085422243855e-05,
+            "loss": 0.0856,
+            "step": 32000
+        },
+        {
+            "epoch": 7.91,
+            "learning_rate": 1.0452664881966416e-05,
+            "loss": 0.0881,
+            "step": 32500
+        },
+        {
+            "epoch": 8.03,
+            "learning_rate": 9.844244341688976e-06,
+            "loss": 0.0708,
+            "step": 33000
+        },
+        {
+            "epoch": 8.15,
+            "learning_rate": 9.235823801411536e-06,
+            "loss": 0.0538,
+            "step": 33500
+        },
+        {
+            "epoch": 8.27,
+            "learning_rate": 8.627403261134098e-06,
+            "loss": 0.0554,
+            "step": 34000
+        },
+        {
+            "epoch": 8.4,
+            "learning_rate": 8.018982720856656e-06,
+            "loss": 0.06,
+            "step": 34500
+        },
+        {
+            "epoch": 8.52,
+            "learning_rate": 7.410562180579217e-06,
+            "loss": 0.0514,
+            "step": 35000
+        },
+        {
+            "epoch": 8.64,
+            "learning_rate": 6.802141640301777e-06,
+            "loss": 0.0536,
+            "step": 35500
+        },
+        {
+            "epoch": 8.76,
+            "learning_rate": 6.193721100024337e-06,
+            "loss": 0.0583,
+            "step": 36000
+        },
+        {
+            "epoch": 8.88,
+            "learning_rate": 5.585300559746897e-06,
+            "loss": 0.052,
+            "step": 36500
+        },
+        {
+            "epoch": 9.0,
+            "learning_rate": 4.976880019469458e-06,
+            "loss": 0.0481,
+            "step": 37000
+        },
+        {
+            "epoch": 9.13,
+            "learning_rate": 4.368459479192018e-06,
+            "loss": 0.0378,
+            "step": 37500
+        },
+        {
+            "epoch": 9.25,
+            "learning_rate": 3.760038938914578e-06,
+            "loss": 0.0296,
+            "step": 38000
+        },
+        {
+            "epoch": 9.37,
+            "learning_rate": 3.151618398637138e-06,
+            "loss": 0.037,
+            "step": 38500
+        },
+        {
+            "epoch": 9.49,
+            "learning_rate": 2.543197858359698e-06,
+            "loss": 0.0339,
+            "step": 39000
+        },
+        {
+            "epoch": 9.61,
+            "learning_rate": 1.9347773180822585e-06,
+            "loss": 0.0369,
+            "step": 39500
+        },
+        {
+            "epoch": 9.73,
+            "learning_rate": 1.3263567778048189e-06,
+            "loss": 0.0344,
+            "step": 40000
+        },
+        {
+            "epoch": 9.86,
+            "learning_rate": 7.17936237527379e-07,
+            "loss": 0.0301,
+            "step": 40500
+        },
+        {
+            "epoch": 9.98,
+            "learning_rate": 1.0951569724993917e-07,
+            "loss": 0.0325,
+            "step": 41000
+        },
+        {
+            "epoch": 10.0,
+            "step": 41090,
+            "total_flos": 1.1825796643443088e+17,
+            "train_loss": 0.30929526851019973,
+            "train_runtime": 55900.09,
+            "train_samples_per_second": 11.761,
+            "train_steps_per_second": 0.735
+        }
+    ],
+    "max_steps": 41090,
+    "num_train_epochs": 10,
+    "total_flos": 1.1825796643443088e+17,
+    "trial_name": null,
+    "trial_params": null
+}
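
The log_history above records training loss falling from 1.7982 at step 500 to roughly 0.03 by step 41,000. A minimal plotting sketch, assuming trainer_state.json is available locally:

```python
# Sketch: plot the training-loss curve recorded in trainer_state.json.
import json
import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the periodic logging entries (the final summary has no "loss" key).
points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
steps, losses = zip(*points)
plt.plot(steps, losses)
plt.xlabel("step")
plt.ylabel("training loss")
plt.savefig("loss_curve.png")
```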
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:615c91ee2578bd75aa451a22a60b066acc3c6666c6372a7566fcb99c95aae679
+size 3963
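
training_args.bin is the pickled TrainingArguments object that the Trainer saves next to the model. A sketch for inspecting it, assuming a compatible transformers install (4.31.0 per the card) so the pickle can be resolved:

```python
# Sketch: recover the full training configuration from training_args.bin.
import torch

args = torch.load("training_args.bin")  # unpickles a TrainingArguments object
print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs)
```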