mimicheng committed on
Commit 8df1d79
1 Parent(s): baebbc5

End of training

README.md ADDED
@@ -0,0 +1,86 @@
+ ---
+ tags:
+ - generated_from_trainer
+ datasets:
+ - samsum
+ metrics:
+ - rouge
+ model-index:
+ - name: bert-base-cased-samsum
+   results:
+   - task:
+       name: Sequence-to-sequence Language Modeling
+       type: text2text-generation
+     dataset:
+       name: samsum
+       type: samsum
+       config: samsum
+       split: test
+       args: samsum
+     metrics:
+     - name: Rouge1
+       type: rouge
+       value: 34.9636
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # bert-base-cased-samsum
+
+ This model is a warm-started encoder-decoder (BERT2BERT) model, with both encoder and decoder initialized from [bert-base-cased](https://huggingface.co/bert-base-cased) (see `config.json` below), fine-tuned on the samsum dialogue-summarization dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 2.7369
+ - Rouge1: 34.9636
+ - Rouge2: 10.6358
+ - Rougel: 27.6003
+ - Rougelsum: 30.9654
+ - Gen Len: 17.6020
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
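+ A minimal inference sketch for dialogue summarization with this checkpoint. The repo id below is assumed from the commit author and model name, and the input is a short samsum-style example dialogue; adjust both to your setup:
+
+ ```python
+ from transformers import AutoTokenizer, EncoderDecoderModel
+
+ model_id = "mimicheng/bert-base-cased-samsum"  # assumed repo id
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ model = EncoderDecoderModel.from_pretrained(model_id)
+
+ dialogue = (
+     "Amanda: I baked cookies. Do you want some?\n"
+     "Jerry: Sure!\n"
+     "Amanda: I'll bring you some tomorrow :-)"
+ )
+
+ inputs = tokenizer(dialogue, return_tensors="pt", truncation=True, max_length=512)
+ summary_ids = model.generate(
+     inputs["input_ids"],
+     attention_mask=inputs["attention_mask"],
+     max_length=64,
+     num_beams=4,
+ )
+ print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
+ ```
+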
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 36
+ - eval_batch_size: 36
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 10
+
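+ A sketch of how these hyperparameters might be expressed with `Seq2SeqTrainingArguments`; `output_dir`, the evaluation strategy, and `predict_with_generate` are assumptions, not taken from this card:
+
+ ```python
+ from transformers import Seq2SeqTrainingArguments
+
+ training_args = Seq2SeqTrainingArguments(
+     output_dir="bert-base-cased-samsum",  # assumed
+     learning_rate=5e-5,
+     per_device_train_batch_size=36,
+     per_device_eval_batch_size=36,
+     seed=42,
+     lr_scheduler_type="linear",
+     num_train_epochs=10,
+     evaluation_strategy="epoch",   # assumed: the results table reports metrics once per epoch
+     predict_with_generate=True,    # assumed: needed to compute ROUGE from generated summaries
+ )
+ # The optimizer bullet above corresponds to the Trainer default (AdamW with betas=(0.9, 0.999), eps=1e-8).
+ ```
+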
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Rouge1  | Rouge2  | Rougel  | Rougelsum | Gen Len |
+ |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:|
+ | No log        | 1.0   | 410  | 2.9218          | 29.2936 | 7.4008  | 23.9609 | 26.3194   | 17.2332 |
+ | 2.6834        | 2.0   | 820  | 2.7635          | 31.9826 | 8.9758  | 26.1311 | 28.7458   | 16.9866 |
+ | 2.3529        | 3.0   | 1230 | 2.7369          | 34.9636 | 10.6358 | 27.6003 | 30.9654   | 17.6020 |
+ | 1.9608        | 4.0   | 1640 | 2.7711          | 35.8322 | 11.3676 | 29.0276 | 32.2881   | 16.9133 |
+ | 1.6459        | 5.0   | 2050 | 2.7832          | 36.8688 | 11.8883 | 29.3721 | 32.8683   | 17.0879 |
+ | 1.6459        | 6.0   | 2460 | 2.8334          | 36.489  | 11.5372 | 29.2263 | 32.5406   | 17.8901 |
+ | 1.3791        | 7.0   | 2870 | 2.8767          | 37.0743 | 11.8554 | 29.4063 | 32.7543   | 17.6093 |
+ | 1.1687        | 8.0   | 3280 | 2.9232          | 37.2    | 11.8723 | 29.5194 | 32.9481   | 17.6581 |
+ | 1.0249        | 9.0   | 3690 | 2.9456          | 37.1872 | 12.0958 | 29.621  | 33.0073   | 17.8840 |
+ | 0.9259        | 10.0  | 4100 | 2.9719          | 37.1213 | 12.1068 | 29.5138 | 33.0372   | 17.8278 |
+
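+ The ROUGE and Gen Len columns come from a `compute_metrics` callback. A minimal, illustrative sketch of such a function, assuming the `evaluate` library and a `tokenizer` in scope (not the exact function used here):
+
+ ```python
+ import evaluate
+ import numpy as np
+
+ rouge = evaluate.load("rouge")
+
+ def compute_metrics(eval_pred):
+     preds, labels = eval_pred
+     # Label positions masked with -100 are restored to the pad token before decoding.
+     labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
+     decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
+     decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
+
+     result = rouge.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)
+     result = {key: round(value * 100, 4) for key, value in result.items()}
+     # Gen Len: mean number of non-pad tokens in the generated summaries.
+     result["gen_len"] = float(np.mean([np.count_nonzero(p != tokenizer.pad_token_id) for p in preds]))
+     return result
+ ```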
+
+ ### Framework versions
+
+ - Transformers 4.34.0
+ - Pytorch 2.0.1+cu118
+ - Datasets 2.14.5
+ - Tokenizers 0.14.1
added_tokens.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "[CLS]": 101,
+   "[MASK]": 103,
+   "[PAD]": 0,
+   "[SEP]": 102,
+   "[UNK]": 100
+ }
config.json ADDED
@@ -0,0 +1,174 @@
+ {
+   "architectures": [
+     "EncoderDecoderModel"
+   ],
+   "decoder": {
+     "_name_or_path": "bert-base-cased",
+     "add_cross_attention": true,
+     "architectures": [
+       "BertForMaskedLM"
+     ],
+     "attention_probs_dropout_prob": 0.1,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "classifier_dropout": null,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": null,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "gradient_checkpointing": false,
+     "hidden_act": "gelu",
+     "hidden_dropout_prob": 0.1,
+     "hidden_size": 768,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "initializer_range": 0.02,
+     "intermediate_size": 3072,
+     "is_decoder": true,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-12,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "max_position_embeddings": 512,
+     "min_length": 0,
+     "model_type": "bert",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 12,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_hidden_layers": 12,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": 0,
+     "position_embedding_type": "absolute",
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "type_vocab_size": 2,
+     "typical_p": 1.0,
+     "use_bfloat16": false,
+     "use_cache": true,
+     "vocab_size": 28996
+   },
+   "decoder_start_token_id": 101,
+   "encoder": {
+     "_name_or_path": "bert-base-cased",
+     "add_cross_attention": false,
+     "architectures": [
+       "BertForMaskedLM"
+     ],
+     "attention_probs_dropout_prob": 0.1,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "classifier_dropout": null,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": null,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "gradient_checkpointing": false,
+     "hidden_act": "gelu",
+     "hidden_dropout_prob": 0.1,
+     "hidden_size": 768,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "initializer_range": 0.02,
+     "intermediate_size": 3072,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-12,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "max_position_embeddings": 512,
+     "min_length": 0,
+     "model_type": "bert",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 12,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_hidden_layers": 12,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": 0,
+     "position_embedding_type": "absolute",
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "type_vocab_size": 2,
+     "typical_p": 1.0,
+     "use_bfloat16": false,
+     "use_cache": true,
+     "vocab_size": 28996
+   },
+   "eos_token_id": 102,
+   "is_encoder_decoder": true,
+   "model_type": "encoder-decoder",
+   "pad_token_id": 0,
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.0"
+ }
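A minimal sketch of how an encoder-decoder config like this is typically assembled with the Transformers API: both halves are warm-started from bert-base-cased, the decoder copy gets `is_decoder: true` and `add_cross_attention: true` as in the "decoder" block above, and the top-level [CLS]/[SEP]/[PAD] ids (101/102/0) are set for generation.

```python
from transformers import AutoTokenizer, EncoderDecoderModel

# Warm-start a BERT2BERT model; the decoder config is switched automatically to
# is_decoder=True with add_cross_attention=True, matching the config shown above.
model = EncoderDecoderModel.from_encoder_decoder_pretrained(
    "bert-base-cased", "bert-base-cased"
)

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")

# Mirror the top-level token ids in the config: [CLS]=101, [SEP]=102, [PAD]=0.
model.config.decoder_start_token_id = tokenizer.cls_token_id
model.config.eos_token_id = tokenizer.sep_token_id
model.config.pad_token_id = tokenizer.pad_token_id
```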
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "decoder_start_token_id": 101,
+   "eos_token_id": 102,
+   "pad_token_id": 0,
+   "transformers_version": "4.34.0"
+ }
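The same three ids can be expressed with a `GenerationConfig` object; a minimal sketch (the save path is an assumption):

```python
from transformers import GenerationConfig

gen_config = GenerationConfig(
    decoder_start_token_id=101,  # [CLS] starts decoding
    eos_token_id=102,            # [SEP] terminates it
    pad_token_id=0,              # [PAD]
)
gen_config.save_pretrained("bert-base-cased-samsum")  # writes generation_config.json (path assumed)
```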
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:746c97e05bd3cd2c584d0bf381bc75d9a1d7ec7805e5f13887d628043bca6a2f
+ size 980254129
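The weights themselves are stored with Git LFS; the pointer above records only the hash and size (about 980 MB). A sketch of fetching the actual file with `huggingface_hub` (the repo id is assumed from the commit author and model name):

```python
from huggingface_hub import hf_hub_download

# Downloads the real weights file that the LFS pointer above refers to.
weights_path = hf_hub_download(
    repo_id="mimicheng/bert-base-cased-samsum",  # assumed repo id
    filename="pytorch_model.bin",
)
print(weights_path)
```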
special_tokens_map.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "bos_token": "[CLS]",
+   "cls_token": "[CLS]",
+   "eos_token": "[SEP]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_lower_case": false,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
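A minimal sketch of loading this tokenizer and checking that the special-token ids line up with added_tokens.json (the repo id is assumed from the commit author and model name):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("mimicheng/bert-base-cased-samsum")  # assumed repo id

# These ids mirror added_tokens.json; do_lower_case is false, so input casing is preserved.
assert tokenizer.model_max_length == 512
assert tokenizer.pad_token_id == 0     # [PAD]
assert tokenizer.unk_token_id == 100   # [UNK]
assert tokenizer.cls_token_id == 101   # [CLS]
assert tokenizer.sep_token_id == 102   # [SEP]
assert tokenizer.mask_token_id == 103  # [MASK]
```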
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b9d42f89998c754ec8feca192d897d07f2a596a4b1d1f961ab0c04ecaf0c8d22
+ size 4219
vocab.txt ADDED
The diff for this file is too large to render. See raw diff