Ali Safaya committed on
Commit
98030fb
1 Parent(s): 9e93be3

initial commit

README.md CHANGED
@@ -1,3 +1,91 @@
- ---
- license: cc
- ---
+ ---
+ license: apache-2.0
+ tags:
+ - generated_from_trainer
+ datasets:
+ - mlsum
+ metrics:
+ - rouge
+ model-index:
+ - name: eval-mt5-base-aggressive
+   results:
+   - task:
+       name: Summarization
+       type: summarization
+     dataset:
+       name: mlsum tu
+       type: mlsum
+       args: tu
+     metrics:
+     - name: Rouge1
+       type: rouge
+       value: 47.4222
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # eval-mt5-base-aggressive
+
+ This model is a fine-tuned version of [google/mt5-base](https://huggingface.co/google/mt5-base) on the mlsum tu dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 2.7801
+ - Rouge1: 47.4222
+ - Rouge2: 34.8624
+ - Rougel: 42.2487
+ - Rougelsum: 43.9494
+ - Gen Len: 51.3525
+
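A minimal usage sketch with the `transformers` library (the Hub identifier below is a placeholder for this repository's path, and the generation settings are assumptions rather than the exact evaluation configuration):

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Placeholder identifier: replace with this repository's actual Hub path.
model_name = "<namespace>/eval-mt5-base-aggressive"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# A Turkish news article, e.g. the "text" field of an MLSUM "tu" example.
article = "..."

inputs = tokenizer(article, max_length=1024, truncation=True, return_tensors="pt")
summary_ids = model.generate(
    **inputs,
    num_beams=4,        # assumed decoding settings
    max_length=120,
    early_stopping=True,
)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```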
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0005
+ - train_batch_size: 2
+ - eval_batch_size: 4
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 8
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 64
+ - total_eval_batch_size: 32
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 10.0
+ - label_smoothing_factor: 0.1
+
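As a sketch, these settings map onto a `Seq2SeqTrainingArguments` configuration roughly as follows; the per-device batch sizes combine with 8 GPUs and 4 accumulation steps to give the listed totals, while `output_dir` and the launch details are assumptions:

```python
from transformers import Seq2SeqTrainingArguments

# Sketch only: mirrors the hyperparameters listed above. The original run used
# 8 GPUs in a distributed (multi-GPU) launch, which this snippet does not set up.
training_args = Seq2SeqTrainingArguments(
    output_dir="eval-mt5-base-aggressive",  # assumed output directory
    learning_rate=5e-4,
    per_device_train_batch_size=2,          # 2 x 8 GPUs x 4 accumulation = 64 total
    per_device_eval_batch_size=4,           # 4 x 8 GPUs = 32 total
    gradient_accumulation_steps=4,
    num_train_epochs=10.0,
    lr_scheduler_type="linear",
    label_smoothing_factor=0.1,
    seed=42,
    predict_with_generate=True,
)
```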
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |
+ |:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:|
+ | 3.084 | 1.0 | 3895 | 2.9282 | 31.6872 | 22.1113 | 29.2851 | 29.7608 | 18.9861 |
+ | 2.9162 | 2.0 | 7790 | 2.8552 | 32.1716 | 22.5001 | 29.6845 | 30.1887 | 18.9938 |
+ | 2.8149 | 3.0 | 11685 | 2.8089 | 32.5681 | 22.689 | 30.0409 | 30.5507 | 18.9959 |
+ | 2.7325 | 4.0 | 15580 | 2.7948 | 33.1236 | 23.1775 | 30.5156 | 31.0461 | 18.9958 |
+ | 2.6679 | 5.0 | 19475 | 2.7810 | 33.1766 | 23.162 | 30.4802 | 31.0527 | 18.9967 |
+ | 2.6237 | 6.0 | 23370 | 2.7790 | 33.1118 | 23.2043 | 30.5064 | 31.0096 | 18.9978 |
+ | 2.5711 | 7.0 | 27265 | 2.7801 | 33.2033 | 23.2957 | 30.59 | 31.1504 | 18.9979 |
+ | 2.538 | 8.0 | 31160 | 2.7777 | 33.0256 | 23.0621 | 30.3818 | 30.978 | 18.998 |
+ | 2.5 | 9.0 | 35055 | 2.7839 | 33.2288 | 23.2361 | 30.5421 | 31.1573 | 18.998 |
+ | 2.4719 | 10.0 | 38950 | 2.7832 | 33.2098 | 23.2274 | 30.5164 | 31.1094 | 18.9981 |
+
+
+ ### Framework versions
+
+ - Transformers 4.11.3
+ - Pytorch 1.8.2+cu111
+ - Datasets 1.14.0
+ - Tokenizers 0.10.3
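ROUGE scores like those above can be computed with the `rouge` metric from the listed Datasets version; a minimal sketch, with the post-processing of generated text simplified:

```python
from datasets import load_metric  # Datasets 1.x API; requires the rouge_score package

rouge = load_metric("rouge")

predictions = ["model-generated summary"]          # decoded model outputs
references = ["reference summary from MLSUM tu"]   # gold summaries

scores = rouge.compute(predictions=predictions, references=references)
# Report the mid f-measure as a percentage, matching how the card reports numbers.
print({k: round(v.mid.fmeasure * 100, 4) for k, v in scores.items()})
```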
config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "_name_or_path": "google/mt5-base",
+   "architectures": [
+     "MT5ForConditionalGeneration"
+   ],
+   "d_ff": 2048,
+   "d_kv": 64,
+   "d_model": 768,
+   "decoder_start_token_id": 0,
+   "dropout_rate": 0.1,
+   "eos_token_id": 1,
+   "feed_forward_proj": "gated-gelu",
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "layer_norm_epsilon": 1e-06,
+   "model_type": "mt5",
+   "num_decoder_layers": 12,
+   "num_heads": 12,
+   "num_layers": 12,
+   "output_past": true,
+   "pad_token_id": 0,
+   "relative_attention_num_buckets": 32,
+   "tie_word_embeddings": false,
+   "tokenizer_class": "T5Tokenizer",
+   "torch_dtype": "float32",
+   "transformers_version": "4.11.3",
+   "use_cache": true,
+   "vocab_size": 250100
+ }
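The configuration keeps the stock `google/mt5-base` architecture (12 encoder and 12 decoder layers, hidden size 768, ~250k-token vocabulary); it can be inspected without downloading the weights, for example:

```python
from transformers import AutoConfig

# Placeholder identifier as above; a local path to this config.json also works.
config = AutoConfig.from_pretrained("<namespace>/eval-mt5-base-aggressive")
print(config.model_type, config.num_layers, config.num_decoder_layers,
      config.d_model, config.vocab_size)
# -> mt5 12 12 768 250100
```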
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:18b608a7836e1e81f15b72d9bbd255919547dbb21b035c9cf0abcad95e4fc8f2
+ size 2329633625
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
spiece.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef78f86560d809067d12bac6c09f19a462cb3af3f54d2b8acbba26e1433125d6
+ size 4309802
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "extra_ids": 0, "additional_special_tokens": null, "special_tokens_map_file": "/home/patrick/.cache/torch/transformers/685ac0ca8568ec593a48b61b0a3c272beee9bc194a3c7241d15dcadb5f875e53.f76030f3ec1b96a8199b2593390c610e76ca8028ef3d24680000619ffb646276", "name_or_path": "google/mt5-base", "sp_model_kwargs": {}, "tokenizer_class": "T5Tokenizer"}
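The tokenizer is the standard SentencePiece-based `T5Tokenizer` (backed by `spiece.model`); a quick check of the special tokens declared above, using the same placeholder identifier:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("<namespace>/eval-mt5-base-aggressive")
print(tokenizer.eos_token, tokenizer.unk_token, tokenizer.pad_token)  # </s> <unk> <pad>
```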