Areeb123 committed
Commit 43e94e9
1 parent: 98f015f

Training in progress, step 500
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,83 @@
+ ---
+ license: apache-2.0
+ base_model: google/mt5-small
+ tags:
+ - summarization
+ - generated_from_trainer
+ datasets:
+ - samsum
+ metrics:
+ - rouge
+ model-index:
+ - name: mt5-small-finetuned_samsum_summarization_model
+   results:
+   - task:
+       name: Sequence-to-sequence Language Modeling
+       type: text2text-generation
+     dataset:
+       name: samsum
+       type: samsum
+       config: samsum
+       split: validation
+       args: samsum
+     metrics:
+     - name: Rouge1
+       type: rouge
+       value: 38.4852
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # mt5-small-finetuned_samsum_summarization_model
+
+ This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on the samsum dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 2.0164
+ - Rouge1: 38.4852
+ - Rouge2: 16.4292
+ - Rougel: 32.9585
+ - Rougelsum: 36.0185
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5.6e-05
+ - train_batch_size: 14
+ - eval_batch_size: 14
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 5
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Rouge1  | Rouge2  | Rougel  | Rougelsum |
+ |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|
+ | 4.9849        | 1.0   | 1050 | 2.2071          | 34.8128 | 14.0544 | 29.8982 | 32.2776   |
+ | 2.7097        | 2.0   | 2100 | 2.1157          | 37.7348 | 15.9587 | 32.2724 | 35.2982   |
+ | 2.5305        | 3.0   | 3150 | 2.0553          | 38.4581 | 16.4518 | 32.7643 | 35.9360   |
+ | 2.4510        | 4.0   | 4200 | 2.0253          | 38.3972 | 16.3508 | 32.7684 | 35.9072   |
+ | 2.4132        | 5.0   | 5250 | 2.0164          | 38.4852 | 16.4292 | 32.9585 | 36.0185   |
+
+
+ ### Framework versions
+
+ - Transformers 4.35.2
+ - Pytorch 2.1.0+cu118
+ - Datasets 2.15.0
+ - Tokenizers 0.15.0
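
The hyperparameters listed in this card map directly onto `transformers` `Seq2SeqTrainingArguments` (the Adam betas and epsilon shown are the `Trainer` defaults). A minimal sketch of how this run could have been configured; the output directory name and the per-epoch evaluation cadence are assumptions inferred from the results table, not stated in the card:

```python
from transformers import Seq2SeqTrainingArguments

# Reconstruction of the card's listed hyperparameters; output_dir and the
# evaluation cadence are assumptions, everything else is taken from the card.
training_args = Seq2SeqTrainingArguments(
    output_dir="mt5-small-finetuned_samsum_summarization_model",  # assumed
    learning_rate=5.6e-5,
    per_device_train_batch_size=14,
    per_device_eval_batch_size=14,
    seed=42,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    num_train_epochs=5,
    evaluation_strategy="epoch",   # assumed from the per-epoch results table
    predict_with_generate=True,    # required to compute ROUGE on generated text
)
```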
config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "_name_or_path": "google/mt5-small",
+   "architectures": [
+     "MT5ForConditionalGeneration"
+   ],
+   "classifier_dropout": 0.0,
+   "d_ff": 1024,
+   "d_kv": 64,
+   "d_model": 512,
+   "decoder_start_token_id": 0,
+   "dense_act_fn": "gelu_new",
+   "dropout_rate": 0.1,
+   "eos_token_id": 1,
+   "feed_forward_proj": "gated-gelu",
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "is_gated_act": true,
+   "layer_norm_epsilon": 1e-06,
+   "model_type": "mt5",
+   "num_decoder_layers": 8,
+   "num_heads": 6,
+   "num_layers": 8,
+   "pad_token_id": 0,
+   "relative_attention_max_distance": 128,
+   "relative_attention_num_buckets": 32,
+   "tie_word_embeddings": false,
+   "tokenizer_class": "T5Tokenizer",
+   "torch_dtype": "float32",
+   "transformers_version": "4.35.2",
+   "use_cache": true,
+   "vocab_size": 250112
+ }
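
This config describes the unmodified mt5-small architecture (8 encoder/decoder layers, hidden size 512, 250k multilingual vocabulary). A short sketch of inspecting it with `AutoConfig`; the repo id is an assumption pieced together from the author and model name:

```python
from transformers import AutoConfig

# Repo id assumed from the committer and the model-index name above.
config = AutoConfig.from_pretrained(
    "Areeb123/mt5-small-finetuned_samsum_summarization_model"
)
print(config.model_type)   # "mt5"
print(config.num_layers)   # 8 encoder layers (num_decoder_layers is also 8)
print(config.d_model)      # 512, the hidden size of the small variant
print(config.vocab_size)   # 250112-entry multilingual SentencePiece vocabulary
```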
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "decoder_start_token_id": 0,
+   "eos_token_id": 1,
+   "pad_token_id": 0,
+   "transformers_version": "4.35.2"
+ }
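
This file only pins the special-token ids used at generation time: decoding starts from token 0 (`<pad>`, the usual T5 decoder start token) and stops at token 1 (`</s>`). A sketch of the equivalent object, assuming no other generation defaults:

```python
from transformers import GenerationConfig

# Equivalent of the JSON above; all other generation settings stay at defaults.
gen_config = GenerationConfig(
    decoder_start_token_id=0,  # <pad> doubles as the decoder start token
    eos_token_id=1,            # </s>
    pad_token_id=0,
)
```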
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:472c9df498faa69d99e3aa0ca442d46a7c221bddb3009f96aba36e3cb1ac082f
+ size 1200729512
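
The three lines above are a Git LFS pointer, not the weights themselves: the repo tracks only the SHA-256 and byte size (~1.2 GB) of `model.safetensors`, and LFS fetches the blob on checkout. A sketch of fetching just this file with `huggingface_hub`, under the same assumed repo id:

```python
from huggingface_hub import hf_hub_download

# Resolves the LFS pointer and downloads the ~1.2 GB weights file it references.
path = hf_hub_download(
    repo_id="Areeb123/mt5-small-finetuned_samsum_summarization_model",  # assumed
    filename="model.safetensors",
)
print(path)  # local cache path of the downloaded weights
```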
runs/Nov30_10-38-17_13547b54126b/events.out.tfevents.1701340714.13547b54126b.492.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c530971f2f8d63e2ff3fd4794e0dc69aa759242ac0485412c05aecd67738cac9
+ size 8095
runs/Nov30_12-04-36_13547b54126b/events.out.tfevents.1701345894.13547b54126b.21741.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f695edf17c1e03519e98fc859d86819f84fab7ea2b10eb98462cc71a6987d609
+ size 5848
runs/Nov30_12-38-30_13547b54126b/events.out.tfevents.1701347922.13547b54126b.30872.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:781e69961c8c69a40513e56f0c9e5c6b80e8feec3d3279ac57ee12f379552fe8
+ size 8095
runs/Nov30_13-50-10_13547b54126b/events.out.tfevents.1701352225.13547b54126b.48239.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b9688d0124bde926d54687c5c26ec9e3a883ec7878eba0b5fc7a1ddc43b51997
+ size 4586
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
spiece.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef78f86560d809067d12bac6c09f19a462cb3af3f54d2b8acbba26e1433125d6
+ size 4309802
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cfeaaf4007b61ad5df2070239ed1f7f2d2e1d743cc66bd0db752e051e80e7f00
+ size 16330558
tokenizer_config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "</s>",
+   "extra_ids": 0,
+   "legacy": true,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<pad>",
+   "sp_model_kwargs": {},
+   "tokenizer_class": "T5Tokenizer",
+   "unk_token": "<unk>"
+ }
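
The `added_tokens_decoder` map above fixes the special-token ids (`<pad>` = 0, `</s>` = 1, `<unk>` = 2), matching `pad_token_id`, `eos_token_id`, and the decoder start token in the configs. A quick sketch checking this after loading the tokenizer, again under the assumed repo id:

```python
from transformers import AutoTokenizer

# Repo id assumed; resolves to the T5Tokenizer configured above.
tokenizer = AutoTokenizer.from_pretrained(
    "Areeb123/mt5-small-finetuned_samsum_summarization_model"
)
print(tokenizer.pad_token_id)  # 0
print(tokenizer.eos_token_id)  # 1
print(tokenizer.unk_token_id)  # 2
```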
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:555039f3a7d85dfe1f7de31689b381691b2ba4059a6c6892aa5891ae7303b729
+ size 4792
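
With all of the files above in place, the checkpoint can be used directly for dialogue summarization. An end-to-end usage sketch; the repo id is assumed from the author and model name, and the dialogue is just a SAMSum-style example:

```python
from transformers import pipeline

# Loads the fine-tuned weights, tokenizer, and generation defaults from the hub.
summarizer = pipeline(
    "summarization",
    model="Areeb123/mt5-small-finetuned_samsum_summarization_model",  # assumed
)

dialogue = (
    "Amanda: I baked cookies. Do you want some?\n"
    "Jerry: Sure!\n"
    "Amanda: I'll bring you some tomorrow :-)"
)
print(summarizer(dialogue, max_length=60)[0]["summary_text"])
```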