José Ángel González committed on
Commit 9e49bf2
1 Parent(s): aa20c78
.gitignore ADDED
@@ -0,0 +1 @@
+ checkpoint-*/
README.md ADDED
@@ -0,0 +1,73 @@
+ ---
+ license: apache-2.0
+ tags:
+ - generated_from_trainer
+ metrics:
+ - rouge
+ model-index:
+ - name: mbarthez-davide_articles-copy_enhanced
+   results:
+   - task:
+       name: Summarization
+       type: summarization
+     metrics:
+     - name: Rouge1
+       type: rouge
+       value: 36.548
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # mbarthez-davide_articles-copy_enhanced
+
+ This model is a fine-tuned version of [moussaKam/mbarthez](https://huggingface.co/moussaKam/mbarthez) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 1.4905
+ - Rouge1: 36.548
+ - Rouge2: 19.6282
+ - Rougel: 30.2513
+ - Rougelsum: 30.2765
+ - Gen Len: 25.7238
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 3e-05
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 3.0
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+ | Training Loss | Epoch | Step   | Validation Loss | Rouge1  | Rouge2  | Rougel  | Rougelsum | Gen Len |
+ |:-------------:|:-----:|:------:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:|
+ | 1.6706        | 1.0   | 33552  | 1.5690          | 31.2477 | 16.5455 | 26.9855 | 26.9754   | 18.6217 |
+ | 1.3446        | 2.0   | 67104  | 1.5060          | 32.1108 | 17.1408 | 27.7833 | 27.7703   | 18.9115 |
+ | 1.3245        | 3.0   | 100656 | 1.4905          | 32.9084 | 17.7027 | 28.2912 | 28.2975   | 18.9801 |
+
+
+ ### Framework versions
+
+ - Transformers 4.10.2
+ - Pytorch 1.7.1+cu110
+ - Datasets 1.11.0
+ - Tokenizers 0.10.3
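
The card above leaves the usage sections empty, so here is a minimal, hedged loading sketch rather than the authors' own code. Note that `config.json` (below) declares a custom `MBartCopyEnhanced` architecture that stock transformers does not ship; `AutoModelForSeq2SeqLM` resolves the `mbart` model type to plain `MBartForConditionalGeneration`, so any copy-mechanism weights would be skipped with a warning. The local path and the example input are placeholders.

```python
# Minimal sketch, assuming plain-MBart loading is acceptable; this is
# not the authors' usage code. model_dir is a placeholder for a local
# checkout of this repository.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_dir = "./mbarthez-davide_articles-copy_enhanced"
tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = AutoModelForSeq2SeqLM.from_pretrained(model_dir)  # may warn about unused copy weights

article = "Texte de l'article à résumer."  # mBARThez is a French model
inputs = tokenizer(article, return_tensors="pt", truncation=True, max_length=1024)
summary_ids = model.generate(**inputs, num_beams=4, max_length=128)  # num_beams=4 per config.json
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```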
all_results.json ADDED
@@ -0,0 +1,18 @@
+ {
+     "epoch": 3.0,
+     "eval_gen_len": 25.7238,
+     "eval_loss": 1.4905033111572266,
+     "eval_rouge1": 36.548,
+     "eval_rouge2": 19.6282,
+     "eval_rougeL": 30.2513,
+     "eval_rougeLsum": 30.2765,
+     "eval_runtime": 530.3745,
+     "eval_samples": 2712,
+     "eval_samples_per_second": 5.113,
+     "eval_steps_per_second": 0.639,
+     "train_loss": 1.5143268576412658,
+     "train_runtime": 40494.9096,
+     "train_samples": 268414,
+     "train_samples_per_second": 19.885,
+     "train_steps_per_second": 2.486
+ }
config.json ADDED
@@ -0,0 +1,60 @@
+ {
+     "_name_or_path": "moussaKam/mbarthez",
+     "activation_dropout": 0.1,
+     "activation_function": "gelu",
+     "add_bias_logits": false,
+     "add_final_layer_norm": true,
+     "architectures": [
+         "MBartCopyEnhanced"
+     ],
+     "attention_dropout": 0.1,
+     "bos_token_id": 0,
+     "classif_dropout": 0.1,
+     "classifier_dropout": 0.0,
+     "d_model": 1024,
+     "decoder_attention_heads": 16,
+     "decoder_ffn_dim": 4096,
+     "decoder_layerdrop": 0.0,
+     "decoder_layers": 12,
+     "decoder_start_token_id": 2,
+     "dim_proj": 1024,
+     "do_blenderbot_90_layernorm": false,
+     "dropout": 0.1,
+     "early_stopping": true,
+     "encoder_attention_heads": 16,
+     "encoder_ffn_dim": 4096,
+     "encoder_layerdrop": 0.0,
+     "encoder_layers": 12,
+     "eos_token_id": 2,
+     "extra_pos_embeddings": 2,
+     "force_bos_token_to_be_generated": false,
+     "forced_eos_token_id": 2,
+     "gradient_checkpointing": false,
+     "id2label": {
+         "0": "LABEL_0",
+         "1": "LABEL_1",
+         "2": "LABEL_2"
+     },
+     "init_std": 0.02,
+     "is_encoder_decoder": true,
+     "label2id": {
+         "LABEL_0": 0,
+         "LABEL_1": 1,
+         "LABEL_2": 2
+     },
+     "max_position_embeddings": 1024,
+     "model_type": "mbart",
+     "no_repeat_ngram_size": 3,
+     "normalize_before": true,
+     "normalize_embedding": true,
+     "num_beams": 4,
+     "num_hidden_layers": 12,
+     "pad_token_id": 1,
+     "scale_embedding": true,
+     "static_position_embeddings": false,
+     "tokenizer_class": "BarthezTokenizer",
+     "torch_dtype": "float32",
+     "transformers_version": "4.10.2",
+     "use_cache": true,
+     "vocab_size": 101122
+ }
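
Since `model_type` stays `mbart`, the standard config machinery can still read this file even though the `MBartCopyEnhanced` class itself is custom. A minimal sketch (the local path is an assumption); non-standard keys such as `dim_proj` survive as plain attributes:

```python
# Minimal sketch: inspecting the config above with the stock
# transformers API. "./" assumes a local checkout of this repo.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("./")
print(config.model_type)  # "mbart"
print(config.d_model)     # 1024
print(config.vocab_size)  # 101122
print(config.dim_proj)    # 1024; custom key, kept as an extra attribute
```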
eval_results.json ADDED
@@ -0,0 +1,13 @@
+ {
+     "epoch": 3.0,
+     "eval_gen_len": 25.7238,
+     "eval_loss": 1.4905033111572266,
+     "eval_rouge1": 36.548,
+     "eval_rouge2": 19.6282,
+     "eval_rougeL": 30.2513,
+     "eval_rougeLsum": 30.2765,
+     "eval_runtime": 530.3745,
+     "eval_samples": 2712,
+     "eval_samples_per_second": 5.113,
+     "eval_steps_per_second": 0.639
+ }
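
The `eval_rouge*` values above are F-measures scaled to 0–100, averaged over the 2712 evaluation samples. A hedged sketch of the per-pair computation, assuming the `rouge_score` backend that the transformers summarization examples of this era relied on (the French strings are placeholders):

```python
# Minimal sketch of how one (prediction, reference) pair is scored;
# rouge_score as the backend is an assumption.
from rouge_score import rouge_scorer

scorer = rouge_scorer.RougeScorer(
    ["rouge1", "rouge2", "rougeL", "rougeLsum"], use_stemmer=True
)
scores = scorer.score(
    target="le résumé de référence",  # reference summary
    prediction="le résumé généré",    # model output
)
# eval_rouge1 etc. correspond to fmeasure * 100, averaged over the set
print({k: round(v.fmeasure * 100, 4) for k, v in scores.items()})
```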
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b8499db984189f64dc103cc07d6913f7ad2dd079518881565271140390bfc671
+ size 1834120283
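
This is a Git LFS pointer, not the weights themselves; after `git lfs pull`, the downloaded file can be checked against the `oid` above. A minimal sketch (the path assumes a local checkout):

```python
# Minimal sketch: verify the downloaded weights match the LFS oid above.
import hashlib

EXPECTED = "b8499db984189f64dc103cc07d6913f7ad2dd079518881565271140390bfc671"

h = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        h.update(chunk)

assert h.hexdigest() == EXPECTED, "still an LFS pointer, or a corrupt download"
```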
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f7fff580d02c71f72575747fdc1dffa5698f8a88a02dfafa25c0a63df6dbe967
+ size 1966695
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "cls_token": "<s>", "pad_token": "<pad>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "moussaKam/mbarthez", "tokenizer_class": "BarthezTokenizer"}
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 3.0,
+     "train_loss": 1.5143268576412658,
+     "train_runtime": 40494.9096,
+     "train_samples": 268414,
+     "train_samples_per_second": 19.885,
+     "train_steps_per_second": 2.486
+ }
trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3c3c2f68a6d422960ca5a47f11fddaadcf1280c8d5bbb30a2650521d6a5e0a7d
+ size 2927