MorenoLaQuatra committed on
Commit
464e27c
1 Parent(s): 52b7ab7

Model Publication

README.md CHANGED
@@ -1,3 +1,66 @@
  ---
+ language: "it"
  license: mit
+ datasets:
+ - ARTeLab/ilpost
+ tags:
+ - bart
+ - pytorch
+ pipeline_tag: summarization
  ---
+
+ # BART-IT - IlPost abstractive summarization
+
+ BART-IT is a sequence-to-sequence model based on the BART architecture and specifically tailored to the Italian language. The model is pre-trained on a [large corpus of Italian text](https://huggingface.co/datasets/gsarti/clean_mc4_it) and can be fine-tuned on a variety of downstream tasks.
+
+ ## Model description
+
+ The model is a `base`-sized BART model with a vocabulary of 52,000 tokens and roughly 140M parameters. It is trained from scratch on a large corpus of Italian text and can be used for any task that requires a sequence-to-sequence model.
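+
+ As a quick check (a minimal sketch, assuming the `transformers` library is installed), you can confirm the parameter count directly from the published checkpoint:
+
+ ```python
+ from transformers import AutoModelForSeq2SeqLM
+
+ # Load the published checkpoint (downloads the weights on first use)
+ model = AutoModelForSeq2SeqLM.from_pretrained("morenolq/bart-it-ilpost")
+
+ # num_parameters() counts all trainable weights, including the
+ # 52,000-token embedding matrix shared between encoder and decoder
+ print(f"{model.num_parameters():,} parameters")  # expected: ~140M
+ ```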
+
+ ## Pre-training
+
+ The code used to pre-train BART-IT, together with additional information on the model parameters, can be found [here](https://github.com/MorenoLaQuatra/bart-it).
+
+ ## Fine-tuning
+
+ The model has been fine-tuned for abstractive summarization on 3 different Italian datasets:
+
+ - [FanPage](https://huggingface.co/datasets/ARTeLab/fanpage) - fine-tuned model [here](https://huggingface.co/MorenoLaQuatra/bart-it-fanpage)
+ - [IlPost](https://huggingface.co/datasets/ARTeLab/ilpost) (**this model**) - fine-tuned model [here](https://huggingface.co/MorenoLaQuatra/bart-it-ilpost)
+ - [WITS](https://huggingface.co/datasets/Silvia/WITS) - fine-tuned model [here](https://huggingface.co/MorenoLaQuatra/bart-it-WITS)
+
+ ## Usage
+
+ You can use the model as follows:
+
+ ```python
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+
+ # Load the tokenizer and the fine-tuned summarization model
+ tokenizer = AutoTokenizer.from_pretrained("morenolq/bart-it-ilpost")
+ model = AutoModelForSeq2SeqLM.from_pretrained("morenolq/bart-it-ilpost")
+
+ # Tokenize the input text and generate a summary with beam search
+ input_ids = tokenizer.encode("Il modello BART-IT è stato pre-addestrato su un corpus di testo italiano", return_tensors="pt")
+ outputs = model.generate(input_ids, max_length=40, num_beams=4, early_stopping=True)
+ print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+ ```
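+
+ Alternatively, as a minimal sketch assuming the standard `transformers` pipeline API, the same model can be run through a `summarization` pipeline:
+
+ ```python
+ from transformers import pipeline
+
+ # The pipeline wraps tokenization, generation, and decoding in one call
+ summarizer = pipeline("summarization", model="morenolq/bart-it-ilpost")
+
+ result = summarizer(
+     "Il modello BART-IT è stato pre-addestrato su un corpus di testo italiano",
+     max_length=40,
+     num_beams=4,
+ )
+ print(result[0]["summary_text"])
+ ```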
+
+ # Citation
+
+ If you find this model useful for your research, please cite the following paper:
+
+ ```bibtex
+ @Article{BARTIT,
+   AUTHOR = {La Quatra, Moreno and Cagliero, Luca},
+   TITLE = {BART-IT: An Efficient Sequence-to-Sequence Model for Italian Text Summarization},
+   JOURNAL = {Future Internet},
+   VOLUME = {15},
+   YEAR = {2023},
+   NUMBER = {1},
+   ARTICLE-NUMBER = {15},
+   URL = {https://www.mdpi.com/1999-5903/15/1/15},
+   ISSN = {1999-5903},
+   DOI = {10.3390/fi15010015}
+ }
+ ```
config.json ADDED
@@ -0,0 +1,45 @@
+ {
+   "_name_or_path": "../bart-it-s",
+   "activation_dropout": 0.0,
+   "activation_function": "gelu",
+   "architectures": [
+     "BartForConditionalGeneration"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 0,
+   "classifier_dropout": 0.0,
+   "d_model": 768,
+   "decoder_attention_heads": 12,
+   "decoder_ffn_dim": 3072,
+   "decoder_layerdrop": 0.0,
+   "decoder_layers": 6,
+   "decoder_start_token_id": 2,
+   "dropout": 0.1,
+   "encoder_attention_heads": 12,
+   "encoder_ffn_dim": 3072,
+   "encoder_layerdrop": 0.0,
+   "encoder_layers": 6,
+   "eos_token_id": 2,
+   "forced_eos_token_id": 2,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2"
+   },
+   "init_std": 0.02,
+   "is_encoder_decoder": true,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2
+   },
+   "max_position_embeddings": 1024,
+   "model_type": "bart",
+   "num_hidden_layers": 6,
+   "pad_token_id": 1,
+   "scale_embedding": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.22.1",
+   "use_cache": true,
+   "vocab_size": 52000
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7d5596993e2811d9c85651cc6620df38d130d0a46bd11ceb652e3cc287254252
+ size 563305977
special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "cls_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,61 @@
+ {
+   "add_prefix_space": false,
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "cls_token": {
+     "__type": "AddedToken",
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "errors": "replace",
+   "mask_token": {
+     "__type": "AddedToken",
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "__type": "AddedToken",
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "__type": "AddedToken",
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "tokenizer_class": "BartTokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8d9eeeb1f0a85cdac66a717153ef2041b15ca9b52e193488c238d7eced11b262
+ size 3375
vocab.json ADDED
The diff for this file is too large to render. See raw diff