mlenjoyneer commited on
Commit
e107cff
1 Parent(s): 94438b0

Upload 6 files

Browse files
Files changed (6) hide show
  1. README.md +55 -0
  2. config.json +63 -0
  3. generation_config.json +9 -0
  4. old_spiece.model +3 -0
  5. pytorch_model.bin +3 -0
  6. spiece.model +3 -0
README.md CHANGED
@@ -1,3 +1,58 @@
1
  ---
 
2
  license: apache-2.0
 
 
3
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ language: ru
3
  license: apache-2.0
4
+ datasets:
5
+ - IlyaGusev/gazeta
6
  ---
7
+
8
+ # RuT5LargeSumGazeta
9
+
10
+ ## Model description
11
+
12
+ This is a model for abstractive summarization of Russian texts, based on ai-forever/ruT5-large.
13
+
14
+ ## Intended uses & limitations
15
+
16
+ ### How to use
17
+
18
+ Here is how to use this model in PyTorch:
19
+
20
+ ```python
21
+ from transformers import AutoTokenizer, T5ForConditionalGeneration
22
+
23
+ model_name = "mlenjoyneer/rut5_large_sum_gazeta"
24
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
25
+ model = T5ForConditionalGeneration.from_pretrained(model_name)
26
+
27
+ article_text = "..."
28
+
29
+ input_ids = tokenizer(
30
+ [article_text],
31
+ max_length=600,
32
+ add_special_tokens=True,
33
+ padding="max_length",
34
+ truncation=True,
35
+ return_tensors="pt"
36
+ )["input_ids"]
37
+
38
+ output_ids = model.generate(
39
+ input_ids=input_ids,
40
+ no_repeat_ngram_size=4
41
+ )[0]
42
+
43
+ summary = tokenizer.decode(output_ids, skip_special_tokens=True)
44
+ print(summary)
45
+ ```
46
+
47
+ ## Training data
48
+
49
+ - Dataset: [Gazeta](https://huggingface.co/datasets/IlyaGusev/gazeta)
50
+
51
+ ## Evaluation results
52
+
53
+ | Model | R-1-f | R-2-f | R-L-f | chrF | BLEU | Avg char length |
54
+ | -------------------------------------------- | ----- | ----- | ----- | ---- | ---- | --------------- |
55
+ | IlyaGusev/mbart_ru_sum_gazeta | 28.7 | 11.1 | 24.4 | **37.3** | **9.4** | 373 |
56
+ | IlyaGusev/rut5_base_sum_gazeta | 28.6 | 11.1 | 24.5 | 37.2 | **9.4** | 331 |
57
+ | IlyaGusev/rugpt3medium_sum_gazeta | 24.1 | 6.5 | 19.8 | 32.1 | 3.6 | 242 |
58
+ | mlenjoyneer/rut5_large_sum_gazeta | **29.6** | **11.7** | **25.2** | **37.3** | **9.4** | 304 |
config.json ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "./rut5-large",
3
+ "architectures": [
4
+ "T5ForConditionalGeneration"
5
+ ],
6
+ "bos_token_id": 1,
7
+ "d_ff": 4096,
8
+ "d_kv": 64,
9
+ "d_model": 1024,
10
+ "decoder_start_token_id": 1,
11
+ "dense_act_fn": "relu",
12
+ "dropout_rate": 0.1,
13
+ "eos_token_id": 2,
14
+ "feed_forward_proj": "relu",
15
+ "initializer_factor": 1.0,
16
+ "is_encoder_decoder": true,
17
+ "is_gated_act": false,
18
+ "layer_norm_epsilon": 1e-06,
19
+ "max_length": 120,
20
+ "model_type": "t5",
21
+ "n_positions": 512,
22
+ "num_beams": 4,
23
+ "num_decoder_layers": 24,
24
+ "num_heads": 16,
25
+ "num_layers": 24,
26
+ "output_past": true,
27
+ "pad_token_id": 0,
28
+ "relative_attention_max_distance": 128,
29
+ "relative_attention_num_buckets": 32,
30
+ "task_specific_params": {
31
+ "summarization": {
32
+ "early_stopping": true,
33
+ "length_penalty": 2.0,
34
+ "max_length": 200,
35
+ "min_length": 30,
36
+ "no_repeat_ngram_size": 3,
37
+ "num_beams": 4,
38
+ "prefix": "summarize: "
39
+ },
40
+ "translation_en_to_de": {
41
+ "early_stopping": true,
42
+ "max_length": 300,
43
+ "num_beams": 4,
44
+ "prefix": "translate English to German: "
45
+ },
46
+ "translation_en_to_fr": {
47
+ "early_stopping": true,
48
+ "max_length": 300,
49
+ "num_beams": 4,
50
+ "prefix": "translate English to French: "
51
+ },
52
+ "translation_en_to_ro": {
53
+ "early_stopping": true,
54
+ "max_length": 300,
55
+ "num_beams": 4,
56
+ "prefix": "translate English to Romanian: "
57
+ }
58
+ },
59
+ "torch_dtype": "float32",
60
+ "transformers_version": "4.31.0",
61
+ "use_cache": true,
62
+ "vocab_size": 32128
63
+ }
generation_config.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token_id": 1,
3
+ "decoder_start_token_id": 1,
4
+ "eos_token_id": 2,
5
+ "max_length": 120,
6
+ "num_beams": 4,
7
+ "pad_token_id": 0,
8
+ "transformers_version": "4.31.0"
9
+ }
old_spiece.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c43d5d89523c7b40603a3164ce65244b8f604adea7c191ab7ce55f3ef3907700
3
+ size 1003142
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b660bc62b1a76950be1555559c5c1728e6b156aa69dd43dd63025d6da80bb889
3
+ size 2950848513
spiece.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7a4eb87011448a4564a3144979384da51eee1da95e554feb22ccc85529535dd5
3
+ size 1003118