A-C-E committed
Commit bc45211
1 Parent(s): d7550f5

Upload 29 files
checkpoint-1000/config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "google/pegasus-large",
+  "_name_or_path": "google/pegasus-cnn_dailymail",
   "activation_dropout": 0.1,
   "activation_function": "relu",
   "add_bias_logits": false,
@@ -10,7 +10,6 @@
   "attention_dropout": 0.1,
   "bos_token_id": 0,
   "classif_dropout": 0.0,
-  "classifier_dropout": 0.0,
   "d_model": 1024,
   "decoder_attention_heads": 16,
   "decoder_ffn_dim": 4096,
@@ -24,9 +23,7 @@
   "encoder_layers": 16,
   "eos_token_id": 1,
   "extra_pos_embeddings": 1,
-  "force_bos_token_to_be_generated": false,
   "forced_eos_token_id": 1,
-  "gradient_checkpointing": false,
   "id2label": {
     "0": "LABEL_0",
     "1": "LABEL_1",
@@ -40,8 +37,9 @@
     "LABEL_2": 2
   },
   "length_penalty": 0.8,
-  "max_length": 256,
+  "max_length": 128,
   "max_position_embeddings": 1024,
+  "min_length": 32,
   "model_type": "pegasus",
   "normalize_before": true,
   "normalize_embedding": false,
@@ -50,73 +48,6 @@
   "pad_token_id": 0,
   "scale_embedding": true,
   "static_position_embeddings": true,
-  "task_specific_params": {
-    "summarization_aeslc": {
-      "length_penalty": 0.6,
-      "max_length": 32,
-      "max_position_embeddings": 512
-    },
-    "summarization_arxiv": {
-      "length_penalty": 0.8,
-      "max_length": 256,
-      "max_position_embeddings": 1024
-    },
-    "summarization_big_patent": {
-      "length_penalty": 0.7,
-      "max_length": 256,
-      "max_position_embeddings": 1024
-    },
-    "summarization_billsum": {
-      "length_penalty": 0.6,
-      "max_length": 256,
-      "max_position_embeddings": 1024
-    },
-    "summarization_cnn_dailymail": {
-      "length_penalty": 0.8,
-      "max_length": 128,
-      "max_position_embeddings": 1024
-    },
-    "summarization_gigaword": {
-      "length_penalty": 0.6,
-      "max_length": 32,
-      "max_position_embeddings": 128
-    },
-    "summarization_large": {
-      "length_penalty": 0.8,
-      "max_length": 256,
-      "max_position_embeddings": 1024
-    },
-    "summarization_multi_news": {
-      "length_penalty": 0.8,
-      "max_length": 256,
-      "max_position_embeddings": 1024
-    },
-    "summarization_newsroom": {
-      "length_penalty": 0.8,
-      "max_length": 128,
-      "max_position_embeddings": 512
-    },
-    "summarization_pubmed": {
-      "length_penalty": 0.8,
-      "max_length": 256,
-      "max_position_embeddings": 1024
-    },
-    "summarization_reddit_tifu": {
-      "length_penalty": 0.6,
-      "max_length": 128,
-      "max_position_embeddings": 512
-    },
-    "summarization_wikihow": {
-      "length_penalty": 0.6,
-      "max_length": 256,
-      "max_position_embeddings": 512
-    },
-    "summarization_xsum": {
-      "length_penalty": 0.8,
-      "max_length": 64,
-      "max_position_embeddings": 512
-    }
-  },
   "torch_dtype": "float32",
   "transformers_version": "4.35.2",
   "use_cache": true,
checkpoint-1000/generation_config.json CHANGED
@@ -4,7 +4,8 @@
   "eos_token_id": 1,
   "forced_eos_token_id": 1,
   "length_penalty": 0.8,
-  "max_length": 256,
+  "max_length": 128,
+  "min_length": 32,
   "num_beams": 8,
   "pad_token_id": 0,
   "transformers_version": "4.35.2"
checkpoint-1000/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:19d44606646abedfeff7cf7c6ee2a06921e1022af697cd6a9e3390391a155ac3
+oid sha256:fc975443e2eecbee388630f54f420bcec81737434bc9d6d46b0561f601e67572
 size 2283652852
checkpoint-1000/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cc2e5a49d6f16090a3cedb1222c08610acc19fc48c0fa7595ed341cb53b9d87f
+oid sha256:f126fb78514701ee7ab1e8251fae95a6fb208e28cd53f0ed8d7407bdb2451fc9
 size 4550170737
checkpoint-1000/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:24331779cf5a879ef2d67d98058afca3fe15b1b15824c82e8dc22985f06fde1c
+oid sha256:39321034682cdf62b8b5f1b226a7deb62ebf0e5e619ffc243c1f9bf89df4089b
 size 14244
checkpoint-1000/trainer_state.json CHANGED
@@ -10,60 +10,60 @@
   "log_history": [
     {
       "epoch": 1.0,
-      "eval_loss": 1.7080590724945068,
+      "eval_loss": 2.069897174835205,
       "eval_rouge-1": {
-        "f": 0.3602114751016854,
-        "p": 0.34496959357843615,
-        "r": 0.4226024767202879
+        "f": 0.3774477031775325,
+        "p": 0.3995562278312168,
+        "r": 0.3726788874248043
       },
       "eval_rouge-2": {
-        "f": 0.15643009303399508,
-        "p": 0.15275740366063076,
-        "r": 0.19066724740497631
+        "f": 0.17076277482918784,
+        "p": 0.18355330883104226,
+        "r": 0.16801465382058134
       },
       "eval_rouge-l": {
-        "f": 0.32208903782335413,
-        "p": 0.30833161329969466,
-        "r": 0.37836219852997405
+        "f": 0.3426353352122299,
+        "p": 0.3630648264869712,
+        "r": 0.33789919649146194
       },
-      "eval_runtime": 2219.4171,
-      "eval_samples_per_second": 0.382,
-      "eval_steps_per_second": 0.048,
+      "eval_runtime": 951.0251,
+      "eval_samples_per_second": 0.891,
+      "eval_steps_per_second": 0.111,
       "step": 423
     },
     {
       "epoch": 1.18,
       "learning_rate": 6.1150512214342e-07,
-      "loss": 2.1563,
+      "loss": 2.7455,
       "step": 500
     },
     {
       "epoch": 2.0,
-      "eval_loss": 1.6040149927139282,
+      "eval_loss": 1.9773768186569214,
       "eval_rouge-1": {
-        "f": 0.3612603719250026,
-        "p": 0.34358610140933893,
-        "r": 0.42870738318419865
+        "f": 0.38869549773455014,
+        "p": 0.4028061100223781,
+        "r": 0.39196258894703817
       },
       "eval_rouge-2": {
-        "f": 0.157552839681331,
-        "p": 0.15225476322754022,
-        "r": 0.19552963369178558
+        "f": 0.18064299244610635,
+        "p": 0.18994216402253805,
+        "r": 0.18189618469586705
       },
       "eval_rouge-l": {
-        "f": 0.3229502551530144,
-        "p": 0.30691966907819435,
-        "r": 0.3838586158422521
+        "f": 0.3535216132968263,
+        "p": 0.3670341581740442,
+        "r": 0.3558599440954195
       },
-      "eval_runtime": 2223.1583,
-      "eval_samples_per_second": 0.381,
-      "eval_steps_per_second": 0.048,
+      "eval_runtime": 966.3939,
+      "eval_samples_per_second": 0.876,
+      "eval_steps_per_second": 0.11,
       "step": 846
     },
     {
       "epoch": 2.36,
       "learning_rate": 2.1907013396375099e-07,
-      "loss": 1.8944,
+      "loss": 2.5092,
       "step": 1000
     }
   ],
@@ -71,7 +71,7 @@
   "max_steps": 1269,
   "num_train_epochs": 3,
   "save_steps": 500,
-  "total_flos": 1.1508669039968256e+16,
+  "total_flos": 1.1514312525152256e+16,
   "trial_name": null,
   "trial_params": null
 }
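The trainer_state.json diff above swaps out the logged evaluation history for the new run. As a minimal sketch for reading those metrics back out of a checkpoint directory (Python standard library only; the local path is illustrative):

import json

# Hypothetical local path to this checkpoint; adjust to wherever the repo is cloned.
with open("checkpoint-1000/trainer_state.json") as f:
    state = json.load(f)

# Print eval loss and ROUGE-1/2/L F-scores for each logged evaluation epoch.
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(
            f"epoch {entry['epoch']}: "
            f"loss={entry['eval_loss']:.4f}, "
            f"rouge1_f={entry['eval_rouge-1']['f']:.4f}, "
            f"rouge2_f={entry['eval_rouge-2']['f']:.4f}, "
            f"rougeL_f={entry['eval_rouge-l']['f']:.4f}"
        )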
checkpoint-1000/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:516e059b7c8633fc443edc3f49f50ac8c78fe9a0ca1e274222be68a0dfd4a5b9
+oid sha256:d857e9181d05ce6c81a8a974a12df0eabe666a7930c097557fcfb2f44cbb2e1e
 size 4728
checkpoint-500/config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "google/pegasus-large",
+  "_name_or_path": "google/pegasus-cnn_dailymail",
   "activation_dropout": 0.1,
   "activation_function": "relu",
   "add_bias_logits": false,
@@ -10,7 +10,6 @@
   "attention_dropout": 0.1,
   "bos_token_id": 0,
   "classif_dropout": 0.0,
-  "classifier_dropout": 0.0,
   "d_model": 1024,
   "decoder_attention_heads": 16,
   "decoder_ffn_dim": 4096,
@@ -24,9 +23,7 @@
   "encoder_layers": 16,
   "eos_token_id": 1,
   "extra_pos_embeddings": 1,
-  "force_bos_token_to_be_generated": false,
   "forced_eos_token_id": 1,
-  "gradient_checkpointing": false,
   "id2label": {
     "0": "LABEL_0",
     "1": "LABEL_1",
@@ -40,8 +37,9 @@
     "LABEL_2": 2
   },
   "length_penalty": 0.8,
-  "max_length": 256,
+  "max_length": 128,
   "max_position_embeddings": 1024,
+  "min_length": 32,
   "model_type": "pegasus",
   "normalize_before": true,
   "normalize_embedding": false,
@@ -50,73 +48,6 @@
   "pad_token_id": 0,
   "scale_embedding": true,
   "static_position_embeddings": true,
-  "task_specific_params": {
-    "summarization_aeslc": {
-      "length_penalty": 0.6,
-      "max_length": 32,
-      "max_position_embeddings": 512
-    },
-    "summarization_arxiv": {
-      "length_penalty": 0.8,
-      "max_length": 256,
-      "max_position_embeddings": 1024
-    },
-    "summarization_big_patent": {
-      "length_penalty": 0.7,
-      "max_length": 256,
-      "max_position_embeddings": 1024
-    },
-    "summarization_billsum": {
-      "length_penalty": 0.6,
-      "max_length": 256,
-      "max_position_embeddings": 1024
-    },
-    "summarization_cnn_dailymail": {
-      "length_penalty": 0.8,
-      "max_length": 128,
-      "max_position_embeddings": 1024
-    },
-    "summarization_gigaword": {
-      "length_penalty": 0.6,
-      "max_length": 32,
-      "max_position_embeddings": 128
-    },
-    "summarization_large": {
-      "length_penalty": 0.8,
-      "max_length": 256,
-      "max_position_embeddings": 1024
-    },
-    "summarization_multi_news": {
-      "length_penalty": 0.8,
-      "max_length": 256,
-      "max_position_embeddings": 1024
-    },
-    "summarization_newsroom": {
-      "length_penalty": 0.8,
-      "max_length": 128,
-      "max_position_embeddings": 512
-    },
-    "summarization_pubmed": {
-      "length_penalty": 0.8,
-      "max_length": 256,
-      "max_position_embeddings": 1024
-    },
-    "summarization_reddit_tifu": {
-      "length_penalty": 0.6,
-      "max_length": 128,
-      "max_position_embeddings": 512
-    },
-    "summarization_wikihow": {
-      "length_penalty": 0.6,
-      "max_length": 256,
-      "max_position_embeddings": 512
-    },
-    "summarization_xsum": {
-      "length_penalty": 0.8,
-      "max_length": 64,
-      "max_position_embeddings": 512
-    }
-  },
   "torch_dtype": "float32",
   "transformers_version": "4.35.2",
   "use_cache": true,
checkpoint-500/generation_config.json CHANGED
@@ -4,7 +4,8 @@
   "eos_token_id": 1,
   "forced_eos_token_id": 1,
   "length_penalty": 0.8,
-  "max_length": 256,
+  "max_length": 128,
+  "min_length": 32,
   "num_beams": 8,
   "pad_token_id": 0,
   "transformers_version": "4.35.2"
checkpoint-500/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:82cd843244eafc2ef804646426b877107fc55ef0e31d9e7f6f91e2d6ddd235e1
+oid sha256:5a55fc9a51b4dafd4e2da8a9991ea57100024c2756c6b76601c0479b20aef950
 size 2283652852
checkpoint-500/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:22641152bbf9588c1cdef6a0daf16c4fa0ad61683710bacf7347774f42c547b2
+oid sha256:c847315530b20a629e7913e40956f0bd8201ba86e1acb6d34649be925603d40c
 size 4550170737
checkpoint-500/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:720e736989b19f04509323a646e0f7ac91dd5fe6920d5a542aaa26a2b8f738ac
+oid sha256:6e19a3fa6b1bb8fe344226cf1d6a69b5fa2f0142cb55e74a7738945e2b10eafd
 size 14244
checkpoint-500/trainer_state.json CHANGED
@@ -10,31 +10,31 @@
   "log_history": [
     {
       "epoch": 1.0,
-      "eval_loss": 1.7080590724945068,
+      "eval_loss": 2.069897174835205,
       "eval_rouge-1": {
-        "f": 0.3602114751016854,
-        "p": 0.34496959357843615,
-        "r": 0.4226024767202879
+        "f": 0.3774477031775325,
+        "p": 0.3995562278312168,
+        "r": 0.3726788874248043
       },
       "eval_rouge-2": {
-        "f": 0.15643009303399508,
-        "p": 0.15275740366063076,
-        "r": 0.19066724740497631
+        "f": 0.17076277482918784,
+        "p": 0.18355330883104226,
+        "r": 0.16801465382058134
       },
       "eval_rouge-l": {
-        "f": 0.32208903782335413,
-        "p": 0.30833161329969466,
-        "r": 0.37836219852997405
+        "f": 0.3426353352122299,
+        "p": 0.3630648264869712,
+        "r": 0.33789919649146194
       },
-      "eval_runtime": 2219.4171,
-      "eval_samples_per_second": 0.382,
-      "eval_steps_per_second": 0.048,
+      "eval_runtime": 951.0251,
+      "eval_samples_per_second": 0.891,
+      "eval_steps_per_second": 0.111,
       "step": 423
     },
     {
       "epoch": 1.18,
       "learning_rate": 6.1150512214342e-07,
-      "loss": 2.1563,
+      "loss": 2.7455,
       "step": 500
     }
   ],
@@ -42,7 +42,7 @@
   "max_steps": 1269,
   "num_train_epochs": 3,
   "save_steps": 500,
-  "total_flos": 5752472169873408.0,
+  "total_flos": 5756986958020608.0,
   "trial_name": null,
   "trial_params": null
 }
checkpoint-500/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:516e059b7c8633fc443edc3f49f50ac8c78fe9a0ca1e274222be68a0dfd4a5b9
+oid sha256:d857e9181d05ce6c81a8a974a12df0eabe666a7930c097557fcfb2f44cbb2e1e
 size 4728
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "google/pegasus-large",
+  "_name_or_path": "google/pegasus-cnn_dailymail",
   "activation_dropout": 0.1,
   "activation_function": "relu",
   "add_bias_logits": false,
@@ -10,7 +10,6 @@
   "attention_dropout": 0.1,
   "bos_token_id": 0,
   "classif_dropout": 0.0,
-  "classifier_dropout": 0.0,
   "d_model": 1024,
   "decoder_attention_heads": 16,
   "decoder_ffn_dim": 4096,
@@ -24,9 +23,7 @@
   "encoder_layers": 16,
   "eos_token_id": 1,
   "extra_pos_embeddings": 1,
-  "force_bos_token_to_be_generated": false,
   "forced_eos_token_id": 1,
-  "gradient_checkpointing": false,
   "id2label": {
     "0": "LABEL_0",
     "1": "LABEL_1",
@@ -40,8 +37,9 @@
     "LABEL_2": 2
   },
   "length_penalty": 0.8,
-  "max_length": 256,
+  "max_length": 128,
   "max_position_embeddings": 1024,
+  "min_length": 32,
   "model_type": "pegasus",
   "normalize_before": true,
   "normalize_embedding": false,
@@ -50,73 +48,6 @@
   "pad_token_id": 0,
   "scale_embedding": true,
   "static_position_embeddings": true,
-  "task_specific_params": {
-    "summarization_aeslc": {
-      "length_penalty": 0.6,
-      "max_length": 32,
-      "max_position_embeddings": 512
-    },
-    "summarization_arxiv": {
-      "length_penalty": 0.8,
-      "max_length": 256,
-      "max_position_embeddings": 1024
-    },
-    "summarization_big_patent": {
-      "length_penalty": 0.7,
-      "max_length": 256,
-      "max_position_embeddings": 1024
-    },
-    "summarization_billsum": {
-      "length_penalty": 0.6,
-      "max_length": 256,
-      "max_position_embeddings": 1024
-    },
-    "summarization_cnn_dailymail": {
-      "length_penalty": 0.8,
-      "max_length": 128,
-      "max_position_embeddings": 1024
-    },
-    "summarization_gigaword": {
-      "length_penalty": 0.6,
-      "max_length": 32,
-      "max_position_embeddings": 128
-    },
-    "summarization_large": {
-      "length_penalty": 0.8,
-      "max_length": 256,
-      "max_position_embeddings": 1024
-    },
-    "summarization_multi_news": {
-      "length_penalty": 0.8,
-      "max_length": 256,
-      "max_position_embeddings": 1024
-    },
-    "summarization_newsroom": {
-      "length_penalty": 0.8,
-      "max_length": 128,
-      "max_position_embeddings": 512
-    },
-    "summarization_pubmed": {
-      "length_penalty": 0.8,
-      "max_length": 256,
-      "max_position_embeddings": 1024
-    },
-    "summarization_reddit_tifu": {
-      "length_penalty": 0.6,
-      "max_length": 128,
-      "max_position_embeddings": 512
-    },
-    "summarization_wikihow": {
-      "length_penalty": 0.6,
-      "max_length": 256,
-      "max_position_embeddings": 512
-    },
-    "summarization_xsum": {
-      "length_penalty": 0.8,
-      "max_length": 64,
-      "max_position_embeddings": 512
-    }
-  },
   "torch_dtype": "float32",
   "transformers_version": "4.35.2",
   "use_cache": true,
generation_config.json CHANGED
@@ -4,7 +4,8 @@
   "eos_token_id": 1,
   "forced_eos_token_id": 1,
   "length_penalty": 0.8,
-  "max_length": 256,
+  "max_length": 128,
+  "min_length": 32,
   "num_beams": 8,
   "pad_token_id": 0,
   "transformers_version": "4.35.2"
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:595c05347075430c6ca1de7f3cbd98a61c95d9304b9ebe973f17a362d92bca01
+oid sha256:309daf1e4a09dbad57538ba41a24416a4390f5b8244a0477986e0b2140029b93
 size 2283652852
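model.safetensors, optimizer.pt, rng_state.pth and training_args.bin are tracked with Git LFS, so each of these diffs only replaces the pointer's sha256 oid; the binary itself is fetched separately. A minimal sketch (standard library only, local file path assumed) for checking a downloaded model.safetensors against the pointer committed here:

import hashlib

# Expected oid and size taken from the new LFS pointer in this commit.
expected_oid = "309daf1e4a09dbad57538ba41a24416a4390f5b8244a0477986e0b2140029b93"
expected_size = 2283652852

h = hashlib.sha256()
size = 0
with open("model.safetensors", "rb") as f:  # hypothetical local path after `git lfs pull`
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
        size += len(chunk)

print("oid ok: ", h.hexdigest() == expected_oid)
print("size ok:", size == expected_size)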
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:516e059b7c8633fc443edc3f49f50ac8c78fe9a0ca1e274222be68a0dfd4a5b9
+oid sha256:d857e9181d05ce6c81a8a974a12df0eabe666a7930c097557fcfb2f44cbb2e1e
 size 4728