spacemanidol committed
Commit af4c416
1 Parent(s): 99e116e

Upload 13 files

README.md CHANGED
@@ -6,7 +6,7 @@ datasets:
  metrics:
  - rouge
  model-index:
- - name: base-6-2-t
+ - name: base-6-2
  results:
  - task:
  name: Summarization
@@ -20,22 +20,22 @@ model-index:
  metrics:
  - name: Rouge1
  type: rouge
- value: 8.6978
+ value: 40.3079
  ---

  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
  should probably proofread and complete it, then remove this comment. -->

- # base-6-2-t
+ # base-6-2

- This model is a fine-tuned version of [cnn/base-6-2](https://huggingface.co/cnn/base-6-2) on the cnn_dailymail 3.0.0 dataset.
+ This model is a fine-tuned version of [cnn/base-6-2/](https://huggingface.co/cnn/base-6-2/) on the cnn_dailymail 3.0.0 dataset.
  It achieves the following results on the evaluation set:
- - Loss: nan
- - Rouge1: 8.6978
- - Rouge2: 0.5375
- - Rougel: 6.7006
- - Rougelsum: 7.8899
- - Gen Len: 113.9803
+ - Loss: 1.6768
+ - Rouge1: 40.3079
+ - Rouge2: 18.6849
+ - Rougel: 29.4498
+ - Rougelsum: 37.7098
+ - Gen Len: 67.5191

  ## Model description

@@ -63,7 +63,6 @@ The following hyperparameters were used during training:
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
  - lr_scheduler_type: constant
  - num_epochs: 3.0
- - mixed_precision_training: Native AMP

  ### Training results

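The updated card describes a T5ForConditionalGeneration checkpoint fine-tuned on cnn_dailymail 3.0.0 with the hyperparameters listed above (Adam, constant learning rate, 3 epochs). Below is a minimal inference sketch; it is not part of the uploaded files, and the local checkpoint path and the `summarize:` task prefix are assumptions.

```python
# Minimal sketch: load the fine-tuned checkpoint and summarize one article.
# "./base-6-2" is a hypothetical local clone of this repository.
from transformers import AutoTokenizer, T5ForConditionalGeneration

checkpoint = "./base-6-2"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = T5ForConditionalGeneration.from_pretrained(checkpoint)

article = "summarize: " + "(CNN) -- Text of the article to condense goes here ..."
inputs = tokenizer(article, return_tensors="pt", truncation=True, max_length=512)
summary_ids = model.generate(**inputs, max_length=128, num_beams=4)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```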
all_results.json CHANGED
@@ -1,18 +1,18 @@
  {
  "epoch": 3.0,
- "eval_gen_len": 113.9802513464991,
- "eval_loss": NaN,
- "eval_rouge1": 8.6978,
- "eval_rouge2": 0.5375,
- "eval_rougeL": 6.7006,
- "eval_rougeLsum": 7.8899,
- "eval_runtime": 3122.7028,
+ "eval_gen_len": 67.51907540394973,
+ "eval_loss": 1.6768145561218262,
+ "eval_rouge1": 40.3079,
+ "eval_rouge2": 18.6849,
+ "eval_rougeL": 29.4498,
+ "eval_rougeLsum": 37.7098,
+ "eval_runtime": 2561.1074,
  "eval_samples": 13368,
- "eval_samples_per_second": 4.281,
- "eval_steps_per_second": 1.07,
- "train_loss": 0.0,
- "train_runtime": 77963.665,
+ "eval_samples_per_second": 5.22,
+ "eval_steps_per_second": 1.305,
+ "train_loss": 2.0534920626541253,
+ "train_runtime": 109961.7964,
  "train_samples": 287113,
- "train_samples_per_second": 11.048,
- "train_steps_per_second": 0.173
+ "train_samples_per_second": 7.833,
+ "train_steps_per_second": 0.122
  }
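The evaluation ROUGE values above are the Trainer's aggregate scores over the 13368-example validation split. A rough sketch of how comparable scores could be recomputed with the `evaluate` library is shown below; the predictions and references are placeholders.

```python
# Sketch: recompute ROUGE for a handful of generated summaries.
# evaluate's rouge returns fractions in [0, 1]; scale by 100 to match the
# card-style numbers reported above.
import evaluate

rouge = evaluate.load("rouge")
predictions = ["the cat sat on the mat"]          # model outputs (placeholder)
references = ["the cat was sitting on the mat"]   # reference summaries (placeholder)

scores = rouge.compute(predictions=predictions, references=references)
print({k: round(v * 100, 4) for k, v in scores.items()})  # rouge1, rouge2, rougeL, rougeLsum
```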
config.json CHANGED
@@ -1,5 +1,5 @@
  {
- "_name_or_path": "cnn/base-6-2",
+ "_name_or_path": "cnn/base-6-2/",
  "architectures": [
  "T5ForConditionalGeneration"
  ],
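The only change to config.json is a trailing slash in `_name_or_path`, which records where the weights were loaded from; the saved architecture is still T5ForConditionalGeneration. A small sketch of inspecting the saved config, assuming a local clone of this repository:

```python
# Sketch: inspect the committed config from a hypothetical local clone.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("./base-6-2")
print(config.architectures)    # ['T5ForConditionalGeneration']
print(config._name_or_path)    # set to whatever path the config was loaded from
```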
eval_results.json CHANGED
@@ -1,13 +1,13 @@
  {
  "epoch": 3.0,
- "eval_gen_len": 113.9802513464991,
- "eval_loss": NaN,
- "eval_rouge1": 8.6978,
- "eval_rouge2": 0.5375,
- "eval_rougeL": 6.7006,
- "eval_rougeLsum": 7.8899,
- "eval_runtime": 3122.7028,
+ "eval_gen_len": 67.51907540394973,
+ "eval_loss": 1.6768145561218262,
+ "eval_rouge1": 40.3079,
+ "eval_rouge2": 18.6849,
+ "eval_rougeL": 29.4498,
+ "eval_rougeLsum": 37.7098,
+ "eval_runtime": 2561.1074,
  "eval_samples": 13368,
- "eval_samples_per_second": 4.281,
- "eval_steps_per_second": 1.07
+ "eval_samples_per_second": 5.22,
+ "eval_steps_per_second": 1.305
  }
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8c96aac1f1c4216f282c1a0bb7fb991800fdd2721285db4176dcb27b13cee35f
+ oid sha256:292d282710339c24bfbfadfdbbb84d4a8cd9f40638710ced7a28a7b507d4abff
  size 688306421
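pytorch_model.bin is tracked with Git LFS, so the diff only swaps the pointer's sha256 oid while the size stays at 688306421 bytes. A sketch of checking a downloaded blob against such a pointer, using hypothetical local file names:

```python
# Sketch: verify an LFS object against its pointer. File names are assumptions;
# in a clone made without LFS, pytorch_model.bin itself holds the pointer text.
import hashlib
from pathlib import Path

fields = dict(
    line.split(" ", 1)
    for line in Path("pytorch_model.bin.pointer").read_text().splitlines()
    if line
)
expected_oid = fields["oid"].removeprefix("sha256:")
expected_size = int(fields["size"])

digest, size = hashlib.sha256(), 0
with open("pytorch_model.bin", "rb") as blob:  # the real ~688 MB weights file
    for chunk in iter(lambda: blob.read(1 << 20), b""):
        digest.update(chunk)
        size += len(chunk)

assert size == expected_size and digest.hexdigest() == expected_oid, "LFS pointer mismatch"
print("oid and size match the pointer")
```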
train_results.json CHANGED
@@ -1,8 +1,8 @@
  {
  "epoch": 3.0,
- "train_loss": 0.0,
- "train_runtime": 77963.665,
+ "train_loss": 2.0534920626541253,
+ "train_runtime": 109961.7964,
  "train_samples": 287113,
- "train_samples_per_second": 11.048,
- "train_steps_per_second": 0.173
+ "train_samples_per_second": 7.833,
+ "train_steps_per_second": 0.122
  }
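As a sanity check, the updated throughput is consistent with the other fields: 287113 samples over 3 epochs in 109961.7964 seconds is roughly 7.833 samples per second, as the sketch below confirms.

```python
# Sanity check: derive samples/second from the values in train_results.json.
train_samples = 287113
num_epochs = 3.0
train_runtime = 109961.7964  # seconds

print(round(train_samples * num_epochs / train_runtime, 3))  # 7.833
```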
trainer_state.json CHANGED
@@ -10,172 +10,172 @@
  {
  "epoch": 0.11,
  "learning_rate": 0.0001,
- "loss": 0.0,
+ "loss": 2.6686,
  "step": 500
  },
  {
  "epoch": 0.22,
  "learning_rate": 0.0001,
- "loss": 0.0,
+ "loss": 2.3316,
  "step": 1000
  },
  {
  "epoch": 0.33,
  "learning_rate": 0.0001,
- "loss": 0.0,
+ "loss": 2.2489,
  "step": 1500
  },
  {
  "epoch": 0.45,
  "learning_rate": 0.0001,
- "loss": 0.0,
+ "loss": 2.2069,
  "step": 2000
  },
  {
  "epoch": 0.56,
  "learning_rate": 0.0001,
- "loss": 0.0,
+ "loss": 2.1742,
  "step": 2500
  },
  {
  "epoch": 0.67,
  "learning_rate": 0.0001,
- "loss": 0.0,
+ "loss": 2.1453,
  "step": 3000
  },
  {
  "epoch": 0.78,
  "learning_rate": 0.0001,
- "loss": 0.0,
+ "loss": 2.1187,
  "step": 3500
  },
  {
  "epoch": 0.89,
  "learning_rate": 0.0001,
- "loss": 0.0,
+ "loss": 2.103,
  "step": 4000
  },
  {
  "epoch": 1.0,
  "learning_rate": 0.0001,
- "loss": 0.0,
+ "loss": 2.0829,
  "step": 4500
  },
  {
  "epoch": 1.11,
  "learning_rate": 0.0001,
- "loss": 0.0,
+ "loss": 2.0455,
  "step": 5000
  },
  {
  "epoch": 1.23,
  "learning_rate": 0.0001,
- "loss": 0.0,
+ "loss": 2.0346,
  "step": 5500
  },
  {
  "epoch": 1.34,
  "learning_rate": 0.0001,
- "loss": 0.0,
+ "loss": 2.0186,
  "step": 6000
  },
  {
  "epoch": 1.45,
  "learning_rate": 0.0001,
- "loss": 0.0,
+ "loss": 2.0114,
  "step": 6500
  },
  {
  "epoch": 1.56,
  "learning_rate": 0.0001,
- "loss": 0.0,
+ "loss": 2.0048,
  "step": 7000
  },
  {
  "epoch": 1.67,
  "learning_rate": 0.0001,
- "loss": 0.0,
+ "loss": 1.9923,
  "step": 7500
  },
  {
  "epoch": 1.78,
  "learning_rate": 0.0001,
- "loss": 0.0,
+ "loss": 1.9883,
  "step": 8000
  },
  {
  "epoch": 1.89,
  "learning_rate": 0.0001,
- "loss": 0.0,
+ "loss": 1.9821,
  "step": 8500
  },
  {
  "epoch": 2.01,
  "learning_rate": 0.0001,
- "loss": 0.0,
+ "loss": 1.9678,
  "step": 9000
  },
  {
  "epoch": 2.12,
  "learning_rate": 0.0001,
- "loss": 0.0,
+ "loss": 1.9427,
  "step": 9500
  },
  {
  "epoch": 2.23,
  "learning_rate": 0.0001,
- "loss": 0.0,
+ "loss": 1.938,
  "step": 10000
  },
  {
  "epoch": 2.34,
  "learning_rate": 0.0001,
- "loss": 0.0,
+ "loss": 1.9309,
  "step": 10500
  },
  {
  "epoch": 2.45,
  "learning_rate": 0.0001,
- "loss": 0.0,
+ "loss": 1.9198,
  "step": 11000
  },
  {
  "epoch": 2.56,
  "learning_rate": 0.0001,
- "loss": 0.0,
+ "loss": 1.9246,
  "step": 11500
  },
  {
  "epoch": 2.67,
  "learning_rate": 0.0001,
- "loss": 0.0,
+ "loss": 1.916,
  "step": 12000
  },
  {
  "epoch": 2.79,
  "learning_rate": 0.0001,
- "loss": 0.0,
+ "loss": 1.9193,
  "step": 12500
  },
  {
  "epoch": 2.9,
  "learning_rate": 0.0001,
- "loss": 0.0,
+ "loss": 1.9114,
  "step": 13000
  },
  {
  "epoch": 3.0,
  "step": 13458,
- "total_flos": 7.169782919100826e+17,
- "train_loss": 0.0,
- "train_runtime": 77963.665,
- "train_samples_per_second": 11.048,
- "train_steps_per_second": 0.173
+ "total_flos": 7.160049636932506e+17,
+ "train_loss": 2.0534920626541253,
+ "train_runtime": 109961.7964,
+ "train_samples_per_second": 7.833,
+ "train_steps_per_second": 0.122
  }
  ],
  "max_steps": 13458,
  "num_train_epochs": 3,
- "total_flos": 7.169782919100826e+17,
+ "total_flos": 7.160049636932506e+17,
  "trial_name": null,
  "trial_params": null
  }
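The logged losses now trace a normal convergence curve, from about 2.67 at step 500 down to about 1.91 at step 13000, instead of the all-zero values recorded by the previous (NaN eval loss) run. A small sketch of pulling that curve back out of trainer_state.json, assuming these entries sit under the Trainer's standard `log_history` key:

```python
# Sketch: extract the (step, loss) curve from trainer_state.json.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for record in state["log_history"]:
    if "loss" in record:  # skip the final summary entry, which has no "loss" field
        print(f"step {record['step']:>6}: loss {record['loss']:.4f}")
```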
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3d3edd4fa56bca4efe8ba3a5da79eff073d499c527806533929aed03ad7e7b20
- size 3643
+ oid sha256:b864680fb30fe6fe03a89db45eef8a858288f845c39283e91db713f0385a471b
+ size 3707