spacemanidol committed
Commit bb41da3
1 parent: 61e7c3b

Upload 13 files

README.md CHANGED
@@ -6,7 +6,7 @@ datasets:
  metrics:
  - rouge
  model-index:
- - name: small-5-6-t
+ - name: small-5-6
  results:
  - task:
  name: Summarization
@@ -20,22 +20,22 @@ model-index:
  metrics:
  - name: Rouge1
  type: rouge
- value: 33.1943
+ value: 33.3096
  ---

  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
  should probably proofread and complete it, then remove this comment. -->

- # small-5-6-t
+ # small-5-6

- This model is a fine-tuned version of [models/small-5-6](https://huggingface.co/models/small-5-6) on the xsum dataset.
+ This model is a fine-tuned version of [x/small-5-6/](https://huggingface.co/x/small-5-6/) on the xsum dataset.
  It achieves the following results on the evaluation set:
- - Loss: 2.0753
- - Rouge1: 33.1943
- - Rouge2: 11.0514
- - Rougel: 26.0966
- - Rougelsum: 26.0978
- - Gen Len: 28.4358
+ - Loss: 2.0765
+ - Rouge1: 33.3096
+ - Rouge2: 11.177
+ - Rougel: 26.1559
+ - Rougelsum: 26.1559
+ - Gen Len: 28.3146

  ## Model description

@@ -55,10 +55,10 @@ More information needed

  The following hyperparameters were used during training:
  - learning_rate: 0.0001
- - train_batch_size: 32
- - eval_batch_size: 16
+ - train_batch_size: 4
+ - eval_batch_size: 4
  - seed: 42
- - gradient_accumulation_steps: 2
+ - gradient_accumulation_steps: 16
  - total_train_batch_size: 64
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
  - lr_scheduler_type: constant
@@ -71,6 +71,6 @@ The following hyperparameters were used during training:
  ### Framework versions

  - Transformers 4.27.0.dev0
- - Pytorch 1.12.1+cu113
- - Datasets 2.10.0
- - Tokenizers 0.13.2
+ - Pytorch 1.13.0+cu117
+ - Datasets 2.7.1
+ - Tokenizers 0.12.1
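The effective batch size is unchanged at 64 (now 4 per-device samples × 16 gradient-accumulation steps, previously 32 × 2). Below is a minimal usage sketch for the fine-tuned checkpoint, assuming it has been downloaded to a local `small-5-6` directory; the `x/small-5-6/` path in the card is a placeholder, not a published repo id.

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Hypothetical local path; the card's "x/small-5-6/" is a placeholder, not a public model id.
checkpoint = "./small-5-6"

tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)  # T5ForConditionalGeneration per config.json

article = "The full text of an XSum-style news article goes here."
# T5 summarization checkpoints conventionally use a "summarize: " prefix; drop it if this one was trained without it.
inputs = tokenizer("summarize: " + article, return_tensors="pt", truncation=True, max_length=512)

# Gen Len is ~28 tokens on the eval set, so max_length=64 leaves comfortable headroom.
summary_ids = model.generate(**inputs, num_beams=4, max_length=64)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```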
all_results.json CHANGED
@@ -1,18 +1,18 @@
  {
  "epoch": 3.0,
- "eval_gen_len": 28.43577293193255,
- "eval_loss": 2.0753448009490967,
- "eval_rouge1": 33.1943,
- "eval_rouge2": 11.0514,
- "eval_rougeL": 26.0966,
- "eval_rougeLsum": 26.0978,
- "eval_runtime": 584.408,
+ "eval_gen_len": 28.314646420058267,
+ "eval_loss": 2.0764589309692383,
+ "eval_rouge1": 33.3096,
+ "eval_rouge2": 11.177,
+ "eval_rougeL": 26.1559,
+ "eval_rougeLsum": 26.1559,
+ "eval_runtime": 1049.9027,
  "eval_samples": 11327,
- "eval_samples_per_second": 19.382,
- "eval_steps_per_second": 1.211,
- "train_loss": 2.2860778256571477,
- "train_runtime": 9841.2733,
+ "eval_samples_per_second": 10.789,
+ "eval_steps_per_second": 2.697,
+ "train_loss": 2.2826747393560716,
+ "train_runtime": 17657.3625,
  "train_samples": 204017,
- "train_samples_per_second": 62.192,
- "train_steps_per_second": 0.972
+ "train_samples_per_second": 34.663,
+ "train_steps_per_second": 0.541
  }
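The throughput fields follow directly from the sample counts and runtimes recorded here; a quick sanity check, assuming the updated all_results.json is in the working directory:

```python
import json

# Assumes the all_results.json from this commit has been saved locally.
with open("all_results.json") as f:
    results = json.load(f)

# eval throughput: 11327 samples / 1049.9027 s ≈ 10.789 samples/s
print(results["eval_samples"] / results["eval_runtime"])

# train throughput: 204017 samples * 3.0 epochs / 17657.3625 s ≈ 34.663 samples/s
print(results["train_samples"] * results["epoch"] / results["train_runtime"])
```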
config.json CHANGED
@@ -1,5 +1,5 @@
  {
- "_name_or_path": "models/small-5-6",
+ "_name_or_path": "x/small-5-6/",
  "architectures": [
  "T5ForConditionalGeneration"
  ],
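A small sketch of reading the updated config, assuming it sits in a local checkpoint directory; `_name_or_path` simply records the path the weights were fine-tuned from:

```python
import json
from transformers import AutoConfig

# The raw file keeps the fine-tuning source path recorded by the Trainer.
with open("config.json") as f:
    print(json.load(f)["_name_or_path"])  # "x/small-5-6/" after this commit

# AutoConfig resolves the same file into a T5Config.
config = AutoConfig.from_pretrained(".")
print(config.architectures)  # ["T5ForConditionalGeneration"]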
eval_results.json CHANGED
@@ -1,13 +1,13 @@
  {
  "epoch": 3.0,
- "eval_gen_len": 28.43577293193255,
- "eval_loss": 2.0753448009490967,
- "eval_rouge1": 33.1943,
- "eval_rouge2": 11.0514,
- "eval_rougeL": 26.0966,
- "eval_rougeLsum": 26.0978,
- "eval_runtime": 584.408,
+ "eval_gen_len": 28.314646420058267,
+ "eval_loss": 2.0764589309692383,
+ "eval_rouge1": 33.3096,
+ "eval_rouge2": 11.177,
+ "eval_rougeL": 26.1559,
+ "eval_rougeLsum": 26.1559,
+ "eval_runtime": 1049.9027,
  "eval_samples": 11327,
- "eval_samples_per_second": 19.382,
- "eval_steps_per_second": 1.211
+ "eval_samples_per_second": 10.789,
+ "eval_steps_per_second": 2.697
  }
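The eval_rouge* fields are ROUGE F-measures scaled to 0-100. A minimal sketch of producing the same keys with the `evaluate` library (the aggregation used by the actual training script may differ slightly):

```python
import evaluate

rouge = evaluate.load("rouge")

# Toy predictions/references; the numbers above come from the 11327 XSum validation examples.
predictions = ["the cat sat on the mat"]
references = ["a cat was sitting on the mat"]

scores = rouge.compute(predictions=predictions, references=references, use_stemmer=True)
# Scale to 0-100 to match the eval_rouge1 / eval_rouge2 / eval_rougeL / eval_rougeLsum fields.
print({k: round(v * 100, 4) for k, v in scores.items()})
```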
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9a51b3b8c387ff58591a70cb844cd74fedd258ff3177d1794a09feac58db8a08
- size 289019689
+ oid sha256:f49373d77be8a203799387d632b09e48910471354ea668f1d5787ad6f62ddec2
+ size 289021373
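The diff above only swaps the Git LFS pointer; a short sketch for checking a pulled pytorch_model.bin against the oid and size recorded in this commit:

```python
import hashlib
import os

# After `git lfs pull`, the real weights replace the pointer; compare them to the
# oid/size values copied from the new pointer above.
expected_oid = "f49373d77be8a203799387d632b09e48910471354ea668f1d5787ad6f62ddec2"
expected_size = 289021373

path = "pytorch_model.bin"
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

print(h.hexdigest() == expected_oid, os.path.getsize(path) == expected_size)
```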
spiece.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d60acb128cf7b7f2536e8f38a5b18a05535c9e14c7a355904270e15b0945ea86
+ size 791656
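spiece.model is the SentencePiece vocabulary read by the T5 tokenizer. A small sketch for inspecting it directly, assuming the real file (not the LFS pointer) is present and the `sentencepiece` package is installed:

```python
import sentencepiece as spm

# Load the SentencePiece model added in this commit.
sp = spm.SentencePieceProcessor(model_file="spiece.model")
print(sp.get_piece_size())                                  # vocabulary size
print(sp.encode("The cat sat on the mat.", out_type=str))   # subword pieces
```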
train_results.json CHANGED
@@ -1,8 +1,8 @@
  {
  "epoch": 3.0,
- "train_loss": 2.2860778256571477,
- "train_runtime": 9841.2733,
+ "train_loss": 2.2826747393560716,
+ "train_runtime": 17657.3625,
  "train_samples": 204017,
- "train_samples_per_second": 62.192,
- "train_steps_per_second": 0.972
+ "train_samples_per_second": 34.663,
+ "train_steps_per_second": 0.541
  }
trainer_state.json CHANGED
@@ -1,8 +1,8 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 3.0,
- "global_step": 9564,
+ "epoch": 2.999745123027154,
+ "global_step": 9561,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -10,73 +10,73 @@
  {
  "epoch": 0.16,
  "learning_rate": 0.0001,
- "loss": 2.3736,
+ "loss": 2.3693,
  "step": 500
  },
  {
  "epoch": 0.31,
  "learning_rate": 0.0001,
- "loss": 2.3527,
+ "loss": 2.3495,
  "step": 1000
  },
  {
  "epoch": 0.47,
  "learning_rate": 0.0001,
- "loss": 2.3453,
+ "loss": 2.342,
  "step": 1500
  },
  {
  "epoch": 0.63,
  "learning_rate": 0.0001,
- "loss": 2.3264,
+ "loss": 2.3221,
  "step": 2000
  },
  {
  "epoch": 0.78,
  "learning_rate": 0.0001,
- "loss": 2.3284,
+ "loss": 2.3242,
  "step": 2500
  },
  {
  "epoch": 0.94,
  "learning_rate": 0.0001,
- "loss": 2.3186,
+ "loss": 2.3137,
  "step": 3000
  },
  {
  "epoch": 1.1,
  "learning_rate": 0.0001,
- "loss": 2.3036,
+ "loss": 2.3027,
  "step": 3500
  },
  {
- "epoch": 1.25,
+ "epoch": 1.26,
  "learning_rate": 0.0001,
- "loss": 2.2881,
+ "loss": 2.2815,
  "step": 4000
  },
  {
  "epoch": 1.41,
  "learning_rate": 0.0001,
- "loss": 2.2831,
+ "loss": 2.2797,
  "step": 4500
  },
  {
  "epoch": 1.57,
  "learning_rate": 0.0001,
- "loss": 2.2809,
+ "loss": 2.2757,
  "step": 5000
  },
  {
  "epoch": 1.73,
  "learning_rate": 0.0001,
- "loss": 2.2783,
+ "loss": 2.2765,
  "step": 5500
  },
  {
  "epoch": 1.88,
  "learning_rate": 0.0001,
- "loss": 2.272,
+ "loss": 2.2684,
  "step": 6000
  },
  {
@@ -88,52 +88,52 @@
  {
  "epoch": 2.2,
  "learning_rate": 0.0001,
- "loss": 2.2416,
+ "loss": 2.2373,
  "step": 7000
  },
  {
  "epoch": 2.35,
  "learning_rate": 0.0001,
- "loss": 2.2469,
+ "loss": 2.2413,
  "step": 7500
  },
  {
  "epoch": 2.51,
  "learning_rate": 0.0001,
- "loss": 2.2307,
+ "loss": 2.2277,
  "step": 8000
  },
  {
  "epoch": 2.67,
  "learning_rate": 0.0001,
- "loss": 2.2355,
+ "loss": 2.2338,
  "step": 8500
  },
  {
  "epoch": 2.82,
  "learning_rate": 0.0001,
- "loss": 2.2402,
+ "loss": 2.2395,
  "step": 9000
  },
  {
  "epoch": 2.98,
  "learning_rate": 0.0001,
- "loss": 2.2336,
+ "loss": 2.2297,
  "step": 9500
  },
  {
  "epoch": 3.0,
- "step": 9564,
- "total_flos": 2.094098781687644e+17,
- "train_loss": 2.2860778256571477,
- "train_runtime": 9841.2733,
- "train_samples_per_second": 62.192,
- "train_steps_per_second": 0.972
+ "step": 9561,
+ "total_flos": 1.655576989296722e+17,
+ "train_loss": 2.2826747393560716,
+ "train_runtime": 17657.3625,
+ "train_samples_per_second": 34.663,
+ "train_steps_per_second": 0.541
  }
  ],
- "max_steps": 9564,
+ "max_steps": 9561,
  "num_train_epochs": 3,
- "total_flos": 2.094098781687644e+17,
+ "total_flos": 1.655576989296722e+17,
  "trial_name": null,
  "trial_params": null
  }
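trainer_state.json holds the log_history behind the loss values above; a short sketch for pulling the loss curve back out, assuming the updated file is available locally:

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Each logging event (every 500 optimizer steps here) records step, epoch, learning_rate and loss.
for entry in state["log_history"]:
    if "loss" in entry:
        print(f'step {entry["step"]:>5}  epoch {entry["epoch"]:.2f}  loss {entry["loss"]:.4f}')

print("final:", state["global_step"], "of", state["max_steps"], "steps")
```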
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9bf989626f6cb835ee9b482ae26f7da5f473f16575b59720728ca8236f2e3d28
- size 3631
+ oid sha256:5d9f6a17c6b0412af92872bec0b9155a155266f88282ba9c3aa306a6d7564de0
+ size 3643
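training_args.bin is the pickled TrainingArguments object saved by the Trainer; a hedged sketch for inspecting it after pulling the real file from LFS (attribute names are standard TrainingArguments fields):

```python
import torch

# training_args.bin is a pickled TrainingArguments object, not a tensor checkpoint;
# newer PyTorch needs weights_only=False to unpickle arbitrary objects.
args = torch.load("training_args.bin", weights_only=False)
print(args.per_device_train_batch_size, args.gradient_accumulation_steps, args.learning_rate)
```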