4ytk3 committed
Commit 351059e
1 Parent(s): a7e9f4b

text generate

README.md CHANGED
@@ -16,8 +16,8 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [rinna/japanese-gpt2-small](https://huggingface.co/rinna/japanese-gpt2-small) on an unknown dataset.
 It achieves the following results on the evaluation set:
- - Loss: 0.3302
- - Accuracy: 0.9362
+ - Loss: 3.4525
+ - Accuracy: 0.4155
 
 ## Model description
 
@@ -37,8 +37,8 @@ More information needed
 
 The following hyperparameters were used during training:
 - learning_rate: 5e-05
- - train_batch_size: 1
- - eval_batch_size: 1
+ - train_batch_size: 2
+ - eval_batch_size: 2
 - seed: 42
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
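
The README hunk above records the only hyperparameters that changed between runs (train and eval batch size 1 → 2); everything else is unchanged. For orientation, here is a minimal, hypothetical sketch of how a causal-LM fine-tune of rinna/japanese-gpt2-small with these settings could be launched through the Hugging Face `Trainer`; the corpus, output directory, and collator choice below are placeholder assumptions, not details taken from this commit.

```python
# Hypothetical sketch only: reproduces the hyperparameters listed in the README,
# not the actual (undocumented) dataset or training script used for this commit.
from datasets import Dataset
from transformers import (AutoModelForCausalLM, AutoTokenizer,
                          DataCollatorForLanguageModeling, Trainer,
                          TrainingArguments)

model_name = "rinna/japanese-gpt2-small"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Placeholder corpus; the model card only says "an unknown dataset".
texts = ["こんにちは、世界。", "これはテスト用の文章です。"]
dataset = Dataset.from_dict({"text": texts}).map(
    lambda ex: tokenizer(ex["text"], truncation=True, max_length=128),
    remove_columns=["text"],
)

args = TrainingArguments(
    output_dir="out",                  # placeholder
    learning_rate=5e-5,
    per_device_train_batch_size=2,
    per_device_eval_batch_size=2,
    num_train_epochs=10,
    lr_scheduler_type="linear",
    seed=42,
    # Adam betas=(0.9, 0.999) and epsilon=1e-08 are the Trainer defaults.
)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=dataset,
    eval_dataset=dataset,
    # mlm=False makes the collator build causal-LM labels from input_ids.
    data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
)
trainer.train()
```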
all_results.json CHANGED
@@ -1,15 +1,15 @@
 {
 "epoch": 10.0,
- "eval_accuracy": 0.9362485952347703,
- "eval_loss": 0.3302019238471985,
- "eval_runtime": 21.3271,
- "eval_samples": 441,
- "eval_samples_per_second": 20.678,
- "eval_steps_per_second": 20.678,
- "perplexity": 1.3912490264586577,
- "train_loss": 0.3641805998430505,
- "train_runtime": 2786.6784,
- "train_samples": 1773,
- "train_samples_per_second": 6.362,
- "train_steps_per_second": 6.362
+ "eval_accuracy": 0.4155386408089473,
+ "eval_loss": 3.452547311782837,
+ "eval_runtime": 124.7104,
+ "eval_samples": 1083,
+ "eval_samples_per_second": 8.684,
+ "eval_steps_per_second": 4.346,
+ "perplexity": 31.58073591590414,
+ "train_loss": 3.354035396443779,
+ "train_runtime": 7135.8784,
+ "train_samples": 4329,
+ "train_samples_per_second": 6.067,
+ "train_steps_per_second": 3.034
 }
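
A small consistency check on the metrics above: the reported perplexity is simply the exponential of the evaluation cross-entropy loss, for the previous run as well as for this commit.

```python
import math

# perplexity in all_results.json is exp(eval_loss)
print(math.exp(0.3302019238471985))  # ≈ 1.3912 (previous run)
print(math.exp(3.452547311782837))   # ≈ 31.5807 (this commit)
```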
eval_results.json CHANGED
@@ -1,10 +1,10 @@
 {
 "epoch": 10.0,
- "eval_accuracy": 0.9362485952347703,
- "eval_loss": 0.3302019238471985,
- "eval_runtime": 21.3271,
- "eval_samples": 441,
- "eval_samples_per_second": 20.678,
- "eval_steps_per_second": 20.678,
- "perplexity": 1.3912490264586577
+ "eval_accuracy": 0.4155386408089473,
+ "eval_loss": 3.452547311782837,
+ "eval_runtime": 124.7104,
+ "eval_samples": 1083,
+ "eval_samples_per_second": 8.684,
+ "eval_steps_per_second": 4.346,
+ "perplexity": 31.58073591590414
 }
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:6b12699d10432545207f93f7160f4fadf575c69caa806787c2714cb3443a97bd
+ oid sha256:fb43918a890497700b32e4ae8f70411be347cdbb363534172530ab5f8fbd6b2f
 size 454312509
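
The updated weights above are what the commit title "text generate" points at. Below is a minimal, hypothetical sketch of loading a checkpoint like this one and sampling Japanese text from it; the repository id, prompt, and sampling parameters are illustrative assumptions, not values taken from this commit.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "4ytk3/your-model-id"  # placeholder: this repository's id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id)
model.eval()

prompt = "昔々あるところに"
input_ids = tokenizer.encode(prompt, return_tensors="pt")

with torch.no_grad():
    output_ids = model.generate(
        input_ids,
        max_length=80,          # illustrative sampling settings
        do_sample=True,
        top_k=50,
        top_p=0.95,
        pad_token_id=tokenizer.pad_token_id,
    )
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```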
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
 "epoch": 10.0,
- "train_loss": 0.3641805998430505,
- "train_runtime": 2786.6784,
- "train_samples": 1773,
- "train_samples_per_second": 6.362,
- "train_steps_per_second": 6.362
+ "train_loss": 3.354035396443779,
+ "train_runtime": 7135.8784,
+ "train_samples": 4329,
+ "train_samples_per_second": 6.067,
+ "train_steps_per_second": 3.034
 }
trainer_state.json CHANGED
@@ -2,234 +2,282 @@
 "best_metric": null,
 "best_model_checkpoint": null,
 "epoch": 10.0,
- "global_step": 17730,
+ "global_step": 21650,
 "is_hyper_param_search": false,
 "is_local_process_zero": true,
 "is_world_process_zero": true,
 "log_history": [
 {
- "epoch": 0.28,
- "learning_rate": 4.8589960518894536e-05,
- "loss": 1.5368,
+ "epoch": 0.23,
+ "learning_rate": 4.884526558891455e-05,
+ "loss": 4.2127,
 "step": 500
 },
 {
- "epoch": 0.56,
- "learning_rate": 4.717992103778906e-05,
- "loss": 0.4168,
+ "epoch": 0.46,
+ "learning_rate": 4.7690531177829104e-05,
+ "loss": 3.9681,
 "step": 1000
 },
 {
- "epoch": 0.85,
- "learning_rate": 4.576988155668359e-05,
- "loss": 0.3662,
+ "epoch": 0.69,
+ "learning_rate": 4.653579676674365e-05,
+ "loss": 3.8763,
 "step": 1500
 },
 {
- "epoch": 1.13,
- "learning_rate": 4.435984207557812e-05,
- "loss": 0.3525,
+ "epoch": 0.92,
+ "learning_rate": 4.53810623556582e-05,
+ "loss": 3.8201,
 "step": 2000
 },
 {
- "epoch": 1.41,
- "learning_rate": 4.294980259447265e-05,
- "loss": 0.346,
+ "epoch": 1.15,
+ "learning_rate": 4.422632794457275e-05,
+ "loss": 3.7144,
 "step": 2500
 },
 {
- "epoch": 1.69,
- "learning_rate": 4.153976311336718e-05,
- "loss": 0.3425,
+ "epoch": 1.39,
+ "learning_rate": 4.30715935334873e-05,
+ "loss": 3.6592,
 "step": 3000
 },
 {
- "epoch": 1.97,
- "learning_rate": 4.0129723632261705e-05,
- "loss": 0.3409,
+ "epoch": 1.62,
+ "learning_rate": 4.1916859122401844e-05,
+ "loss": 3.6401,
 "step": 3500
 },
 {
- "epoch": 2.26,
- "learning_rate": 3.871968415115623e-05,
- "loss": 0.3376,
+ "epoch": 1.85,
+ "learning_rate": 4.07621247113164e-05,
+ "loss": 3.6187,
 "step": 4000
 },
 {
- "epoch": 2.54,
- "learning_rate": 3.7309644670050766e-05,
- "loss": 0.337,
+ "epoch": 2.08,
+ "learning_rate": 3.960739030023095e-05,
+ "loss": 3.5668,
 "step": 4500
 },
 {
- "epoch": 2.82,
- "learning_rate": 3.589960518894529e-05,
- "loss": 0.335,
+ "epoch": 2.31,
+ "learning_rate": 3.84526558891455e-05,
+ "loss": 3.5025,
 "step": 5000
 },
 {
- "epoch": 3.1,
- "learning_rate": 3.448956570783982e-05,
- "loss": 0.333,
+ "epoch": 2.54,
+ "learning_rate": 3.729792147806005e-05,
+ "loss": 3.4936,
 "step": 5500
 },
 {
- "epoch": 3.38,
- "learning_rate": 3.307952622673435e-05,
- "loss": 0.3315,
+ "epoch": 2.77,
+ "learning_rate": 3.61431870669746e-05,
+ "loss": 3.484,
 "step": 6000
 },
 {
- "epoch": 3.67,
- "learning_rate": 3.166948674562888e-05,
- "loss": 0.3303,
+ "epoch": 3.0,
+ "learning_rate": 3.498845265588915e-05,
+ "loss": 3.4812,
 "step": 6500
 },
 {
- "epoch": 3.95,
- "learning_rate": 3.025944726452341e-05,
- "loss": 0.3319,
+ "epoch": 3.23,
+ "learning_rate": 3.38337182448037e-05,
+ "loss": 3.3856,
 "step": 7000
 },
 {
- "epoch": 4.23,
- "learning_rate": 2.8849407783417938e-05,
- "loss": 0.3283,
+ "epoch": 3.46,
+ "learning_rate": 3.2678983833718243e-05,
+ "loss": 3.3908,
 "step": 7500
 },
 {
- "epoch": 4.51,
- "learning_rate": 2.743936830231247e-05,
- "loss": 0.3291,
+ "epoch": 3.7,
+ "learning_rate": 3.1524249422632794e-05,
+ "loss": 3.387,
 "step": 8000
 },
 {
- "epoch": 4.79,
- "learning_rate": 2.6029328821206996e-05,
- "loss": 0.3271,
+ "epoch": 3.93,
+ "learning_rate": 3.0369515011547345e-05,
+ "loss": 3.3845,
 "step": 8500
 },
 {
- "epoch": 5.08,
- "learning_rate": 2.4619289340101523e-05,
- "loss": 0.3276,
+ "epoch": 4.16,
+ "learning_rate": 2.9214780600461896e-05,
+ "loss": 3.3198,
 "step": 9000
 },
 {
- "epoch": 5.36,
- "learning_rate": 2.3209249858996053e-05,
- "loss": 0.3251,
+ "epoch": 4.39,
+ "learning_rate": 2.8060046189376443e-05,
+ "loss": 3.3053,
 "step": 9500
 },
 {
- "epoch": 5.64,
- "learning_rate": 2.1799210377890583e-05,
- "loss": 0.325,
+ "epoch": 4.62,
+ "learning_rate": 2.6905311778290994e-05,
+ "loss": 3.3024,
 "step": 10000
 },
 {
- "epoch": 5.92,
- "learning_rate": 2.038917089678511e-05,
- "loss": 0.3249,
+ "epoch": 4.85,
+ "learning_rate": 2.575057736720554e-05,
+ "loss": 3.3116,
 "step": 10500
 },
 {
- "epoch": 6.2,
- "learning_rate": 1.897913141567964e-05,
- "loss": 0.3217,
+ "epoch": 5.08,
+ "learning_rate": 2.4595842956120095e-05,
+ "loss": 3.2744,
 "step": 11000
 },
 {
- "epoch": 6.49,
- "learning_rate": 1.7569091934574168e-05,
- "loss": 0.3221,
+ "epoch": 5.31,
+ "learning_rate": 2.3441108545034643e-05,
+ "loss": 3.2322,
 "step": 11500
 },
 {
- "epoch": 6.77,
- "learning_rate": 1.6159052453468698e-05,
- "loss": 0.3223,
+ "epoch": 5.54,
+ "learning_rate": 2.2286374133949193e-05,
+ "loss": 3.2407,
 "step": 12000
 },
 {
- "epoch": 7.05,
- "learning_rate": 1.4749012972363227e-05,
- "loss": 0.3199,
+ "epoch": 5.77,
+ "learning_rate": 2.113163972286374e-05,
+ "loss": 3.242,
 "step": 12500
 },
 {
- "epoch": 7.33,
- "learning_rate": 1.3338973491257756e-05,
- "loss": 0.3197,
+ "epoch": 6.0,
+ "learning_rate": 1.997690531177829e-05,
+ "loss": 3.2379,
 "step": 13000
 },
 {
- "epoch": 7.61,
- "learning_rate": 1.1928934010152284e-05,
- "loss": 0.319,
+ "epoch": 6.24,
+ "learning_rate": 1.8822170900692842e-05,
+ "loss": 3.1766,
 "step": 13500
 },
 {
- "epoch": 7.9,
- "learning_rate": 1.0518894529046813e-05,
- "loss": 0.3172,
+ "epoch": 6.47,
+ "learning_rate": 1.7667436489607393e-05,
+ "loss": 3.1793,
 "step": 14000
 },
 {
- "epoch": 8.18,
- "learning_rate": 9.108855047941344e-06,
- "loss": 0.3173,
+ "epoch": 6.7,
+ "learning_rate": 1.651270207852194e-05,
+ "loss": 3.1928,
 "step": 14500
 },
 {
- "epoch": 8.46,
- "learning_rate": 7.698815566835872e-06,
- "loss": 0.315,
+ "epoch": 6.93,
+ "learning_rate": 1.535796766743649e-05,
+ "loss": 3.1859,
 "step": 15000
 },
 {
- "epoch": 8.74,
- "learning_rate": 6.288776085730401e-06,
- "loss": 0.3169,
+ "epoch": 7.16,
+ "learning_rate": 1.420323325635104e-05,
+ "loss": 3.156,
 "step": 15500
 },
 {
- "epoch": 9.02,
- "learning_rate": 4.87873660462493e-06,
- "loss": 0.315,
+ "epoch": 7.39,
+ "learning_rate": 1.304849884526559e-05,
+ "loss": 3.1379,
 "step": 16000
 },
 {
- "epoch": 9.31,
- "learning_rate": 3.4686971235194584e-06,
- "loss": 0.3123,
+ "epoch": 7.62,
+ "learning_rate": 1.189376443418014e-05,
+ "loss": 3.1447,
 "step": 16500
 },
 {
- "epoch": 9.59,
- "learning_rate": 2.0586576424139875e-06,
- "loss": 0.3138,
+ "epoch": 7.85,
+ "learning_rate": 1.0739030023094689e-05,
+ "loss": 3.1469,
 "step": 17000
 },
 {
- "epoch": 9.87,
- "learning_rate": 6.486181613085167e-07,
- "loss": 0.3122,
+ "epoch": 8.08,
+ "learning_rate": 9.584295612009238e-06,
+ "loss": 3.1291,
 "step": 17500
 },
+ {
+ "epoch": 8.31,
+ "learning_rate": 8.429561200923789e-06,
+ "loss": 3.1071,
+ "step": 18000
+ },
+ {
+ "epoch": 8.55,
+ "learning_rate": 7.274826789838338e-06,
+ "loss": 3.1128,
+ "step": 18500
+ },
+ {
+ "epoch": 8.78,
+ "learning_rate": 6.120092378752887e-06,
+ "loss": 3.1132,
+ "step": 19000
+ },
+ {
+ "epoch": 9.01,
+ "learning_rate": 4.965357967667437e-06,
+ "loss": 3.1141,
+ "step": 19500
+ },
+ {
+ "epoch": 9.24,
+ "learning_rate": 3.810623556581986e-06,
+ "loss": 3.0843,
+ "step": 20000
+ },
+ {
+ "epoch": 9.47,
+ "learning_rate": 2.655889145496536e-06,
+ "loss": 3.0897,
+ "step": 20500
+ },
+ {
+ "epoch": 9.7,
+ "learning_rate": 1.5011547344110855e-06,
+ "loss": 3.0902,
+ "step": 21000
+ },
+ {
+ "epoch": 9.93,
+ "learning_rate": 3.4642032332563515e-07,
+ "loss": 3.0911,
+ "step": 21500
+ },
 {
 "epoch": 10.0,
- "step": 17730,
- "total_flos": 9265415454720000.0,
- "train_loss": 0.3641805998430505,
- "train_runtime": 2786.6784,
- "train_samples_per_second": 6.362,
- "train_steps_per_second": 6.362
+ "step": 21650,
+ "total_flos": 2.262266413056e+16,
+ "train_loss": 3.354035396443779,
+ "train_runtime": 7135.8784,
+ "train_samples_per_second": 6.067,
+ "train_steps_per_second": 3.034
 }
 ],
- "max_steps": 17730,
+ "max_steps": 21650,
 "num_train_epochs": 10,
- "total_flos": 9265415454720000.0,
+ "total_flos": 2.262266413056e+16,
 "trial_name": null,
 "trial_params": null
 }
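
The jump from 17730 to 21650 steps is consistent with the other changed files: train_results.json now reports 4329 training samples and the README a batch size of 2, so each epoch is ceil(4329 / 2) = 2165 optimizer steps and 10 epochs give 21650; the previous run's 1773 samples at batch size 1 give 17730 the same way. A quick check, assuming single-device training with no gradient accumulation:

```python
import math

def total_steps(train_samples: int, batch_size: int, epochs: int) -> int:
    # steps per epoch = number of batches, rounded up (last partial batch kept)
    return math.ceil(train_samples / batch_size) * epochs

print(total_steps(1773, 1, 10))  # 17730 -> old global_step / max_steps
print(total_steps(4329, 2, 10))  # 21650 -> new global_step / max_steps
```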
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:a01935ab011ea12717770c4c9b1fe2935341b4f35f98a60d4a3c0919f0307750
+ oid sha256:cd56fb655a723b6fa828c8b9b0007079e678b3737e7b5bb85814f54fd349df06
 size 3387