gokulsrinivasagan committed
Commit 427fac9 · verified · 1 Parent(s): e13dd4a

End of training
README.md CHANGED
@@ -1,14 +1,32 @@
 ---
 library_name: transformers
+language:
+- en
 base_model: gokulsrinivasagan/distilbert_lda_5
 tags:
 - generated_from_trainer
+datasets:
+- glue
 metrics:
 - matthews_correlation
 - accuracy
 model-index:
 - name: distilbert_lda_5_cola
-  results: []
+  results:
+  - task:
+      name: Text Classification
+      type: text-classification
+    dataset:
+      name: GLUE COLA
+      type: glue
+      args: cola
+    metrics:
+    - name: Matthews Correlation
+      type: matthews_correlation
+      value: 0.0
+    - name: Accuracy
+      type: accuracy
+      value: 0.6912751793861389
 ---

 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -16,9 +34,9 @@ should probably proofread and complete it, then remove this comment. -->

 # distilbert_lda_5_cola

-This model is a fine-tuned version of [gokulsrinivasagan/distilbert_lda_5](https://huggingface.co/gokulsrinivasagan/distilbert_lda_5) on an unknown dataset.
+This model is a fine-tuned version of [gokulsrinivasagan/distilbert_lda_5](https://huggingface.co/gokulsrinivasagan/distilbert_lda_5) on the GLUE COLA dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.6184
+- Loss: 0.6182
 - Matthews Correlation: 0.0
 - Accuracy: 0.6913
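For quick sanity checks of the updated card, here is a minimal inference sketch. It assumes the fine-tuned checkpoint is published as `gokulsrinivasagan/distilbert_lda_5_cola` (inferred from the model-index name; this commit does not state the repo id explicitly).

```python
# Minimal sketch (assumed repo id): run the fine-tuned classifier on one sentence.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo_id = "gokulsrinivasagan/distilbert_lda_5_cola"  # assumption, adjust if needed
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSequenceClassification.from_pretrained(repo_id).eval()

inputs = tokenizer("The book was read by the whole class.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
# GLUE CoLA convention: label 0 = unacceptable, 1 = acceptable
print(logits.argmax(dim=-1).item())
```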
 
all_results.json ADDED
@@ -0,0 +1,16 @@
+{
+    "epoch": 10.0,
+    "eval_accuracy": 0.6912751793861389,
+    "eval_loss": 0.6182230710983276,
+    "eval_matthews_correlation": 0.0,
+    "eval_runtime": 0.447,
+    "eval_samples": 1043,
+    "eval_samples_per_second": 2333.174,
+    "eval_steps_per_second": 11.185,
+    "total_flos": 5663643629537280.0,
+    "train_loss": 0.6425062516156365,
+    "train_runtime": 95.8918,
+    "train_samples": 8551,
+    "train_samples_per_second": 2675.203,
+    "train_steps_per_second": 10.637
+}
eval_results.json ADDED
@@ -0,0 +1,10 @@
+{
+    "epoch": 10.0,
+    "eval_accuracy": 0.6912751793861389,
+    "eval_loss": 0.6182230710983276,
+    "eval_matthews_correlation": 0.0,
+    "eval_runtime": 0.447,
+    "eval_samples": 1043,
+    "eval_samples_per_second": 2333.174,
+    "eval_steps_per_second": 11.185
+}
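The figures in eval_results.json correspond to the GLUE CoLA validation split (1043 examples). A hedged sketch of how they could be recomputed with the `datasets` and `evaluate` libraries follows; the repo id is again an assumption, and exact numbers depend on the saved checkpoint.

```python
# Sketch: recompute Matthews correlation and accuracy on GLUE CoLA validation.
import torch
import evaluate
from datasets import load_dataset
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo_id = "gokulsrinivasagan/distilbert_lda_5_cola"  # assumption
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSequenceClassification.from_pretrained(repo_id).eval()

cola = load_dataset("glue", "cola", split="validation")
preds = []
for i in range(0, len(cola), 256):
    batch = cola[i : i + 256]
    enc = tokenizer(batch["sentence"], padding=True, truncation=True, return_tensors="pt")
    with torch.no_grad():
        preds.extend(model(**enc).logits.argmax(dim=-1).tolist())

print(evaluate.load("matthews_correlation").compute(predictions=preds, references=cola["label"]))
print(evaluate.load("accuracy").compute(predictions=preds, references=cola["label"]))
```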
logs/events.out.tfevents.1732267039.ki-g0008.1610975.19 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b7a1bd036aa9bd183192e40d00a5f46820fd3a0d9229def95ac4223afce67243
+size 475
train_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 10.0,
+    "total_flos": 5663643629537280.0,
+    "train_loss": 0.6425062516156365,
+    "train_runtime": 95.8918,
+    "train_samples": 8551,
+    "train_samples_per_second": 2675.203,
+    "train_steps_per_second": 10.637
+}
trainer_state.json ADDED
@@ -0,0 +1,221 @@
+{
+  "best_metric": 0.6182230710983276,
+  "best_model_checkpoint": "distilbert_lda_5_cola/checkpoint-170",
+  "epoch": 10.0,
+  "eval_steps": 500,
+  "global_step": 340,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 1.0,
+      "grad_norm": 0.3721345365047455,
+      "learning_rate": 0.0009666666666666667,
+      "loss": 0.9345,
+      "step": 34
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.6912751793861389,
+      "eval_loss": 0.6220000982284546,
+      "eval_matthews_correlation": 0.0,
+      "eval_runtime": 0.4404,
+      "eval_samples_per_second": 2368.448,
+      "eval_steps_per_second": 11.354,
+      "step": 34
+    },
+    {
+      "epoch": 2.0,
+      "grad_norm": 0.5597137212753296,
+      "learning_rate": 0.0009333333333333333,
+      "loss": 0.6115,
+      "step": 68
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.6912751793861389,
+      "eval_loss": 0.6193765997886658,
+      "eval_matthews_correlation": 0.0,
+      "eval_runtime": 0.4407,
+      "eval_samples_per_second": 2366.891,
+      "eval_steps_per_second": 11.347,
+      "step": 68
+    },
+    {
+      "epoch": 3.0,
+      "grad_norm": 0.7766118049621582,
+      "learning_rate": 0.0009000000000000001,
+      "loss": 0.6088,
+      "step": 102
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.6912751793861389,
+      "eval_loss": 0.6194814443588257,
+      "eval_matthews_correlation": 0.0,
+      "eval_runtime": 0.4349,
+      "eval_samples_per_second": 2398.477,
+      "eval_steps_per_second": 11.498,
+      "step": 102
+    },
+    {
+      "epoch": 4.0,
+      "grad_norm": 0.8668702244758606,
+      "learning_rate": 0.0008666666666666667,
+      "loss": 0.6101,
+      "step": 136
+    },
+    {
+      "epoch": 4.0,
+      "eval_accuracy": 0.6912751793861389,
+      "eval_loss": 0.6185395121574402,
+      "eval_matthews_correlation": 0.0,
+      "eval_runtime": 0.4327,
+      "eval_samples_per_second": 2410.294,
+      "eval_steps_per_second": 11.555,
+      "step": 136
+    },
+    {
+      "epoch": 5.0,
+      "grad_norm": 0.5907299518585205,
+      "learning_rate": 0.0008333333333333334,
+      "loss": 0.6111,
+      "step": 170
+    },
+    {
+      "epoch": 5.0,
+      "eval_accuracy": 0.6912751793861389,
+      "eval_loss": 0.6182230710983276,
+      "eval_matthews_correlation": 0.0,
+      "eval_runtime": 0.4385,
+      "eval_samples_per_second": 2378.59,
+      "eval_steps_per_second": 11.403,
+      "step": 170
+    },
+    {
+      "epoch": 6.0,
+      "grad_norm": 0.3894692361354828,
+      "learning_rate": 0.0008,
+      "loss": 0.6081,
+      "step": 204
+    },
+    {
+      "epoch": 6.0,
+      "eval_accuracy": 0.6912751793861389,
+      "eval_loss": 0.6194814443588257,
+      "eval_matthews_correlation": 0.0,
+      "eval_runtime": 0.4421,
+      "eval_samples_per_second": 2359.366,
+      "eval_steps_per_second": 11.31,
+      "step": 204
+    },
+    {
+      "epoch": 7.0,
+      "grad_norm": 1.1135684251785278,
+      "learning_rate": 0.0007666666666666667,
+      "loss": 0.6104,
+      "step": 238
+    },
+    {
+      "epoch": 7.0,
+      "eval_accuracy": 0.6912751793861389,
+      "eval_loss": 0.6196780800819397,
+      "eval_matthews_correlation": 0.0,
+      "eval_runtime": 0.4356,
+      "eval_samples_per_second": 2394.611,
+      "eval_steps_per_second": 11.479,
+      "step": 238
+    },
+    {
+      "epoch": 8.0,
+      "grad_norm": 0.4684953987598419,
+      "learning_rate": 0.0007333333333333333,
+      "loss": 0.6105,
+      "step": 272
+    },
+    {
+      "epoch": 8.0,
+      "eval_accuracy": 0.6912751793861389,
+      "eval_loss": 0.6251610517501831,
+      "eval_matthews_correlation": 0.0,
+      "eval_runtime": 0.4391,
+      "eval_samples_per_second": 2375.1,
+      "eval_steps_per_second": 11.386,
+      "step": 272
+    },
+    {
+      "epoch": 9.0,
+      "grad_norm": 0.3079698979854584,
+      "learning_rate": 0.0007,
+      "loss": 0.6093,
+      "step": 306
+    },
+    {
+      "epoch": 9.0,
+      "eval_accuracy": 0.6912751793861389,
+      "eval_loss": 0.6256685256958008,
+      "eval_matthews_correlation": 0.0,
+      "eval_runtime": 0.4337,
+      "eval_samples_per_second": 2405.045,
+      "eval_steps_per_second": 11.529,
+      "step": 306
+    },
+    {
+      "epoch": 10.0,
+      "grad_norm": 0.6555743217468262,
+      "learning_rate": 0.0006666666666666666,
+      "loss": 0.6107,
+      "step": 340
+    },
+    {
+      "epoch": 10.0,
+      "eval_accuracy": 0.6912751793861389,
+      "eval_loss": 0.6184196472167969,
+      "eval_matthews_correlation": 0.0,
+      "eval_runtime": 0.4414,
+      "eval_samples_per_second": 2362.798,
+      "eval_steps_per_second": 11.327,
+      "step": 340
+    },
+    {
+      "epoch": 10.0,
+      "step": 340,
+      "total_flos": 5663643629537280.0,
+      "train_loss": 0.6425062516156365,
+      "train_runtime": 95.8918,
+      "train_samples_per_second": 2675.203,
+      "train_steps_per_second": 10.637
+    }
+  ],
+  "logging_steps": 1,
+  "max_steps": 1020,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 30,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "EarlyStoppingCallback": {
+      "args": {
+        "early_stopping_patience": 5,
+        "early_stopping_threshold": 0.0
+      },
+      "attributes": {
+        "early_stopping_patience_counter": 5
+      }
+    },
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 5663643629537280.0,
+  "train_batch_size": 256,
+  "trial_name": null,
+  "trial_params": null
+}
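The state above implies a run capped at 30 epochs with batch size 256, per-epoch evaluation, and early stopping (patience 5, threshold 0.0): training stopped after epoch 10 because the eval loss never improved on the epoch-5 best of 0.6182. Below is a hedged reconstruction of such a setup; the learning rate and several arguments are inferred from the logged schedule rather than taken from the original training script.

```python
# Hedged reconstruction of a Trainer setup consistent with trainer_state.json.
from datasets import load_dataset
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    EarlyStoppingCallback,
    Trainer,
    TrainingArguments,
)

base = "gokulsrinivasagan/distilbert_lda_5"
tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForSequenceClassification.from_pretrained(base, num_labels=2)

cola = load_dataset("glue", "cola")
cola = cola.map(lambda batch: tokenizer(batch["sentence"], truncation=True), batched=True)

args = TrainingArguments(
    output_dir="distilbert_lda_5_cola",
    per_device_train_batch_size=256,
    per_device_eval_batch_size=256,
    num_train_epochs=30,
    learning_rate=1e-3,    # inferred from the logged linear decay (0.000966... at step 34 of 1020)
    eval_strategy="epoch",  # older transformers versions call this evaluation_strategy
    save_strategy="epoch",
    load_best_model_at_end=True,
    metric_for_best_model="eval_loss",
)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=cola["train"],
    eval_dataset=cola["validation"],
    tokenizer=tokenizer,  # enables dynamic padding via the default data collator
    callbacks=[EarlyStoppingCallback(early_stopping_patience=5, early_stopping_threshold=0.0)],
)
# trainer.train()  # would produce a run like the one logged above, modulo hardware and seed
```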