gokuls committed
Commit 27bf9a7
1 Parent(s): 9d3168e

End of training

README.md CHANGED
@@ -1,4 +1,6 @@
 ---
+language:
+- en
 license: apache-2.0
 tags:
 - generated_from_trainer
@@ -13,7 +15,7 @@ model-index:
       name: Text Classification
       type: text-classification
     dataset:
-      name: glue
+      name: GLUE COLA
       type: glue
       config: cola
       split: validation
@@ -21,7 +23,7 @@ model-index:
     metrics:
     - name: Matthews Correlation
       type: matthews_correlation
-      value: 0.0463559874942472
+      value: 0.0
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -29,10 +31,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # mobilebert_add_GLUE_Experiment_cola_128
 
-This model is a fine-tuned version of [google/mobilebert-uncased](https://huggingface.co/google/mobilebert-uncased) on the glue dataset.
+This model is a fine-tuned version of [google/mobilebert-uncased](https://huggingface.co/google/mobilebert-uncased) on the GLUE COLA dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.6185
-- Matthews Correlation: 0.0464
+- Loss: 0.6168
+- Matthews Correlation: 0.0
 
 ## Model description
 
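For reference, the evaluation figure reported in the card above can be recomputed from the saved checkpoint. The following is a minimal sketch, not the author's training or evaluation script; the local checkpoint path and the batch size of 128 are assumptions (the card only fixes the dataset, split, and metric).

```python
# Minimal sketch (not the script from this commit) for recomputing the
# CoLA validation Matthews correlation of the fine-tuned checkpoint.
import torch
from datasets import load_dataset
from sklearn.metrics import matthews_corrcoef
from transformers import AutoModelForSequenceClassification, AutoTokenizer

ckpt = "mobilebert_add_GLUE_Experiment_cola_128"  # assumed local output directory
tokenizer = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForSequenceClassification.from_pretrained(ckpt).eval()

val = load_dataset("glue", "cola", split="validation")  # 1043 sentences

preds = []
with torch.no_grad():
    for i in range(0, len(val), 128):
        batch = val[i : i + 128]
        enc = tokenizer(batch["sentence"], padding=True, truncation=True,
                        max_length=128, return_tensors="pt")
        preds.extend(model(**enc).logits.argmax(dim=-1).tolist())

print("matthews_correlation:", matthews_corrcoef(val["label"], preds))
```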
all_results.json ADDED
@@ -0,0 +1,14 @@
+{
+    "epoch": 11.0,
+    "eval_loss": 0.6167517900466919,
+    "eval_matthews_correlation": 0.0,
+    "eval_runtime": 1.8768,
+    "eval_samples": 1043,
+    "eval_samples_per_second": 555.726,
+    "eval_steps_per_second": 4.795,
+    "train_loss": 0.5990640233944359,
+    "train_runtime": 596.1692,
+    "train_samples": 8551,
+    "train_samples_per_second": 717.162,
+    "train_steps_per_second": 5.619
+}
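As a sanity check, the evaluation throughput above follows directly from the sample count and runtime (up to rounding of the stored runtime value):

```python
# Quick arithmetic check of the reported evaluation throughput,
# using the values copied from all_results.json above.
eval_samples, eval_runtime = 1043, 1.8768
print(eval_samples / eval_runtime)  # ~555.7, matching eval_samples_per_second up to rounding
```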
eval_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 11.0,
+    "eval_loss": 0.6167517900466919,
+    "eval_matthews_correlation": 0.0,
+    "eval_runtime": 1.8768,
+    "eval_samples": 1043,
+    "eval_samples_per_second": 555.726,
+    "eval_steps_per_second": 4.795
+}
logs/events.out.tfevents.1674736557.gera.2822633.2 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8ee05008e533c90a3b2232716d5ebc3348500c3de596482bdb1556f5dc0cec3f
+size 375
train_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 11.0,
+    "train_loss": 0.5990640233944359,
+    "train_runtime": 596.1692,
+    "train_samples": 8551,
+    "train_samples_per_second": 717.162,
+    "train_steps_per_second": 5.619
+}
trainer_state.json ADDED
@@ -0,0 +1,190 @@
+{
+  "best_metric": 0.6167517900466919,
+  "best_model_checkpoint": "mobilebert_add_GLUE_Experiment_cola_128/checkpoint-402",
+  "epoch": 11.0,
+  "global_step": 737,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 1.0,
+      "learning_rate": 4.9e-05,
+      "loss": 0.617,
+      "step": 67
+    },
+    {
+      "epoch": 1.0,
+      "eval_loss": 0.6180861592292786,
+      "eval_matthews_correlation": 0.0,
+      "eval_runtime": 1.8368,
+      "eval_samples_per_second": 567.845,
+      "eval_steps_per_second": 4.9,
+      "step": 67
+    },
+    {
+      "epoch": 2.0,
+      "learning_rate": 4.8e-05,
+      "loss": 0.608,
+      "step": 134
+    },
+    {
+      "epoch": 2.0,
+      "eval_loss": 0.6181017756462097,
+      "eval_matthews_correlation": 0.0,
+      "eval_runtime": 1.9583,
+      "eval_samples_per_second": 532.607,
+      "eval_steps_per_second": 4.596,
+      "step": 134
+    },
+    {
+      "epoch": 3.0,
+      "learning_rate": 4.7e-05,
+      "loss": 0.6075,
+      "step": 201
+    },
+    {
+      "epoch": 3.0,
+      "eval_loss": 0.6182523965835571,
+      "eval_matthews_correlation": 0.0,
+      "eval_runtime": 1.8497,
+      "eval_samples_per_second": 563.867,
+      "eval_steps_per_second": 4.866,
+      "step": 201
+    },
+    {
+      "epoch": 4.0,
+      "learning_rate": 4.600000000000001e-05,
+      "loss": 0.6072,
+      "step": 268
+    },
+    {
+      "epoch": 4.0,
+      "eval_loss": 0.6176720261573792,
+      "eval_matthews_correlation": 0.0,
+      "eval_runtime": 1.8571,
+      "eval_samples_per_second": 561.629,
+      "eval_steps_per_second": 4.846,
+      "step": 268
+    },
+    {
+      "epoch": 5.0,
+      "learning_rate": 4.5e-05,
+      "loss": 0.6069,
+      "step": 335
+    },
+    {
+      "epoch": 5.0,
+      "eval_loss": 0.6185395121574402,
+      "eval_matthews_correlation": 0.0,
+      "eval_runtime": 1.8719,
+      "eval_samples_per_second": 557.191,
+      "eval_steps_per_second": 4.808,
+      "step": 335
+    },
+    {
+      "epoch": 6.0,
+      "learning_rate": 4.4000000000000006e-05,
+      "loss": 0.606,
+      "step": 402
+    },
+    {
+      "epoch": 6.0,
+      "eval_loss": 0.6167517900466919,
+      "eval_matthews_correlation": 0.0,
+      "eval_runtime": 1.8173,
+      "eval_samples_per_second": 573.928,
+      "eval_steps_per_second": 4.952,
+      "step": 402
+    },
+    {
+      "epoch": 7.0,
+      "learning_rate": 4.3e-05,
+      "loss": 0.6014,
+      "step": 469
+    },
+    {
+      "epoch": 7.0,
+      "eval_loss": 0.6234214901924133,
+      "eval_matthews_correlation": 0.0,
+      "eval_runtime": 1.8314,
+      "eval_samples_per_second": 569.522,
+      "eval_steps_per_second": 4.914,
+      "step": 469
+    },
+    {
+      "epoch": 8.0,
+      "learning_rate": 4.2e-05,
+      "loss": 0.5947,
+      "step": 536
+    },
+    {
+      "epoch": 8.0,
+      "eval_loss": 0.6217920184135437,
+      "eval_matthews_correlation": 0.0,
+      "eval_runtime": 1.8379,
+      "eval_samples_per_second": 567.481,
+      "eval_steps_per_second": 4.897,
+      "step": 536
+    },
+    {
+      "epoch": 9.0,
+      "learning_rate": 4.1e-05,
+      "loss": 0.5858,
+      "step": 603
+    },
+    {
+      "epoch": 9.0,
+      "eval_loss": 0.6321389675140381,
+      "eval_matthews_correlation": 0.0,
+      "eval_runtime": 1.8577,
+      "eval_samples_per_second": 561.444,
+      "eval_steps_per_second": 4.845,
+      "step": 603
+    },
+    {
+      "epoch": 10.0,
+      "learning_rate": 4e-05,
+      "loss": 0.579,
+      "step": 670
+    },
+    {
+      "epoch": 10.0,
+      "eval_loss": 0.6177334785461426,
+      "eval_matthews_correlation": 0.0463559874942472,
+      "eval_runtime": 1.8263,
+      "eval_samples_per_second": 571.085,
+      "eval_steps_per_second": 4.928,
+      "step": 670
+    },
+    {
+      "epoch": 11.0,
+      "learning_rate": 3.9000000000000006e-05,
+      "loss": 0.5762,
+      "step": 737
+    },
+    {
+      "epoch": 11.0,
+      "eval_loss": 0.6184676289558411,
+      "eval_matthews_correlation": 0.0463559874942472,
+      "eval_runtime": 1.8441,
+      "eval_samples_per_second": 565.593,
+      "eval_steps_per_second": 4.88,
+      "step": 737
+    },
+    {
+      "epoch": 11.0,
+      "step": 737,
+      "total_flos": 2245484232048640.0,
+      "train_loss": 0.5990640233944359,
+      "train_runtime": 596.1692,
+      "train_samples_per_second": 717.162,
+      "train_steps_per_second": 5.619
+    }
+  ],
+  "max_steps": 3350,
+  "num_train_epochs": 50,
+  "total_flos": 2245484232048640.0,
+  "trial_name": null,
+  "trial_params": null
+}
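Note that eval_matthews_correlation sits at exactly 0.0 for most of the epochs logged above. Matthews correlation returns 0 whenever the classifier predicts a single class for every example, since the numerator TP*TN - FP*FN vanishes. A small illustration, using sklearn as a stand-in for the GLUE metric and made-up toy labels:

```python
# Sketch: MCC is 0.0 for a degenerate classifier that always predicts one class,
# which is consistent with the flat 0.0 entries in trainer_state.json above.
from sklearn.metrics import matthews_corrcoef

labels = [1, 1, 1, 0, 0]            # mixed ground-truth labels (toy example)
constant_preds = [1] * len(labels)  # classifier always outputs the same class

print(matthews_corrcoef(labels, constant_preds))  # 0.0
```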