gokulsrinivasagan committed
Commit 34a8dbf
1 Parent(s): 28fa0c7

End of training

README.md CHANGED
@@ -1,14 +1,32 @@
 ---
 library_name: transformers
+language:
+- en
 base_model: gokulsrinivasagan/bert_base_lda_20_v1_book
 tags:
 - generated_from_trainer
+datasets:
+- glue
 metrics:
 - matthews_correlation
 - accuracy
 model-index:
 - name: bert_base_lda_20_v1_book_cola
-  results: []
+  results:
+  - task:
+      name: Text Classification
+      type: text-classification
+    dataset:
+      name: GLUE COLA
+      type: glue
+      args: cola
+    metrics:
+    - name: Matthews Correlation
+      type: matthews_correlation
+      value: 0.47428224772244454
+    - name: Accuracy
+      type: accuracy
+      value: 0.7890700101852417
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -16,11 +34,11 @@ should probably proofread and complete it, then remove this comment. -->
 
 # bert_base_lda_20_v1_book_cola
 
-This model is a fine-tuned version of [gokulsrinivasagan/bert_base_lda_20_v1_book](https://huggingface.co/gokulsrinivasagan/bert_base_lda_20_v1_book) on an unknown dataset.
+This model is a fine-tuned version of [gokulsrinivasagan/bert_base_lda_20_v1_book](https://huggingface.co/gokulsrinivasagan/bert_base_lda_20_v1_book) on the GLUE COLA dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.6481
-- Matthews Correlation: 0.5021
-- Accuracy: 0.7987
+- Loss: 0.4867
+- Matthews Correlation: 0.4743
+- Accuracy: 0.7891
 
 ## Model description
 
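The updated card reports evaluation loss, Matthews correlation, and accuracy on GLUE CoLA. As a minimal usage sketch (not part of the card itself), the fine-tuned checkpoint could be loaded for acceptability classification roughly as below; the repo id `gokulsrinivasagan/bert_base_lda_20_v1_book_cola` and the default `LABEL_0`/`LABEL_1` mapping are assumptions rather than facts stated in this commit.

```python
# Minimal inference sketch (assumed repo id and label convention;
# CoLA uses 0 = unacceptable, 1 = acceptable).
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo_id = "gokulsrinivasagan/bert_base_lda_20_v1_book_cola"  # assumed Hub id for this fine-tune

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSequenceClassification.from_pretrained(repo_id)

inputs = tokenizer("The book was read by the whole class.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

pred = logits.argmax(dim=-1).item()
print(pred, model.config.id2label[pred])
```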
all_results.json ADDED
@@ -0,0 +1,16 @@
+{
+    "epoch": 8.0,
+    "eval_accuracy": 0.7890700101852417,
+    "eval_loss": 0.48669904470443726,
+    "eval_matthews_correlation": 0.47428224772244454,
+    "eval_runtime": 0.6848,
+    "eval_samples": 1043,
+    "eval_samples_per_second": 1522.966,
+    "eval_steps_per_second": 7.301,
+    "total_flos": 8999450537533440.0,
+    "train_loss": 0.31154586725375233,
+    "train_runtime": 134.705,
+    "train_samples": 8551,
+    "train_samples_per_second": 3173.972,
+    "train_steps_per_second": 12.62
+}
eval_results.json ADDED
@@ -0,0 +1,10 @@
+{
+    "epoch": 8.0,
+    "eval_accuracy": 0.7890700101852417,
+    "eval_loss": 0.48669904470443726,
+    "eval_matthews_correlation": 0.47428224772244454,
+    "eval_runtime": 0.6848,
+    "eval_samples": 1043,
+    "eval_samples_per_second": 1522.966,
+    "eval_steps_per_second": 7.301
+}
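The `eval_matthews_correlation` and `eval_accuracy` values above correspond to the GLUE CoLA metric (Matthews correlation) plus plain accuracy. A short sketch of how such numbers can be recomputed from predicted and gold labels with the `evaluate` library; the arrays below are placeholders, not the actual model outputs:

```python
# Recompute CoLA-style metrics from label arrays (placeholder data).
import numpy as np
import evaluate

predictions = np.array([1, 0, 1, 1])  # argmax of model logits (placeholder)
references = np.array([1, 0, 0, 1])   # gold labels (placeholder)

cola_metric = evaluate.load("glue", "cola")   # reports matthews_correlation
accuracy_metric = evaluate.load("accuracy")

print(cola_metric.compute(predictions=predictions, references=references))
print(accuracy_metric.compute(predictions=predictions, references=references))
```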
logs/events.out.tfevents.1733838679.ki-g0008.520107.19 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ed5267336212a38de61c696f6ba9044cd7b56655ca9597e73cb91df07081172
+size 475
train_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 8.0,
+    "total_flos": 8999450537533440.0,
+    "train_loss": 0.31154586725375233,
+    "train_runtime": 134.705,
+    "train_samples": 8551,
+    "train_samples_per_second": 3173.972,
+    "train_steps_per_second": 12.62
+}
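The `train_results.json`, `eval_results.json`, `all_results.json`, and `trainer_state.json` files added here follow the layout written by the standard `Trainer` metric-saving helpers at the end of a run (as in the example `run_glue.py` script). A sketch of that pattern, assuming `trainer` is an already configured `transformers.Trainer` such as the one outlined after `trainer_state.json` below:

```python
# End-of-training bookkeeping with transformers.Trainer (sketch; `trainer` is assumed
# to be configured elsewhere, e.g. as in the training sketch further below).
train_result = trainer.train()
trainer.save_model()                           # export the final/best model

metrics = train_result.metrics
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)         # writes train_results.json (and all_results.json)
trainer.save_state()                           # writes trainer_state.json

eval_metrics = trainer.evaluate()
trainer.log_metrics("eval", eval_metrics)
trainer.save_metrics("eval", eval_metrics)     # writes eval_results.json and updates all_results.json
```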
trainer_state.json ADDED
@@ -0,0 +1,187 @@
+{
+  "best_metric": 0.48669904470443726,
+  "best_model_checkpoint": "bert_base_lda_20_v1_book_cola/checkpoint-102",
+  "epoch": 8.0,
+  "eval_steps": 500,
+  "global_step": 272,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 1.0,
+      "grad_norm": 3.9988954067230225,
+      "learning_rate": 4.9e-05,
+      "loss": 0.6052,
+      "step": 34
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.6912751793861389,
+      "eval_loss": 0.5938838720321655,
+      "eval_matthews_correlation": 0.0,
+      "eval_runtime": 0.6852,
+      "eval_samples_per_second": 1522.289,
+      "eval_steps_per_second": 7.298,
+      "step": 34
+    },
+    {
+      "epoch": 2.0,
+      "grad_norm": 4.848330497741699,
+      "learning_rate": 4.8e-05,
+      "loss": 0.5337,
+      "step": 68
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.7545541524887085,
+      "eval_loss": 0.5131515264511108,
+      "eval_matthews_correlation": 0.3639324656122657,
+      "eval_runtime": 0.6836,
+      "eval_samples_per_second": 1525.781,
+      "eval_steps_per_second": 7.314,
+      "step": 68
+    },
+    {
+      "epoch": 3.0,
+      "grad_norm": 5.25543737411499,
+      "learning_rate": 4.7e-05,
+      "loss": 0.4113,
+      "step": 102
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.7890700101852417,
+      "eval_loss": 0.48669904470443726,
+      "eval_matthews_correlation": 0.47428224772244454,
+      "eval_runtime": 0.7023,
+      "eval_samples_per_second": 1485.127,
+      "eval_steps_per_second": 7.119,
+      "step": 102
+    },
+    {
+      "epoch": 4.0,
+      "grad_norm": 5.055551528930664,
+      "learning_rate": 4.600000000000001e-05,
+      "loss": 0.2988,
+      "step": 136
+    },
+    {
+      "epoch": 4.0,
+      "eval_accuracy": 0.7804410457611084,
+      "eval_loss": 0.5833057761192322,
+      "eval_matthews_correlation": 0.4456190824541546,
+      "eval_runtime": 0.6822,
+      "eval_samples_per_second": 1528.819,
+      "eval_steps_per_second": 7.329,
+      "step": 136
+    },
+    {
+      "epoch": 5.0,
+      "grad_norm": 5.268617630004883,
+      "learning_rate": 4.5e-05,
+      "loss": 0.232,
+      "step": 170
+    },
+    {
+      "epoch": 5.0,
+      "eval_accuracy": 0.7957813739776611,
+      "eval_loss": 0.5777825713157654,
+      "eval_matthews_correlation": 0.4905937147239641,
+      "eval_runtime": 0.6775,
+      "eval_samples_per_second": 1539.424,
+      "eval_steps_per_second": 7.38,
+      "step": 170
+    },
+    {
+      "epoch": 6.0,
+      "grad_norm": 5.387474060058594,
+      "learning_rate": 4.4000000000000006e-05,
+      "loss": 0.1741,
+      "step": 204
+    },
+    {
+      "epoch": 6.0,
+      "eval_accuracy": 0.8063278794288635,
+      "eval_loss": 0.592420756816864,
+      "eval_matthews_correlation": 0.5184234806320535,
+      "eval_runtime": 0.68,
+      "eval_samples_per_second": 1533.85,
+      "eval_steps_per_second": 7.353,
+      "step": 204
+    },
+    {
+      "epoch": 7.0,
+      "grad_norm": 5.771633148193359,
+      "learning_rate": 4.3e-05,
+      "loss": 0.1299,
+      "step": 238
+    },
+    {
+      "epoch": 7.0,
+      "eval_accuracy": 0.8015340566635132,
+      "eval_loss": 0.7205991744995117,
+      "eval_matthews_correlation": 0.5048506698662225,
+      "eval_runtime": 0.6748,
+      "eval_samples_per_second": 1545.6,
+      "eval_steps_per_second": 7.409,
+      "step": 238
+    },
+    {
+      "epoch": 8.0,
+      "grad_norm": 5.118968486785889,
+      "learning_rate": 4.2e-05,
+      "loss": 0.1075,
+      "step": 272
+    },
+    {
+      "epoch": 8.0,
+      "eval_accuracy": 0.7986577153205872,
+      "eval_loss": 0.6481350660324097,
+      "eval_matthews_correlation": 0.5020753041155603,
+      "eval_runtime": 0.6785,
+      "eval_samples_per_second": 1537.3,
+      "eval_steps_per_second": 7.37,
+      "step": 272
+    },
+    {
+      "epoch": 8.0,
+      "step": 272,
+      "total_flos": 8999450537533440.0,
+      "train_loss": 0.31154586725375233,
+      "train_runtime": 134.705,
+      "train_samples_per_second": 3173.972,
+      "train_steps_per_second": 12.62
+    }
+  ],
+  "logging_steps": 1,
+  "max_steps": 1700,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 50,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "EarlyStoppingCallback": {
+      "args": {
+        "early_stopping_patience": 5,
+        "early_stopping_threshold": 0.0
+      },
+      "attributes": {
+        "early_stopping_patience_counter": 5
+      }
+    },
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 8999450537533440.0,
+  "train_batch_size": 256,
+  "trial_name": null,
+  "trial_params": null
+}
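`trainer_state.json` records a run capped at 50 epochs (1700 max steps) with a train batch size of 256, a linear learning-rate decay from roughly 5e-5, and an `EarlyStoppingCallback` (patience 5) that stopped training after epoch 8, with the best checkpoint at step 102 (epoch 3) selected on evaluation loss. The following is a training-setup sketch consistent with that state, not the author's actual script; the tokenization, metric function, and exact argument values are assumptions:

```python
# Fine-tuning sketch consistent with trainer_state.json (assumed, not the original script).
import numpy as np
import evaluate
from datasets import load_dataset
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    EarlyStoppingCallback,
    Trainer,
    TrainingArguments,
)

base = "gokulsrinivasagan/bert_base_lda_20_v1_book"
tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForSequenceClassification.from_pretrained(base, num_labels=2)

raw = load_dataset("glue", "cola")
tokenized = raw.map(lambda batch: tokenizer(batch["sentence"], truncation=True), batched=True)

matthews = evaluate.load("glue", "cola")
accuracy = evaluate.load("accuracy")

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    return {
        **matthews.compute(predictions=preds, references=labels),
        **accuracy.compute(predictions=preds, references=labels),
    }

args = TrainingArguments(
    output_dir="bert_base_lda_20_v1_book_cola",
    learning_rate=5e-5,                 # decays linearly to 0 over the 1700 max steps
    per_device_train_batch_size=256,
    per_device_eval_batch_size=256,
    num_train_epochs=50,
    eval_strategy="epoch",              # named `evaluation_strategy` in older transformers releases
    save_strategy="epoch",
    logging_strategy="epoch",
    load_best_model_at_end=True,        # best checkpoint chosen on eval loss, matching best_metric above
    metric_for_best_model="loss",
)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=tokenized["train"],
    eval_dataset=tokenized["validation"],
    tokenizer=tokenizer,                # enables dynamic padding via the default data collator
    compute_metrics=compute_metrics,
    callbacks=[EarlyStoppingCallback(early_stopping_patience=5)],
)

trainer.train()
```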