gokulsrinivasagan committed
Commit 4622d59
1 Parent(s): b302f26

End of training
README.md CHANGED
@@ -1,13 +1,28 @@
 ---
 library_name: transformers
+language:
+- en
 base_model: gokulsrinivasagan/bert_tiny_lda_100_v1
 tags:
 - generated_from_trainer
+datasets:
+- glue
 metrics:
 - accuracy
 model-index:
 - name: bert_tiny_lda_100_v1_rte
-  results: []
+  results:
+  - task:
+      name: Text Classification
+      type: text-classification
+    dataset:
+      name: GLUE RTE
+      type: glue
+      args: rte
+    metrics:
+    - name: Accuracy
+      type: accuracy
+      value: 0.48736462093862815
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -15,10 +30,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # bert_tiny_lda_100_v1_rte
 
-This model is a fine-tuned version of [gokulsrinivasagan/bert_tiny_lda_100_v1](https://huggingface.co/gokulsrinivasagan/bert_tiny_lda_100_v1) on an unknown dataset.
+This model is a fine-tuned version of [gokulsrinivasagan/bert_tiny_lda_100_v1](https://huggingface.co/gokulsrinivasagan/bert_tiny_lda_100_v1) on the GLUE RTE dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.8165
-- Accuracy: 0.5018
+- Loss: 0.6925
+- Accuracy: 0.4874
 
 ## Model description
 
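For anyone who wants to try the checkpoint this card now describes, here is a minimal inference sketch. It assumes the fine-tuned weights are published under the repository id `gokulsrinivasagan/bert_tiny_lda_100_v1_rte` (the model name in the card) with the tokenizer and config uploaded alongside them; label names come from whatever `id2label` mapping the config carries.

```python
# Minimal sketch: load the fine-tuned RTE classifier from the Hub and score one
# premise/hypothesis pair. The repo id below is assumed from the model-index name.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_id = "gokulsrinivasagan/bert_tiny_lda_100_v1_rte"  # assumed repository id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

# RTE is a sentence-pair task, so premise and hypothesis are encoded together.
inputs = tokenizer(
    "A man is playing a guitar on stage.",        # premise (illustrative only)
    "Someone is playing a musical instrument.",   # hypothesis (illustrative only)
    return_tensors="pt",
)
with torch.no_grad():
    logits = model(**inputs).logits

predicted = logits.argmax(dim=-1).item()
print(model.config.id2label[predicted])  # e.g. LABEL_0 / LABEL_1 unless labels were renamed
```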
all_results.json CHANGED
@@ -1,15 +1,15 @@
 {
-    "epoch": 10.0,
-    "eval_accuracy": 0.5270758122743683,
-    "eval_loss": 0.6910818815231323,
-    "eval_runtime": 0.0902,
+    "epoch": 7.0,
+    "eval_accuracy": 0.48736462093862815,
+    "eval_loss": 0.6925343871116638,
+    "eval_runtime": 0.096,
     "eval_samples": 277,
-    "eval_samples_per_second": 3069.608,
-    "eval_steps_per_second": 22.163,
-    "total_flos": 652967094988800.0,
-    "train_loss": 0.7079746341705322,
-    "train_runtime": 20.9087,
+    "eval_samples_per_second": 2885.475,
+    "eval_steps_per_second": 20.834,
+    "total_flos": 457076966492160.0,
+    "train_loss": 0.6549058369227818,
+    "train_runtime": 15.9605,
     "train_samples": 2490,
-    "train_samples_per_second": 5954.454,
-    "train_steps_per_second": 23.913
+    "train_samples_per_second": 7800.521,
+    "train_steps_per_second": 31.327
 }
eval_results.json CHANGED
@@ -1,9 +1,9 @@
 {
-    "epoch": 10.0,
-    "eval_accuracy": 0.5270758122743683,
-    "eval_loss": 0.6910818815231323,
-    "eval_runtime": 0.0902,
+    "epoch": 7.0,
+    "eval_accuracy": 0.48736462093862815,
+    "eval_loss": 0.6925343871116638,
+    "eval_runtime": 0.096,
     "eval_samples": 277,
-    "eval_samples_per_second": 3069.608,
-    "eval_steps_per_second": 22.163
+    "eval_samples_per_second": 2885.475,
+    "eval_steps_per_second": 20.834
 }
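As a quick, hedged sanity check on the updated numbers (the arithmetic below is not part of the committed files): with `eval_samples` fixed at 277, the new `eval_accuracy` corresponds to 135 correctly classified validation examples, and `eval_samples_per_second` is approximately `eval_samples / eval_runtime` (the logged value differs slightly because the runtime is rounded).

```python
# Cross-check the relationships between the logged evaluation fields.
eval_samples = 277
eval_accuracy = 0.48736462093862815
eval_runtime = 0.096

print(round(eval_accuracy * eval_samples))  # 135 correct predictions out of 277
print(eval_samples / eval_runtime)          # ~2885.4, close to the logged 2885.475
```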
logs/events.out.tfevents.1733327036.ki-g0008.1208741.27 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:19429d3bb3b57ae6296ad20463fa71af263aa76d695c47569f5eed365b2a0434
+size 405
train_results.json CHANGED
@@ -1,9 +1,9 @@
 {
-    "epoch": 10.0,
-    "total_flos": 652967094988800.0,
-    "train_loss": 0.7079746341705322,
-    "train_runtime": 20.9087,
+    "epoch": 7.0,
+    "total_flos": 457076966492160.0,
+    "train_loss": 0.6549058369227818,
+    "train_runtime": 15.9605,
     "train_samples": 2490,
-    "train_samples_per_second": 5954.454,
-    "train_steps_per_second": 23.913
+    "train_samples_per_second": 7800.521,
+    "train_steps_per_second": 31.327
 }
trainer_state.json CHANGED
@@ -1,181 +1,133 @@
 {
-  "best_metric": 0.6910818815231323,
-  "best_model_checkpoint": "bert_tiny_lda_100_v1_rte/checkpoint-50",
-  "epoch": 10.0,
+  "best_metric": 0.6925343871116638,
+  "best_model_checkpoint": "bert_tiny_lda_100_v1_rte/checkpoint-20",
+  "epoch": 7.0,
   "eval_steps": 500,
-  "global_step": 100,
+  "global_step": 70,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
       "epoch": 1.0,
-      "grad_norm": 0.8250333070755005,
-      "learning_rate": 0.00098,
-      "loss": 0.8226,
+      "grad_norm": 1.2701410055160522,
+      "learning_rate": 4.9e-05,
+      "loss": 0.7159,
       "step": 10
     },
     {
       "epoch": 1.0,
-      "eval_accuracy": 0.5270758122743683,
-      "eval_loss": 0.7099644541740417,
-      "eval_runtime": 0.0907,
-      "eval_samples_per_second": 3054.549,
-      "eval_steps_per_second": 22.055,
+      "eval_accuracy": 0.4729241877256318,
+      "eval_loss": 0.7091183662414551,
+      "eval_runtime": 0.0972,
+      "eval_samples_per_second": 2849.909,
+      "eval_steps_per_second": 20.577,
       "step": 10
     },
     {
       "epoch": 2.0,
-      "grad_norm": 0.27419352531433105,
-      "learning_rate": 0.00096,
-      "loss": 0.7009,
+      "grad_norm": 0.6685553789138794,
+      "learning_rate": 4.8e-05,
+      "loss": 0.6926,
       "step": 20
     },
     {
       "epoch": 2.0,
-      "eval_accuracy": 0.4729241877256318,
-      "eval_loss": 0.7083004713058472,
-      "eval_runtime": 0.0923,
-      "eval_samples_per_second": 3001.194,
-      "eval_steps_per_second": 21.669,
+      "eval_accuracy": 0.48736462093862815,
+      "eval_loss": 0.6925343871116638,
+      "eval_runtime": 0.1007,
+      "eval_samples_per_second": 2750.35,
+      "eval_steps_per_second": 19.858,
       "step": 20
     },
     {
       "epoch": 3.0,
-      "grad_norm": 0.2973408102989197,
-      "learning_rate": 0.00094,
-      "loss": 0.696,
+      "grad_norm": 0.5383151769638062,
+      "learning_rate": 4.7e-05,
+      "loss": 0.6814,
       "step": 30
     },
     {
       "epoch": 3.0,
-      "eval_accuracy": 0.4729241877256318,
-      "eval_loss": 0.6955804228782654,
-      "eval_runtime": 0.0912,
-      "eval_samples_per_second": 3038.309,
-      "eval_steps_per_second": 21.937,
+      "eval_accuracy": 0.51985559566787,
+      "eval_loss": 0.6944310665130615,
+      "eval_runtime": 0.0969,
+      "eval_samples_per_second": 2858.407,
+      "eval_steps_per_second": 20.638,
       "step": 30
     },
     {
       "epoch": 4.0,
-      "grad_norm": 0.25628429651260376,
-      "learning_rate": 0.00092,
-      "loss": 0.695,
+      "grad_norm": 0.6841632723808289,
+      "learning_rate": 4.600000000000001e-05,
+      "loss": 0.6663,
       "step": 40
     },
     {
       "epoch": 4.0,
       "eval_accuracy": 0.5270758122743683,
-      "eval_loss": 0.6930421590805054,
-      "eval_runtime": 0.0958,
-      "eval_samples_per_second": 2892.623,
-      "eval_steps_per_second": 20.885,
+      "eval_loss": 0.697773277759552,
+      "eval_runtime": 0.0972,
+      "eval_samples_per_second": 2850.811,
+      "eval_steps_per_second": 20.583,
       "step": 40
     },
     {
       "epoch": 5.0,
-      "grad_norm": 0.6449291706085205,
-      "learning_rate": 0.0009000000000000001,
-      "loss": 0.6933,
+      "grad_norm": 1.6859617233276367,
+      "learning_rate": 4.5e-05,
+      "loss": 0.6472,
       "step": 50
     },
     {
       "epoch": 5.0,
-      "eval_accuracy": 0.5270758122743683,
-      "eval_loss": 0.6910818815231323,
-      "eval_runtime": 0.0889,
-      "eval_samples_per_second": 3114.696,
-      "eval_steps_per_second": 22.489,
+      "eval_accuracy": 0.5415162454873647,
+      "eval_loss": 0.7425259351730347,
+      "eval_runtime": 0.0971,
+      "eval_samples_per_second": 2851.944,
+      "eval_steps_per_second": 20.592,
       "step": 50
     },
     {
       "epoch": 6.0,
-      "grad_norm": 0.27499687671661377,
-      "learning_rate": 0.00088,
-      "loss": 0.6964,
+      "grad_norm": 1.9587653875350952,
+      "learning_rate": 4.4000000000000006e-05,
+      "loss": 0.6276,
       "step": 60
     },
     {
       "epoch": 6.0,
-      "eval_accuracy": 0.4729241877256318,
-      "eval_loss": 0.698697030544281,
-      "eval_runtime": 0.0888,
-      "eval_samples_per_second": 3117.998,
-      "eval_steps_per_second": 22.513,
+      "eval_accuracy": 0.5451263537906137,
+      "eval_loss": 0.7315176129341125,
+      "eval_runtime": 0.098,
+      "eval_samples_per_second": 2825.759,
+      "eval_steps_per_second": 20.403,
       "step": 60
     },
     {
       "epoch": 7.0,
-      "grad_norm": 0.1044619083404541,
-      "learning_rate": 0.00086,
-      "loss": 0.694,
+      "grad_norm": 1.4208569526672363,
+      "learning_rate": 4.3e-05,
+      "loss": 0.5534,
       "step": 70
     },
     {
       "epoch": 7.0,
-      "eval_accuracy": 0.5270758122743683,
-      "eval_loss": 0.6921958923339844,
-      "eval_runtime": 0.0893,
-      "eval_samples_per_second": 3100.888,
-      "eval_steps_per_second": 22.389,
+      "eval_accuracy": 0.5018050541516246,
+      "eval_loss": 0.816455602645874,
+      "eval_runtime": 0.1092,
+      "eval_samples_per_second": 2537.616,
+      "eval_steps_per_second": 18.322,
       "step": 70
     },
     {
-      "epoch": 8.0,
-      "grad_norm": 0.16408830881118774,
-      "learning_rate": 0.00084,
-      "loss": 0.6941,
-      "step": 80
-    },
-    {
-      "epoch": 8.0,
-      "eval_accuracy": 0.4729241877256318,
-      "eval_loss": 0.6951573491096497,
-      "eval_runtime": 0.0897,
-      "eval_samples_per_second": 3088.335,
-      "eval_steps_per_second": 22.298,
-      "step": 80
-    },
-    {
-      "epoch": 9.0,
-      "grad_norm": 0.14835208654403687,
-      "learning_rate": 0.00082,
-      "loss": 0.6942,
-      "step": 90
-    },
-    {
-      "epoch": 9.0,
-      "eval_accuracy": 0.5270758122743683,
-      "eval_loss": 0.6926190257072449,
-      "eval_runtime": 0.092,
-      "eval_samples_per_second": 3011.868,
-      "eval_steps_per_second": 21.746,
-      "step": 90
-    },
-    {
-      "epoch": 10.0,
-      "grad_norm": 0.11492882668972015,
-      "learning_rate": 0.0008,
-      "loss": 0.6932,
-      "step": 100
-    },
-    {
-      "epoch": 10.0,
-      "eval_accuracy": 0.4729241877256318,
-      "eval_loss": 0.6936765909194946,
-      "eval_runtime": 0.0892,
-      "eval_samples_per_second": 3106.792,
-      "eval_steps_per_second": 22.432,
-      "step": 100
-    },
-    {
-      "epoch": 10.0,
-      "step": 100,
-      "total_flos": 652967094988800.0,
-      "train_loss": 0.7079746341705322,
-      "train_runtime": 20.9087,
-      "train_samples_per_second": 5954.454,
-      "train_steps_per_second": 23.913
+      "epoch": 7.0,
+      "step": 70,
+      "total_flos": 457076966492160.0,
+      "train_loss": 0.6549058369227818,
+      "train_runtime": 15.9605,
+      "train_samples_per_second": 7800.521,
+      "train_steps_per_second": 31.327
     }
   ],
   "logging_steps": 1,
@@ -204,7 +156,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 652967094988800.0,
+  "total_flos": 457076966492160.0,
   "train_batch_size": 256,
   "trial_name": null,
   "trial_params": null