gokulsrinivasagan committed on
Commit 8a82561
1 Parent(s): f6c530b

End of training

README.md CHANGED
@@ -1,13 +1,28 @@
 ---
 library_name: transformers
+language:
+- en
 base_model: gokulsrinivasagan/bert_tiny_lda_20_v1
 tags:
 - generated_from_trainer
+datasets:
+- glue
 metrics:
 - accuracy
 model-index:
 - name: bert_tiny_lda_20_v1_sst2
-  results: []
+  results:
+  - task:
+      name: Text Classification
+      type: text-classification
+    dataset:
+      name: GLUE SST2
+      type: glue
+      args: sst2
+    metrics:
+    - name: Accuracy
+      type: accuracy
+      value: 0.8165137614678899
 ---

 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -15,10 +30,10 @@ should probably proofread and complete it, then remove this comment. -->

 # bert_tiny_lda_20_v1_sst2

-This model is a fine-tuned version of [gokulsrinivasagan/bert_tiny_lda_20_v1](https://huggingface.co/gokulsrinivasagan/bert_tiny_lda_20_v1) on an unknown dataset.
+This model is a fine-tuned version of [gokulsrinivasagan/bert_tiny_lda_20_v1](https://huggingface.co/gokulsrinivasagan/bert_tiny_lda_20_v1) on the GLUE SST2 dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.7103
-- Accuracy: 0.7970
+- Loss: 0.4656
+- Accuracy: 0.8165

 ## Model description
 
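The updated card above describes a text-classification model fine-tuned from `bert_tiny_lda_20_v1` on GLUE SST2. For orientation only (not part of the commit), a minimal inference sketch with the `transformers` pipeline follows; the repository id `gokulsrinivasagan/bert_tiny_lda_20_v1_sst2` is assumed from the model-index name, and the label mapping depends on what is exported in the config.

```python
# Minimal inference sketch. Assumption: the fine-tuned checkpoint is published as
# "gokulsrinivasagan/bert_tiny_lda_20_v1_sst2" (inferred from the model-index name above).
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="gokulsrinivasagan/bert_tiny_lda_20_v1_sst2",
)

print(classifier("a charming and often affecting journey"))
# e.g. [{'label': ..., 'score': ...}]; label indices follow the GLUE SST2 convention
# (0 = negative, 1 = positive) unless id2label is set in the exported config.
```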
all_results.json CHANGED
@@ -1,15 +1,15 @@
 {
     "epoch": 7.0,
-    "eval_accuracy": 0.5091743119266054,
-    "eval_loss": 0.6938790082931519,
-    "eval_runtime": 0.2767,
+    "eval_accuracy": 0.8165137614678899,
+    "eval_loss": 0.46561405062675476,
+    "eval_runtime": 0.2713,
     "eval_samples": 872,
-    "eval_samples_per_second": 3151.194,
-    "eval_steps_per_second": 14.455,
+    "eval_samples_per_second": 3214.537,
+    "eval_steps_per_second": 14.746,
     "total_flos": 1.2362922335855616e+16,
-    "train_loss": 0.6874211381524156,
-    "train_runtime": 259.3366,
+    "train_loss": 0.19844082836464885,
+    "train_runtime": 261.6164,
     "train_samples": 67349,
-    "train_samples_per_second": 12984.864,
-    "train_steps_per_second": 50.899
+    "train_samples_per_second": 12871.711,
+    "train_steps_per_second": 50.456
 }
eval_results.json CHANGED
@@ -1,9 +1,9 @@
 {
     "epoch": 7.0,
-    "eval_accuracy": 0.5091743119266054,
-    "eval_loss": 0.6938790082931519,
-    "eval_runtime": 0.2767,
+    "eval_accuracy": 0.8165137614678899,
+    "eval_loss": 0.46561405062675476,
+    "eval_runtime": 0.2713,
     "eval_samples": 872,
-    "eval_samples_per_second": 3151.194,
-    "eval_steps_per_second": 14.455
+    "eval_samples_per_second": 3214.537,
+    "eval_steps_per_second": 14.746
  }
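The eval_results.json values above correspond to the 872-example GLUE SST2 validation split. For context, a hedged sketch of how such an accuracy figure can be recomputed with `datasets` and `evaluate` is shown below; the checkpoint id is again an assumption, and the recomputed number need not match the logged 0.8165 exactly.

```python
# Evaluation sketch. Assumption: the checkpoint id below; results may differ slightly
# from the committed eval_results.json.
import torch
import evaluate
from datasets import load_dataset
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_id = "gokulsrinivasagan/bert_tiny_lda_20_v1_sst2"  # assumed repository id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id).eval()

validation = load_dataset("glue", "sst2", split="validation")  # 872 sentences
metric = evaluate.load("accuracy")

batch_size = 32
for start in range(0, len(validation), batch_size):
    batch = validation[start : start + batch_size]  # dict of lists
    inputs = tokenizer(batch["sentence"], padding=True, truncation=True, return_tensors="pt")
    with torch.no_grad():
        predictions = model(**inputs).logits.argmax(dim=-1)
    metric.add_batch(predictions=predictions, references=batch["label"])

print(metric.compute())  # the logged run reported eval_accuracy = 0.8165...
```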
logs/events.out.tfevents.1733326024.ki-g0008.1207389.29 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:58c0f421c14dcc7a311266aa38754fb6dc18312885a57b899bdd70f1ad4c57cb
+size 411
train_results.json CHANGED
@@ -1,9 +1,9 @@
 {
     "epoch": 7.0,
     "total_flos": 1.2362922335855616e+16,
-    "train_loss": 0.6874211381524156,
-    "train_runtime": 259.3366,
+    "train_loss": 0.19844082836464885,
+    "train_runtime": 261.6164,
     "train_samples": 67349,
-    "train_samples_per_second": 12984.864,
-    "train_steps_per_second": 50.899
+    "train_samples_per_second": 12871.711,
+    "train_steps_per_second": 50.456
 }
trainer_state.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "best_metric": 0.6938790082931519,
+  "best_metric": 0.46561405062675476,
   "best_model_checkpoint": "bert_tiny_lda_20_v1_sst2/checkpoint-528",
   "epoch": 7.0,
   "eval_steps": 500,
@@ -10,124 +10,124 @@
   "log_history": [
     {
       "epoch": 1.0,
-      "grad_norm": 0.7342337965965271,
-      "learning_rate": 0.00098,
-      "loss": 0.6918,
+      "grad_norm": 12.078267097473145,
+      "learning_rate": 4.9e-05,
+      "loss": 0.4233,
       "step": 264
     },
     {
       "epoch": 1.0,
-      "eval_accuracy": 0.5091743119266054,
-      "eval_loss": 0.695652961730957,
-      "eval_runtime": 0.2707,
-      "eval_samples_per_second": 3221.409,
-      "eval_steps_per_second": 14.777,
+      "eval_accuracy": 0.7844036697247706,
+      "eval_loss": 0.4931957721710205,
+      "eval_runtime": 0.2795,
+      "eval_samples_per_second": 3119.529,
+      "eval_steps_per_second": 14.31,
       "step": 264
     },
     {
       "epoch": 2.0,
-      "grad_norm": 0.1505114883184433,
-      "learning_rate": 0.00096,
-      "loss": 0.6872,
+      "grad_norm": 5.435814380645752,
+      "learning_rate": 4.8e-05,
+      "loss": 0.2592,
       "step": 528
     },
     {
       "epoch": 2.0,
-      "eval_accuracy": 0.5091743119266054,
-      "eval_loss": 0.6938790082931519,
-      "eval_runtime": 0.2836,
-      "eval_samples_per_second": 3075.093,
-      "eval_steps_per_second": 14.106,
+      "eval_accuracy": 0.8165137614678899,
+      "eval_loss": 0.46561405062675476,
+      "eval_runtime": 0.2723,
+      "eval_samples_per_second": 3202.14,
+      "eval_steps_per_second": 14.689,
       "step": 528
     },
     {
       "epoch": 3.0,
-      "grad_norm": 0.24582037329673767,
-      "learning_rate": 0.00094,
-      "loss": 0.6867,
+      "grad_norm": 3.6048450469970703,
+      "learning_rate": 4.7e-05,
+      "loss": 0.1987,
       "step": 792
     },
     {
       "epoch": 3.0,
-      "eval_accuracy": 0.5091743119266054,
-      "eval_loss": 0.6983273029327393,
-      "eval_runtime": 0.2681,
-      "eval_samples_per_second": 3252.295,
-      "eval_steps_per_second": 14.919,
+      "eval_accuracy": 0.8130733944954128,
+      "eval_loss": 0.47246110439300537,
+      "eval_runtime": 0.2782,
+      "eval_samples_per_second": 3134.998,
+      "eval_steps_per_second": 14.381,
       "step": 792
     },
     {
       "epoch": 4.0,
-      "grad_norm": 0.3541450500488281,
-      "learning_rate": 0.00092,
-      "loss": 0.6867,
+      "grad_norm": 6.613548755645752,
+      "learning_rate": 4.600000000000001e-05,
+      "loss": 0.161,
       "step": 1056
     },
     {
       "epoch": 4.0,
-      "eval_accuracy": 0.5091743119266054,
-      "eval_loss": 0.695652961730957,
-      "eval_runtime": 0.268,
-      "eval_samples_per_second": 3253.504,
-      "eval_steps_per_second": 14.924,
+      "eval_accuracy": 0.8107798165137615,
+      "eval_loss": 0.531734824180603,
+      "eval_runtime": 0.278,
+      "eval_samples_per_second": 3136.958,
+      "eval_steps_per_second": 14.39,
       "step": 1056
     },
     {
       "epoch": 5.0,
-      "grad_norm": 0.18224285542964935,
-      "learning_rate": 0.0009000000000000001,
-      "loss": 0.6867,
+      "grad_norm": 9.82017993927002,
+      "learning_rate": 4.5e-05,
+      "loss": 0.1363,
       "step": 1320
     },
     {
       "epoch": 5.0,
-      "eval_accuracy": 0.5091743119266054,
-      "eval_loss": 0.6973552107810974,
-      "eval_runtime": 0.272,
-      "eval_samples_per_second": 3206.126,
-      "eval_steps_per_second": 14.707,
+      "eval_accuracy": 0.8073394495412844,
+      "eval_loss": 0.5661426782608032,
+      "eval_runtime": 0.2752,
+      "eval_samples_per_second": 3168.806,
+      "eval_steps_per_second": 14.536,
       "step": 1320
     },
     {
       "epoch": 6.0,
-      "grad_norm": 0.08771929144859314,
-      "learning_rate": 0.00088,
-      "loss": 0.6864,
+      "grad_norm": 2.2137458324432373,
+      "learning_rate": 4.4000000000000006e-05,
+      "loss": 0.1147,
       "step": 1584
     },
     {
       "epoch": 6.0,
-      "eval_accuracy": 0.5091743119266054,
-      "eval_loss": 0.6970685124397278,
-      "eval_runtime": 0.2805,
-      "eval_samples_per_second": 3109.229,
-      "eval_steps_per_second": 14.263,
+      "eval_accuracy": 0.8084862385321101,
+      "eval_loss": 0.6461686491966248,
+      "eval_runtime": 0.2696,
+      "eval_samples_per_second": 3234.111,
+      "eval_steps_per_second": 14.835,
       "step": 1584
     },
     {
       "epoch": 7.0,
-      "grad_norm": 0.24813300371170044,
-      "learning_rate": 0.00086,
-      "loss": 0.6863,
+      "grad_norm": 4.09332799911499,
+      "learning_rate": 4.3e-05,
+      "loss": 0.0959,
       "step": 1848
     },
     {
       "epoch": 7.0,
-      "eval_accuracy": 0.5091743119266054,
-      "eval_loss": 0.6987707614898682,
-      "eval_runtime": 0.2765,
-      "eval_samples_per_second": 3153.642,
-      "eval_steps_per_second": 14.466,
+      "eval_accuracy": 0.7970183486238532,
+      "eval_loss": 0.7102797627449036,
+      "eval_runtime": 0.2804,
+      "eval_samples_per_second": 3109.959,
+      "eval_steps_per_second": 14.266,
       "step": 1848
     },
     {
       "epoch": 7.0,
       "step": 1848,
       "total_flos": 1.2362922335855616e+16,
-      "train_loss": 0.6874211381524156,
-      "train_runtime": 259.3366,
-      "train_samples_per_second": 12984.864,
-      "train_steps_per_second": 50.899
+      "train_loss": 0.19844082836464885,
+      "train_runtime": 261.6164,
+      "train_samples_per_second": 12871.711,
+      "train_steps_per_second": 50.456
     }
   ],
  "logging_steps": 1,