gokuls committed on
Commit
29c8e2e
1 Parent(s): 04c426c

End of training

Files changed (5)
  1. README.md +10 -8
  2. all_results.json +16 -0
  3. eval_results.json +11 -0
  4. train_results.json +8 -0
  5. trainer_state.json +130 -0
README.md CHANGED
@@ -1,4 +1,6 @@
 ---
+language:
+- en
 base_model: gokuls/bert_12_layer_model_v1_complete_training_new_wt_init_48
 tags:
 - generated_from_trainer
@@ -14,7 +16,7 @@ model-index:
       name: Text Classification
       type: text-classification
     dataset:
-      name: glue
+      name: GLUE QQP
       type: glue
       config: qqp
       split: validation
@@ -22,10 +24,10 @@ model-index:
     metrics:
     - name: Accuracy
       type: accuracy
-      value: 0.7089290131090774
+      value: 0.7601038832550087
    - name: F1
       type: f1
-      value: 0.6289335939963422
+      value: 0.6952012821721505
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -33,12 +35,12 @@ should probably proofread and complete it, then remove this comment. -->
 
 # hBERTv1_new_pretrain_w_init_48_ver2_qqp
 
-This model is a fine-tuned version of [gokuls/bert_12_layer_model_v1_complete_training_new_wt_init_48](https://huggingface.co/gokuls/bert_12_layer_model_v1_complete_training_new_wt_init_48) on the glue dataset.
+This model is a fine-tuned version of [gokuls/bert_12_layer_model_v1_complete_training_new_wt_init_48](https://huggingface.co/gokuls/bert_12_layer_model_v1_complete_training_new_wt_init_48) on the GLUE QQP dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.5789
-- Accuracy: 0.7089
-- F1: 0.6289
-- Combined Score: 0.6689
+- Loss: 0.4918
+- Accuracy: 0.7601
+- F1: 0.6952
+- Combined Score: 0.7277
 
 ## Model description
 
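For context, a minimal usage sketch (not part of this commit): loading the fine-tuned checkpoint for QQP-style duplicate-question classification with the Transformers Auto classes. The repo id `gokuls/hBERTv1_new_pretrain_w_init_48_ver2_qqp` is assumed from the commit author and the checkpoint path in `trainer_state.json`, and the sketch assumes the checkpoint loads with the standard BERT sequence-classification architecture.

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_id = "gokuls/hBERTv1_new_pretrain_w_init_48_ver2_qqp"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)
model.eval()

# QQP inputs are question pairs; the classification head predicts duplicate vs. not duplicate.
q1 = "How do I learn Python quickly?"
q2 = "What is the fastest way to learn Python?"
inputs = tokenizer(q1, q2, return_tensors="pt", truncation=True)
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.argmax(dim=-1).item())  # predicted label index; the mapping comes from the model config
```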
all_results.json ADDED
@@ -0,0 +1,16 @@
+{
+    "epoch": 6.0,
+    "eval_accuracy": 0.7601038832550087,
+    "eval_combined_score": 0.7276525827135796,
+    "eval_f1": 0.6952012821721505,
+    "eval_loss": 0.4918229579925537,
+    "eval_runtime": 211.0345,
+    "eval_samples": 40430,
+    "eval_samples_per_second": 191.58,
+    "eval_steps_per_second": 2.995,
+    "train_loss": 0.5074344026286237,
+    "train_runtime": 35514.6787,
+    "train_samples": 363846,
+    "train_samples_per_second": 153.674,
+    "train_steps_per_second": 2.402
+}
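The `eval_combined_score` here appears to be the unweighted mean of accuracy and F1; a quick sanity check against the values above (not code from the training script):

```python
accuracy = 0.7601038832550087
f1 = 0.6952012821721505
print((accuracy + f1) / 2)  # 0.7276525827135796, matching eval_combined_score
```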
eval_results.json ADDED
@@ -0,0 +1,11 @@
+{
+    "epoch": 6.0,
+    "eval_accuracy": 0.7601038832550087,
+    "eval_combined_score": 0.7276525827135796,
+    "eval_f1": 0.6952012821721505,
+    "eval_loss": 0.4918229579925537,
+    "eval_runtime": 211.0345,
+    "eval_samples": 40430,
+    "eval_samples_per_second": 191.58,
+    "eval_steps_per_second": 2.995
+}
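The evaluation throughput fields are internally consistent: `eval_samples_per_second` is roughly `eval_samples / eval_runtime`, and the samples-to-steps ratio suggests an evaluation batch size of about 64 (an inference from the numbers, not something stated in the card):

```python
eval_samples = 40430
eval_runtime = 211.0345
print(eval_samples / eval_runtime)  # ≈ 191.58, matching eval_samples_per_second
print(191.58 / 2.995)               # ≈ 64 samples per eval step (assumed batch size)
```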
train_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 6.0,
+    "train_loss": 0.5074344026286237,
+    "train_runtime": 35514.6787,
+    "train_samples": 363846,
+    "train_samples_per_second": 153.674,
+    "train_steps_per_second": 2.402
+}
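Note that `train_steps_per_second` and `train_samples_per_second` line up with the scheduled run length (`max_steps` of 85290 over 15 epochs, per `trainer_state.json` below) rather than the 6 epochs actually completed, which suggests the Trainer computed them from the scheduled run after an early stop:

```python
train_runtime = 35514.6787
print(85290 / train_runtime)        # ≈ 2.402, the reported train_steps_per_second
print(363846 * 15 / train_runtime)  # ≈ 153.67, the reported train_samples_per_second
```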
trainer_state.json ADDED
@@ -0,0 +1,130 @@
+{
+  "best_metric": 0.4918229579925537,
+  "best_model_checkpoint": "hBERTv1_new_pretrain_w_init_48_ver2_qqp/checkpoint-5686",
+  "epoch": 6.0,
+  "eval_steps": 500,
+  "global_step": 34116,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 1.0,
+      "learning_rate": 3.733333333333334e-05,
+      "loss": 0.5279,
+      "step": 5686
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.7601038832550087,
+      "eval_combined_score": 0.7276525827135796,
+      "eval_f1": 0.6952012821721505,
+      "eval_loss": 0.4918229579925537,
+      "eval_runtime": 213.3541,
+      "eval_samples_per_second": 189.497,
+      "eval_steps_per_second": 2.962,
+      "step": 5686
+    },
+    {
+      "epoch": 2.0,
+      "learning_rate": 3.466666666666667e-05,
+      "loss": 0.4826,
+      "step": 11372
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.7643581498886965,
+      "eval_combined_score": 0.7099691042274545,
+      "eval_f1": 0.6555800585662124,
+      "eval_loss": 0.5367019176483154,
+      "eval_runtime": 211.9321,
+      "eval_samples_per_second": 190.769,
+      "eval_steps_per_second": 2.982,
+      "step": 11372
+    },
+    {
+      "epoch": 3.0,
+      "learning_rate": 3.2000000000000005e-05,
+      "loss": 0.4943,
+      "step": 17058
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.7593618600049469,
+      "eval_combined_score": 0.7016571422623088,
+      "eval_f1": 0.6439524245196707,
+      "eval_loss": 0.5223015546798706,
+      "eval_runtime": 211.9395,
+      "eval_samples_per_second": 190.762,
+      "eval_steps_per_second": 2.982,
+      "step": 17058
+    },
+    {
+      "epoch": 4.0,
+      "learning_rate": 2.9333333333333333e-05,
+      "loss": 0.492,
+      "step": 22744
+    },
+    {
+      "epoch": 4.0,
+      "eval_accuracy": 0.7599802127133317,
+      "eval_combined_score": 0.7032452425944627,
+      "eval_f1": 0.6465102724755938,
+      "eval_loss": 0.5378891825675964,
+      "eval_runtime": 211.5301,
+      "eval_samples_per_second": 191.131,
+      "eval_steps_per_second": 2.988,
+      "step": 22744
+    },
+    {
+      "epoch": 5.0,
+      "learning_rate": 2.6666666666666667e-05,
+      "loss": 0.505,
+      "step": 28430
+    },
+    {
+      "epoch": 5.0,
+      "eval_accuracy": 0.74232005936186,
+      "eval_combined_score": 0.6965020361186982,
+      "eval_f1": 0.6506840128755366,
+      "eval_loss": 0.5431071519851685,
+      "eval_runtime": 211.3198,
+      "eval_samples_per_second": 191.321,
+      "eval_steps_per_second": 2.991,
+      "step": 28430
+    },
+    {
+      "epoch": 6.0,
+      "learning_rate": 2.4e-05,
+      "loss": 0.5428,
+      "step": 34116
+    },
+    {
+      "epoch": 6.0,
+      "eval_accuracy": 0.7089290131090774,
+      "eval_combined_score": 0.6689313035527098,
+      "eval_f1": 0.6289335939963422,
+      "eval_loss": 0.5789130330085754,
+      "eval_runtime": 211.7558,
+      "eval_samples_per_second": 190.927,
+      "eval_steps_per_second": 2.985,
+      "step": 34116
+    },
+    {
+      "epoch": 6.0,
+      "step": 34116,
+      "total_flos": 3.2330651506468454e+17,
+      "train_loss": 0.5074344026286237,
+      "train_runtime": 35514.6787,
+      "train_samples_per_second": 153.674,
+      "train_steps_per_second": 2.402
+    }
+  ],
+  "logging_steps": 1,
+  "max_steps": 85290,
+  "num_train_epochs": 15,
+  "save_steps": 500,
+  "total_flos": 3.2330651506468454e+17,
+  "trial_name": null,
+  "trial_params": null
+}
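The card's headline numbers (accuracy 0.7601, F1 0.6952, loss 0.4918) correspond to the epoch-1 evaluation at step 5686, which is also the `best_model_checkpoint`, rather than the final epoch-6 metrics. A minimal sketch, assuming best-model selection on `eval_loss` (which the `best_metric` value here suggests), of recovering that checkpoint from `trainer_state.json`:

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the evaluation entries in the log and pick the one with the lowest eval_loss.
evals = [entry for entry in state["log_history"] if "eval_loss" in entry]
best = min(evals, key=lambda entry: entry["eval_loss"])
print(best["step"], best["eval_loss"])   # 5686 0.4918...
print(state["best_model_checkpoint"])    # hBERTv1_new_pretrain_w_init_48_ver2_qqp/checkpoint-5686
```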