karanzrk committed on
Commit
4c0356f
1 Parent(s): 6324bc9

model upload

config.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "_name_or_path": "bert-base-uncased",
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "Bad",
+     "1": "Acceptable",
+     "2": "Excellent"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "Acceptable": 1,
+     "Bad": 0,
+     "Excellent": 2
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.31.0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
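config.json describes a bert-base-uncased encoder fine-tuned for single-label sequence classification over three classes (Bad, Acceptable, Excellent). Below is a minimal inference sketch using the standard transformers API; the repo id is a placeholder inferred from the checkpoint path in trainer_state.json, not something this commit confirms.

```python
from transformers import AutoModelForSequenceClassification, AutoTokenizer
import torch

repo_id = "karanzrk/essayl0"  # assumption: placeholder repo id, substitute the actual path

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSequenceClassification.from_pretrained(repo_id)
model.eval()

inputs = tokenizer("An example essay to grade.", truncation=True, max_length=512, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# id2label in config.json: 0 -> Bad, 1 -> Acceptable, 2 -> Excellent
print(model.config.id2label[int(logits.argmax(dim=-1))])
```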
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c226fb7b05b51d1d26e9789968f17bbe68add4f17e64f06fd08281d19e0073f0
+ size 875994309
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3b5d85fb39177d35156f7e7a76b8572a19b38284bb3d079327a216f3523a3ee5
+ size 438006577
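The binary files in this commit (optimizer.pt, pytorch_model.bin, and the other .pt/.pth/.bin artifacts) are stored as Git LFS pointers: only the spec version, sha256 oid, and byte size live in the Git history, while the blob itself is fetched from LFS storage. A small sketch, not part of the upload, for verifying a downloaded blob against its pointer:

```python
import hashlib

def verify_lfs_object(path, expected_sha256, expected_size):
    """Return True if the file at `path` matches the oid and size from its LFS pointer."""
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_sha256 and size == expected_size

# Values copied from the pytorch_model.bin pointer above.
print(verify_lfs_object(
    "pytorch_model.bin",
    "3b5d85fb39177d35156f7e7a76b8572a19b38284bb3d079327a216f3523a3ee5",
    438006577,
))
```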
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b28b686b9c62e0933d02f93e09018f12092561082b02ea2ea436cc386ea0b532
+ size 14575
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2b0f7e60b3896f1ce2f5ae0ba2bc11159c989847000c2702cc2a86a02b22c6c4
+ size 627
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_lower_case": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
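tokenizer_config.json configures a standard BertTokenizer: lower-casing enabled, a 512-token limit, and the usual BERT special tokens from special_tokens_map.json, backed by the vocab.txt added below. A brief sketch of how it behaves, assuming this repository has been cloned into the current working directory:

```python
from transformers import BertTokenizer

# Reads vocab.txt, tokenizer_config.json, and special_tokens_map.json from the local clone.
tok = BertTokenizer.from_pretrained(".")

# do_lower_case=true lower-cases input before WordPiece splitting;
# model_max_length=512 caps sequence length at BERT's position-embedding limit.
print(tok.tokenize("An Example ESSAY about Transformers."))
print(tok.model_max_length, tok.cls_token, tok.sep_token)
```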
trainer_state.json ADDED
@@ -0,0 +1,163 @@
+ {
+   "best_metric": 0.44421833753585815,
+   "best_model_checkpoint": "essayl0/checkpoint-144",
+   "epoch": 15.0,
+   "global_step": 1080,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.7526132404181185,
+       "eval_loss": 0.6014369130134583,
+       "eval_runtime": 8.8226,
+       "eval_samples_per_second": 32.53,
+       "eval_steps_per_second": 2.04,
+       "step": 72
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.8606271777003485,
+       "eval_loss": 0.44421833753585815,
+       "eval_runtime": 8.9292,
+       "eval_samples_per_second": 32.142,
+       "eval_steps_per_second": 2.016,
+       "step": 144
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.8153310104529616,
+       "eval_loss": 0.5106114745140076,
+       "eval_runtime": 9.4286,
+       "eval_samples_per_second": 30.439,
+       "eval_steps_per_second": 1.909,
+       "step": 216
+     },
+     {
+       "epoch": 4.0,
+       "eval_accuracy": 0.7665505226480837,
+       "eval_loss": 0.7232147455215454,
+       "eval_runtime": 8.9358,
+       "eval_samples_per_second": 32.118,
+       "eval_steps_per_second": 2.014,
+       "step": 288
+     },
+     {
+       "epoch": 5.0,
+       "eval_accuracy": 0.8501742160278746,
+       "eval_loss": 0.5562837719917297,
+       "eval_runtime": 8.9194,
+       "eval_samples_per_second": 32.177,
+       "eval_steps_per_second": 2.018,
+       "step": 360
+     },
+     {
+       "epoch": 6.0,
+       "eval_accuracy": 0.794425087108014,
+       "eval_loss": 0.783423125743866,
+       "eval_runtime": 8.9169,
+       "eval_samples_per_second": 32.186,
+       "eval_steps_per_second": 2.019,
+       "step": 432
+     },
+     {
+       "epoch": 6.94,
+       "learning_rate": 1.0740740740740742e-05,
+       "loss": 0.2758,
+       "step": 500
+     },
+     {
+       "epoch": 7.0,
+       "eval_accuracy": 0.8501742160278746,
+       "eval_loss": 0.7359230518341064,
+       "eval_runtime": 8.9,
+       "eval_samples_per_second": 32.247,
+       "eval_steps_per_second": 2.022,
+       "step": 504
+     },
+     {
+       "epoch": 8.0,
+       "eval_accuracy": 0.8780487804878049,
+       "eval_loss": 0.6547905802726746,
+       "eval_runtime": 8.9239,
+       "eval_samples_per_second": 32.161,
+       "eval_steps_per_second": 2.017,
+       "step": 576
+     },
+     {
+       "epoch": 9.0,
+       "eval_accuracy": 0.8885017421602788,
+       "eval_loss": 0.6335029602050781,
+       "eval_runtime": 8.9095,
+       "eval_samples_per_second": 32.213,
+       "eval_steps_per_second": 2.02,
+       "step": 648
+     },
+     {
+       "epoch": 10.0,
+       "eval_accuracy": 0.7839721254355401,
+       "eval_loss": 1.1050056219100952,
+       "eval_runtime": 8.8781,
+       "eval_samples_per_second": 32.327,
+       "eval_steps_per_second": 2.027,
+       "step": 720
+     },
+     {
+       "epoch": 11.0,
+       "eval_accuracy": 0.8780487804878049,
+       "eval_loss": 0.7101187705993652,
+       "eval_runtime": 8.9338,
+       "eval_samples_per_second": 32.125,
+       "eval_steps_per_second": 2.015,
+       "step": 792
+     },
+     {
+       "epoch": 12.0,
+       "eval_accuracy": 0.8397212543554007,
+       "eval_loss": 0.7923139929771423,
+       "eval_runtime": 8.9267,
+       "eval_samples_per_second": 32.151,
+       "eval_steps_per_second": 2.016,
+       "step": 864
+     },
+     {
+       "epoch": 13.0,
+       "eval_accuracy": 0.8432055749128919,
+       "eval_loss": 0.8634345531463623,
+       "eval_runtime": 8.9462,
+       "eval_samples_per_second": 32.081,
+       "eval_steps_per_second": 2.012,
+       "step": 936
+     },
+     {
+       "epoch": 13.89,
+       "learning_rate": 1.4814814814814815e-06,
+       "loss": 0.0185,
+       "step": 1000
+     },
+     {
+       "epoch": 14.0,
+       "eval_accuracy": 0.8432055749128919,
+       "eval_loss": 0.83455491065979,
+       "eval_runtime": 8.8702,
+       "eval_samples_per_second": 32.355,
+       "eval_steps_per_second": 2.029,
+       "step": 1008
+     },
+     {
+       "epoch": 15.0,
+       "eval_accuracy": 0.8327526132404182,
+       "eval_loss": 0.8648095726966858,
+       "eval_runtime": 8.9204,
+       "eval_samples_per_second": 32.173,
+       "eval_steps_per_second": 2.018,
+       "step": 1080
+     }
+   ],
+   "max_steps": 1080,
+   "num_train_epochs": 15,
+   "total_flos": 4194210824632584.0,
+   "trial_name": null,
+   "trial_params": null
+ }
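trainer_state.json records one evaluation per epoch over 15 epochs (1080 steps). The lowest eval_loss, 0.4442, was reached at step 144 (epoch 2), which is why best_metric and best_model_checkpoint point at checkpoint-144; eval_accuracy peaks at 0.8885 in epoch 9 while eval_loss keeps climbing afterwards, a typical overfitting pattern. A short sketch for recovering the best evaluation round from this file with only the standard library:

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the evaluation entries (training-loss entries lack eval_loss).
evals = [e for e in state["log_history"] if "eval_loss" in e]
best = min(evals, key=lambda e: e["eval_loss"])

# Should report epoch 2.0, step 144, matching best_metric / best_model_checkpoint above.
print(best["epoch"], best["step"], best["eval_loss"], best["eval_accuracy"])
```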
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:868275f7e90918ce1f4f337aa674ada980bd4cd8b24991324931efd880272a5d
+ size 3963
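training_args.bin is the pickled transformers.TrainingArguments object used for the run. A hedged sketch for inspecting it, assuming compatible torch and transformers versions are installed (on older PyTorch releases without the weights_only argument, the flag can simply be dropped):

```python
import torch

# training_args.bin is a pickled TrainingArguments object, so transformers must be
# importable; weights_only=False is needed on newer PyTorch, where torch.load no
# longer unpickles arbitrary objects by default.
args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs, args.learning_rate, args.per_device_train_batch_size)
```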
vocab.txt ADDED
The diff for this file is too large to render. See raw diff