EC2 Default User committed on
Commit 168ef09
1 parent: af5e140
.gitignore ADDED
@@ -0,0 +1 @@
+ checkpoint-*/
README.md ADDED
@@ -0,0 +1,79 @@
+ ---
+ language:
+ - en
+ license: apache-2.0
+ tags:
+ - generated_from_trainer
+ datasets:
+ - boolq
+ metrics:
+ - accuracy
+ model_index:
+ - name: distilbert-base-uncased-boolq
+   results:
+   - task:
+       name: Question Answering
+       type: question-answering
+     dataset:
+       name: boolq
+       type: boolq
+       args: default
+     metric:
+       name: Accuracy
+       type: accuracy
+       value: 0.7314984709480122
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # distilbert-base-uncased-boolq
+
+ This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the boolq dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 1.2071
+ - Accuracy: 0.7315
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 16
+ - eval_batch_size: 32
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_steps: 1000
+ - num_epochs: 5
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy |
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|
+ | 0.6506        | 1.0   | 531  | 0.6075          | 0.6681   |
+ | 0.575         | 2.0   | 1062 | 0.5816          | 0.6978   |
+ | 0.4397        | 3.0   | 1593 | 0.6137          | 0.7253   |
+ | 0.2524        | 4.0   | 2124 | 0.8124          | 0.7466   |
+ | 0.126         | 5.0   | 2655 | 1.1437          | 0.7370   |
+
+
+ ### Framework versions
+
+ - Transformers 4.8.2
+ - Pytorch 1.8.1+cu111
+ - Datasets 1.8.0
+ - Tokenizers 0.10.3
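The hyperparameter list in the card above maps almost one-to-one onto `transformers.TrainingArguments`. A minimal sketch of an equivalent configuration, assuming a standard `Trainer` setup (the actual training script is not part of this commit, and the per-epoch evaluation strategy is inferred from the results table):

```python
# Sketch only: the commit ships no training script, so this reconstructs the
# card's hyperparameters as TrainingArguments; all other settings are defaults.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="distilbert-base-uncased-boolq",
    learning_rate=5e-5,              # learning_rate: 5e-05
    per_device_train_batch_size=16,  # train_batch_size: 16
    per_device_eval_batch_size=32,   # eval_batch_size: 32
    seed=42,                         # seed: 42
    lr_scheduler_type="linear",      # lr_scheduler_type: linear
    warmup_steps=1000,               # lr_scheduler_warmup_steps: 1000
    num_train_epochs=5,              # num_epochs: 5
    evaluation_strategy="epoch",     # assumption: eval once per epoch, as in the table
)
# The default optimizer is AdamW with betas=(0.9, 0.999) and eps=1e-8,
# which matches the optimizer line in the card.
```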
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 5.0,
+     "eval_accuracy": 0.7314984709480122,
+     "eval_loss": 1.2070553302764893,
+     "eval_runtime": 10.6939,
+     "eval_samples_per_second": 305.782,
+     "eval_steps_per_second": 9.632
+ }
config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "_name_or_path": "distilbert-base-uncased",
+   "activation": "gelu",
+   "architectures": [
+     "DistilBertForSequenceClassification"
+   ],
+   "attention_dropout": 0.1,
+   "dim": 768,
+   "dropout": 0.1,
+   "hidden_dim": 3072,
+   "id2label": {
+     "0": "False",
+     "1": "True"
+   },
+   "initializer_range": 0.02,
+   "label2id": {
+     "False": 0,
+     "True": 1
+   },
+   "max_position_embeddings": 512,
+   "model_type": "distilbert",
+   "n_heads": 12,
+   "n_layers": 6,
+   "pad_token_id": 0,
+   "problem_type": "single_label_classification",
+   "qa_dropout": 0.1,
+   "seq_classif_dropout": 0.2,
+   "sinusoidal_pos_embds": false,
+   "tie_weights_": true,
+   "transformers_version": "4.8.2",
+   "vocab_size": 30522
+ }
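The `id2label` mapping above means a prediction decodes directly to the strings "False"/"True". A hedged usage sketch; the path `distilbert-base-uncased-boolq` is assumed from the card's name field and may differ from the published repo id:

```python
# Usage sketch; the model path is an assumption taken from the card's name field.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

path = "distilbert-base-uncased-boolq"  # hypothetical repo id; adjust as needed
tokenizer = AutoTokenizer.from_pretrained(path)
model = AutoModelForSequenceClassification.from_pretrained(path)

# BoolQ pairs a yes/no question with a passage; a sentence-pair encoding is the
# standard input format for BERT-style sequence classifiers.
question = "is the sky blue"
passage = "The sky appears blue because air scatters short-wavelength light."
inputs = tokenizer(question, passage, truncation=True, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

# id2label from config.json: {0: "False", 1: "True"}
print(model.config.id2label[logits.argmax(dim=-1).item()])
```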
log.log ADDED
@@ -0,0 +1,16 @@
+ Training dataset length:
+ 8484
+ Validation dataset length:
+ 943
+ Test dataset length:
+ 3270
+ Current performance:
+ Eval:
+ {'eval_loss': 0.7018890380859375, 'eval_accuracy': 0.40827147401908803, 'eval_runtime': 3.2526, 'eval_samples_per_second': 289.923, 'eval_steps_per_second': 9.223}
+ Test:
+ {'eval_loss': 0.7028740644454956, 'eval_accuracy': 0.39847094801223243, 'eval_runtime': 10.3848, 'eval_samples_per_second': 314.882, 'eval_steps_per_second': 9.918}
+ Training complete performance:
+ Eval:
+ {'eval_loss': 1.1437411308288574, 'eval_accuracy': 0.7370095440084835, 'eval_runtime': 3.1329, 'eval_samples_per_second': 301.0, 'eval_steps_per_second': 9.576, 'epoch': 5.0}
+ Test:
+ {'eval_loss': 1.2070553302764893, 'eval_accuracy': 0.7314984709480122, 'eval_runtime': 10.6939, 'eval_samples_per_second': 305.782, 'eval_steps_per_second': 9.632, 'epoch': 5.0}
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:364d06f2321d04dbdac461a7b7affdf3c81adb8c2864b7c2a8d663843598e0c4
+ size 267863191
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "distilbert-base-uncased", "tokenizer_class": "DistilBertTokenizer"}
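The tokenizer config pins `do_lower_case: true` and `model_max_length: 512`, i.e. the stock uncased DistilBERT tokenizer. A small sketch of the resulting sentence-pair encoding (the base tokenizer is used here, since it matches the files shipped in this commit):

```python
from transformers import AutoTokenizer

# Same tokenizer as in this commit (inherited unchanged from the base model).
tok = AutoTokenizer.from_pretrained("distilbert-base-uncased")

enc = tok("Is the sky blue?", "The sky appears blue.")
print(tok.convert_ids_to_tokens(enc["input_ids"]))
# Expected (roughly): ['[CLS]', 'is', 'the', 'sky', 'blue', '?', '[SEP]',
#                      'the', 'sky', 'appears', 'blue', '.', '[SEP]']
```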
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 5.0,
+     "eval_accuracy": 0.7314984709480122,
+     "eval_loss": 1.2070553302764893,
+     "eval_runtime": 10.6939,
+     "eval_samples_per_second": 305.782,
+     "eval_steps_per_second": 9.632
+ }
trainer_state.json ADDED
@@ -0,0 +1,118 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 5.0,
+   "global_step": 2655,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.94,
+       "learning_rate": 2.5e-05,
+       "loss": 0.6506,
+       "step": 500
+     },
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.6680805938494168,
+       "eval_loss": 0.6074808239936829,
+       "eval_runtime": 3.0467,
+       "eval_samples_per_second": 309.518,
+       "eval_steps_per_second": 9.847,
+       "step": 531
+     },
+     {
+       "epoch": 1.88,
+       "learning_rate": 5e-05,
+       "loss": 0.575,
+       "step": 1000
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.6977730646871686,
+       "eval_loss": 0.5816474556922913,
+       "eval_runtime": 3.0782,
+       "eval_samples_per_second": 306.344,
+       "eval_steps_per_second": 9.746,
+       "step": 1062
+     },
+     {
+       "epoch": 2.82,
+       "learning_rate": 3.489425981873112e-05,
+       "loss": 0.4397,
+       "step": 1500
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.7253446447507953,
+       "eval_loss": 0.6136603951454163,
+       "eval_runtime": 3.1046,
+       "eval_samples_per_second": 303.739,
+       "eval_steps_per_second": 9.663,
+       "step": 1593
+     },
+     {
+       "epoch": 3.77,
+       "learning_rate": 1.9788519637462235e-05,
+       "loss": 0.2524,
+       "step": 2000
+     },
+     {
+       "epoch": 4.0,
+       "eval_accuracy": 0.7465535524920467,
+       "eval_loss": 0.8123661279678345,
+       "eval_runtime": 3.1044,
+       "eval_samples_per_second": 303.766,
+       "eval_steps_per_second": 9.664,
+       "step": 2124
+     },
+     {
+       "epoch": 4.71,
+       "learning_rate": 4.682779456193353e-06,
+       "loss": 0.126,
+       "step": 2500
+     },
+     {
+       "epoch": 5.0,
+       "eval_accuracy": 0.7370095440084835,
+       "eval_loss": 1.1437411308288574,
+       "eval_runtime": 3.1169,
+       "eval_samples_per_second": 302.541,
+       "eval_steps_per_second": 9.625,
+       "step": 2655
+     },
+     {
+       "epoch": 5.0,
+       "step": 2655,
+       "total_flos": 4866474386987520.0,
+       "train_loss": 0.3910632814166685,
+       "train_runtime": 432.5593,
+       "train_samples_per_second": 98.067,
+       "train_steps_per_second": 6.138
+     },
+     {
+       "epoch": 5.0,
+       "eval_accuracy": 0.7370095440084835,
+       "eval_loss": 1.1437411308288574,
+       "eval_runtime": 3.1329,
+       "eval_samples_per_second": 301.0,
+       "eval_steps_per_second": 9.576,
+       "step": 2655
+     },
+     {
+       "epoch": 5.0,
+       "eval_accuracy": 0.7314984709480122,
+       "eval_loss": 1.2070553302764893,
+       "eval_runtime": 10.6939,
+       "eval_samples_per_second": 305.782,
+       "eval_steps_per_second": 9.632,
+       "step": 2655
+     }
+   ],
+   "max_steps": 2655,
+   "num_train_epochs": 5,
+   "total_flos": 4866474386987520.0,
+   "trial_name": null,
+   "trial_params": null
+ }
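Because `trainer_state.json` keeps the full `log_history`, the per-epoch validation curve from the README can be recovered from this file alone. A minimal sketch:

```python
import json

# Pull the per-epoch eval rows out of the trainer state shipped in this commit.
with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_accuracy" in entry:
        print(f"epoch {entry['epoch']}: "
              f"acc={entry['eval_accuracy']:.4f}  loss={entry['eval_loss']:.4f}")
```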
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:523ad77b6426b847f727ece175def7814962d019bec45d84aaec67afaa90ffc7
+ size 2735
vocab.txt ADDED
The diff for this file is too large to render. See raw diff