jordonpeter01 committed
Commit 9f31738
1 Parent(s): 5dc826e

Upload folder using huggingface_hub

.outputs/outputs/README.md ADDED
@@ -0,0 +1,80 @@
+ ---
+ language:
+ - en
+ license: apache-2.0
+ base_model: bert-base-cased
+ tags:
+ - generated_from_trainer
+ datasets:
+ - glue
+ metrics:
+ - accuracy
+ - f1
+ model-index:
+ - name: outputs
+   results:
+   - task:
+       name: Text Classification
+       type: text-classification
+     dataset:
+       name: GLUE MRPC
+       type: glue
+       config: mrpc
+       split: validation
+       args: mrpc
+     metrics:
+     - name: Accuracy
+       type: accuracy
+       value: 0.8406862745098039
+     - name: F1
+       type: f1
+       value: 0.888888888888889
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # outputs
+
+ This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the GLUE MRPC dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.4183
+ - Accuracy: 0.8407
+ - F1: 0.8889
+ - Combined Score: 0.8648
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 32
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 3.0
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.33.0.dev0
+ - Pytorch 2.0.1+cu117
+ - Datasets 2.14.4
+ - Tokenizers 0.13.3
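
The hyperparameters in this README map one-to-one onto `transformers.TrainingArguments`. The sketch below is a hypothetical reconstruction of an equivalent fine-tuning run, not the exact script behind this commit; the preprocessing shown follows the standard GLUE MRPC recipe, and the Adam betas/epsilon listed above are simply the optimizer defaults.

```python
# Hypothetical reconstruction of the fine-tuning run described in the README.
from datasets import load_dataset
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    Trainer,
    TrainingArguments,
)

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
model = AutoModelForSequenceClassification.from_pretrained(
    "bert-base-cased", num_labels=2
)

raw = load_dataset("glue", "mrpc")
encoded = raw.map(
    lambda ex: tokenizer(ex["sentence1"], ex["sentence2"], truncation=True),
    batched=True,
)

args = TrainingArguments(
    output_dir="outputs",
    learning_rate=2e-5,             # matches the README
    per_device_train_batch_size=32,
    per_device_eval_batch_size=8,
    num_train_epochs=3.0,
    lr_scheduler_type="linear",     # linear decay to zero, no warmup
    seed=42,
    # Adam betas=(0.9, 0.999) and epsilon=1e-08 are the AdamW defaults.
)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=encoded["train"],
    eval_dataset=encoded["validation"],
    tokenizer=tokenizer,  # enables the default padding collator
)
trainer.train()
```
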
.outputs/outputs/all_results.json ADDED
@@ -0,0 +1,16 @@
+ {
+     "epoch": 3.0,
+     "eval_accuracy": 0.8406862745098039,
+     "eval_combined_score": 0.8647875816993464,
+     "eval_f1": 0.888888888888889,
+     "eval_loss": 0.41829124093055725,
+     "eval_runtime": 56.9052,
+     "eval_samples": 408,
+     "eval_samples_per_second": 7.17,
+     "eval_steps_per_second": 0.896,
+     "train_loss": 0.428191568194956,
+     "train_runtime": 4927.0278,
+     "train_samples": 3668,
+     "train_samples_per_second": 2.233,
+     "train_steps_per_second": 0.07
+ }
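
The `eval_combined_score` here is the unweighted mean of accuracy and F1, which is easy to verify:

```python
# eval_combined_score is the plain average of the two GLUE MRPC metrics.
accuracy = 0.8406862745098039
f1 = 0.888888888888889

combined = (accuracy + f1) / 2
print(combined)  # 0.8647875816993464, matching eval_combined_score above
```
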
.outputs/outputs/config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "_name_or_path": "bert-base-cased",
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "finetuning_task": "mrpc",
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "not_equivalent",
+     "1": "equivalent"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "equivalent": 1,
+     "not_equivalent": 0
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.33.0.dev0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 28996
+ }
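
The `id2label`/`label2id` maps in this config are what make predictions human-readable. A minimal inference sketch follows; `"./outputs"` is an assumed local path to this checkpoint, so substitute the actual directory or hub repo id.

```python
# Minimal paraphrase-classification sketch; "./outputs" is an assumed path.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./outputs")
model = AutoModelForSequenceClassification.from_pretrained("./outputs")

inputs = tokenizer(
    "The company posted record profits this quarter.",
    "Quarterly profits hit an all-time high for the firm.",
    return_tensors="pt",
)
with torch.no_grad():
    logits = model(**inputs).logits

pred = logits.argmax(dim=-1).item()
print(model.config.id2label[pred])  # "equivalent" or "not_equivalent"
```
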
.outputs/outputs/eval_results.json ADDED
@@ -0,0 +1,11 @@
+ {
+     "epoch": 3.0,
+     "eval_accuracy": 0.8406862745098039,
+     "eval_combined_score": 0.8647875816993464,
+     "eval_f1": 0.888888888888889,
+     "eval_loss": 0.41829124093055725,
+     "eval_runtime": 56.9052,
+     "eval_samples": 408,
+     "eval_samples_per_second": 7.17,
+     "eval_steps_per_second": 0.896
+ }
.outputs/outputs/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:026f6386a5a1d0c6fb98ce8cc635ee3fcb3d26db57394428731ac18105865e80
+ size 433312817
.outputs/outputs/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
.outputs/outputs/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
.outputs/outputs/tokenizer_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_lower_case": false,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
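
This config selects a cased `BertTokenizer` (`do_lower_case: false`) that encodes a sentence pair in the standard `[CLS] A [SEP] B [SEP]` layout. A quick sanity-check sketch, again assuming the checkpoint lives at `./outputs`:

```python
# Quick check of the cased tokenizer's sentence-pair layout.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./outputs")  # assumed local path

enc = tokenizer("He ran fast.", "He ran quickly.")
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))
# Roughly: ['[CLS]', 'He', 'ran', 'fast', '.', '[SEP]', 'He', 'ran', 'quickly', '.', '[SEP]']
print(enc["token_type_ids"])  # 0s for the first segment, 1s for the second
```
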
.outputs/outputs/train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 3.0,
+     "train_loss": 0.428191568194956,
+     "train_runtime": 4927.0278,
+     "train_samples": 3668,
+     "train_samples_per_second": 2.233,
+     "train_steps_per_second": 0.07
+ }
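
These throughput numbers are internally consistent: 3,668 training samples over 3 epochs in ~4,927 seconds reproduces the reported samples/second, and 345 optimizer steps (ceil(3668 / 32) = 115 steps per epoch, times 3 epochs) reproduces the steps/second.

```python
# Cross-checking the reported training throughput.
train_samples = 3668
epochs = 3
train_runtime = 4927.0278
total_steps = 345  # ceil(3668 / 32) = 115 steps per epoch, times 3 epochs

print(round(train_samples * epochs / train_runtime, 3))  # 2.233
print(round(total_steps / train_runtime, 2))             # 0.07
```
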
.outputs/outputs/trainer_state.json ADDED
@@ -0,0 +1,130 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 3.0,
+   "eval_steps": 500,
+   "global_step": 345,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.17,
+       "learning_rate": 1.8840579710144928e-05,
+       "loss": 0.7776,
+       "step": 20
+     },
+     {
+       "epoch": 0.35,
+       "learning_rate": 1.7681159420289858e-05,
+       "loss": 0.6203,
+       "step": 40
+     },
+     {
+       "epoch": 0.52,
+       "learning_rate": 1.6521739130434785e-05,
+       "loss": 0.5526,
+       "step": 60
+     },
+     {
+       "epoch": 0.7,
+       "learning_rate": 1.536231884057971e-05,
+       "loss": 0.5557,
+       "step": 80
+     },
+     {
+       "epoch": 0.87,
+       "learning_rate": 1.420289855072464e-05,
+       "loss": 0.5374,
+       "step": 100
+     },
+     {
+       "epoch": 1.04,
+       "learning_rate": 1.3043478260869566e-05,
+       "loss": 0.4943,
+       "step": 120
+     },
+     {
+       "epoch": 1.22,
+       "learning_rate": 1.1884057971014494e-05,
+       "loss": 0.4549,
+       "step": 140
+     },
+     {
+       "epoch": 1.39,
+       "learning_rate": 1.0724637681159422e-05,
+       "loss": 0.4398,
+       "step": 160
+     },
+     {
+       "epoch": 1.57,
+       "learning_rate": 9.565217391304349e-06,
+       "loss": 0.3868,
+       "step": 180
+     },
+     {
+       "epoch": 1.74,
+       "learning_rate": 8.405797101449275e-06,
+       "loss": 0.3783,
+       "step": 200
+     },
+     {
+       "epoch": 1.91,
+       "learning_rate": 7.246376811594203e-06,
+       "loss": 0.3841,
+       "step": 220
+     },
+     {
+       "epoch": 2.09,
+       "learning_rate": 6.086956521739132e-06,
+       "loss": 0.3241,
+       "step": 240
+     },
+     {
+       "epoch": 2.26,
+       "learning_rate": 4.927536231884059e-06,
+       "loss": 0.2835,
+       "step": 260
+     },
+     {
+       "epoch": 2.43,
+       "learning_rate": 3.768115942028986e-06,
+       "loss": 0.3149,
+       "step": 280
+     },
+     {
+       "epoch": 2.61,
+       "learning_rate": 2.6086956521739132e-06,
+       "loss": 0.2772,
+       "step": 300
+     },
+     {
+       "epoch": 2.78,
+       "learning_rate": 1.4492753623188408e-06,
+       "loss": 0.2516,
+       "step": 320
+     },
+     {
+       "epoch": 2.96,
+       "learning_rate": 2.8985507246376816e-07,
+       "loss": 0.2649,
+       "step": 340
+     },
+     {
+       "epoch": 3.0,
+       "step": 345,
+       "total_flos": 723818513295360.0,
+       "train_loss": 0.428191568194956,
+       "train_runtime": 4927.0278,
+       "train_samples_per_second": 2.233,
+       "train_steps_per_second": 0.07
+     }
+   ],
+   "logging_steps": 20,
+   "max_steps": 345,
+   "num_train_epochs": 3,
+   "save_steps": 500,
+   "total_flos": 723818513295360.0,
+   "trial_name": null,
+   "trial_params": null
+ }
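
The `learning_rate` values in `log_history` trace a warmup-free linear schedule over `max_steps` = 345, as declared by `lr_scheduler_type: linear` in the README: at step `s`, the rate is `2e-05 * (345 - s) / 345`. A quick check against the logged entries (the no-warmup assumption is inferred, since no warmup steps appear in the log):

```python
# Reproducing the logged learning-rate trace: linear decay to zero, no warmup.
base_lr = 2e-05
max_steps = 345

def lr_at(step):
    return base_lr * ((max_steps - step) / max_steps)

print(lr_at(20))   # ~1.884058e-05, the first logged value
print(lr_at(180))  # ~9.565217e-06, matching the step-180 entry
print(lr_at(340))  # ~2.898551e-07, the last logged value
```
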
.outputs/outputs/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4eae5ebb645f2c6c554810512cf396591249dcd813d8581802ab61596d829d56
+ size 3963
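
`training_args.bin` is the pickled `TrainingArguments` object the `Trainer` saves alongside the model, and it can be inspected with `torch.load`. The path below is an assumed local location, and since this unpickles a full Python object, only do it with files you trust.

```python
# Inspecting the saved TrainingArguments; the path is an assumed local one.
import torch

args = torch.load("./outputs/training_args.bin")  # unpickles a TrainingArguments
print(args.learning_rate)     # expected: 2e-05
print(args.num_train_epochs)  # expected: 3.0
```
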
.outputs/outputs/vocab.txt ADDED
The diff for this file is too large to render. See raw diff