etgld committed on
Commit 1332f08
1 Parent(s): 7d16f45

Upload 13 files

README.md CHANGED
@@ -1,3 +1,73 @@
  ---
- license: apache-2.0
+ license: mit
+ base_model: microsoft/BiomedNLP-BiomedBERT-base-uncased-abstract
+ tags:
+ - generated_from_trainer
+ metrics:
+ - accuracy
+ - f1
+ model-index:
+ - name: test
+   results: []
  ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # test
+
+ This model is a fine-tuned version of [microsoft/BiomedNLP-BiomedBERT-base-uncased-abstract](https://huggingface.co/microsoft/BiomedNLP-BiomedBERT-base-uncased-abstract) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.6886
+ - Accuracy: 0.8143
+ - F1: [0.92816572 0.56028369 0.1 0.2633452 ]
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 32
+ - eval_batch_size: 64
+ - seed: 42
+ - gradient_accumulation_steps: 2
+ - total_train_batch_size: 64
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - num_epochs: 10.0
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------------------------------------------:|
+ | No log | 1.0 | 37 | 0.4891 | 0.8235 | [0.91702786 0.33333333 0. 0.10837438] |
+ | No log | 2.0 | 74 | 0.4762 | 0.8321 | [0.93139159 0.48466258 0. 0.22857143] |
+ | No log | 3.0 | 111 | 0.5084 | 0.8208 | [0.92995725 0.44887781 0. 0.19266055] |
+ | No log | 4.0 | 148 | 0.5519 | 0.8105 | [0.92421691 0.44444444 0.06557377 0.30769231] |
+ | No log | 5.0 | 185 | 0.5805 | 0.8294 | [0.93531353 0.52336449 0.09345794 0.27131783] |
+ | No log | 6.0 | 222 | 0.6778 | 0.7955 | [0.91344509 0.55305466 0.15463918 0.29166667] |
+ | No log | 7.0 | 259 | 0.6407 | 0.8213 | [0.93298292 0.51383399 0.10191083 0.2519084 ] |
+ | No log | 8.0 | 296 | 0.6639 | 0.8272 | [0.9326288 0.55052265 0.18181818 0.26271186] |
+ | No log | 9.0 | 333 | 0.6863 | 0.8192 | [0.93071286 0.55830389 0.11042945 0.2761194 ] |
+ | No log | 10.0 | 370 | 0.6886 | 0.8143 | [0.92816572 0.56028369 0.1 0.2633452 ] |
+
+
+ ### Framework versions
+
+ - Transformers 4.37.2
+ - Pytorch 2.2.0+cu121
+ - Datasets 2.17.0
+ - Tokenizers 0.15.2
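
The model card above describes a four-way sequence classifier (the `id2label` map in config.json below lists ACTUAL, GENERIC, HEDGED and HYPOTHETICAL). For orientation only, and not as part of the committed files, a minimal inference sketch could look like the following; the repository path is a placeholder, and wrapping the event of interest in the `<e>`/`</e>` markers is an assumption based on the added tokens rather than documented usage:

```python
# Illustrative sketch; not part of this commit. The checkpoint path is a placeholder.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo_id = "path/to/this-checkpoint"  # local directory or Hub id for this model

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSequenceClassification.from_pretrained(repo_id)
model.eval()

# <e>...</e> marks the event being classified (assumed usage of the added tokens).
text = "The patient <e> denies </e> any history of chest pain."

inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
with torch.no_grad():
    logits = model(**inputs).logits

pred_id = int(logits.argmax(dim=-1))
print(model.config.id2label[pred_id])  # ACTUAL, GENERIC, HEDGED, or HYPOTHETICAL
```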
added_tokens.json ADDED
@@ -0,0 +1,10 @@
+ {
+ "</a1>": 28898,
+ "</a2>": 28900,
+ "</e>": 28896,
+ "<a1>": 28897,
+ "<a2>": 28899,
+ "<cr>": 28901,
+ "<e>": 28895,
+ "<neg>": 28902
+ }
all_results.json ADDED
@@ -0,0 +1,15 @@
+ {
+ "epoch": 10.0,
+ "eval_accuracy": 0.81431645154953,
+ "eval_f1": "[0.92816572 0.56028369 0.1 0.2633452 ]",
+ "eval_loss": 0.6886363625526428,
+ "eval_runtime": 7.7747,
+ "eval_samples": 1858,
+ "eval_samples_per_second": 238.981,
+ "eval_steps_per_second": 3.859,
+ "train_loss": 0.21409231649862753,
+ "train_runtime": 515.9059,
+ "train_samples": 2338,
+ "train_samples_per_second": 45.318,
+ "train_steps_per_second": 0.717
+ }
config.json ADDED
@@ -0,0 +1,39 @@
+
+ {
+ "_name_or_path": "microsoft/BiomedNLP-BiomedBERT-base-uncased-abstract",
+ "architectures": [
+ "BertForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "classifier_dropout": null,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "id2label": {
+ "0": "ACTUAL",
+ "1": "GENERIC",
+ "2": "HEDGED",
+ "3": "HYPOTHETICAL"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "label2id": {
+ "ACTUAL": 0,
+ "GENERIC": 1,
+ "HEDGED": 2,
+ "HYPOTHETICAL": 3
+ },
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "problem_type": "single_label_classification",
+ "torch_dtype": "float32",
+ "transformers_version": "4.37.2",
+ "type_vocab_size": 2,
+ "use_cache": true,
+ "vocab_size": 28903
+ }
eval_results.json ADDED
@@ -0,0 +1,10 @@
+ {
+ "epoch": 10.0,
+ "eval_accuracy": 0.81431645154953,
+ "eval_f1": "[0.92816572 0.56028369 0.1 0.2633452 ]",
+ "eval_loss": 0.6886363625526428,
+ "eval_runtime": 7.7747,
+ "eval_samples": 1858,
+ "eval_samples_per_second": 238.981,
+ "eval_steps_per_second": 3.859
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:370ff03698c998c3a98bfc5eb64053f4046eab460b116a97207506145bbec52c
+ size 432991216
special_tokens_map.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "additional_special_tokens": [
+ "<e>",
+ "</e>",
+ "<a1>",
+ "</a1>",
+ "<a2>",
+ "</a2>",
+ "<cr>",
+ "<neg>"
+ ],
+ "cls_token": "[CLS]",
+ "mask_token": "[MASK]",
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "unk_token": "[UNK]"
+ }
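
added_tokens.json, special_tokens_map.json and the `vocab_size` of 28903 in config.json are consistent with eight marker tokens appended to the 28,895-entry base vocabulary. A minimal sketch of how such markers are typically registered before fine-tuning, assuming the standard `transformers` API (the actual training script is not part of this commit):

```python
# Sketch of registering the marker tokens; not the committed training script.
from transformers import AutoModelForSequenceClassification, AutoTokenizer

base = "microsoft/BiomedNLP-BiomedBERT-base-uncased-abstract"
tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForSequenceClassification.from_pretrained(base, num_labels=4)

# The same eight markers that appear in added_tokens.json / special_tokens_map.json.
markers = ["<e>", "</e>", "<a1>", "</a1>", "<a2>", "</a2>", "<cr>", "<neg>"]
tokenizer.add_special_tokens({"additional_special_tokens": markers})

# 28,895 base entries + 8 markers = 28,903, matching vocab_size in config.json.
model.resize_token_embeddings(len(tokenizer))
```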
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,132 @@
+ {
+ "add_prefix_space": true,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "[PAD]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "[UNK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "[CLS]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "[SEP]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "4": {
+ "content": "[MASK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "28895": {
+ "content": "<e>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "28896": {
+ "content": "</e>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "28897": {
+ "content": "<a1>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "28898": {
+ "content": "</a1>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "28899": {
+ "content": "<a2>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "28900": {
+ "content": "</a2>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "28901": {
+ "content": "<cr>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "28902": {
+ "content": "<neg>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [
+ "<e>",
+ "</e>",
+ "<a1>",
+ "</a1>",
+ "<a2>",
+ "</a2>",
+ "<cr>",
+ "<neg>"
+ ],
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "[CLS]",
+ "do_basic_tokenize": true,
+ "do_lower_case": true,
+ "mask_token": "[MASK]",
+ "model_max_length": 1000000000000000019884624838656,
+ "never_split": null,
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "strip_accents": null,
+ "tokenize_chinese_chars": true,
+ "tokenizer_class": "BertTokenizer",
+ "unk_token": "[UNK]"
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 10.0,
+ "train_loss": 0.21409231649862753,
+ "train_runtime": 515.9059,
+ "train_samples": 2338,
+ "train_samples_per_second": 45.318,
+ "train_steps_per_second": 0.717
+ }
trainer_state.json ADDED
@@ -0,0 +1,130 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 10.0,
+ "eval_steps": 500,
+ "global_step": 370,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 1.0,
+ "eval_accuracy": 0.8234661221504211,
+ "eval_f1": "[0.91702786 0.33333333 0. 0.10837438]",
+ "eval_loss": 0.48905953764915466,
+ "eval_runtime": 8.4871,
+ "eval_samples_per_second": 218.92,
+ "eval_steps_per_second": 3.535,
+ "step": 37
+ },
+ {
+ "epoch": 2.0,
+ "eval_accuracy": 0.8320775032043457,
+ "eval_f1": "[0.93139159 0.48466258 0. 0.22857143]",
+ "eval_loss": 0.47622746229171753,
+ "eval_runtime": 8.4411,
+ "eval_samples_per_second": 220.115,
+ "eval_steps_per_second": 3.554,
+ "step": 74
+ },
+ {
+ "epoch": 3.0,
+ "eval_accuracy": 0.820775032043457,
+ "eval_f1": "[0.92995725 0.44887781 0. 0.19266055]",
+ "eval_loss": 0.5084477066993713,
+ "eval_runtime": 8.3479,
+ "eval_samples_per_second": 222.571,
+ "eval_steps_per_second": 3.594,
+ "step": 111
+ },
+ {
+ "epoch": 4.0,
+ "eval_accuracy": 0.8105489611625671,
+ "eval_f1": "[0.92421691 0.44444444 0.06557377 0.30769231]",
+ "eval_loss": 0.5519338846206665,
+ "eval_runtime": 8.1137,
+ "eval_samples_per_second": 228.996,
+ "eval_steps_per_second": 3.697,
+ "step": 148
+ },
+ {
+ "epoch": 5.0,
+ "eval_accuracy": 0.8293864130973816,
+ "eval_f1": "[0.93531353 0.52336449 0.09345794 0.27131783]",
+ "eval_loss": 0.5805315971374512,
+ "eval_runtime": 8.8001,
+ "eval_samples_per_second": 211.135,
+ "eval_steps_per_second": 3.409,
+ "step": 185
+ },
+ {
+ "epoch": 6.0,
+ "eval_accuracy": 0.7954789996147156,
+ "eval_f1": "[0.91344509 0.55305466 0.15463918 0.29166667]",
+ "eval_loss": 0.6778322458267212,
+ "eval_runtime": 7.8496,
+ "eval_samples_per_second": 236.701,
+ "eval_steps_per_second": 3.822,
+ "step": 222
+ },
+ {
+ "epoch": 7.0,
+ "eval_accuracy": 0.8213132619857788,
+ "eval_f1": "[0.93298292 0.51383399 0.10191083 0.2519084 ]",
+ "eval_loss": 0.6407278776168823,
+ "eval_runtime": 8.767,
+ "eval_samples_per_second": 211.931,
+ "eval_steps_per_second": 3.422,
+ "step": 259
+ },
+ {
+ "epoch": 8.0,
+ "eval_accuracy": 0.827233612537384,
+ "eval_f1": "[0.9326288 0.55052265 0.18181818 0.26271186]",
+ "eval_loss": 0.6639025807380676,
+ "eval_runtime": 8.0873,
+ "eval_samples_per_second": 229.742,
+ "eval_steps_per_second": 3.709,
+ "step": 296
+ },
+ {
+ "epoch": 9.0,
+ "eval_accuracy": 0.8191604018211365,
+ "eval_f1": "[0.93071286 0.55830389 0.11042945 0.2761194 ]",
+ "eval_loss": 0.6863259077072144,
+ "eval_runtime": 7.9866,
+ "eval_samples_per_second": 232.64,
+ "eval_steps_per_second": 3.756,
+ "step": 333
+ },
+ {
+ "epoch": 10.0,
+ "eval_accuracy": 0.81431645154953,
+ "eval_f1": "[0.92816572 0.56028369 0.1 0.2633452 ]",
+ "eval_loss": 0.6886363625526428,
+ "eval_runtime": 7.7254,
+ "eval_samples_per_second": 240.504,
+ "eval_steps_per_second": 3.883,
+ "step": 370
+ },
+ {
+ "epoch": 10.0,
+ "step": 370,
+ "total_flos": 1537911734661120.0,
+ "train_loss": 0.21409231649862753,
+ "train_runtime": 515.9059,
+ "train_samples_per_second": 45.318,
+ "train_steps_per_second": 0.717
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 370,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 10,
+ "save_steps": 500,
+ "total_flos": 1537911734661120.0,
+ "train_batch_size": 32,
+ "trial_name": null,
+ "trial_params": null
+ }
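
trainer_state.json holds the per-epoch evaluation history behind the results table in the model card. A short sketch, not part of the commit, of reading those metrics back out of the committed file using only the standard library:

```python
# Read the per-epoch eval metrics recorded by the Trainer.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Each eval entry carries epoch, accuracy, per-class F1 (as a string) and loss;
# the final training-summary entry has no eval fields and is skipped.
for entry in state["log_history"]:
    if "eval_accuracy" in entry:
        print(f"epoch {entry['epoch']:>4}: "
              f"accuracy={entry['eval_accuracy']:.4f}, loss={entry['eval_loss']:.4f}")
```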
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7d1aab45051285d4d86e8ffbcab852ca26f8e59e13b6b06c115c64cb15b0664a
+ size 4664
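
training_args.bin is the serialized `TrainingArguments` object and is not human-readable here. A rough reconstruction consistent with the hyperparameters listed in the model card might look like the sketch below; the output directory, per-device batch sizes, and per-epoch evaluation strategy are assumptions:

```python
# Approximate reconstruction; the committed binary may differ in fields
# not listed in the model card.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="test",                 # assumed from the model name in the card
    learning_rate=2e-5,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=64,
    gradient_accumulation_steps=2,     # effective train batch size of 64
    num_train_epochs=10.0,
    lr_scheduler_type="cosine",
    seed=42,
    evaluation_strategy="epoch",       # assumed, consistent with the per-epoch eval logs
)
```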
vocab.txt ADDED
The diff for this file is too large to render. See raw diff