IcyKallen committed
Commit d5ad01e
1 Parent(s): 3afdd0f

Upload 12 files

README.md CHANGED
@@ -1,3 +1,62 @@
 ---
-license: apache-2.0
+license: mit
+tags:
+- generated_from_trainer
+metrics:
+- precision
+- recall
+- f1
+- accuracy
+model-index:
+- name: ner-bert-mini-conll2003
+  results: []
 ---
+
+<!-- This model card has been generated automatically according to the information the Trainer had access to. You
+should probably proofread and complete it, then remove this comment. -->
+
+# ner-bert-mini-conll2003
+
+This model is a fine-tuned version of [prajjwal1/bert-mini](https://huggingface.co/prajjwal1/bert-mini) on an unknown dataset.
+It achieves the following results on the evaluation set:
+- Loss: 0.1007
+- Precision: 0.8743
+- Recall: 0.8993
+- F1: 0.8866
+- Accuracy: 0.9745
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 5e-05
+- train_batch_size: 8
+- eval_batch_size: 8
+- seed: 42
+- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+- lr_scheduler_type: linear
+- num_epochs: 5
+
+### Training results
+
+
+
+### Framework versions
+
+- Transformers 4.27.4
+- Pytorch 1.13.1+cu117
+- Datasets 2.10.1
+- Tokenizers 0.13.2
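
The card above does not yet include a usage snippet. A minimal inference sketch with the `pipeline` API might look as follows; note that the repo id `IcyKallen/ner-bert-mini-conll2003` is an assumption inferred from the committer and model name, not stated in the diff:

```python
# Minimal NER inference sketch using transformers' pipeline API.
from transformers import pipeline

ner = pipeline(
    "token-classification",
    model="IcyKallen/ner-bert-mini-conll2003",  # assumed repo id
    aggregation_strategy="simple",  # merge B-/I- pieces into whole entities
)

print(ner("Angela Merkel visited the Google office in Berlin."))
# Output is a list of dicts with entity_group, score, word, start, end.
```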
all_results.json ADDED
@@ -0,0 +1,17 @@
+{
+    "epoch": 5.0,
+    "eval_accuracy": 0.97453909726637,
+    "eval_f1": 0.8866201859229748,
+    "eval_loss": 0.10066453367471695,
+    "eval_precision": 0.874283843509576,
+    "eval_recall": 0.8993096480889039,
+    "eval_runtime": 3.335,
+    "eval_samples": 3250,
+    "eval_samples_per_second": 974.504,
+    "eval_steps_per_second": 122.038,
+    "train_loss": 0.09602124729026151,
+    "train_runtime": 167.8929,
+    "train_samples": 14041,
+    "train_samples_per_second": 418.153,
+    "train_steps_per_second": 52.295
+}
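
As a sanity check, the reported `eval_f1` is consistent with the precision and recall above via the harmonic-mean formula:

```python
# Verify that eval_f1 is the harmonic mean of eval_precision and eval_recall.
p = 0.874283843509576
r = 0.8993096480889039
f1 = 2 * p * r / (p + r)
print(f1)  # ~0.8866201859229748, matching eval_f1 above
```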
config.json ADDED
@@ -0,0 +1,48 @@
+{
+  "_name_or_path": "prajjwal1/bert-mini",
+  "architectures": [
+    "BertForTokenClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "finetuning_task": "ner",
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 256,
+  "id2label": {
+    "0": "O",
+    "1": "B-PER",
+    "2": "I-PER",
+    "3": "B-ORG",
+    "4": "I-ORG",
+    "5": "B-LOC",
+    "6": "I-LOC",
+    "7": "B-MISC",
+    "8": "I-MISC"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 1024,
+  "label2id": {
+    "B-LOC": 5,
+    "B-MISC": 7,
+    "B-ORG": 3,
+    "B-PER": 1,
+    "I-LOC": 6,
+    "I-MISC": 8,
+    "I-ORG": 4,
+    "I-PER": 2,
+    "O": 0
+  },
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 4,
+  "num_hidden_layers": 4,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.27.4",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 30522
+}
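
The `id2label` map is what turns the classifier's nine raw output ids into CoNLL-2003 BIO tags. A small sketch of how it is typically applied (the repo id is again an assumption):

```python
# Sketch: map per-token logits to BIO tags via the config's id2label.
import torch
from transformers import AutoTokenizer, AutoModelForTokenClassification

repo = "IcyKallen/ner-bert-mini-conll2003"  # assumed repo id
tok = AutoTokenizer.from_pretrained(repo)
model = AutoModelForTokenClassification.from_pretrained(repo)

inputs = tok("EU rejects German call", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, seq_len, 9)

pred_ids = logits.argmax(dim=-1)[0].tolist()
tags = [model.config.id2label[i] for i in pred_ids]
print(list(zip(tok.convert_ids_to_tokens(inputs["input_ids"][0]), tags)))
```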
eval_results.json ADDED
@@ -0,0 +1,12 @@
+{
+    "epoch": 5.0,
+    "eval_accuracy": 0.97453909726637,
+    "eval_f1": 0.8866201859229748,
+    "eval_loss": 0.10066453367471695,
+    "eval_precision": 0.874283843509576,
+    "eval_recall": 0.8993096480889039,
+    "eval_runtime": 3.335,
+    "eval_samples": 3250,
+    "eval_samples_per_second": 974.504,
+    "eval_steps_per_second": 122.038
+}
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d52013ba7b2fb5c53063af48d3ed8ff033380d78ac52cac960e1ac9a704d5711
+size 44456585
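
This is a Git LFS pointer file, not the weights themselves: the repo stores only the object id and size, and the real binary is resolved on download. A minimal sketch of checking a downloaded file against this pointer (the local path is hypothetical):

```python
# Verify a downloaded pytorch_model.bin against its Git LFS pointer.
import hashlib
import os

path = "pytorch_model.bin"  # hypothetical local path after download
expected_oid = "d52013ba7b2fb5c53063af48d3ed8ff033380d78ac52cac960e1ac9a704d5711"
expected_size = 44456585

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert h.hexdigest() == expected_oid, "sha256 mismatch"
print("pointer matches: size and sha256 OK")
```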
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+{
+    "cls_token": "[CLS]",
+    "mask_token": "[MASK]",
+    "pad_token": "[PAD]",
+    "sep_token": "[SEP]",
+    "unk_token": "[UNK]"
+}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,15 @@
+{
+    "cls_token": "[CLS]",
+    "do_basic_tokenize": true,
+    "do_lower_case": true,
+    "mask_token": "[MASK]",
+    "model_max_length": 1000000000000000019884624838656,
+    "never_split": null,
+    "pad_token": "[PAD]",
+    "sep_token": "[SEP]",
+    "special_tokens_map_file": null,
+    "strip_accents": null,
+    "tokenize_chinese_chars": true,
+    "tokenizer_class": "BertTokenizer",
+    "unk_token": "[UNK]"
+}
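
For NER, inputs are usually pre-split into words, and the WordPiece output then has to be mapped back to word-level labels. A minimal sketch of that alignment with this tokenizer (repo id assumed as before):

```python
# Sketch: align WordPiece tokens back to words via word_ids(),
# the usual preprocessing step for token classification.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("IcyKallen/ner-bert-mini-conll2003")  # assumed id

words = ["Jonathan", "works", "at", "Heidelberg"]
enc = tok(words, is_split_into_words=True, return_tensors="pt")

# word_ids() gives, per subword, the index of its originating word
# (None for [CLS]/[SEP]); NER labels are propagated along this map.
print(enc.word_ids())  # e.g. [None, 0, 1, 2, 3, 3, 3, None]
```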
train_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 5.0,
+    "train_loss": 0.09602124729026151,
+    "train_runtime": 167.8929,
+    "train_samples": 14041,
+    "train_samples_per_second": 418.153,
+    "train_steps_per_second": 52.295
+}
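
These throughput numbers are internally consistent with the batch size and epoch count from the model card; a quick back-of-the-envelope check:

```python
import math

train_samples = 14041
batch_size = 8          # train_batch_size from the README
epochs = 5
runtime = 167.8929      # seconds

steps = math.ceil(train_samples / batch_size) * epochs
print(steps)                              # 8780, matching global_step
print(train_samples * epochs / runtime)   # ~418.15 samples/s
print(steps / runtime)                    # ~52.30 steps/s
```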
trainer_state.json ADDED
@@ -0,0 +1,127 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 5.0,
+  "global_step": 8780,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.28,
+      "learning_rate": 4.715261958997722e-05,
+      "loss": 0.3945,
+      "step": 500
+    },
+    {
+      "epoch": 0.57,
+      "learning_rate": 4.4305239179954444e-05,
+      "loss": 0.1883,
+      "step": 1000
+    },
+    {
+      "epoch": 0.85,
+      "learning_rate": 4.1457858769931665e-05,
+      "loss": 0.1584,
+      "step": 1500
+    },
+    {
+      "epoch": 1.14,
+      "learning_rate": 3.8610478359908886e-05,
+      "loss": 0.1311,
+      "step": 2000
+    },
+    {
+      "epoch": 1.42,
+      "learning_rate": 3.5763097949886106e-05,
+      "loss": 0.1052,
+      "step": 2500
+    },
+    {
+      "epoch": 1.71,
+      "learning_rate": 3.291571753986333e-05,
+      "loss": 0.0989,
+      "step": 3000
+    },
+    {
+      "epoch": 1.99,
+      "learning_rate": 3.0068337129840545e-05,
+      "loss": 0.0868,
+      "step": 3500
+    },
+    {
+      "epoch": 2.28,
+      "learning_rate": 2.722095671981777e-05,
+      "loss": 0.0687,
+      "step": 4000
+    },
+    {
+      "epoch": 2.56,
+      "learning_rate": 2.437357630979499e-05,
+      "loss": 0.0694,
+      "step": 4500
+    },
+    {
+      "epoch": 2.85,
+      "learning_rate": 2.152619589977221e-05,
+      "loss": 0.0594,
+      "step": 5000
+    },
+    {
+      "epoch": 3.13,
+      "learning_rate": 1.867881548974943e-05,
+      "loss": 0.0556,
+      "step": 5500
+    },
+    {
+      "epoch": 3.42,
+      "learning_rate": 1.5831435079726652e-05,
+      "loss": 0.0498,
+      "step": 6000
+    },
+    {
+      "epoch": 3.7,
+      "learning_rate": 1.2984054669703875e-05,
+      "loss": 0.0472,
+      "step": 6500
+    },
+    {
+      "epoch": 3.99,
+      "learning_rate": 1.0136674259681094e-05,
+      "loss": 0.0439,
+      "step": 7000
+    },
+    {
+      "epoch": 4.27,
+      "learning_rate": 7.289293849658315e-06,
+      "loss": 0.0373,
+      "step": 7500
+    },
+    {
+      "epoch": 4.56,
+      "learning_rate": 4.4419134396355355e-06,
+      "loss": 0.0369,
+      "step": 8000
+    },
+    {
+      "epoch": 4.84,
+      "learning_rate": 1.5945330296127566e-06,
+      "loss": 0.0369,
+      "step": 8500
+    },
+    {
+      "epoch": 5.0,
+      "step": 8780,
+      "total_flos": 55398132371610.0,
+      "train_loss": 0.09602124729026151,
+      "train_runtime": 167.8929,
+      "train_samples_per_second": 418.153,
+      "train_steps_per_second": 52.295
+    }
+  ],
+  "max_steps": 8780,
+  "num_train_epochs": 5,
+  "total_flos": 55398132371610.0,
+  "trial_name": null,
+  "trial_params": null
+}
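
The logged learning rates follow the linear scheduler declared in the card (5e-05 decayed to zero over 8780 steps, with no warmup); a short check reproduces them:

```python
# Reproduce the logged learning rates for a linear schedule with no warmup:
# lr(step) = base_lr * (max_steps - step) / max_steps.
base_lr, max_steps = 5e-05, 8780

for step in (500, 1000, 8500):
    print(step, base_lr * (max_steps - step) / max_steps)
# 500  -> 4.715261958997722e-05, matching the first log entry
# 8500 -> ~1.5945330296127566e-06, matching the last decayed entry
```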
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d071c90bfa11743c96d026f25e28150fffeade69820d56bff1215f9ddbb9abe9
+size 3579
vocab.txt ADDED
The diff for this file is too large to render. See raw diff