swtb committed
Commit d05822b
1 Parent(s): 3b34387

End of training

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,121 @@
+ ---
+ license: mit
+ base_model: xlm-roberta-base
+ tags:
+ - generated_from_trainer
+ datasets:
+ - conll2003
+ metrics:
+ - precision
+ - recall
+ - f1
+ - accuracy
+ model-index:
+ - name: XLM-RoBERTa-Base-Conll2003-English-NER-Finetune-FP16-BinaryClass-WeightedLoss
+   results:
+   - task:
+       name: Token Classification
+       type: token-classification
+     dataset:
+       name: conll2003
+       type: conll2003
+       config: conll2003
+       split: test
+       args: conll2003
+     metrics:
+     - name: Precision
+       type: precision
+       value: 0.9526306589757035
+     - name: Recall
+       type: recall
+       value: 0.964943342776204
+     - name: F1
+       type: f1
+       value: 0.9587474711935965
+     - name: Accuracy
+       type: accuracy
+       value: 0.9901367502961128
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # XLM-RoBERTa-Base-Conll2003-English-NER-Finetune-FP16-BinaryClass-WeightedLoss
+
+ This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the conll2003 dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.1188
+ - Precision: 0.9526
+ - Recall: 0.9649
+ - F1: 0.9587
+ - Accuracy: 0.9901
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-06
+ - train_batch_size: 4
+ - eval_batch_size: 4
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_ratio: 0.05
+ - num_epochs: 10
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
+ |:-------------:|:------:|:-----:|:---------------:|:---------:|:------:|:------:|:--------:|
+ | 0.2739 | 0.3333 | 1441 | 0.0632 | 0.9412 | 0.9373 | 0.9392 | 0.9863 |
+ | 0.0329 | 0.6667 | 2882 | 0.0572 | 0.9435 | 0.9347 | 0.9391 | 0.9865 |
+ | 0.024 | 1.0 | 4323 | 0.0679 | 0.9433 | 0.9536 | 0.9484 | 0.9882 |
+ | 0.0181 | 1.3333 | 5764 | 0.0652 | 0.9458 | 0.9618 | 0.9537 | 0.9897 |
+ | 0.0187 | 1.6667 | 7205 | 0.0625 | 0.9531 | 0.9492 | 0.9511 | 0.9895 |
+ | 0.0176 | 2.0 | 8646 | 0.0685 | 0.9488 | 0.9573 | 0.9530 | 0.9896 |
+ | 0.0108 | 2.3333 | 10087 | 0.0931 | 0.9470 | 0.9625 | 0.9547 | 0.9897 |
+ | 0.0117 | 2.6667 | 11528 | 0.0808 | 0.9489 | 0.9632 | 0.9560 | 0.9900 |
+ | 0.0107 | 3.0 | 12969 | 0.0672 | 0.9531 | 0.9602 | 0.9566 | 0.9908 |
+ | 0.0076 | 3.3333 | 14410 | 0.0973 | 0.9470 | 0.9587 | 0.9528 | 0.9897 |
+ | 0.0085 | 3.6667 | 15851 | 0.0741 | 0.9574 | 0.9549 | 0.9561 | 0.9906 |
+ | 0.0092 | 4.0 | 17292 | 0.0807 | 0.9492 | 0.9621 | 0.9556 | 0.9901 |
+ | 0.0049 | 4.3333 | 18733 | 0.0886 | 0.9527 | 0.9623 | 0.9575 | 0.9906 |
+ | 0.0058 | 4.6667 | 20174 | 0.0871 | 0.9516 | 0.9639 | 0.9577 | 0.9904 |
+ | 0.0047 | 5.0 | 21615 | 0.0928 | 0.9541 | 0.9610 | 0.9576 | 0.9903 |
+ | 0.0041 | 5.3333 | 23056 | 0.1145 | 0.9491 | 0.9667 | 0.9578 | 0.9899 |
+ | 0.0048 | 5.6667 | 24497 | 0.0854 | 0.9554 | 0.9623 | 0.9588 | 0.9907 |
+ | 0.0032 | 6.0 | 25938 | 0.1107 | 0.9488 | 0.9651 | 0.9569 | 0.9899 |
+ | 0.003 | 6.3333 | 27379 | 0.1038 | 0.9524 | 0.9674 | 0.9599 | 0.9907 |
+ | 0.0032 | 6.6667 | 28820 | 0.1038 | 0.9533 | 0.9651 | 0.9592 | 0.9904 |
+ | 0.0034 | 7.0 | 30261 | 0.1038 | 0.9534 | 0.9667 | 0.9600 | 0.9906 |
+ | 0.0025 | 7.3333 | 31702 | 0.1103 | 0.9528 | 0.9619 | 0.9574 | 0.9899 |
+ | 0.003 | 7.6667 | 33143 | 0.1177 | 0.9506 | 0.9644 | 0.9575 | 0.9899 |
+ | 0.0022 | 8.0 | 34584 | 0.1151 | 0.9511 | 0.9633 | 0.9572 | 0.9900 |
+ | 0.0016 | 8.3333 | 36025 | 0.1141 | 0.9528 | 0.9651 | 0.9589 | 0.9904 |
+ | 0.0025 | 8.6667 | 37466 | 0.1090 | 0.9550 | 0.9626 | 0.9588 | 0.9905 |
+ | 0.0024 | 9.0 | 38907 | 0.1115 | 0.9546 | 0.9653 | 0.9599 | 0.9906 |
+ | 0.002 | 9.3333 | 40348 | 0.1148 | 0.9536 | 0.9639 | 0.9587 | 0.9903 |
+ | 0.0014 | 9.6667 | 41789 | 0.1201 | 0.9522 | 0.9655 | 0.9588 | 0.9902 |
+ | 0.0015 | 10.0 | 43230 | 0.1188 | 0.9526 | 0.9649 | 0.9587 | 0.9901 |
+
+
+ ### Framework versions
+
+ - Transformers 4.41.1
+ - Pytorch 2.3.0+cu121
+ - Datasets 2.19.1
+ - Tokenizers 0.19.1
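
The card above leaves the usage sections as "More information needed". Below is a minimal, unofficial sketch of how a token-classification checkpoint like this is typically queried with the Transformers pipeline API; the repository id is an assumption pieced together from the committer name and the model-index name, not something stated in the commit, so substitute the real hub id or a local path.

```python
from transformers import pipeline

# Assumed repo id (committer name + model-index name); replace with the actual
# hub id or a local directory containing the files from this commit.
ner = pipeline(
    "token-classification",
    model="swtb/XLM-RoBERTa-Base-Conll2003-English-NER-Finetune-FP16-BinaryClass-WeightedLoss",
    aggregation_strategy="simple",  # merge B-/I- sub-token tags into entity spans
)

print(ner("EU rejects German call to boycott British lamb."))
# With this checkpoint's label set (B-MISC / I-MISC / O), every detected span
# is reported with entity_group == "MISC".
```

`aggregation_strategy="simple"` groups adjacent sub-token predictions into spans; other strategies such as `"first"`, `"max"`, or `"average"` are also available in recent Transformers releases.
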
config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "_name_or_path": "xlm-roberta-base",
+   "architectures": [
+     "XLMRobertaForTokenClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "B-MISC",
+     "1": "I-MISC",
+     "2": "O"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "B-MISC": 0,
+     "I-MISC": 1,
+     "O": 2
+   },
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "xlm-roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "output_past": true,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.41.1",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 250002
+ }
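
The `id2label` and `label2id` maps in this config are what turn the classifier's three output logits per token into tag strings. A minimal sketch, assuming the committed files sit in a hypothetical local directory, of decoding raw predictions through `model.config.id2label`:

```python
import torch
from transformers import AutoModelForTokenClassification, AutoTokenizer

# Hypothetical local directory holding the files added in this commit.
checkpoint = "./xlm-roberta-conll2003-misc"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForTokenClassification.from_pretrained(checkpoint)

inputs = tokenizer("Only MISC entities are tagged by this head.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits          # shape: (1, seq_len, 3)

pred_ids = logits.argmax(dim=-1)[0].tolist()
tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0].tolist())
for token, idx in zip(tokens, pred_ids):
    # prints each sub-token with its predicted tag (B-MISC, I-MISC, or O)
    print(token, model.config.id2label[idx])
```
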
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc591e20e6f399dbbfccc365d76347757a20c9d5ffe5da4ff2e5b0a2ca126901
+ size 1109845500
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "unk_token": "<unk>"
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a56def25aa40facc030ea8b0b87f3688e4b3c39eb8b45d5702b3a1300fe2a20
+ size 17082734
tokenizer_config.json ADDED
@@ -0,0 +1,54 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "250001": {
+       "content": "<mask>",
+       "lstrip": true,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": "<mask>",
+   "model_max_length": 512,
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "tokenizer_class": "XLMRobertaTokenizer",
+   "unk_token": "<unk>"
+ }
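
Because the SentencePiece tokenizer configured here splits words into sub-tokens, word-level NER labels normally have to be re-aligned to sub-tokens before training or scoring. A hedged sketch of that alignment, assuming the fast tokenizer loads from the committed `tokenizer.json` and using a hypothetical local path and made-up word-level tags:

```python
from transformers import AutoTokenizer

# Hypothetical local directory; the fast tokenizer is expected to load from the
# tokenizer.json tracked via LFS above, which is what exposes word_ids().
tokenizer = AutoTokenizer.from_pretrained("./xlm-roberta-conll2003-misc")

words = ["EU", "rejects", "German", "call"]   # CoNLL-2003 style, pre-split into words
word_labels = ["O", "O", "B-MISC", "O"]       # illustrative word-level tags only

enc = tokenizer(words, is_split_into_words=True)
aligned = [
    word_labels[w] if w is not None else "IGNORE"   # specials like <s> get no label
    for w in enc.word_ids()
]
print(list(zip(tokenizer.convert_ids_to_tokens(enc["input_ids"]), aligned)))
```
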
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bfd611d34af4bb2ea5e9d83cf09624d76c42f82a84fe8607532ee7e1578f5d5b
+ size 5176