Yaxin committed on
Commit 76afc18
1 parent: 4fa3bfb

update from Yaxin

README.md ADDED
@@ -0,0 +1,84 @@
+ ---
+ license: mit
+ tags:
+ - generated_from_trainer
+ datasets:
+ - conll2003
+ metrics:
+ - precision
+ - recall
+ - f1
+ - accuracy
+ model-index:
+ - name: test-conll2003-ner
+   results:
+   - task:
+       name: Token Classification
+       type: token-classification
+     dataset:
+       name: conll2003
+       type: conll2003
+       args: conll2003
+     metrics:
+     - name: Precision
+       type: precision
+       value: 0.9459188783174762
+     - name: Recall
+       type: recall
+       value: 0.9537192864355436
+     - name: F1
+       type: f1
+       value: 0.94980306712478
+     - name: Accuracy
+       type: accuracy
+       value: 0.9911218410498034
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # test-conll2003-ner
+
+ This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the conll2003 dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.0470
+ - Precision: 0.9459
+ - Recall: 0.9537
+ - F1: 0.9498
+ - Accuracy: 0.9911
+
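+ The snippet below is an editorial sketch of how a checkpoint like this is typically queried with the `transformers` token-classification pipeline; the repo id `Yaxin/test-conll2003-ner` is assumed from this repository's name and output directory, so substitute the actual path if it differs.
+
+ ```python
+ from transformers import pipeline
+
+ # Repo id assumed from this repository; point it at the real checkpoint location.
+ ner = pipeline(
+     "token-classification",
+     model="Yaxin/test-conll2003-ner",
+     aggregation_strategy="simple",  # merge sub-word pieces into whole entity spans
+ )
+
+ print(ner("George Washington lived in Mount Vernon, Virginia."))
+ # -> list of dicts with entity_group, score, word, start, end
+ ```
+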
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 3.0
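+
+ For reference, a rough reconstruction of these settings as `TrainingArguments` (an editorial sketch, not the exact invocation used for this run; `output_dir` is a placeholder):
+
+ ```python
+ from transformers import TrainingArguments
+
+ # Sketch only: mirrors the hyperparameters listed above.
+ args = TrainingArguments(
+     output_dir="test-conll2003-ner",   # placeholder
+     learning_rate=5e-5,
+     per_device_train_batch_size=8,
+     per_device_eval_batch_size=8,
+     seed=42,
+     adam_beta1=0.9,
+     adam_beta2=0.999,
+     adam_epsilon=1e-8,
+     lr_scheduler_type="linear",
+     num_train_epochs=3.0,
+ )
+ ```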
+
+ ### Training results
+
+
+ ### Framework versions
+
+ - Transformers 4.18.0.dev0
+ - PyTorch 1.10.0
+ - Datasets 1.18.3
+ - Tokenizers 0.11.0
all_results.json ADDED
@@ -0,0 +1,17 @@
+ {
+     "epoch": 3.0,
+     "eval_accuracy": 0.9911218410498034,
+     "eval_f1": 0.94980306712478,
+     "eval_loss": 0.0470016747713089,
+     "eval_precision": 0.9459188783174762,
+     "eval_recall": 0.9537192864355436,
+     "eval_runtime": 8.6425,
+     "eval_samples": 3251,
+     "eval_samples_per_second": 376.165,
+     "eval_steps_per_second": 47.093,
+     "train_loss": 0.05759347815719987,
+     "train_runtime": 571.906,
+     "train_samples": 14042,
+     "train_samples_per_second": 73.659,
+     "train_steps_per_second": 9.211
+ }
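The fields above are internally consistent: `eval_f1` is the harmonic mean of `eval_precision` and `eval_recall`, and the throughput figures are counts divided by runtime. A quick editorial check (assumes the file has been saved locally as `all_results.json`):

```python
import json

with open("all_results.json") as f:  # path assumed
    r = json.load(f)

# F1 as the harmonic mean of precision and recall -> ~0.94980, matching eval_f1
p, rec = r["eval_precision"], r["eval_recall"]
print(2 * p * rec / (p + rec))

# Throughput = samples (or samples * epochs) / runtime; file values are rounded
print(r["eval_samples"] / r["eval_runtime"])                  # ~376 eval samples/s
print(r["train_samples"] * r["epoch"] / r["train_runtime"])   # ~73.7 train samples/s
```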
eval_results.json ADDED
@@ -0,0 +1,12 @@
+ {
+     "epoch": 3.0,
+     "eval_accuracy": 0.9911218410498034,
+     "eval_f1": 0.94980306712478,
+     "eval_loss": 0.0470016747713089,
+     "eval_precision": 0.9459188783174762,
+     "eval_recall": 0.9537192864355436,
+     "eval_runtime": 8.6425,
+     "eval_samples": 3251,
+     "eval_samples_per_second": 376.165,
+     "eval_steps_per_second": 47.093
+ }
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "sep_token": "</s>", "cls_token": "<s>", "unk_token": "<unk>", "pad_token": "<pad>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "xlm-roberta-base", "tokenizer_class": "XLMRobertaTokenizer"}
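Both tokenizer files mirror the stock XLM-RoBERTa special-token setup (the SentencePiece model itself is not shown in this excerpt). A quick way to see the same configuration on the base checkpoint the card references (editorial sketch):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("xlm-roberta-base")
print(tok.special_tokens_map)  # <s>, </s>, <unk>, <pad>, <mask>, as in special_tokens_map.json
print(tok.model_max_length)    # 512, as in tokenizer_config.json
```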
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 3.0,
+     "train_loss": 0.05759347815719987,
+     "train_runtime": 571.906,
+     "train_samples": 14042,
+     "train_samples_per_second": 73.659,
+     "train_steps_per_second": 9.211
+ }
trainer_state.json ADDED
@@ -0,0 +1,85 @@
+ {
+     "best_metric": null,
+     "best_model_checkpoint": null,
+     "epoch": 3.0,
+     "global_step": 5268,
+     "is_hyper_param_search": false,
+     "is_local_process_zero": true,
+     "is_world_process_zero": true,
+     "log_history": [
+         {
+             "epoch": 0.28,
+             "learning_rate": 4.525436598329537e-05,
+             "loss": 0.2137,
+             "step": 500
+         },
+         {
+             "epoch": 0.57,
+             "learning_rate": 4.050873196659074e-05,
+             "loss": 0.0901,
+             "step": 1000
+         },
+         {
+             "epoch": 0.85,
+             "learning_rate": 3.5763097949886106e-05,
+             "loss": 0.0723,
+             "step": 1500
+         },
+         {
+             "epoch": 1.14,
+             "learning_rate": 3.1017463933181475e-05,
+             "loss": 0.0491,
+             "step": 2000
+         },
+         {
+             "epoch": 1.42,
+             "learning_rate": 2.6271829916476843e-05,
+             "loss": 0.0436,
+             "step": 2500
+         },
+         {
+             "epoch": 1.71,
+             "learning_rate": 2.152619589977221e-05,
+             "loss": 0.0397,
+             "step": 3000
+         },
+         {
+             "epoch": 1.99,
+             "learning_rate": 1.678056188306758e-05,
+             "loss": 0.0331,
+             "step": 3500
+         },
+         {
+             "epoch": 2.28,
+             "learning_rate": 1.2034927866362947e-05,
+             "loss": 0.0241,
+             "step": 4000
+         },
+         {
+             "epoch": 2.56,
+             "learning_rate": 7.289293849658315e-06,
+             "loss": 0.0177,
+             "step": 4500
+         },
+         {
+             "epoch": 2.85,
+             "learning_rate": 2.5436598329536827e-06,
+             "loss": 0.0158,
+             "step": 5000
+         },
+         {
+             "epoch": 3.0,
+             "step": 5268,
+             "total_flos": 1035876517340328.0,
+             "train_loss": 0.05759347815719987,
+             "train_runtime": 571.906,
+             "train_samples_per_second": 73.659,
+             "train_steps_per_second": 9.211
+         }
+     ],
+     "max_steps": 5268,
+     "num_train_epochs": 3,
+     "total_flos": 1035876517340328.0,
+     "trial_name": null,
+     "trial_params": null
+ }
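The `log_history` above is a plain list of dicts, one entry per logging step (every 500 steps here) plus a final summary entry. A short editorial sketch for pulling out the loss curve (assumes the file is saved locally as `trainer_state.json`):

```python
import json

with open("trainer_state.json") as f:  # path assumed
    state = json.load(f)

# Per-step entries carry "loss"; the final summary entry carries "train_loss" instead.
for entry in state["log_history"]:
    if "loss" in entry:
        print(f'step {entry["step"]:>5}: loss {entry["loss"]:.4f}')
```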
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0137cb859f2e54236b7d37db34b9bd8498374a82ac0a92769fb43e6cf2a8a6a2
+ size 2991