asahi417 committed on
Commit
f93f9b3
1 Parent(s): 5e2dd1c
config.json ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "xlm-roberta-large",
3
+ "architectures": [
4
+ "XLMRobertaForTokenClassification"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "bos_token_id": 0,
8
+ "eos_token_id": 2,
9
+ "gradient_checkpointing": false,
10
+ "hidden_act": "gelu",
11
+ "hidden_dropout_prob": 0.1,
12
+ "hidden_size": 1024,
13
+ "id2label": {
14
+ "0": "O",
15
+ "1": "B-location",
16
+ "2": "I-location",
17
+ "3": "B-group",
18
+ "4": "B-corporation",
19
+ "5": "B-person",
20
+ "6": "B-work of art",
21
+ "7": "B-product",
22
+ "8": "I-person",
23
+ "9": "I-work of art",
24
+ "10": "I-corporation",
25
+ "11": "I-group",
26
+ "12": "I-product"
27
+ },
28
+ "initializer_range": 0.02,
29
+ "intermediate_size": 4096,
30
+ "label2id": {
31
+ "B-corporation": 4,
32
+ "B-group": 3,
33
+ "B-location": 1,
34
+ "B-person": 5,
35
+ "B-product": 7,
36
+ "B-work of art": 6,
37
+ "I-corporation": 10,
38
+ "I-group": 11,
39
+ "I-location": 2,
40
+ "I-person": 8,
41
+ "I-product": 12,
42
+ "I-work of art": 9,
43
+ "O": 0
44
+ },
45
+ "layer_norm_eps": 1e-05,
46
+ "max_position_embeddings": 514,
47
+ "model_type": "xlm-roberta",
48
+ "num_attention_heads": 16,
49
+ "num_hidden_layers": 24,
50
+ "output_past": true,
51
+ "pad_token_id": 1,
52
+ "type_vocab_size": 1,
53
+ "vocab_size": 250002
54
+ }
parameter.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"dataset": ["wnut2017"], "transformers_model": "xlm-roberta-large", "random_seed": 1234, "lr": 1e-05, "total_step": 5000, "warmup_step": 700, "weight_decay": 1e-07, "batch_size": 32, "max_seq_length": 128, "fp16": false, "max_grad_norm": 1.0, "lower_case": false}
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e3cdcd0c727fbcf0a9bf3d93b33ed5b4c2ff1420fe55619e7ba0616cf3a677dc
3
+ size 2235581546
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
3
+ size 5069051
special_tokens_map.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": "<mask>"}
test_bc5cdr_span.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"valid": {"f1": 0.0, "recall": 0.0, "precision": 0.0, "summary": ""}, "test": {"f1": 0.0, "recall": 0.0, "precision": 0.0, "summary": ""}}
test_bionlp2004_span.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"valid": {"f1": 0.0, "recall": 0.0, "precision": 0.0, "summary": ""}}
test_conll2003_span.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"valid": {"f1": 89.02770233568712, "recall": 89.51392681594757, "precision": 88.54673149648839, "accuracy": 98.24006868024662, "summary": " precision recall f1-score support\n\n entity 0.89 0.90 0.89 3662\n\n micro avg 0.89 0.90 0.89 3662\n macro avg 0.89 0.90 0.89 3662\nweighted avg 0.89 0.90 0.89 3662\n"}, "test": {"f1": 85.69667077681873, "recall": 85.01529051987767, "precision": 86.38906152889993, "accuracy": 97.76114574121605, "summary": " precision recall f1-score support\n\n entity 0.86 0.85 0.86 3270\n\n micro avg 0.86 0.85 0.86 3270\n macro avg 0.86 0.85 0.86 3270\nweighted avg 0.86 0.85 0.86 3270\n"}}
test_fin_span.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"valid": {"f1": 20.074349442379187, "recall": 13.366336633663368, "precision": 40.298507462686565, "accuracy": 97.76627360751607, "summary": " precision recall f1-score support\n\n entity 0.40 0.13 0.20 202\n\n micro avg 0.40 0.13 0.20 202\n macro avg 0.40 0.13 0.20 202\nweighted avg 0.40 0.13 0.20 202\n"}}
test_ontonotes5_span.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"valid": {"f1": 41.13397548161121, "recall": 57.30405611466911, "precision": 32.08127027488475, "accuracy": 95.39802007051804, "summary": " precision recall f1-score support\n\n entity 0.32 0.57 0.41 3279\n\n micro avg 0.32 0.57 0.41 3279\n macro avg 0.32 0.57 0.41 3279\nweighted avg 0.32 0.57 0.41 3279\n"}, "test": {"f1": 41.891274554591135, "recall": 56.43076923076923, "precision": 33.30911732655285, "accuracy": 95.6830572447054, "summary": " precision recall f1-score support\n\n entity 0.33 0.56 0.42 3250\n\n micro avg 0.33 0.56 0.42 3250\n macro avg 0.33 0.56 0.42 3250\nweighted avg 0.33 0.56 0.42 3250\n"}}
test_panx_dataset-en_span.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"valid": {"f1": 55.62348926489407, "recall": 62.174189446916714, "precision": 50.32158476974531, "accuracy": 82.891302726753, "summary": " precision recall f1-score support\n\n entity 0.50 0.62 0.56 9438\n\n micro avg 0.50 0.62 0.56 9438\n macro avg 0.50 0.62 0.56 9438\nweighted avg 0.50 0.62 0.56 9438\n"}, "test": {"f1": 54.52257658616851, "recall": 61.56284153005465, "precision": 48.92729957439416, "accuracy": 82.83985276686006, "summary": " precision recall f1-score support\n\n entity 0.49 0.62 0.55 9150\n\n micro avg 0.49 0.62 0.55 9150\n macro avg 0.49 0.62 0.55 9150\nweighted avg 0.49 0.62 0.55 9150\n"}}
test_wnut2017.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"valid": {"f1": 66.36363636363636, "recall": 61.1244019138756, "precision": 72.58522727272727, "accuracy": 96.4483482910063, "summary": " precision recall f1-score support\n\n corporation 0.38 0.38 0.38 34\n group 0.41 0.41 0.41 39\n location 0.69 0.62 0.65 74\n person 0.86 0.77 0.81 470\n product 0.51 0.41 0.46 114\n work of art 0.57 0.28 0.37 105\n\n micro avg 0.73 0.61 0.66 836\n macro avg 0.57 0.48 0.51 836\nweighted avg 0.72 0.61 0.66 836\n"}, "test": {"f1": 58.575197889182064, "recall": 51.532033426183844, "precision": 67.84841075794621, "accuracy": 95.61209918213683, "summary": " precision recall f1-score support\n\n corporation 0.37 0.44 0.40 66\n group 0.60 0.33 0.43 165\n location 0.73 0.64 0.68 150\n person 0.78 0.69 0.73 428\n product 0.46 0.23 0.31 127\n work of art 0.69 0.35 0.46 141\n\n micro avg 0.68 0.52 0.59 1077\n macro avg 0.60 0.45 0.50 1077\nweighted avg 0.67 0.52 0.57 1077\n"}}
test_wnut2017_span.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"valid": {"f1": 75.09778357235984, "recall": 68.89952153110048, "precision": 82.52148997134671, "accuracy": 97.14212971803195, "summary": " precision recall f1-score support\n\n entity 0.83 0.69 0.75 836\n\n micro avg 0.83 0.69 0.75 836\n macro avg 0.83 0.69 0.75 836\nweighted avg 0.83 0.69 0.75 836\n"}, "test": {"f1": 68.32363828662083, "recall": 59.98142989786444, "precision": 79.36117936117935, "accuracy": 96.25686961789779, "summary": " precision recall f1-score support\n\n entity 0.79 0.60 0.68 1077\n\n micro avg 0.79 0.60 0.68 1077\n macro avg 0.79 0.60 0.68 1077\nweighted avg 0.79 0.60 0.68 1077\n"}}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"bos_token": "<s>", "eos_token": "</s>", "sep_token": "</s>", "cls_token": "<s>", "unk_token": "<unk>", "pad_token": "<pad>", "mask_token": "<mask>", "model_max_length": 512, "name_or_path": "xlm-roberta-large"}