asahi417 committed
Commit 6f884b8
1 Parent(s): 90e7bb7
config.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "_name_or_path": "xlm-roberta-base",
+   "architectures": [
+     "XLMRobertaForTokenClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "B-organization",
+     "1": "O",
+     "2": "B-other",
+     "3": "B-person",
+     "4": "I-person",
+     "5": "B-location",
+     "6": "I-organization",
+     "7": "I-other",
+     "8": "I-location"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "B-location": 5,
+     "B-organization": 0,
+     "B-other": 2,
+     "B-person": 3,
+     "I-location": 8,
+     "I-organization": 6,
+     "I-other": 7,
+     "I-person": 4,
+     "O": 1
+   },
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "xlm-roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "output_past": true,
+   "pad_token_id": 1,
+   "type_vocab_size": 1,
+   "vocab_size": 250002
+ }
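The config above is a standard XLMRobertaForTokenClassification setup with a 9-label IOB2 scheme (person, location, organization, other). As a minimal inference sketch, assuming the files from this commit are checked out into a local `./model` directory (the Hub repository id is not stated in this diff):

```python
# Minimal inference sketch for a token-classification checkpoint like this one.
# Assumption: config.json, pytorch_model.bin and the tokenizer files live in
# "./model"; substitute the actual Hub repository id when loading remotely.
import torch
from transformers import AutoTokenizer, AutoModelForTokenClassification

model_dir = "./model"  # hypothetical local path
tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = AutoModelForTokenClassification.from_pretrained(model_dir)

text = "george washington went to washington ."  # lower-cased, matching "lower_case": true
inputs = tokenizer(text, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Map each sub-token prediction back to a tag via id2label from config.json.
pred_ids = logits.argmax(dim=-1)[0].tolist()
tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0].tolist())
for token, pred in zip(tokens, pred_ids):
    print(token, model.config.id2label[pred])
```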
parameter.json ADDED
@@ -0,0 +1 @@
+ {"dataset": ["conll2003"], "transformers_model": "xlm-roberta-base", "random_seed": 1234, "lr": 1e-05, "total_step": 13000, "warmup_step": 700, "weight_decay": 1e-07, "batch_size": 16, "max_seq_length": 128, "fp16": false, "max_grad_norm": 1.0, "lower_case": true}
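parameter.json records the fine-tuning hyperparameters (CoNLL-2003, lr 1e-05, 13,000 total steps with 700 warmup steps, weight decay 1e-07, batch size 16, max sequence length 128, lower-cased input, no fp16). The training script itself is not part of this commit; the sketch below only illustrates how these values would typically be wired into an AdamW optimizer and a linear warmup schedule with transformers:

```python
# Illustrative sketch only: maps the values in parameter.json onto an AdamW
# optimizer and a linear warmup/decay schedule. The actual training loop that
# produced this checkpoint is not included in this commit.
import json
import torch
from transformers import AutoModelForTokenClassification, get_linear_schedule_with_warmup

with open("parameter.json") as f:
    params = json.load(f)

model = AutoModelForTokenClassification.from_pretrained(
    params["transformers_model"], num_labels=9
)
optimizer = torch.optim.AdamW(
    model.parameters(), lr=params["lr"], weight_decay=params["weight_decay"]
)
scheduler = get_linear_schedule_with_warmup(
    optimizer,
    num_warmup_steps=params["warmup_step"],
    num_training_steps=params["total_step"],
)
# Inside the training loop one would typically do:
#   loss.backward()
#   torch.nn.utils.clip_grad_norm_(model.parameters(), params["max_grad_norm"])
#   optimizer.step(); scheduler.step(); optimizer.zero_grad()
```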
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f8dbb6fb60ccac9f1b7edf5ed61aa6a40b35aa6c6ee5c632abf053c4181cc1f5
+ size 1109925260
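pytorch_model.bin (and sentencepiece.bpe.model below) are tracked with Git LFS, so the diff only shows pointer files: spec version, sha256 oid, and byte size. After fetching the actual binary, those two fields can be checked against the payload, for example:

```python
# Verify a downloaded LFS object against the sha256 oid and size recorded in
# its pointer file (the three "+" lines shown above).
import hashlib
import os

def verify_lfs_object(path: str, expected_sha256: str, expected_size: int) -> bool:
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_sha256

print(verify_lfs_object(
    "pytorch_model.bin",
    "f8dbb6fb60ccac9f1b7edf5ed61aa6a40b35aa6c6ee5c632abf053c4181cc1f5",
    1109925260,
))
```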
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+ size 5069051
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": "<mask>"}
test_bc5cdr_span_lower.json ADDED
@@ -0,0 +1 @@
+ {"valid": {"f1": 0.0, "recall": 0.0, "precision": 0.0, "summary": ""}, "test": {"f1": 0.0, "recall": 0.0, "precision": 0.0, "summary": ""}}
test_bionlp2004_span_lower.json ADDED
@@ -0,0 +1 @@
+ {"valid": {"f1": 0.0, "recall": 0.0, "precision": 0.0, "summary": ""}}
test_conll2003_lower.json ADDED
@@ -0,0 +1 @@
+ {"valid": {"f1": 92.49789739276704, "recall": 92.90420679168778, "precision": 92.09512644448165, "summary": " precision recall f1-score support\n\n location 0.95 0.95 0.95 1837\norganization 0.87 0.89 0.88 1341\n other 0.87 0.87 0.87 922\n person 0.96 0.97 0.96 1819\n\n micro avg 0.92 0.93 0.92 5919\n macro avg 0.91 0.92 0.91 5919\nweighted avg 0.92 0.93 0.93 5919\n"}, "test": {"f1": 88.80070546737213, "recall": 89.4633972992182, "precision": 88.14775910364145, "summary": " precision recall f1-score support\n\n location 0.92 0.91 0.91 1659\norganization 0.83 0.87 0.85 1660\n other 0.75 0.78 0.76 702\n person 0.96 0.95 0.96 1607\n\n micro avg 0.88 0.89 0.89 5628\n macro avg 0.86 0.88 0.87 5628\nweighted avg 0.88 0.89 0.89 5628\n"}}
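The f1/recall/precision values in these result files are stored as percentages, and the "summary" strings look like the output of seqeval's classification_report. The evaluation code is not part of this commit; below is a hedged sketch of how such entity-level numbers are typically computed from IOB2 tag sequences (seqeval is an assumption):

```python
# Sketch of entity-level scoring in the style of these result files.
# Assumption: the seqeval package; the repository's own evaluation script is
# not included in this commit.
from seqeval.metrics import classification_report, f1_score, precision_score, recall_score

y_true = [["B-person", "I-person", "O", "B-location"]]
y_pred = [["B-person", "I-person", "O", "B-organization"]]

result = {
    "f1": f1_score(y_true, y_pred) * 100,
    "recall": recall_score(y_true, y_pred) * 100,
    "precision": precision_score(y_true, y_pred) * 100,
    "summary": classification_report(y_true, y_pred),
}
print(result)
```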
test_conll2003_span_lower.json ADDED
@@ -0,0 +1 @@
+ {"valid": {"f1": 95.70490146538657, "recall": 95.99594526102382, "precision": 95.41561712846348, "summary": " precision recall f1-score support\n\n entity 0.95 0.96 0.96 5919\n\n micro avg 0.95 0.96 0.96 5919\n macro avg 0.95 0.96 0.96 5919\nweighted avg 0.95 0.96 0.96 5919\n"}, "test": {"f1": 93.72680685633505, "recall": 94.24307036247335, "precision": 93.21616871704745, "summary": " precision recall f1-score support\n\n entity 0.93 0.94 0.94 5628\n\n micro avg 0.93 0.94 0.94 5628\n macro avg 0.93 0.94 0.94 5628\nweighted avg 0.93 0.94 0.94 5628\n"}}
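The *_span_lower files report untyped span detection: every entity type is collapsed into a single "entity" class before scoring, which is why their reports contain only one row. A small illustrative sketch of that collapsing step (not the repository's own code):

```python
# Collapse typed IOB2 tags (B-person, I-location, ...) into a single "entity"
# class, matching how the *_span_* results above treat predictions.
def to_span_labels(tags):
    return [t if t == "O" else f"{t[0]}-entity" for t in tags]

print(to_span_labels(["B-person", "I-person", "O", "B-location"]))
# ['B-entity', 'I-entity', 'O', 'B-entity']
```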
test_fin_span_lower.json ADDED
@@ -0,0 +1 @@
+ {"valid": {"f1": 20.42755344418052, "recall": 16.538461538461537, "precision": 26.70807453416149, "summary": " precision recall f1-score support\n\n entity 0.27 0.17 0.20 260\n\n micro avg 0.27 0.17 0.20 260\n macro avg 0.27 0.17 0.20 260\nweighted avg 0.27 0.17 0.20 260\n"}}
test_mit_movie_trivia_span_lower.json ADDED
@@ -0,0 +1 @@
+ {"valid": {"f1": 0.0, "recall": 0.0, "precision": 0.0, "summary": ""}}
test_mit_restaurant_span_lower.json ADDED
@@ -0,0 +1 @@
+ {"valid": {"f1": 21.8809980806142, "recall": 14.039408866995073, "precision": 49.56521739130435, "summary": " precision recall f1-score support\n\n entity 0.50 0.14 0.22 812\n\n micro avg 0.50 0.14 0.22 812\n macro avg 0.50 0.14 0.22 812\nweighted avg 0.50 0.14 0.22 812\n"}}
test_ontonotes5_span_lower.json ADDED
@@ -0,0 +1 @@
+ {"valid": {"f1": 54.83534212774034, "recall": 73.2319391634981, "precision": 43.82584951456311, "summary": " precision recall f1-score support\n\n entity 0.44 0.73 0.55 3945\n\n micro avg 0.44 0.73 0.55 3945\n macro avg 0.44 0.73 0.55 3945\nweighted avg 0.44 0.73 0.55 3945\n"}, "test": {"f1": 55.090280445639635, "recall": 72.44253599393787, "precision": 44.44444444444444, "summary": " precision recall f1-score support\n\n entity 0.44 0.72 0.55 3959\n\n micro avg 0.44 0.72 0.55 3959\n macro avg 0.44 0.72 0.55 3959\nweighted avg 0.44 0.72 0.55 3959\n"}}
test_panx_dataset-en_span_lower.json ADDED
@@ -0,0 +1 @@
+ {"valid": {"f1": 57.22429446125695, "recall": 57.63396654380494, "precision": 56.820405310971346, "summary": " precision recall f1-score support\n\n entity 0.57 0.58 0.57 14108\n\n micro avg 0.57 0.58 0.57 14108\n macro avg 0.57 0.58 0.57 14108\nweighted avg 0.57 0.58 0.57 14108\n"}, "test": {"f1": 56.84082276213, "recall": 57.12742202693942, "precision": 56.55708478927476, "summary": " precision recall f1-score support\n\n entity 0.57 0.57 0.57 13883\n\n micro avg 0.57 0.57 0.57 13883\n macro avg 0.57 0.57 0.57 13883\nweighted avg 0.57 0.57 0.57 13883\n"}}
test_wnut2017_span_lower.json ADDED
@@ -0,0 +1 @@
+ {"valid": {"f1": 70.05347593582889, "recall": 72.24264705882352, "precision": 67.99307958477509, "summary": " precision recall f1-score support\n\n entity 0.68 0.72 0.70 544\n\n micro avg 0.68 0.72 0.70 544\n macro avg 0.68 0.72 0.70 544\nweighted avg 0.68 0.72 0.70 544\n"}, "test": {"f1": 60.530191458026515, "recall": 71.10726643598616, "precision": 52.69230769230769, "summary": " precision recall f1-score support\n\n entity 0.53 0.71 0.61 578\n\n micro avg 0.53 0.71 0.61 578\n macro avg 0.53 0.71 0.61 578\nweighted avg 0.53 0.71 0.61 578\n"}}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "sep_token": "</s>", "cls_token": "<s>", "unk_token": "<unk>", "pad_token": "<pad>", "mask_token": "<mask>", "model_max_length": 512, "name_or_path": "xlm-roberta-base"}
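tokenizer_config.json and special_tokens_map.json, together with sentencepiece.bpe.model, are what AutoTokenizer needs to rebuild the XLM-R tokenizer. A quick sanity check, again assuming a hypothetical local checkout at `./model`:

```python
# Load the tokenizer from the files in this commit and confirm the declared
# special tokens and max length. "./model" is a hypothetical local path.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./model")
print(tokenizer.cls_token, tokenizer.sep_token, tokenizer.pad_token, tokenizer.mask_token)
print(tokenizer.model_max_length)  # 512, from tokenizer_config.json
```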