asahi417 committed on
Commit
1eb01fa
1 Parent(s): 3872418
config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "_name_or_path": "xlm-roberta-base",
+   "architectures": [
+     "XLMRobertaForTokenClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "O",
+     "1": "B-organization",
+     "2": "I-organization",
+     "3": "B-location",
+     "4": "I-location",
+     "5": "B-person",
+     "6": "I-person",
+     "7": "B-other"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "B-location": 3,
+     "B-organization": 1,
+     "B-other": 7,
+     "B-person": 5,
+     "I-location": 4,
+     "I-organization": 2,
+     "I-person": 6,
+     "O": 0
+   },
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "xlm-roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "output_past": true,
+   "pad_token_id": 1,
+   "type_vocab_size": 1,
+   "vocab_size": 250002
+ }
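This config defines an XLM-RoBERTa base encoder with an 8-label token-classification head (BIO tags over organization/location/person, plus B-other with no matching I-other tag). A minimal loading sketch, assuming the commit has been cloned to a local directory `./model` (the path is an assumption, not part of the commit):

```python
from transformers import AutoConfig, AutoModelForTokenClassification

# Read the config.json added in this commit.
config = AutoConfig.from_pretrained("./model")
print(config.num_labels)   # 8, inferred from the id2label map
print(config.id2label[1])  # 'B-organization'

# transformers builds a linear head with num_labels outputs
# on top of the 768-dim hidden states.
model = AutoModelForTokenClassification.from_pretrained("./model")
print(model.classifier.out_features)  # 8
```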
parameter.json ADDED
@@ -0,0 +1 @@
+ {"dataset": ["fin"], "transformers_model": "xlm-roberta-base", "random_seed": 1234, "lr": 1e-05, "total_step": 13000, "warmup_step": 700, "weight_decay": 1e-07, "batch_size": 16, "max_seq_length": 128, "fp16": false, "max_grad_norm": 1.0, "lower_case": true, "checkpoint_prefix": null}
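parameter.json records the fine-tuning hyperparameters (fin dataset, 13000 steps with 700 warmup steps, lr 1e-05, batch size 16, max sequence length 128, lower-cased input). The training script itself is not part of this commit; the following is only a hedged sketch of how these values could be wired up, with the optimizer and schedule choice assumed rather than recorded:

```python
import torch
from transformers import AutoModelForTokenClassification, get_linear_schedule_with_warmup

# Hyperparameter values copied from parameter.json; AdamW plus a linear
# warmup schedule is an assumption about the author's setup.
model = AutoModelForTokenClassification.from_pretrained("xlm-roberta-base", num_labels=8)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-05, weight_decay=1e-07)
scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=700, num_training_steps=13000)

# Inside the training loop, after loss.backward():
#   torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)  # max_grad_norm
#   optimizer.step(); scheduler.step(); optimizer.zero_grad()
```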
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bc21ba30f1324b9d9d99a332f939bdf168cacc605caaafc047298ccdce562d7d
+ size 1109921537
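This is a git-lfs pointer, not the weights themselves: the oid is the SHA-256 of the ~1.1 GB pytorch_model.bin held in LFS storage. That makes an integrity check after download straightforward (streamed, so the file is never loaded whole):

```python
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file through SHA-256 in 1 MiB chunks.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "bc21ba30f1324b9d9d99a332f939bdf168cacc605caaafc047298ccdce562d7d"
assert sha256_of("pytorch_model.bin") == expected
```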
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+ size 5069051
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": "<mask>"}
test_bc5cdr_span_lower.json ADDED
@@ -0,0 +1 @@
+ {"valid": {"f1": 0.0, "recall": 0.0, "precision": 0.0, "summary": ""}, "test": {"f1": 0.0, "recall": 0.0, "precision": 0.0, "summary": ""}}
test_bionlp2004_span_lower.json ADDED
@@ -0,0 +1 @@
+ {"valid": {"f1": 0.0, "recall": 0.0, "precision": 0.0, "summary": ""}}
test_conll2003_span_lower.json ADDED
@@ -0,0 +1 @@
+ {"valid": {"f1": 69.04997748761818, "recall": 64.77445514445007, "precision": 73.92981102969533, "summary": " precision recall f1-score support\n\n entity 0.74 0.65 0.69 5919\n\n micro avg 0.74 0.65 0.69 5919\n macro avg 0.74 0.65 0.69 5919\nweighted avg 0.74 0.65 0.69 5919\n"}, "test": {"f1": 64.40645773979108, "recall": 60.25230987917555, "precision": 69.17584659322725, "summary": " precision recall f1-score support\n\n entity 0.69 0.60 0.64 5628\n\n micro avg 0.69 0.60 0.64 5628\n macro avg 0.69 0.60 0.64 5628\nweighted avg 0.69 0.60 0.64 5628\n"}}
test_fin_lower.json ADDED
@@ -0,0 +1 @@
+ {"valid": {"f1": 77.2313296903461, "recall": 81.53846153846153, "precision": 73.35640138408304, "summary": " precision recall f1-score support\n\n location 0.54 0.63 0.58 35\norganization 0.38 0.53 0.44 51\n other 1.00 0.33 0.50 6\n person 0.93 0.96 0.94 168\n\n micro avg 0.73 0.82 0.77 260\n macro avg 0.71 0.61 0.61 260\nweighted avg 0.77 0.82 0.78 260\n"}}
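The fin validation scores are internally consistent: the stored f1 is the harmonic mean of the stored precision and recall.

```python
# Consistency check on the "valid" block of test_fin_lower.json.
p, r = 73.35640138408304, 81.53846153846153
print(2 * p * r / (p + r))  # 77.2313... matches the stored "f1"
```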
test_fin_span_lower.json ADDED
@@ -0,0 +1 @@
+ {"valid": {"f1": 81.49532710280376, "recall": 83.84615384615385, "precision": 79.27272727272727, "summary": " precision recall f1-score support\n\n entity 0.79 0.84 0.81 260\n\n micro avg 0.79 0.84 0.81 260\n macro avg 0.79 0.84 0.81 260\nweighted avg 0.79 0.84 0.81 260\n"}}
test_mit_movie_trivia_span_lower.json ADDED
@@ -0,0 +1 @@
+ {"valid": {"f1": 0.0, "recall": 0.0, "precision": 0.0, "summary": ""}}
test_mit_restaurant_span_lower.json ADDED
@@ -0,0 +1 @@
+ {"valid": {"f1": 22.018348623853214, "recall": 17.733990147783253, "precision": 29.03225806451613, "summary": " precision recall f1-score support\n\n entity 0.29 0.18 0.22 812\n\n micro avg 0.29 0.18 0.22 812\n macro avg 0.29 0.18 0.22 812\nweighted avg 0.29 0.18 0.22 812\n"}}
test_ontonotes5_span_lower.json ADDED
@@ -0,0 +1 @@
+ {"valid": {"f1": 41.09062980030722, "recall": 54.24588086185045, "precision": 33.07062277855046, "summary": " precision recall f1-score support\n\n entity 0.33 0.54 0.41 3945\n\n micro avg 0.33 0.54 0.41 3945\n macro avg 0.33 0.54 0.41 3945\nweighted avg 0.33 0.54 0.41 3945\n"}, "test": {"f1": 41.323115724124555, "recall": 53.80146501641829, "precision": 33.54330708661417, "summary": " precision recall f1-score support\n\n entity 0.34 0.54 0.41 3959\n\n micro avg 0.34 0.54 0.41 3959\n macro avg 0.34 0.54 0.41 3959\nweighted avg 0.34 0.54 0.41 3959\n"}}
test_panx_dataset-en_span_lower.json ADDED
@@ -0,0 +1 @@
+ {"valid": {"f1": 57.97273680531283, "recall": 58.782251204990075, "precision": 57.185215832298994, "summary": " precision recall f1-score support\n\n entity 0.57 0.59 0.58 14108\n\n micro avg 0.57 0.59 0.58 14108\n macro avg 0.57 0.59 0.58 14108\nweighted avg 0.57 0.59 0.58 14108\n"}, "test": {"f1": 57.84132841328413, "recall": 58.71209392782539, "precision": 56.99601426473673, "summary": " precision recall f1-score support\n\n entity 0.57 0.59 0.58 13883\n\n micro avg 0.57 0.59 0.58 13883\n macro avg 0.57 0.59 0.58 13883\nweighted avg 0.57 0.59 0.58 13883\n"}}
test_wnut2017_span_lower.json ADDED
@@ -0,0 +1 @@
+ {"valid": {"f1": 58.34862385321101, "recall": 58.45588235294118, "precision": 58.24175824175825, "summary": " precision recall f1-score support\n\n entity 0.58 0.58 0.58 544\n\n micro avg 0.58 0.58 0.58 544\n macro avg 0.58 0.58 0.58 544\nweighted avg 0.58 0.58 0.58 544\n"}, "test": {"f1": 45.77987846049967, "recall": 58.65051903114187, "precision": 37.54152823920266, "summary": " precision recall f1-score support\n\n entity 0.38 0.59 0.46 578\n\n micro avg 0.38 0.59 0.46 578\n macro avg 0.38 0.59 0.46 578\nweighted avg 0.38 0.59 0.46 578\n"}}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "sep_token": "</s>", "cls_token": "<s>", "unk_token": "<unk>", "pad_token": "<pad>", "mask_token": "<mask>", "model_max_length": 512, "name_or_path": "xlm-roberta-base"}
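With the tokenizer files and weights above, inference is a standard token-classification pipeline. A sketch, again assuming a local clone at `./model` (the path is an assumption); inputs should be lower-cased to match "lower_case": true in parameter.json:

```python
from transformers import pipeline

# "simple" aggregation merges B-/I- word pieces into whole entity spans.
ner = pipeline("token-classification", model="./model",
               aggregation_strategy="simple")
print(ner("richard branson founded virgin group in london."))
```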