Go Inoue committed on
Commit 336ae23
1 Parent(s): 4857346

Add model files

config.json ADDED
@@ -0,0 +1,88 @@
+{
+  "architectures": [
+    "BertForTokenClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "abbrev",
+    "1": "adj",
+    "2": "adj_comp",
+    "3": "adj_num",
+    "4": "adv",
+    "5": "adv_interrog",
+    "6": "adv_rel",
+    "7": "conj",
+    "8": "conj_sub",
+    "9": "digit",
+    "10": "interj",
+    "11": "noun",
+    "12": "noun_num",
+    "13": "noun_prop",
+    "14": "noun_quant",
+    "15": "part",
+    "16": "part_det",
+    "17": "part_focus",
+    "18": "part_fut",
+    "19": "part_interrog",
+    "20": "part_neg",
+    "21": "part_restrict",
+    "22": "part_verb",
+    "23": "part_voc",
+    "24": "prep",
+    "25": "pron",
+    "26": "pron_dem",
+    "27": "pron_interrog",
+    "28": "pron_rel",
+    "29": "punc",
+    "30": "verb",
+    "31": "verb_pseudo"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "abbrev": 0,
+    "adj": 1,
+    "adj_comp": 2,
+    "adj_num": 3,
+    "adv": 4,
+    "adv_interrog": 5,
+    "adv_rel": 6,
+    "conj": 7,
+    "conj_sub": 8,
+    "digit": 9,
+    "interj": 10,
+    "noun": 11,
+    "noun_num": 12,
+    "noun_prop": 13,
+    "noun_quant": 14,
+    "part": 15,
+    "part_det": 16,
+    "part_focus": 17,
+    "part_fut": 18,
+    "part_interrog": 19,
+    "part_neg": 20,
+    "part_restrict": 21,
+    "part_verb": 22,
+    "part_voc": 23,
+    "prep": 24,
+    "pron": 25,
+    "pron_dem": 26,
+    "pron_interrog": 27,
+    "pron_rel": 28,
+    "punc": 29,
+    "verb": 30,
+    "verb_pseudo": 31
+  },
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "type_vocab_size": 2,
+  "vocab_size": 30000
+}
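
The config describes a standard 12-layer, 12-head, 768-dimensional BERT encoder (30k WordPiece vocabulary) with a token-classification head over 32 part-of-speech tags, mapped in both directions via `id2label` and `label2id`. A minimal sketch of inspecting it with transformers, assuming a local clone of this repo ("." is a placeholder path; the hub ID is not part of this commit):

```python
# Hedged sketch: load the config from a local clone of this repo and
# inspect the tag set. "." is a placeholder path, not a hub ID.
from transformers import AutoConfig

config = AutoConfig.from_pretrained(".")  # reads ./config.json
print(config.model_type)                  # "bert"
print(config.num_labels)                  # 32, derived from id2label
print(config.id2label[13])                # "noun_prop"
```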
eval_results.txt ADDED
@@ -0,0 +1,8 @@
+eval_loss = 0.09755658993848261
+eval_accuracy = 0.9812471292585964
+eval_precision_micro = 0.9812471292585964
+eval_recall_micro = 0.9812471292585964
+eval_f1_micro = 0.9812471292585964
+eval_precision_macro = 0.963784588303459
+eval_recall_macro = 0.9416425165629574
+eval_f1_macro = 0.9499728844950441
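
In single-label token classification every token contributes exactly one prediction, so micro-averaged precision, recall, and F1 all reduce to plain accuracy; that is why the four micro figures above are identical. The macro figures average per-tag scores and therefore weight rare tags equally. A hedged sketch of the averaging with scikit-learn (the tag lists are illustrative placeholders, not the real evaluation data):

```python
# Hedged sketch: reproduce micro/macro averaging with scikit-learn.
# y_true / y_pred are illustrative per-token tags, not the real eval data.
from sklearn.metrics import accuracy_score, precision_recall_fscore_support

y_true = ["noun", "verb", "noun", "adj"]
y_pred = ["noun", "verb", "adj", "adj"]

p_mi, r_mi, f_mi, _ = precision_recall_fscore_support(y_true, y_pred, average="micro")
p_ma, r_ma, f_ma, _ = precision_recall_fscore_support(y_true, y_pred, average="macro")

# With one tag per token, micro precision = recall = F1 = accuracy,
# which is why those four values coincide in eval_results.txt above.
assert p_mi == r_mi == f_mi == accuracy_score(y_true, y_pred)
print(p_ma, r_ma, f_ma)  # macro scores weight every tag equally
```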
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b82d8193ad604a8fd8b188a0b900525971dd9183c34e03067451d190a6ee19c2
+size 868182312
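
optimizer.pt and the other large binaries below are checked in as Git LFS pointer files: three `key value` lines giving the spec version, a sha256 content hash, and the payload size in bytes; git-lfs fetches the real payload from the LFS store by that hash. A hedged sketch of reading one pointer (the helper function is hypothetical; only the three-line format comes from the commit itself):

```python
# Hedged sketch: parse a Git LFS pointer file such as optimizer.pt above.
def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = parse_lfs_pointer("optimizer.pt")
print(ptr["oid"])              # sha256:b82d8193... content address of the payload
print(int(ptr["size"]) / 1e6)  # ~868 MB of optimizer state
```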
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f7b3f45fea8c633720b93a06894ca55720b713795b329f66dc19edae19ba8ecf
+size 436478373
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4315271f8b99c706b75c052e74190398e52ea495a231075e45c10d1522c40727
+size 326
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
test_predictions.txt ADDED
The diff for this file is too large to render. See raw diff
test_results.txt ADDED
@@ -0,0 +1,8 @@
+eval_loss = 0.10925390168723674
+eval_accuracy = 0.9812575191540556
+eval_precision_micro = 0.9812575191540556
+eval_recall_micro = 0.9812575191540556
+eval_f1_micro = 0.9812575191540556
+eval_precision_macro = 0.9585049025386786
+eval_recall_macro = 0.9424433203724842
+eval_f1_macro = 0.9490995149400377
tf_model.h5 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b662695ebb206b63eb6e891a623b37dac6f6c1ecf5982083fb40e679da8f9aa5
+size 436592640
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+{"do_lower_case": false, "special_tokens_map_file": null, "full_tokenizer_file": null}
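
tokenizer_config.json disables lowercasing, so input text is tokenized with its original casing intact. Tying the commit's files together, a minimal inference sketch, assuming a local clone of this repo ("." and the sample sentence are placeholders):

```python
# Hedged sketch: POS tagging with the files in this commit, loaded from a
# local clone ("." is a placeholder path). The input sentence is
# illustrative only.
import torch
from transformers import BertTokenizer, BertForTokenClassification

tokenizer = BertTokenizer.from_pretrained(".")            # vocab.txt + tokenizer_config.json
model = BertForTokenClassification.from_pretrained(".")   # config.json + pytorch_model.bin
model.eval()

inputs = tokenizer("مثال", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits                       # shape (1, seq_len, 32)

pred_ids = logits.argmax(dim=-1)[0].tolist()
tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
print([(tok, model.config.id2label[i]) for tok, i in zip(tokens, pred_ids)])
```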
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4aac8e8c52efd64ffda5706a9e75b5155d7042da82dcce5e6b849035f0b747f6
+size 1355
vocab.txt ADDED
The diff for this file is too large to render. See raw diff