KoichiYasuoka committed
Commit bfdde8a
1 Parent(s): 361ffa5

model improved

Files changed (5)
  1. config.json +96 -95
  2. pytorch_model.bin +2 -2
  3. supar.model +2 -2
  4. tokenizer.json +0 -0
  5. tokenizer_config.json +1 -1
config.json CHANGED
@@ -3,113 +3,114 @@
  "BertForTokenClassification"
  ],
  "attention_probs_dropout_prob": 0.1,
+ "classifier_dropout": null,
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "id2label": {
- "0": "B-AUX",
- "1": "I-PRON",
- "2": "B-PUNCT",
- "3": "I-NUM",
- "4": "I-INTJ",
- "5": "I-PUNCT",
- "6": "SCONJ",
- "7": "B-NOUN",
+ "0": "ADJ",
+ "1": "ADP",
+ "2": "ADV",
+ "3": "AUX",
+ "4": "B-ADJ",
+ "5": "B-ADP",
+ "6": "B-ADV",
+ "7": "B-AUX",
  "8": "B-CCONJ",
- "9": "I-NOUN",
- "10": "B-ADP",
- "11": "I-DET",
- "12": "NOUN",
- "13": "B-PROPN",
- "14": "ADJ",
- "15": "B-INTJ",
- "16": "B-X",
- "17": "B-VERB",
- "18": "SYM",
- "19": "I-ADP",
- "20": "B-ADJ",
- "21": "I-PROPN",
- "22": "B-PART",
- "23": "I-PART",
- "24": "B-NUM",
- "25": "CCONJ",
- "26": "I-VERB",
- "27": "VERB",
- "28": "I-X",
- "29": "X",
- "30": "I-ADJ",
- "31": "PUNCT",
- "32": "I-SYM",
- "33": "B-ADV",
- "34": "B-SCONJ",
- "35": "PART",
- "36": "NUM",
- "37": "ADP",
- "38": "B-PRON",
- "39": "I-SCONJ",
- "40": "PROPN",
- "41": "I-CCONJ",
+ "9": "B-DET",
+ "10": "B-INTJ",
+ "11": "B-NOUN",
+ "12": "B-NUM",
+ "13": "B-PART",
+ "14": "B-PRON",
+ "15": "B-PROPN",
+ "16": "B-PUNCT",
+ "17": "B-SCONJ",
+ "18": "B-SYM",
+ "19": "B-VERB",
+ "20": "B-X",
+ "21": "CCONJ",
+ "22": "I-ADJ",
+ "23": "I-ADP",
+ "24": "I-ADV",
+ "25": "I-AUX",
+ "26": "I-CCONJ",
+ "27": "I-DET",
+ "28": "I-INTJ",
+ "29": "I-NOUN",
+ "30": "I-NUM",
+ "31": "I-PART",
+ "32": "I-PRON",
+ "33": "I-PROPN",
+ "34": "I-PUNCT",
+ "35": "I-SCONJ",
+ "36": "I-SYM",
+ "37": "I-VERB",
+ "38": "I-X",
+ "39": "NOUN",
+ "40": "NUM",
+ "41": "PART",
  "42": "PRON",
- "43": "ADV",
- "44": "B-SYM",
- "45": "I-ADV",
- "46": "AUX",
- "47": "B-DET",
- "48": "I-AUX"
+ "43": "PROPN",
+ "44": "PUNCT",
+ "45": "SCONJ",
+ "46": "SYM",
+ "47": "VERB",
+ "48": "X"
  },
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "label2id": {
- "ADJ": 14,
- "ADP": 37,
- "ADV": 43,
- "AUX": 46,
- "B-ADJ": 20,
- "B-ADP": 10,
- "B-ADV": 33,
- "B-AUX": 0,
+ "ADJ": 0,
+ "ADP": 1,
+ "ADV": 2,
+ "AUX": 3,
+ "B-ADJ": 4,
+ "B-ADP": 5,
+ "B-ADV": 6,
+ "B-AUX": 7,
  "B-CCONJ": 8,
- "B-DET": 47,
- "B-INTJ": 15,
- "B-NOUN": 7,
- "B-NUM": 24,
- "B-PART": 22,
- "B-PRON": 38,
- "B-PROPN": 13,
- "B-PUNCT": 2,
- "B-SCONJ": 34,
- "B-SYM": 44,
- "B-VERB": 17,
- "B-X": 16,
- "CCONJ": 25,
- "I-ADJ": 30,
- "I-ADP": 19,
- "I-ADV": 45,
- "I-AUX": 48,
- "I-CCONJ": 41,
- "I-DET": 11,
- "I-INTJ": 4,
- "I-NOUN": 9,
- "I-NUM": 3,
- "I-PART": 23,
- "I-PRON": 1,
- "I-PROPN": 21,
- "I-PUNCT": 5,
- "I-SCONJ": 39,
- "I-SYM": 32,
- "I-VERB": 26,
- "I-X": 28,
- "NOUN": 12,
- "NUM": 36,
- "PART": 35,
+ "B-DET": 9,
+ "B-INTJ": 10,
+ "B-NOUN": 11,
+ "B-NUM": 12,
+ "B-PART": 13,
+ "B-PRON": 14,
+ "B-PROPN": 15,
+ "B-PUNCT": 16,
+ "B-SCONJ": 17,
+ "B-SYM": 18,
+ "B-VERB": 19,
+ "B-X": 20,
+ "CCONJ": 21,
+ "I-ADJ": 22,
+ "I-ADP": 23,
+ "I-ADV": 24,
+ "I-AUX": 25,
+ "I-CCONJ": 26,
+ "I-DET": 27,
+ "I-INTJ": 28,
+ "I-NOUN": 29,
+ "I-NUM": 30,
+ "I-PART": 31,
+ "I-PRON": 32,
+ "I-PROPN": 33,
+ "I-PUNCT": 34,
+ "I-SCONJ": 35,
+ "I-SYM": 36,
+ "I-VERB": 37,
+ "I-X": 38,
+ "NOUN": 39,
+ "NUM": 40,
+ "PART": 41,
  "PRON": 42,
- "PROPN": 40,
- "PUNCT": 31,
- "SCONJ": 6,
- "SYM": 18,
- "VERB": 27,
- "X": 29
+ "PROPN": 43,
+ "PUNCT": 44,
+ "SCONJ": 45,
+ "SYM": 46,
+ "VERB": 47,
+ "X": 48
  },
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
@@ -120,7 +121,7 @@
  "position_embedding_type": "absolute",
  "tokenizer_class": "BertTokenizer",
  "torch_dtype": "float32",
- "transformers_version": "4.7.0",
+ "transformers_version": "4.19.2",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 6291
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cd1b1d3cc78fb698eaa3ef04291824e29a0d31fab419fe78b68943bbbf3e0387
- size 361366224
+ oid sha256:9b8258f7c475fca7980396ebbade0cc300f773d94e4f109d2b482fe3344dab49
+ size 361353229
supar.model CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:08982b52f4d011e1fcd47bba2dd72d9f11ab928b1b587b43f94644912a33b8ae
- size 411801351
+ oid sha256:57617003c4704ff1e4e5ce70a0c7cd20764ff0ef2ebdc8cde125fe355435ccf7
+ size 411800936
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
tokenizer_config.json CHANGED
@@ -1 +1 @@
- {"do_lower_case": false, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "do_word_tokenize": true, "do_subword_tokenize": true, "word_tokenizer_type": "basic", "subword_tokenizer_type": "character", "never_split": ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"], "mecab_kwargs": null, "do_basic_tokenize": true, "tokenizer_class": "BertTokenizer"}
+ {"do_lower_case": false, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "do_word_tokenize": true, "do_subword_tokenize": true, "word_tokenizer_type": "basic", "subword_tokenizer_type": "character", "never_split": ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"], "mecab_kwargs": null, "do_basic_tokenize": true, "tokenizer_class": "BertTokenizerFast"}