KoichiYasuoka committed on
Commit 8a3afed
1 Parent(s): eb1d4ac

model improved

Files changed (5)
  1. README.md +2 -2
  2. config.json +169 -47
  3. pytorch_model.bin +2 -2
  4. supar.model +2 -2
  5. tokenizer_config.json +1 -1
README.md CHANGED
@@ -20,7 +20,7 @@ widget:
 
 ## Model Description
 
-This is a RoBERTa model pre-trained on Classical Chinese texts for POS-tagging and dependency-parsing, derived from [roberta-classical-chinese-large-char](https://huggingface.co/KoichiYasuoka/roberta-classical-chinese-large-char). Every word is tagged by [UPOS](https://universaldependencies.org/u/pos/) (Universal Part-Of-Speech).
+This is a RoBERTa model pre-trained on Classical Chinese texts for POS-tagging and dependency-parsing, derived from [roberta-classical-chinese-large-char](https://huggingface.co/KoichiYasuoka/roberta-classical-chinese-large-char). Every word is tagged by [UPOS](https://universaldependencies.org/u/pos/) (Universal Part-Of-Speech) and [FEATS](https://universaldependencies.org/u/feat/).
 
 ## How to Use
 
@@ -42,5 +42,5 @@ Koichi Yasuoka: [Universal Dependencies Treebank of the Four Books in Classical
 
 ## See Also
 
-[esupar](https://github.com/KoichiYasuoka/esupar): Tokenizer POS-tagger and Dependency-parser with BERT/RoBERTa models
+[esupar](https://github.com/KoichiYasuoka/esupar): Tokenizer POS-tagger and Dependency-parser with BERT/RoBERTa/DeBERTa models
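The updated description means the model now emits FEATS alongside UPOS. For context, the model is meant to be loaded through esupar, per the "See Also" link above. A minimal usage sketch; the repository id below is an assumption, since this diff never names the repo:

```python
# Minimal sketch of loading this model through esupar.
# The repository id is an assumption; the diff does not name the repo.
import esupar

nlp = esupar.load("KoichiYasuoka/roberta-classical-chinese-large-upos")
doc = nlp("孟子見梁惠王")  # CoNLL-U style parse with UPOS, FEATS, and dependencies
print(doc)
```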
config.json CHANGED
@@ -4,62 +4,183 @@
   ],
   "attention_probs_dropout_prob": 0.1,
   "bos_token_id": 0,
+ "classifier_dropout": null,
   "eos_token_id": 2,
   "gradient_checkpointing": false,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
   "hidden_size": 1024,
   "id2label": {
- "0": "NOUN",
- "1": "B-NOUN",
- "2": "I-VERB",
- "3": "AUX",
- "4": "CCONJ",
- "5": "B-VERB",
- "6": "PRON",
- "7": "NUM",
- "8": "SYM",
- "9": "I-ADV",
- "10": "I-NOUN",
- "11": "PART",
- "12": "B-NUM",
- "13": "I-PROPN",
- "14": "PROPN",
- "15": "I-NUM",
- "16": "B-PROPN",
- "17": "INTJ",
- "18": "ADV",
- "19": "VERB",
- "20": "B-ADV",
- "21": "SCONJ",
- "22": "ADP"
+ "0": "ADP",
+ "1": "ADP|Degree=Equ",
+ "2": "ADV",
+ "3": "ADV|AdvType=Cau",
+ "4": "ADV|AdvType=Deg|Degree=Cmp",
+ "5": "ADV|AdvType=Deg|Degree=Pos",
+ "6": "ADV|AdvType=Deg|Degree=Sup",
+ "7": "ADV|AdvType=Tim",
+ "8": "ADV|AdvType=Tim|Aspect=Perf",
+ "9": "ADV|AdvType=Tim|Tense=Fut",
+ "10": "ADV|AdvType=Tim|Tense=Past",
+ "11": "ADV|AdvType=Tim|Tense=Pres",
+ "12": "ADV|Degree=Equ|VerbForm=Conv",
+ "13": "ADV|Degree=Pos|VerbForm=Conv",
+ "14": "ADV|Polarity=Neg",
+ "15": "ADV|Polarity=Neg|VerbForm=Conv",
+ "16": "ADV|VerbForm=Conv",
+ "17": "AUX|Mood=Des",
+ "18": "AUX|Mood=Nec",
+ "19": "AUX|Mood=Pot",
+ "20": "AUX|VerbType=Cop",
+ "21": "AUX|Voice=Pass",
+ "22": "B-ADV|VerbForm=Conv",
+ "23": "B-NOUN",
+ "24": "B-NOUN|Case=Loc",
+ "25": "B-NOUN|Case=Tem",
+ "26": "B-NUM",
+ "27": "B-NUM|NumType=Ord",
+ "28": "B-PROPN|Case=Loc|NameType=Geo",
+ "29": "B-PROPN|Case=Loc|NameType=Nat",
+ "30": "B-PROPN|NameType=Giv",
+ "31": "B-PROPN|NameType=Prs",
+ "32": "B-PROPN|NameType=Sur",
+ "33": "B-VERB",
+ "34": "B-VERB|Degree=Equ",
+ "35": "B-VERB|Degree=Pos",
+ "36": "B-VERB|VerbForm=Part",
+ "37": "CCONJ",
+ "38": "I-ADV|VerbForm=Conv",
+ "39": "I-NOUN",
+ "40": "I-NOUN|Case=Loc",
+ "41": "I-NOUN|Case=Tem",
+ "42": "I-NUM",
+ "43": "I-NUM|NumType=Ord",
+ "44": "I-PROPN|Case=Loc|NameType=Geo",
+ "45": "I-PROPN|Case=Loc|NameType=Nat",
+ "46": "I-PROPN|NameType=Giv",
+ "47": "I-PROPN|NameType=Prs",
+ "48": "I-PROPN|NameType=Sur",
+ "49": "I-VERB",
+ "50": "I-VERB|Degree=Equ",
+ "51": "I-VERB|Degree=Pos",
+ "52": "I-VERB|VerbForm=Part",
+ "53": "INTJ",
+ "54": "NOUN",
+ "55": "NOUN|Case=Loc",
+ "56": "NOUN|Case=Tem",
+ "57": "NOUN|NounType=Clf",
+ "58": "NUM",
+ "59": "NUM|NumType=Ord",
+ "60": "PART",
+ "61": "PRON|Person=1|PronType=Prs",
+ "62": "PRON|Person=2|PronType=Prs",
+ "63": "PRON|Person=3|PronType=Prs",
+ "64": "PRON|PronType=Dem",
+ "65": "PRON|PronType=Int",
+ "66": "PRON|PronType=Prs",
+ "67": "PRON|PronType=Prs|Reflex=Yes",
+ "68": "PROPN|Case=Loc|NameType=Geo",
+ "69": "PROPN|Case=Loc|NameType=Nat",
+ "70": "PROPN|NameType=Giv",
+ "71": "PROPN|NameType=Prs",
+ "72": "PROPN|NameType=Sur",
+ "73": "SCONJ",
+ "74": "SYM",
+ "75": "VERB",
+ "76": "VERB|Degree=Equ",
+ "77": "VERB|Degree=Equ|VerbForm=Part",
+ "78": "VERB|Degree=Pos",
+ "79": "VERB|Degree=Pos|VerbForm=Part",
+ "80": "VERB|Polarity=Neg",
+ "81": "VERB|Polarity=Neg|VerbForm=Part",
+ "82": "VERB|VerbForm=Part"
   },
   "initializer_range": 0.02,
   "intermediate_size": 4096,
   "label2id": {
- "ADP": 22,
- "ADV": 18,
- "AUX": 3,
- "B-ADV": 20,
- "B-NOUN": 1,
- "B-NUM": 12,
- "B-PROPN": 16,
- "B-VERB": 5,
- "CCONJ": 4,
- "I-ADV": 9,
- "I-NOUN": 10,
- "I-NUM": 15,
- "I-PROPN": 13,
- "I-VERB": 2,
- "INTJ": 17,
- "NOUN": 0,
- "NUM": 7,
- "PART": 11,
- "PRON": 6,
- "PROPN": 14,
- "SCONJ": 21,
- "SYM": 8,
- "VERB": 19
+ "ADP": 0,
+ "ADP|Degree=Equ": 1,
+ "ADV": 2,
+ "ADV|AdvType=Cau": 3,
+ "ADV|AdvType=Deg|Degree=Cmp": 4,
+ "ADV|AdvType=Deg|Degree=Pos": 5,
+ "ADV|AdvType=Deg|Degree=Sup": 6,
+ "ADV|AdvType=Tim": 7,
+ "ADV|AdvType=Tim|Aspect=Perf": 8,
+ "ADV|AdvType=Tim|Tense=Fut": 9,
+ "ADV|AdvType=Tim|Tense=Past": 10,
+ "ADV|AdvType=Tim|Tense=Pres": 11,
+ "ADV|Degree=Equ|VerbForm=Conv": 12,
+ "ADV|Degree=Pos|VerbForm=Conv": 13,
+ "ADV|Polarity=Neg": 14,
+ "ADV|Polarity=Neg|VerbForm=Conv": 15,
+ "ADV|VerbForm=Conv": 16,
+ "AUX|Mood=Des": 17,
+ "AUX|Mood=Nec": 18,
+ "AUX|Mood=Pot": 19,
+ "AUX|VerbType=Cop": 20,
+ "AUX|Voice=Pass": 21,
+ "B-ADV|VerbForm=Conv": 22,
+ "B-NOUN": 23,
+ "B-NOUN|Case=Loc": 24,
+ "B-NOUN|Case=Tem": 25,
+ "B-NUM": 26,
+ "B-NUM|NumType=Ord": 27,
+ "B-PROPN|Case=Loc|NameType=Geo": 28,
+ "B-PROPN|Case=Loc|NameType=Nat": 29,
+ "B-PROPN|NameType=Giv": 30,
+ "B-PROPN|NameType=Prs": 31,
+ "B-PROPN|NameType=Sur": 32,
+ "B-VERB": 33,
+ "B-VERB|Degree=Equ": 34,
+ "B-VERB|Degree=Pos": 35,
+ "B-VERB|VerbForm=Part": 36,
+ "CCONJ": 37,
+ "I-ADV|VerbForm=Conv": 38,
+ "I-NOUN": 39,
+ "I-NOUN|Case=Loc": 40,
+ "I-NOUN|Case=Tem": 41,
+ "I-NUM": 42,
+ "I-NUM|NumType=Ord": 43,
+ "I-PROPN|Case=Loc|NameType=Geo": 44,
+ "I-PROPN|Case=Loc|NameType=Nat": 45,
+ "I-PROPN|NameType=Giv": 46,
+ "I-PROPN|NameType=Prs": 47,
+ "I-PROPN|NameType=Sur": 48,
+ "I-VERB": 49,
+ "I-VERB|Degree=Equ": 50,
+ "I-VERB|Degree=Pos": 51,
+ "I-VERB|VerbForm=Part": 52,
+ "INTJ": 53,
+ "NOUN": 54,
+ "NOUN|Case=Loc": 55,
+ "NOUN|Case=Tem": 56,
+ "NOUN|NounType=Clf": 57,
+ "NUM": 58,
+ "NUM|NumType=Ord": 59,
+ "PART": 60,
+ "PRON|Person=1|PronType=Prs": 61,
+ "PRON|Person=2|PronType=Prs": 62,
+ "PRON|Person=3|PronType=Prs": 63,
+ "PRON|PronType=Dem": 64,
+ "PRON|PronType=Int": 65,
+ "PRON|PronType=Prs": 66,
+ "PRON|PronType=Prs|Reflex=Yes": 67,
+ "PROPN|Case=Loc|NameType=Geo": 68,
+ "PROPN|Case=Loc|NameType=Nat": 69,
+ "PROPN|NameType=Giv": 70,
+ "PROPN|NameType=Prs": 71,
+ "PROPN|NameType=Sur": 72,
+ "SCONJ": 73,
+ "SYM": 74,
+ "VERB": 75,
+ "VERB|Degree=Equ": 76,
+ "VERB|Degree=Equ|VerbForm=Part": 77,
+ "VERB|Degree=Pos": 78,
+ "VERB|Degree=Pos|VerbForm=Part": 79,
+ "VERB|Polarity=Neg": 80,
+ "VERB|Polarity=Neg|VerbForm=Part": 81,
+ "VERB|VerbForm=Part": 82
   },
   "layer_norm_eps": 1e-05,
   "max_position_embeddings": 514,
@@ -69,7 +190,8 @@
   "pad_token_id": 1,
   "position_embedding_type": "absolute",
   "tokenizer_class": "BertTokenizer",
- "transformers_version": "4.7.0",
+ "torch_dtype": "float32",
+ "transformers_version": "4.19.4",
   "type_vocab_size": 1,
   "use_cache": true,
   "vocab_size": 26318
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:12d20d517388b22b6eb4112e667fbffba05c8145536b6b3339063acd585856ec
-size 1319415945
+oid sha256:e1eede3754b56ebf8688955829a63eac392372b9b396da90a8a2f75835ed52ec
+size 1319635921
supar.model CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:034863af3ad0b86406db62a10e6427cdb712278aab87557bec2dee7419a37f24
-size 1371348085
+oid sha256:93cbfb849b2ec3e3e4c5b53dee3386f41c029d78e5ed746088b32255994c6da8
+size 1371525616
tokenizer_config.json CHANGED
@@ -1 +1 @@
-{"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "special_tokens_map_file": null, "model_max_length": 512, "do_basic_tokenize": true, "never_split": null}
+{"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "special_tokens_map_file": null, "model_max_length": 512, "do_basic_tokenize": true, "never_split": null, "tokenizer_class": "BertTokenizer"}