KoichiYasuoka committed
Commit
3e3d517
1 Parent(s): 7bf9884

initial release

Files changed (10)
  1. README.md +33 -0
  2. config.json +340 -0
  3. maker.py +51 -0
  4. mecab-jumandic-utf8.zip +3 -0
  5. pytorch_model.bin +3 -0
  6. special_tokens_map.json +9 -0
  7. spm.model +3 -0
  8. tokenizer.json +0 -0
  9. tokenizer_config.json +17 -0
  10. ud.py +103 -0
README.md ADDED
@@ -0,0 +1,33 @@
+ ---
+ language:
+ - "ja"
+ tags:
+ - "japanese"
+ - "wikipedia"
+ - "cc100"
+ - "oscar"
+ - "pos"
+ - "dependency-parsing"
+ datasets:
+ - "universal_dependencies"
+ license: "cc-by-sa-4.0"
+ pipeline_tag: "token-classification"
+ widget:
+ - text: "全学年にわたって小学校の国語の教科書に挿し絵が用いられている"
+ ---
+
+ # deberta-base-japanese-juman-ud-goeswith
+
+ ## Model Description
+
+ This is a DeBERTa(V2) model pretrained on Japanese Wikipedia, CC-100, and OSCAR texts, and fine-tuned for POS-tagging and dependency-parsing (using `goeswith` for subwords). It is derived from [deberta-v2-base-japanese](https://huggingface.co/ku-nlp/deberta-v2-base-japanese).
+
+ ## How to Use
+
+ ```
+ from transformers import pipeline
+ nlp=pipeline("universal-dependencies","KoichiYasuoka/deberta-base-japanese-juman-ud-goeswith",trust_remote_code=True,aggregation_strategy="simple")
+ print(nlp("全学年にわたって小学校の国語の教科書に挿し絵が用いられている"))
+ ```
+
+ [fugashi](https://pypi.org/project/fugashi) and [pytextspan](https://pypi.org/project/pytextspan) are required.
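
The pipeline returns its analysis as a CoNLL-U formatted string (built in `postprocess` of `ud.py` below). A minimal sketch of consuming that output, assuming the `nlp` pipeline from the README snippet above; the unpacking simply follows the ten standard CoNLL-U columns:

```
# Sketch: split the CoNLL-U string returned by the pipeline into per-token fields.
# Assumes `nlp` was created exactly as in the README snippet above.
conllu = nlp("全学年にわたって小学校の国語の教科書に挿し絵が用いられている")
for line in conllu.splitlines():
    if line and not line.startswith("#"):  # skip the "# text = ..." comment line
        idx, form, lemma, upos, xpos, feats, head, deprel, deps, misc = line.split("\t")
        print(idx, form, upos, head, deprel)
```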
config.json ADDED
@@ -0,0 +1,340 @@
+ {
+   "architectures": [
+     "DebertaV2ForTokenClassification"
+   ],
+   "attention_head_size": 64,
+   "attention_probs_dropout_prob": 0.1,
+   "conv_act": "gelu",
+   "conv_kernel_size": 3,
+   "custom_pipelines": {
+     "universal-dependencies": {
+       "impl": "ud.UniversalDependenciesPipeline"
+     }
+   },
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "-|_|dep",
+     "1": "ADJ|Polarity=Neg|acl",
+     "2": "ADJ|Polarity=Neg|advcl",
+     "3": "ADJ|Polarity=Neg|ccomp",
+     "4": "ADJ|Polarity=Neg|root",
+     "5": "ADJ|_|acl",
+     "6": "ADJ|_|advcl",
+     "7": "ADJ|_|amod",
+     "8": "ADJ|_|ccomp",
+     "9": "ADJ|_|compound",
+     "10": "ADJ|_|csubj",
+     "11": "ADJ|_|dep",
+     "12": "ADJ|_|iobj",
+     "13": "ADJ|_|nmod",
+     "14": "ADJ|_|nsubj",
+     "15": "ADJ|_|obj",
+     "16": "ADJ|_|obl",
+     "17": "ADJ|_|parataxis",
+     "18": "ADJ|_|root",
+     "19": "ADP|_|case",
+     "20": "ADP|_|dislocated",
+     "21": "ADP|_|fixed",
+     "22": "ADP|_|mark",
+     "23": "ADP|_|root",
+     "24": "ADV|_|advcl",
+     "25": "ADV|_|advmod",
+     "26": "ADV|_|compound",
+     "27": "ADV|_|dislocated",
+     "28": "ADV|_|iobj",
+     "29": "ADV|_|nmod",
+     "30": "ADV|_|nsubj",
+     "31": "ADV|_|obj",
+     "32": "ADV|_|obl",
+     "33": "ADV|_|root",
+     "34": "AUX|Polarity=Neg|aux",
+     "35": "AUX|_|acl",
+     "36": "AUX|_|advcl",
+     "37": "AUX|_|aux",
+     "38": "AUX|_|ccomp",
+     "39": "AUX|_|conj",
+     "40": "AUX|_|cop",
+     "41": "AUX|_|fixed",
+     "42": "AUX|_|iobj",
+     "43": "AUX|_|obj",
+     "44": "AUX|_|obl",
+     "45": "AUX|_|root",
+     "46": "CCONJ|_|advmod",
+     "47": "CCONJ|_|case",
+     "48": "CCONJ|_|cc",
+     "49": "CCONJ|_|ccomp",
+     "50": "CCONJ|_|fixed",
+     "51": "CCONJ|_|mark",
+     "52": "DET|_|det",
+     "53": "DET|_|nsubj",
+     "54": "DET|_|obl",
+     "55": "DET|_|root",
+     "56": "INTJ|_|discourse",
+     "57": "INTJ|_|root",
+     "58": "NOUN|Polarity=Neg|compound",
+     "59": "NOUN|_|acl",
+     "60": "NOUN|_|advcl",
+     "61": "NOUN|_|advmod",
+     "62": "NOUN|_|appos",
+     "63": "NOUN|_|ccomp",
+     "64": "NOUN|_|compound",
+     "65": "NOUN|_|conj",
+     "66": "NOUN|_|csubj",
+     "67": "NOUN|_|dislocated",
+     "68": "NOUN|_|iobj",
+     "69": "NOUN|_|list",
+     "70": "NOUN|_|nmod",
+     "71": "NOUN|_|nsubj",
+     "72": "NOUN|_|obj",
+     "73": "NOUN|_|obl",
+     "74": "NOUN|_|parataxis",
+     "75": "NOUN|_|root",
+     "76": "NUM|_|advcl",
+     "77": "NUM|_|dislocated",
+     "78": "NUM|_|iobj",
+     "79": "NUM|_|nmod",
+     "80": "NUM|_|nsubj",
+     "81": "NUM|_|nummod",
+     "82": "NUM|_|obj",
+     "83": "NUM|_|obl",
+     "84": "NUM|_|root",
+     "85": "PART|_|acl",
+     "86": "PART|_|advcl",
+     "87": "PART|_|amod",
+     "88": "PART|_|case",
+     "89": "PART|_|conj",
+     "90": "PART|_|iobj",
+     "91": "PART|_|mark",
+     "92": "PART|_|nmod",
+     "93": "PART|_|nsubj",
+     "94": "PART|_|obj",
+     "95": "PART|_|obl",
+     "96": "PART|_|root",
+     "97": "PRON|_|acl",
+     "98": "PRON|_|advcl",
+     "99": "PRON|_|compound",
+     "100": "PRON|_|discourse",
+     "101": "PRON|_|dislocated",
+     "102": "PRON|_|iobj",
+     "103": "PRON|_|nmod",
+     "104": "PRON|_|nsubj",
+     "105": "PRON|_|obj",
+     "106": "PRON|_|obl",
+     "107": "PRON|_|root",
+     "108": "PROPN|_|acl",
+     "109": "PROPN|_|advcl",
+     "110": "PROPN|_|compound",
+     "111": "PROPN|_|dislocated",
+     "112": "PROPN|_|iobj",
+     "113": "PROPN|_|nmod",
+     "114": "PROPN|_|nsubj",
+     "115": "PROPN|_|obj",
+     "116": "PROPN|_|obl",
+     "117": "PROPN|_|root",
+     "118": "PROPN|_|vocative",
+     "119": "PUNCT|_|punct",
+     "120": "SCONJ|_|advcl",
+     "121": "SCONJ|_|fixed",
+     "122": "SCONJ|_|mark",
+     "123": "SYM|_|compound",
+     "124": "SYM|_|nmod",
+     "125": "SYM|_|nsubj",
+     "126": "SYM|_|obl",
+     "127": "SYM|_|punct",
+     "128": "VERB|_|acl",
+     "129": "VERB|_|advcl",
+     "130": "VERB|_|aux",
+     "131": "VERB|_|ccomp",
+     "132": "VERB|_|compound",
+     "133": "VERB|_|conj",
+     "134": "VERB|_|csubj",
+     "135": "VERB|_|dislocated",
+     "136": "VERB|_|fixed",
+     "137": "VERB|_|iobj",
+     "138": "VERB|_|nmod",
+     "139": "VERB|_|nsubj",
+     "140": "VERB|_|obj",
+     "141": "VERB|_|obl",
+     "142": "VERB|_|parataxis",
+     "143": "VERB|_|root",
+     "144": "X|_|dep",
+     "145": "X|_|goeswith",
+     "146": "X|_|nmod"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "-|_|dep": 0,
+     "ADJ|Polarity=Neg|acl": 1,
+     "ADJ|Polarity=Neg|advcl": 2,
+     "ADJ|Polarity=Neg|ccomp": 3,
+     "ADJ|Polarity=Neg|root": 4,
+     "ADJ|_|acl": 5,
+     "ADJ|_|advcl": 6,
+     "ADJ|_|amod": 7,
+     "ADJ|_|ccomp": 8,
+     "ADJ|_|compound": 9,
+     "ADJ|_|csubj": 10,
+     "ADJ|_|dep": 11,
+     "ADJ|_|iobj": 12,
+     "ADJ|_|nmod": 13,
+     "ADJ|_|nsubj": 14,
+     "ADJ|_|obj": 15,
+     "ADJ|_|obl": 16,
+     "ADJ|_|parataxis": 17,
+     "ADJ|_|root": 18,
+     "ADP|_|case": 19,
+     "ADP|_|dislocated": 20,
+     "ADP|_|fixed": 21,
+     "ADP|_|mark": 22,
+     "ADP|_|root": 23,
+     "ADV|_|advcl": 24,
+     "ADV|_|advmod": 25,
+     "ADV|_|compound": 26,
+     "ADV|_|dislocated": 27,
+     "ADV|_|iobj": 28,
+     "ADV|_|nmod": 29,
+     "ADV|_|nsubj": 30,
+     "ADV|_|obj": 31,
+     "ADV|_|obl": 32,
+     "ADV|_|root": 33,
+     "AUX|Polarity=Neg|aux": 34,
+     "AUX|_|acl": 35,
+     "AUX|_|advcl": 36,
+     "AUX|_|aux": 37,
+     "AUX|_|ccomp": 38,
+     "AUX|_|conj": 39,
+     "AUX|_|cop": 40,
+     "AUX|_|fixed": 41,
+     "AUX|_|iobj": 42,
+     "AUX|_|obj": 43,
+     "AUX|_|obl": 44,
+     "AUX|_|root": 45,
+     "CCONJ|_|advmod": 46,
+     "CCONJ|_|case": 47,
+     "CCONJ|_|cc": 48,
+     "CCONJ|_|ccomp": 49,
+     "CCONJ|_|fixed": 50,
+     "CCONJ|_|mark": 51,
+     "DET|_|det": 52,
+     "DET|_|nsubj": 53,
+     "DET|_|obl": 54,
+     "DET|_|root": 55,
+     "INTJ|_|discourse": 56,
+     "INTJ|_|root": 57,
+     "NOUN|Polarity=Neg|compound": 58,
+     "NOUN|_|acl": 59,
+     "NOUN|_|advcl": 60,
+     "NOUN|_|advmod": 61,
+     "NOUN|_|appos": 62,
+     "NOUN|_|ccomp": 63,
+     "NOUN|_|compound": 64,
+     "NOUN|_|conj": 65,
+     "NOUN|_|csubj": 66,
+     "NOUN|_|dislocated": 67,
+     "NOUN|_|iobj": 68,
+     "NOUN|_|list": 69,
+     "NOUN|_|nmod": 70,
+     "NOUN|_|nsubj": 71,
+     "NOUN|_|obj": 72,
+     "NOUN|_|obl": 73,
+     "NOUN|_|parataxis": 74,
+     "NOUN|_|root": 75,
+     "NUM|_|advcl": 76,
+     "NUM|_|dislocated": 77,
+     "NUM|_|iobj": 78,
+     "NUM|_|nmod": 79,
+     "NUM|_|nsubj": 80,
+     "NUM|_|nummod": 81,
+     "NUM|_|obj": 82,
+     "NUM|_|obl": 83,
+     "NUM|_|root": 84,
+     "PART|_|acl": 85,
+     "PART|_|advcl": 86,
+     "PART|_|amod": 87,
+     "PART|_|case": 88,
+     "PART|_|conj": 89,
+     "PART|_|iobj": 90,
+     "PART|_|mark": 91,
+     "PART|_|nmod": 92,
+     "PART|_|nsubj": 93,
+     "PART|_|obj": 94,
+     "PART|_|obl": 95,
+     "PART|_|root": 96,
+     "PRON|_|acl": 97,
+     "PRON|_|advcl": 98,
+     "PRON|_|compound": 99,
+     "PRON|_|discourse": 100,
+     "PRON|_|dislocated": 101,
+     "PRON|_|iobj": 102,
+     "PRON|_|nmod": 103,
+     "PRON|_|nsubj": 104,
+     "PRON|_|obj": 105,
+     "PRON|_|obl": 106,
+     "PRON|_|root": 107,
+     "PROPN|_|acl": 108,
+     "PROPN|_|advcl": 109,
+     "PROPN|_|compound": 110,
+     "PROPN|_|dislocated": 111,
+     "PROPN|_|iobj": 112,
+     "PROPN|_|nmod": 113,
+     "PROPN|_|nsubj": 114,
+     "PROPN|_|obj": 115,
+     "PROPN|_|obl": 116,
+     "PROPN|_|root": 117,
+     "PROPN|_|vocative": 118,
+     "PUNCT|_|punct": 119,
+     "SCONJ|_|advcl": 120,
+     "SCONJ|_|fixed": 121,
+     "SCONJ|_|mark": 122,
+     "SYM|_|compound": 123,
+     "SYM|_|nmod": 124,
+     "SYM|_|nsubj": 125,
+     "SYM|_|obl": 126,
+     "SYM|_|punct": 127,
+     "VERB|_|acl": 128,
+     "VERB|_|advcl": 129,
+     "VERB|_|aux": 130,
+     "VERB|_|ccomp": 131,
+     "VERB|_|compound": 132,
+     "VERB|_|conj": 133,
+     "VERB|_|csubj": 134,
+     "VERB|_|dislocated": 135,
+     "VERB|_|fixed": 136,
+     "VERB|_|iobj": 137,
+     "VERB|_|nmod": 138,
+     "VERB|_|nsubj": 139,
+     "VERB|_|obj": 140,
+     "VERB|_|obl": 141,
+     "VERB|_|parataxis": 142,
+     "VERB|_|root": 143,
+     "X|_|dep": 144,
+     "X|_|goeswith": 145,
+     "X|_|nmod": 146
+   },
+   "layer_norm_eps": 1e-07,
+   "max_position_embeddings": 512,
+   "max_relative_positions": -1,
+   "model_type": "deberta-v2",
+   "norm_rel_ebd": "layer_norm",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "pooler_dropout": 0,
+   "pooler_hidden_act": "gelu",
+   "pooler_hidden_size": 768,
+   "pos_att_type": [
+     "p2c",
+     "c2p"
+   ],
+   "position_biased_input": false,
+   "position_buckets": 256,
+   "relative_attention": true,
+   "share_att_key": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.26.0",
+   "type_vocab_size": 0,
+   "vocab_size": 32000
+ }
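
Each `id2label` entry above packs a UPOS tag, optional morphological features, and a dependency relation into one `|`-joined string; `ud.py` below splits them back apart with `.split("|")`. A standalone sketch of that decomposition (no model needed):

```
# Sketch: decompose a config label the same way ud.py does:
# first part is the UPOS tag, last part is the deprel, anything in between is features.
label = "AUX|Polarity=Neg|aux"
parts = label.split("|")
upos, feats, deprel = parts[0], parts[1:-1], parts[-1]
print(upos, feats, deprel)   # AUX ['Polarity=Neg'] aux
```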
maker.py ADDED
@@ -0,0 +1,51 @@
+ #! /usr/bin/python3
+ src="ku-nlp/deberta-v2-base-japanese"
+ tgt="KoichiYasuoka/deberta-base-japanese-juman-ud-goeswith"
+ url="https://github.com/KoichiYasuoka/SuPar-UniDic/raw/main/suparunidic/suparmodels/ja_gsd_modern.conllu"
+ import os
+ f=os.path.basename(url)
+ os.system("test -f "+f+" || curl -LO "+url)
+ class UDgoeswithDataset(object):
+   def __init__(self,conllu,tokenizer):
+     self.ids,self.tags,label=[],[],set()
+     with open(conllu,"r",encoding="utf-8") as r:
+       cls,sep,msk=tokenizer.cls_token_id,tokenizer.sep_token_id,tokenizer.mask_token_id
+       dep,c="-|_|dep",[]
+       for s in r:
+         t=s.split("\t")
+         if len(t)==10 and t[0].isdecimal():
+           c.append(t)
+         elif c!=[]:
+           v=tokenizer([t[1] for t in c],add_special_tokens=False)["input_ids"]
+           for i in range(len(v)-1,-1,-1):
+             for j in range(1,len(v[i])):
+               c.insert(i+1,[c[i][0],"_","_","X","_","_",c[i][0],"goeswith","_","_"])
+           y=["0"]+[t[0] for t in c]
+           h=[i if t[6]=="0" else y.index(t[6]) for i,t in enumerate(c,1)]
+           p,v=[t[3]+"|"+t[5]+"|"+t[7] for t in c],sum(v,[])
+           self.ids.append([cls]+v+[sep])
+           self.tags.append([dep]+p+[dep])
+           label=set(sum([self.tags[-1],list(label)],[]))
+           for i,k in enumerate(v):
+             self.ids.append([cls]+v[0:i]+[msk]+v[i+1:]+[sep,k])
+             self.tags.append([dep]+[t if h[j]==i+1 else dep for j,t in enumerate(p)]+[dep,dep])
+           c=[]
+     self.label2id={l:i for i,l in enumerate(sorted(label))}
+   def __call__(*args):
+     label=set(sum([list(t.label2id) for t in args],[]))
+     lid={l:i for i,l in enumerate(sorted(label))}
+     for t in args:
+       t.label2id=lid
+     return lid
+   __len__=lambda self:len(self.ids)
+   __getitem__=lambda self,i:{"input_ids":self.ids[i],"labels":[self.label2id[t] for t in self.tags[i]]}
+ from transformers import AutoTokenizer,AutoConfig,AutoModelForTokenClassification,DataCollatorForTokenClassification,TrainingArguments,Trainer
+ tkz=AutoTokenizer.from_pretrained(src)
+ trainDS=UDgoeswithDataset(f,tkz)
+ lid=trainDS.label2id
+ cfg=AutoConfig.from_pretrained(src,num_labels=len(lid),label2id=lid,id2label={i:l for l,i in lid.items()})
+ arg=TrainingArguments(num_train_epochs=3,per_device_train_batch_size=32,output_dir="/tmp",overwrite_output_dir=True,save_total_limit=2,learning_rate=5e-05,warmup_ratio=0.1)
+ trn=Trainer(args=arg,data_collator=DataCollatorForTokenClassification(tkz),model=AutoModelForTokenClassification.from_pretrained(src,config=cfg),train_dataset=trainDS)
+ trn.train()
+ trn.save_model(tgt)
+ tkz.save_pretrained(tgt)
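
`maker.py` inserts an `X|_|goeswith` row for every extra subword of a UD word before flattening the sentence into subword ids, then adds one masked example per subword for head selection. A toy, self-contained sketch of just the expansion step (the forms, subword split, and labels below are hypothetical, not taken from the training data):

```
# Toy sketch of the goeswith expansion in UDgoeswithDataset.__init__:
# every extra subword of a word gets an "X|_|goeswith" row inserted after it.
rows = [["挿し絵", "NOUN|_|nsubj"], ["が", "ADP|_|case"]]   # (form, UPOS|feats|deprel) per UD word
subwords = [["▁挿し", "絵"], ["▁が"]]                        # hypothetical subword split of each form
for i in range(len(subwords) - 1, -1, -1):  # walk backwards so inserts do not shift pending indices
    for _ in range(1, len(subwords[i])):
        rows.insert(i + 1, [rows[i][0], "X|_|goeswith"])
print(rows)   # [['挿し絵', 'NOUN|_|nsubj'], ['挿し絵', 'X|_|goeswith'], ['が', 'ADP|_|case']]
```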
mecab-jumandic-utf8.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bbde3e53407df0e50122816df8f936ceb006580c17026e21037518ed542e4cbc
+ size 33196897
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa6b69a8f0ebb38e9b87785dd8987aea71e6adcf468c761895046127349667d7
+ size 447719207
special_tokens_map.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "bos_token": "[CLS]",
+   "cls_token": "[CLS]",
+   "eos_token": "[SEP]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
spm.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c111c16e2e52366dcac46b886e40650bb843fe2938a65f5970271fc5697a127
+ size 805061
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "auto_map": {"AutoTokenizer": [null, "ud.JumanDebertaV2TokenizerFast"]},
+   "bos_token": "[CLS]",
+   "cls_token": "[CLS]",
+   "do_lower_case": false,
+   "eos_token": "[SEP]",
+   "keep_accents": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "sp_model_kwargs": {},
+   "special_tokens_map_file": null,
+   "split_by_punct": false,
+   "tokenizer_class": "JumanDebertaV2TokenizerFast",
+   "unk_token": "[UNK]"
+ }
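
The `auto_map` entry above points `AutoTokenizer` at the custom fast tokenizer class defined in `ud.py`, so the Juman-style pre-tokenization is wired in automatically when remote code is trusted. A minimal sketch of loading just the tokenizer (assuming the dependencies listed in the README are installed):

```
# Sketch: auto_map resolves ud.JumanDebertaV2TokenizerFast when trust_remote_code=True.
from transformers import AutoTokenizer
tkz = AutoTokenizer.from_pretrained(
    "KoichiYasuoka/deberta-base-japanese-juman-ud-goeswith",
    trust_remote_code=True,
)
print(tkz.tokenize("全学年にわたって小学校の国語の教科書に挿し絵が用いられている"))
```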
ud.py ADDED
@@ -0,0 +1,103 @@
+ from transformers import TokenClassificationPipeline,DebertaV2TokenizerFast
+ from transformers.models.bert_japanese.tokenization_bert_japanese import MecabTokenizer
+
+ class UniversalDependenciesPipeline(TokenClassificationPipeline):
+   def _forward(self,model_inputs):
+     import torch
+     v=model_inputs["input_ids"][0].tolist()
+     with torch.no_grad():
+       e=self.model(input_ids=torch.tensor([v[0:i]+[self.tokenizer.mask_token_id]+v[i+1:]+[j] for i,j in enumerate(v[1:-1],1)],device=self.device))
+     return {"logits":e.logits[:,1:-2,:],**model_inputs}
+   def postprocess(self,model_outputs,**kwargs):
+     import numpy
+     e=model_outputs["logits"].numpy()
+     r=[1 if i==0 else -1 if j.endswith("|root") else 0 for i,j in sorted(self.model.config.id2label.items())]
+     e+=numpy.where(numpy.add.outer(numpy.identity(e.shape[0]),r)==0,0,numpy.nan)
+     g=self.model.config.label2id["X|_|goeswith"]
+     r=numpy.tri(e.shape[0])
+     for i in range(e.shape[0]):
+       for j in range(i+2,e.shape[1]):
+         r[i,j]=r[i,j-1] if numpy.nanargmax(e[i,j-1])==g else 1
+     e[:,:,g]+=numpy.where(r==0,0,numpy.nan)
+     m,p=numpy.nanmax(e,axis=2),numpy.nanargmax(e,axis=2)
+     h=self.chu_liu_edmonds(m)
+     z=[i for i,j in enumerate(h) if i==j]
+     if len(z)>1:
+       k,h=z[numpy.nanargmax(m[z,z])],numpy.nanmin(m)-numpy.nanmax(m)
+       m[:,z]+=[[0 if j in z and (i!=j or i==k) else h for i in z] for j in range(m.shape[0])]
+       h=self.chu_liu_edmonds(m)
+     v=[(s,e) for s,e in model_outputs["offset_mapping"][0].tolist() if s<e]
+     q=[self.model.config.id2label[p[j,i]].split("|") for i,j in enumerate(h)]
+     if "aggregation_strategy" in kwargs and kwargs["aggregation_strategy"]!="none":
+       for i,j in reversed(list(enumerate(q[1:],1))):
+         if j[-1]=="goeswith" and set([t[-1] for t in q[h[i]+1:i+1]])=={"goeswith"}:
+           h=[b if i>b else b-1 for a,b in enumerate(h) if i!=a]
+           v[i-1]=(v[i-1][0],v.pop(i)[1])
+           q.pop(i)
+     t=model_outputs["sentence"].replace("\n"," ")
+     u="# text = "+t+"\n"
+     for i,(s,e) in enumerate(v):
+       u+="\t".join([str(i+1),t[s:e],"_",q[i][0],"_","|".join(q[i][1:-1]),str(0 if h[i]==i else h[i]+1),q[i][-1],"_","_" if i+1<len(v) and e<v[i+1][0] else "SpaceAfter=No"])+"\n"
+     return u+"\n"
+   def chu_liu_edmonds(self,matrix):
+     import numpy
+     h=numpy.nanargmax(matrix,axis=0)
+     x=[-1 if i==j else j for i,j in enumerate(h)]
+     for b in [lambda x,i,j:-1 if i not in x else x[i],lambda x,i,j:-1 if j<0 else x[j]]:
+       y=[]
+       while x!=y:
+         y=list(x)
+         for i,j in enumerate(x):
+           x[i]=b(x,i,j)
+       if max(x)<0:
+         return h
+     y,x=[i for i,j in enumerate(x) if j==max(x)],[i for i,j in enumerate(x) if j<max(x)]
+     z=matrix-numpy.nanmax(matrix,axis=0)
+     m=numpy.block([[z[x,:][:,x],numpy.nanmax(z[x,:][:,y],axis=1).reshape(len(x),1)],[numpy.nanmax(z[y,:][:,x],axis=0),numpy.nanmax(z[y,y])]])
+     k=[j if i==len(x) else x[j] if j<len(x) else y[numpy.nanargmax(z[y,x[i]])] for i,j in enumerate(self.chu_liu_edmonds(m))]
+     h=[j if i in y else k[x.index(i)] for i,j in enumerate(h)]
+     i=y[numpy.nanargmax(z[x[k[-1]],y] if k[-1]<len(x) else z[y,y])]
+     h[i]=x[k[-1]] if k[-1]<len(x) else i
+     return h
+
+ class MecabPreTokenizer(MecabTokenizer):
+   def mecab_split(self,i,normalized_string):
+     import textspan
+     t=str(normalized_string)
+     k=self.tokenize(t)
+     return [normalized_string[s:e] for c in textspan.get_original_spans(k,t) for s,e in c]
+   def pre_tokenize(self,pretok):
+     pretok.split(self.mecab_split)
+
+ class JumanDebertaV2TokenizerFast(DebertaV2TokenizerFast):
+   def __init__(self,**kwargs):
+     import os
+     from tokenizers.pre_tokenizers import PreTokenizer,Metaspace,Sequence
+     super().__init__(**kwargs)
+     d,r="/var/lib/mecab/dic/juman-utf8","/etc/mecabrc"
+     if not (os.path.isdir(d) and os.path.isfile(r)):
+       import zipfile
+       import tempfile
+       try:
+         from transformers.utils import cached_file
+       except:
+         from transformers.file_utils import cached_path,hf_bucket_url
+         cached_file=lambda x,y:cached_path(hf_bucket_url(x,y))
+       self.dicdir=tempfile.TemporaryDirectory()
+       d=self.dicdir.name
+       with zipfile.ZipFile(cached_file("KoichiYasuoka/deberta-base-japanese-juman-ud-goeswith","mecab-jumandic-utf8.zip")) as z:
+         z.extractall(d)
+       r=os.path.join(d,"mecabrc")
+       with open(r,"w",encoding="utf-8") as w:
+         print("dicdir =",d,file=w)
+     self.custom_pre_tokenizer=Sequence([PreTokenizer.custom(MecabPreTokenizer(mecab_dic=None,mecab_option="-d "+d+" -r "+r)),Metaspace()])
+     self._tokenizer.pre_tokenizer=self.custom_pre_tokenizer
+   def save_pretrained(self,save_directory,**kwargs):
+     import os
+     import shutil
+     from tokenizers.pre_tokenizers import Metaspace
+     self._auto_map={"AutoTokenizer":[None,"ud.JumanDebertaV2TokenizerFast"]}
+     self._tokenizer.pre_tokenizer=Metaspace()
+     super().save_pretrained(save_directory,**kwargs)
+     self._tokenizer.pre_tokenizer=self.custom_pre_tokenizer
+     shutil.copy(os.path.abspath(__file__),os.path.join(save_directory,"ud.py"))
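
`MecabPreTokenizer.mecab_split` relies on `textspan.get_original_spans` (from pytextspan) to map MeCab's possibly normalized tokens back to character offsets in the original string. A standalone sketch of that step; the Juman-style segmentation here is hypothetical:

```
# Sketch: recover character spans for externally tokenized text, as mecab_split does.
import textspan
original = "挿し絵が用いられている"
tokens = ["挿し絵", "が", "用い", "られて", "いる"]   # hypothetical Juman-style segmentation
spans = [span for chunk in textspan.get_original_spans(tokens, original) for span in chunk]
print(spans)   # e.g. [(0, 3), (3, 4), (4, 6), (6, 9), (9, 11)]
```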