Commit 45d76b3 by KoichiYasuoka (1 parent: 81ae88f)

initial release
Files changed (9)
  1. README.md +76 -0
  2. config.json +570 -0
  3. maker.py +56 -0
  4. pytorch_model.bin +3 -0
  5. special_tokens_map.json +9 -0
  6. tokenizer.json +0 -0
  7. tokenizer_config.json +16 -0
  8. ud.py +60 -0
  9. vocab.txt +0 -0
README.md ADDED
@@ -0,0 +1,76 @@
---
language:
- "ko"
tags:
- "korean"
- "token-classification"
- "pos"
- "dependency-parsing"
datasets:
- "universal_dependencies"
license: "cc-by-sa-4.0"
pipeline_tag: "token-classification"
widget:
- text: "홍시 맛이 나서 홍시라 생각한다."
---

# roberta-large-korean-ud-goeswith

## Model Description

This is a RoBERTa model for POS-tagging and dependency-parsing (using `goeswith` for subwords), derived from [klue/roberta-large](https://huggingface.co/klue/roberta-large).
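
Each output label packs UPOS, morphological features, and dependency relation into a single `UPOS|FEATS|DEPREL` string (the full inventory is in `config.json` below), with `X|_|goeswith` reserved for the non-first subwords of a word. A minimal sketch of unpacking such a label, mirroring the split used in the code below:

```py
# Minimal sketch: labels are "UPOS|FEATS|DEPREL" strings from config.json
q="NOUN|_|nsubj".split("|")
upos,feats,deprel=q[0],"|".join(q[1:-1]),q[-1]
print(upos,feats,deprel)  # NOUN _ nsubj
```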

## How to Use

```py
class UDgoeswith(object):
  def __init__(self,bert):
    from transformers import AutoTokenizer,AutoModelForTokenClassification
    self.tokenizer=AutoTokenizer.from_pretrained(bert)
    self.model=AutoModelForTokenClassification.from_pretrained(bert)
  def __call__(self,text):
    import numpy,torch,ufal.chu_liu_edmonds
    w=self.tokenizer(text,return_offsets_mapping=True)
    v=w["input_ids"]
    # one copy of the sentence per token: mask the token and append it after [SEP]
    x=[v[0:i]+[self.tokenizer.mask_token_id]+v[i+1:]+[j] for i,j in enumerate(v[1:-1],1)]
    with torch.no_grad():
      e=self.model(input_ids=torch.tensor(x)).logits.numpy()[:,1:-2,:]
    # |root labels are only allowed on the diagonal (token as its own head)
    r=[1 if i==0 else -1 if j.endswith("|root") else 0 for i,j in sorted(self.model.config.id2label.items())]
    e+=numpy.where(numpy.add.outer(numpy.identity(e.shape[0]),r)==0,0,numpy.nan)
    # goeswith may only attach a token to the token immediately before it
    g=self.model.config.label2id["X|_|goeswith"]
    r=numpy.tri(e.shape[0])
    for i in range(e.shape[0]):
      for j in range(i+2,e.shape[1]):
        r[i,j]=r[i,j-1] if numpy.nanargmax(e[i,j-1])==g else 1
    e[:,:,g]+=numpy.where(r==0,0,numpy.nan)
    m=numpy.full((e.shape[0]+1,e.shape[1]+1),numpy.nan)
    m[1:,1:]=numpy.nanmax(e,axis=2).transpose()
    p=numpy.zeros(m.shape)
    p[1:,1:]=numpy.nanargmax(e,axis=2).transpose()
    for i in range(1,m.shape[0]):
      m[i,0],m[i,i],p[i,0]=m[i,i],numpy.nan,p[i,i]
    h=ufal.chu_liu_edmonds.chu_liu_edmonds(m)[0]
    # if several tokens attach to the artificial root, keep only the best one
    if [0 for i in h if i==0]!=[0]:
      m[:,0]+=numpy.where(m[:,0]==numpy.nanmax(m[[i for i,j in enumerate(h) if j==0],0]),0,numpy.nan)
      m[[i for i,j in enumerate(h) if j==0]]+=[0 if i==0 or j==0 else numpy.nan for i,j in enumerate(h)]
      h=ufal.chu_liu_edmonds.chu_liu_edmonds(m)[0]
    u="# text = "+text+"\n"
    v=[(s,e) for s,e in w["offset_mapping"] if s<e]
    for i,(s,e) in enumerate(v,1):
      q=self.model.config.id2label[p[i,h[i]]].split("|")
      u+="\t".join([str(i),text[s:e],"_",q[0],"_","|".join(q[1:-1]),str(h[i]),q[-1],"_","_" if i<len(v) and e<v[i][0] else "SpaceAfter=No"])+"\n"
    return u+"\n"

nlp=UDgoeswith("KoichiYasuoka/roberta-large-korean-ud-goeswith")
print(nlp("홍시 맛이 나서 홍시라 생각한다."))
```

This requires [ufal.chu-liu-edmonds](https://pypi.org/project/ufal.chu-liu-edmonds/) for the maximum-spanning-tree decoding.
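
The returned string is one CoNLL-U sentence block, so it can be handed to standard CoNLL-U tooling. A minimal sketch, assuming the third-party `conllu` package (`pip install conllu`) is installed:

```py
import conllu
# parse the CoNLL-U string produced above and walk its tokens
for sentence in conllu.parse(nlp("홍시 맛이 나서 홍시라 생각한다.")):
  for token in sentence:
    print(token["id"],token["form"],token["upos"],token["head"],token["deprel"])
```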

Alternatively, without ufal.chu-liu-edmonds, the custom pipeline shipped in `ud.py` (shown below) can be used:

```py
from transformers import pipeline
nlp=pipeline("universal-dependencies","KoichiYasuoka/roberta-large-korean-ud-goeswith",trust_remote_code=True,aggregation_strategy="simple")
print(nlp("홍시 맛이 나서 홍시라 생각한다."))
```

With `aggregation_strategy="simple"`, subwords labeled `goeswith` are merged back into whole words in the output.
config.json ADDED
@@ -0,0 +1,570 @@
{
  "architectures": [
    "RobertaForTokenClassification"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 0,
  "classifier_dropout": null,
  "eos_token_id": 2,
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 1024,
  "id2label": {
    "0": "-|_|dep",
    "1": "ADJ|_|acl",
    "2": "ADJ|_|acl:relcl",
    "3": "ADJ|_|advcl",
    "4": "ADJ|_|advmod",
    "5": "ADJ|_|amod",
    "6": "ADJ|_|appos",
    "7": "ADJ|_|ccomp",
    "8": "ADJ|_|conj",
    "9": "ADJ|_|dep",
    "10": "ADJ|_|dislocated",
    "11": "ADJ|_|fixed",
    "12": "ADJ|_|flat",
    "13": "ADJ|_|nmod",
    "14": "ADJ|_|nsubj",
    "15": "ADJ|_|obj",
    "16": "ADJ|_|obl",
    "17": "ADJ|_|root",
    "18": "ADP|_|appos",
    "19": "ADP|_|case",
    "20": "ADP|_|flat",
    "21": "ADP|_|mark",
    "22": "ADP|_|nmod",
    "23": "ADP|_|nsubj",
    "24": "ADP|_|obj",
    "25": "ADV|_|acl",
    "26": "ADV|_|acl:relcl",
    "27": "ADV|_|advcl",
    "28": "ADV|_|advmod",
    "29": "ADV|_|amod",
    "30": "ADV|_|appos",
    "31": "ADV|_|case",
    "32": "ADV|_|cc",
    "33": "ADV|_|ccomp",
    "34": "ADV|_|conj",
    "35": "ADV|_|dep",
    "36": "ADV|_|dislocated",
    "37": "ADV|_|fixed",
    "38": "ADV|_|flat",
    "39": "ADV|_|iobj",
    "40": "ADV|_|mark",
    "41": "ADV|_|nmod",
    "42": "ADV|_|nmod:poss",
    "43": "ADV|_|nsubj",
    "44": "ADV|_|obj",
    "45": "ADV|_|obl",
    "46": "ADV|_|root",
    "47": "AUX|_|amod",
    "48": "AUX|_|aux",
    "49": "AUX|_|cop",
    "50": "AUX|_|fixed",
    "51": "AUX|_|flat",
    "52": "AUX|_|root",
    "53": "CCONJ|_|acl",
    "54": "CCONJ|_|advcl",
    "55": "CCONJ|_|advmod",
    "56": "CCONJ|_|amod",
    "57": "CCONJ|_|case",
    "58": "CCONJ|_|cc",
    "59": "CCONJ|_|ccomp",
    "60": "CCONJ|_|compound",
    "61": "CCONJ|_|conj",
    "62": "CCONJ|_|csubj",
    "63": "CCONJ|_|dep",
    "64": "CCONJ|_|dislocated",
    "65": "CCONJ|_|fixed",
    "66": "CCONJ|_|flat",
    "67": "CCONJ|_|iobj",
    "68": "CCONJ|_|mark",
    "69": "CCONJ|_|nmod",
    "70": "CCONJ|_|nsubj",
    "71": "CCONJ|_|obj",
    "72": "CCONJ|_|obl",
    "73": "CCONJ|_|root",
    "74": "CCONJ|_|xcomp",
    "75": "DET|NumType=Card|det",
    "76": "DET|_|appos",
    "77": "DET|_|ccomp",
    "78": "DET|_|det",
    "79": "DET|_|root",
    "80": "INTJ|_|advcl",
    "81": "INTJ|_|conj",
    "82": "INTJ|_|discourse",
    "83": "INTJ|_|root",
    "84": "INTJ|_|vocative",
    "85": "NOUN|_|acl",
    "86": "NOUN|_|acl:relcl",
    "87": "NOUN|_|advcl",
    "88": "NOUN|_|amod",
    "89": "NOUN|_|appos",
    "90": "NOUN|_|case",
    "91": "NOUN|_|ccomp",
    "92": "NOUN|_|clf",
    "93": "NOUN|_|compound",
    "94": "NOUN|_|conj",
    "95": "NOUN|_|csubj",
    "96": "NOUN|_|dep",
    "97": "NOUN|_|dislocated",
    "98": "NOUN|_|fixed",
    "99": "NOUN|_|flat",
    "100": "NOUN|_|iobj",
    "101": "NOUN|_|nmod",
    "102": "NOUN|_|nmod:poss",
    "103": "NOUN|_|nsubj",
    "104": "NOUN|_|nsubj:pass",
    "105": "NOUN|_|nummod",
    "106": "NOUN|_|obj",
    "107": "NOUN|_|obl",
    "108": "NOUN|_|parataxis",
    "109": "NOUN|_|root",
    "110": "NOUN|_|vocative",
    "111": "NOUN|_|xcomp",
    "112": "NUM|NumType=Card|acl:relcl",
    "113": "NUM|NumType=Card|advcl",
    "114": "NUM|NumType=Card|appos",
    "115": "NUM|NumType=Card|conj",
    "116": "NUM|NumType=Card|dep",
    "117": "NUM|NumType=Card|flat",
    "118": "NUM|NumType=Card|nmod",
    "119": "NUM|NumType=Card|nmod:poss",
    "120": "NUM|NumType=Card|nsubj",
    "121": "NUM|NumType=Card|nsubj:pass",
    "122": "NUM|NumType=Card|nummod",
    "123": "NUM|NumType=Card|obj",
    "124": "NUM|NumType=Card|obl",
    "125": "NUM|NumType=Card|root",
    "126": "NUM|_|acl:relcl",
    "127": "NUM|_|advcl",
    "128": "NUM|_|appos",
    "129": "NUM|_|compound",
    "130": "NUM|_|conj",
    "131": "NUM|_|csubj",
    "132": "NUM|_|dep",
    "133": "NUM|_|dislocated",
    "134": "NUM|_|flat",
    "135": "NUM|_|nmod",
    "136": "NUM|_|nmod:poss",
    "137": "NUM|_|nsubj",
    "138": "NUM|_|nsubj:pass",
    "139": "NUM|_|nummod",
    "140": "NUM|_|obj",
    "141": "NUM|_|obl",
    "142": "NUM|_|root",
    "143": "NUM|_|xcomp",
    "144": "PART|_|acl",
    "145": "PART|_|advcl",
    "146": "PART|_|advmod",
    "147": "PART|_|amod",
    "148": "PART|_|case",
    "149": "PART|_|ccomp",
    "150": "PART|_|compound",
    "151": "PART|_|conj",
    "152": "PART|_|dep",
    "153": "PART|_|dislocated",
    "154": "PART|_|nmod",
    "155": "PART|_|nsubj",
    "156": "PART|_|obj",
    "157": "PART|_|obl",
    "158": "PART|_|root",
    "159": "PART|_|xcomp",
    "160": "PRON|_|acl",
    "161": "PRON|_|advcl",
    "162": "PRON|_|appos",
    "163": "PRON|_|ccomp",
    "164": "PRON|_|compound",
    "165": "PRON|_|conj",
    "166": "PRON|_|csubj",
    "167": "PRON|_|dep",
    "168": "PRON|_|det:poss",
    "169": "PRON|_|dislocated",
    "170": "PRON|_|flat",
    "171": "PRON|_|iobj",
    "172": "PRON|_|nmod",
    "173": "PRON|_|nsubj",
    "174": "PRON|_|nsubj:pass",
    "175": "PRON|_|obj",
    "176": "PRON|_|obl",
    "177": "PRON|_|root",
    "178": "PROPN|_|acl",
    "179": "PROPN|_|acl:relcl",
    "180": "PROPN|_|advcl",
    "181": "PROPN|_|amod",
    "182": "PROPN|_|appos",
    "183": "PROPN|_|ccomp",
    "184": "PROPN|_|compound",
    "185": "PROPN|_|conj",
    "186": "PROPN|_|csubj",
    "187": "PROPN|_|dep",
    "188": "PROPN|_|dislocated",
    "189": "PROPN|_|flat",
    "190": "PROPN|_|iobj",
    "191": "PROPN|_|nmod",
    "192": "PROPN|_|nmod:poss",
    "193": "PROPN|_|nsubj",
    "194": "PROPN|_|nsubj:pass",
    "195": "PROPN|_|obj",
    "196": "PROPN|_|obl",
    "197": "PROPN|_|root",
    "198": "PROPN|_|vocative",
    "199": "PROPN|_|xcomp",
    "200": "PUNCT|NumType=Card|punct",
    "201": "PUNCT|_|punct",
    "202": "SCONJ|_|acl",
    "203": "SCONJ|_|advcl",
    "204": "SCONJ|_|amod",
    "205": "SCONJ|_|appos",
    "206": "SCONJ|_|case",
    "207": "SCONJ|_|cc",
    "208": "SCONJ|_|ccomp",
    "209": "SCONJ|_|conj",
    "210": "SCONJ|_|dep",
    "211": "SCONJ|_|dislocated",
    "212": "SCONJ|_|fixed",
    "213": "SCONJ|_|mark",
    "214": "SCONJ|_|nmod",
    "215": "SCONJ|_|nsubj",
    "216": "SCONJ|_|obj",
    "217": "SCONJ|_|obl",
    "218": "SCONJ|_|root",
    "219": "SCONJ|_|xcomp",
    "220": "SYM|_|advmod",
    "221": "SYM|_|appos",
    "222": "SYM|_|compound",
    "223": "SYM|_|conj",
    "224": "SYM|_|dep",
    "225": "SYM|_|flat",
    "226": "SYM|_|list",
    "227": "SYM|_|nmod",
    "228": "SYM|_|nmod:poss",
    "229": "SYM|_|nsubj",
    "230": "SYM|_|nsubj:pass",
    "231": "SYM|_|nummod",
    "232": "SYM|_|obj",
    "233": "SYM|_|root",
    "234": "VERB|_|acl",
    "235": "VERB|_|acl:relcl",
    "236": "VERB|_|advcl",
    "237": "VERB|_|amod",
    "238": "VERB|_|appos",
    "239": "VERB|_|case",
    "240": "VERB|_|ccomp",
    "241": "VERB|_|compound",
    "242": "VERB|_|conj",
    "243": "VERB|_|csubj",
    "244": "VERB|_|dep",
    "245": "VERB|_|dislocated",
    "246": "VERB|_|fixed",
    "247": "VERB|_|flat",
    "248": "VERB|_|nmod",
    "249": "VERB|_|nmod:poss",
    "250": "VERB|_|nsubj",
    "251": "VERB|_|obj",
    "252": "VERB|_|obl",
    "253": "VERB|_|root",
    "254": "VERB|_|xcomp",
    "255": "X|_|advcl",
    "256": "X|_|appos",
    "257": "X|_|compound",
    "258": "X|_|conj",
    "259": "X|_|csubj",
    "260": "X|_|dep",
    "261": "X|_|dislocated",
    "262": "X|_|flat",
    "263": "X|_|goeswith",
    "264": "X|_|nmod",
    "265": "X|_|nsubj",
    "266": "X|_|obj",
    "267": "X|_|obl",
    "268": "X|_|root"
  },
  "initializer_range": 0.02,
  "intermediate_size": 4096,
  "label2id": {
    "-|_|dep": 0,
    "ADJ|_|acl": 1,
    "ADJ|_|acl:relcl": 2,
    "ADJ|_|advcl": 3,
    "ADJ|_|advmod": 4,
    "ADJ|_|amod": 5,
    "ADJ|_|appos": 6,
    "ADJ|_|ccomp": 7,
    "ADJ|_|conj": 8,
    "ADJ|_|dep": 9,
    "ADJ|_|dislocated": 10,
    "ADJ|_|fixed": 11,
    "ADJ|_|flat": 12,
    "ADJ|_|nmod": 13,
    "ADJ|_|nsubj": 14,
    "ADJ|_|obj": 15,
    "ADJ|_|obl": 16,
    "ADJ|_|root": 17,
    "ADP|_|appos": 18,
    "ADP|_|case": 19,
    "ADP|_|flat": 20,
    "ADP|_|mark": 21,
    "ADP|_|nmod": 22,
    "ADP|_|nsubj": 23,
    "ADP|_|obj": 24,
    "ADV|_|acl": 25,
    "ADV|_|acl:relcl": 26,
    "ADV|_|advcl": 27,
    "ADV|_|advmod": 28,
    "ADV|_|amod": 29,
    "ADV|_|appos": 30,
    "ADV|_|case": 31,
    "ADV|_|cc": 32,
    "ADV|_|ccomp": 33,
    "ADV|_|conj": 34,
    "ADV|_|dep": 35,
    "ADV|_|dislocated": 36,
    "ADV|_|fixed": 37,
    "ADV|_|flat": 38,
    "ADV|_|iobj": 39,
    "ADV|_|mark": 40,
    "ADV|_|nmod": 41,
    "ADV|_|nmod:poss": 42,
    "ADV|_|nsubj": 43,
    "ADV|_|obj": 44,
    "ADV|_|obl": 45,
    "ADV|_|root": 46,
    "AUX|_|amod": 47,
    "AUX|_|aux": 48,
    "AUX|_|cop": 49,
    "AUX|_|fixed": 50,
    "AUX|_|flat": 51,
    "AUX|_|root": 52,
    "CCONJ|_|acl": 53,
    "CCONJ|_|advcl": 54,
    "CCONJ|_|advmod": 55,
    "CCONJ|_|amod": 56,
    "CCONJ|_|case": 57,
    "CCONJ|_|cc": 58,
    "CCONJ|_|ccomp": 59,
    "CCONJ|_|compound": 60,
    "CCONJ|_|conj": 61,
    "CCONJ|_|csubj": 62,
    "CCONJ|_|dep": 63,
    "CCONJ|_|dislocated": 64,
    "CCONJ|_|fixed": 65,
    "CCONJ|_|flat": 66,
    "CCONJ|_|iobj": 67,
    "CCONJ|_|mark": 68,
    "CCONJ|_|nmod": 69,
    "CCONJ|_|nsubj": 70,
    "CCONJ|_|obj": 71,
    "CCONJ|_|obl": 72,
    "CCONJ|_|root": 73,
    "CCONJ|_|xcomp": 74,
    "DET|NumType=Card|det": 75,
    "DET|_|appos": 76,
    "DET|_|ccomp": 77,
    "DET|_|det": 78,
    "DET|_|root": 79,
    "INTJ|_|advcl": 80,
    "INTJ|_|conj": 81,
    "INTJ|_|discourse": 82,
    "INTJ|_|root": 83,
    "INTJ|_|vocative": 84,
    "NOUN|_|acl": 85,
    "NOUN|_|acl:relcl": 86,
    "NOUN|_|advcl": 87,
    "NOUN|_|amod": 88,
    "NOUN|_|appos": 89,
    "NOUN|_|case": 90,
    "NOUN|_|ccomp": 91,
    "NOUN|_|clf": 92,
    "NOUN|_|compound": 93,
    "NOUN|_|conj": 94,
    "NOUN|_|csubj": 95,
    "NOUN|_|dep": 96,
    "NOUN|_|dislocated": 97,
    "NOUN|_|fixed": 98,
    "NOUN|_|flat": 99,
    "NOUN|_|iobj": 100,
    "NOUN|_|nmod": 101,
    "NOUN|_|nmod:poss": 102,
    "NOUN|_|nsubj": 103,
    "NOUN|_|nsubj:pass": 104,
    "NOUN|_|nummod": 105,
    "NOUN|_|obj": 106,
    "NOUN|_|obl": 107,
    "NOUN|_|parataxis": 108,
    "NOUN|_|root": 109,
    "NOUN|_|vocative": 110,
    "NOUN|_|xcomp": 111,
    "NUM|NumType=Card|acl:relcl": 112,
    "NUM|NumType=Card|advcl": 113,
    "NUM|NumType=Card|appos": 114,
    "NUM|NumType=Card|conj": 115,
    "NUM|NumType=Card|dep": 116,
    "NUM|NumType=Card|flat": 117,
    "NUM|NumType=Card|nmod": 118,
    "NUM|NumType=Card|nmod:poss": 119,
    "NUM|NumType=Card|nsubj": 120,
    "NUM|NumType=Card|nsubj:pass": 121,
    "NUM|NumType=Card|nummod": 122,
    "NUM|NumType=Card|obj": 123,
    "NUM|NumType=Card|obl": 124,
    "NUM|NumType=Card|root": 125,
    "NUM|_|acl:relcl": 126,
    "NUM|_|advcl": 127,
    "NUM|_|appos": 128,
    "NUM|_|compound": 129,
    "NUM|_|conj": 130,
    "NUM|_|csubj": 131,
    "NUM|_|dep": 132,
    "NUM|_|dislocated": 133,
    "NUM|_|flat": 134,
    "NUM|_|nmod": 135,
    "NUM|_|nmod:poss": 136,
    "NUM|_|nsubj": 137,
    "NUM|_|nsubj:pass": 138,
    "NUM|_|nummod": 139,
    "NUM|_|obj": 140,
    "NUM|_|obl": 141,
    "NUM|_|root": 142,
    "NUM|_|xcomp": 143,
    "PART|_|acl": 144,
    "PART|_|advcl": 145,
    "PART|_|advmod": 146,
    "PART|_|amod": 147,
    "PART|_|case": 148,
    "PART|_|ccomp": 149,
    "PART|_|compound": 150,
    "PART|_|conj": 151,
    "PART|_|dep": 152,
    "PART|_|dislocated": 153,
    "PART|_|nmod": 154,
    "PART|_|nsubj": 155,
    "PART|_|obj": 156,
    "PART|_|obl": 157,
    "PART|_|root": 158,
    "PART|_|xcomp": 159,
    "PRON|_|acl": 160,
    "PRON|_|advcl": 161,
    "PRON|_|appos": 162,
    "PRON|_|ccomp": 163,
    "PRON|_|compound": 164,
    "PRON|_|conj": 165,
    "PRON|_|csubj": 166,
    "PRON|_|dep": 167,
    "PRON|_|det:poss": 168,
    "PRON|_|dislocated": 169,
    "PRON|_|flat": 170,
    "PRON|_|iobj": 171,
    "PRON|_|nmod": 172,
    "PRON|_|nsubj": 173,
    "PRON|_|nsubj:pass": 174,
    "PRON|_|obj": 175,
    "PRON|_|obl": 176,
    "PRON|_|root": 177,
    "PROPN|_|acl": 178,
    "PROPN|_|acl:relcl": 179,
    "PROPN|_|advcl": 180,
    "PROPN|_|amod": 181,
    "PROPN|_|appos": 182,
    "PROPN|_|ccomp": 183,
    "PROPN|_|compound": 184,
    "PROPN|_|conj": 185,
    "PROPN|_|csubj": 186,
    "PROPN|_|dep": 187,
    "PROPN|_|dislocated": 188,
    "PROPN|_|flat": 189,
    "PROPN|_|iobj": 190,
    "PROPN|_|nmod": 191,
    "PROPN|_|nmod:poss": 192,
    "PROPN|_|nsubj": 193,
    "PROPN|_|nsubj:pass": 194,
    "PROPN|_|obj": 195,
    "PROPN|_|obl": 196,
    "PROPN|_|root": 197,
    "PROPN|_|vocative": 198,
    "PROPN|_|xcomp": 199,
    "PUNCT|NumType=Card|punct": 200,
    "PUNCT|_|punct": 201,
    "SCONJ|_|acl": 202,
    "SCONJ|_|advcl": 203,
    "SCONJ|_|amod": 204,
    "SCONJ|_|appos": 205,
    "SCONJ|_|case": 206,
    "SCONJ|_|cc": 207,
    "SCONJ|_|ccomp": 208,
    "SCONJ|_|conj": 209,
    "SCONJ|_|dep": 210,
    "SCONJ|_|dislocated": 211,
    "SCONJ|_|fixed": 212,
    "SCONJ|_|mark": 213,
    "SCONJ|_|nmod": 214,
    "SCONJ|_|nsubj": 215,
    "SCONJ|_|obj": 216,
    "SCONJ|_|obl": 217,
    "SCONJ|_|root": 218,
    "SCONJ|_|xcomp": 219,
    "SYM|_|advmod": 220,
    "SYM|_|appos": 221,
    "SYM|_|compound": 222,
    "SYM|_|conj": 223,
    "SYM|_|dep": 224,
    "SYM|_|flat": 225,
    "SYM|_|list": 226,
    "SYM|_|nmod": 227,
    "SYM|_|nmod:poss": 228,
    "SYM|_|nsubj": 229,
    "SYM|_|nsubj:pass": 230,
    "SYM|_|nummod": 231,
    "SYM|_|obj": 232,
    "SYM|_|root": 233,
    "VERB|_|acl": 234,
    "VERB|_|acl:relcl": 235,
    "VERB|_|advcl": 236,
    "VERB|_|amod": 237,
    "VERB|_|appos": 238,
    "VERB|_|case": 239,
    "VERB|_|ccomp": 240,
    "VERB|_|compound": 241,
    "VERB|_|conj": 242,
    "VERB|_|csubj": 243,
    "VERB|_|dep": 244,
    "VERB|_|dislocated": 245,
    "VERB|_|fixed": 246,
    "VERB|_|flat": 247,
    "VERB|_|nmod": 248,
    "VERB|_|nmod:poss": 249,
    "VERB|_|nsubj": 250,
    "VERB|_|obj": 251,
    "VERB|_|obl": 252,
    "VERB|_|root": 253,
    "VERB|_|xcomp": 254,
    "X|_|advcl": 255,
    "X|_|appos": 256,
    "X|_|compound": 257,
    "X|_|conj": 258,
    "X|_|csubj": 259,
    "X|_|dep": 260,
    "X|_|dislocated": 261,
    "X|_|flat": 262,
    "X|_|goeswith": 263,
    "X|_|nmod": 264,
    "X|_|nsubj": 265,
    "X|_|obj": 266,
    "X|_|obl": 267,
    "X|_|root": 268
  },
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 514,
  "model_type": "roberta",
  "num_attention_heads": 16,
  "num_hidden_layers": 24,
  "pad_token_id": 1,
  "position_embedding_type": "absolute",
  "tokenizer_class": "BertTokenizerFast",
  "torch_dtype": "float32",
  "transformers_version": "4.22.1",
  "type_vocab_size": 1,
  "use_cache": true,
  "vocab_size": 32000
}
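
All 269 labels are `UPOS|FEATS|DEPREL` combinations collected from the training treebanks by `maker.py` below. A quick sketch for inspecting the inventory:

```py
from transformers import AutoConfig
# load the config above and group its labels by UPOS
config=AutoConfig.from_pretrained("KoichiYasuoka/roberta-large-korean-ud-goeswith")
upos=sorted(set(label.split("|")[0] for label in config.label2id))
print(len(config.label2id),"labels across",len(upos),"UPOS tags:",upos)
```
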
maker.py ADDED
@@ -0,0 +1,56 @@
#! /usr/bin/python3
src="klue/roberta-large"
tgt="KoichiYasuoka/roberta-large-korean-ud-goeswith"
import os
url="https://github.com/UniversalDependencies/UD_Korean-Kaist"
os.system("test -d "+os.path.basename(url)+" || git clone --depth=1 "+url)
url="https://github.com/UniversalDependencies/UD_Korean-GSD"
os.system("test -d "+os.path.basename(url)+" || git clone --depth=1 "+url)
os.system("for F in train dev test ; do cat UD_Korean-*/*-$F.conllu > $F.conllu ; done")
class UDgoeswithDataset(object):
  def __init__(self,conllu,tokenizer):
    self.ids,self.tags,label=[],[],set()
    with open(conllu,"r",encoding="utf-8") as r:
      cls,sep,msk=tokenizer.cls_token_id,tokenizer.sep_token_id,tokenizer.mask_token_id
      dep,c="-|_|dep",[]
      for s in r:
        t=s.split("\t")
        if len(t)==10 and t[0].isdecimal():
          c.append(t)
        elif c!=[]:
          # insert an X|_|goeswith row for every non-first subword of each word
          v=tokenizer([t[1] for t in c],add_special_tokens=False)["input_ids"]
          for i in range(len(v)-1,-1,-1):
            for j in range(1,len(v[i])):
              c.insert(i+1,[c[i][0],"_","_","X","_","_",c[i][0],"goeswith","_","_"])
          y=["0"]+[t[0] for t in c]
          h=[i if t[6]=="0" else y.index(t[6]) for i,t in enumerate(c,1)]
          p,v=[t[3]+"|"+t[5]+"|"+t[7] for t in c],sum(v,[])
          if len(v)<tokenizer.model_max_length-3:
            # one plain example tagging every token with UPOS|FEATS|DEPREL
            self.ids.append([cls]+v+[sep])
            self.tags.append([dep]+p+[dep])
            label=set(sum([self.tags[-1],list(label)],[]))
            # plus one example per token: mask it, append it after [SEP], and
            # tag only the tokens whose head is the masked position
            for i,k in enumerate(v):
              self.ids.append([cls]+v[0:i]+[msk]+v[i+1:]+[sep,k])
              self.tags.append([dep]+[t if h[j]==i+1 else dep for j,t in enumerate(p)]+[dep,dep])
          c=[]
    self.label2id={l:i for i,l in enumerate(sorted(label))}
  def __call__(*args):
    # unify the label sets of several datasets into one shared label2id
    label=set(sum([list(t.label2id) for t in args],[]))
    lid={l:i for i,l in enumerate(sorted(label))}
    for t in args:
      t.label2id=lid
    return lid
  __len__=lambda self:len(self.ids)
  __getitem__=lambda self,i:{"input_ids":self.ids[i],"labels":[self.label2id[t] for t in self.tags[i]]}
from transformers import AutoTokenizer,AutoConfig,AutoModelForTokenClassification,DataCollatorForTokenClassification,TrainingArguments,Trainer
tkz=AutoTokenizer.from_pretrained(src)
trainDS=UDgoeswithDataset("train.conllu",tkz)
devDS=UDgoeswithDataset("dev.conllu",tkz)
testDS=UDgoeswithDataset("test.conllu",tkz)
lid=trainDS(devDS,testDS)
cfg=AutoConfig.from_pretrained(src,num_labels=len(lid),label2id=lid,id2label={i:l for l,i in lid.items()})
arg=TrainingArguments(num_train_epochs=3,per_device_train_batch_size=32,output_dir="/tmp",overwrite_output_dir=True,save_total_limit=2,evaluation_strategy="epoch",learning_rate=5e-05,warmup_ratio=0.1)
trn=Trainer(args=arg,data_collator=DataCollatorForTokenClassification(tkz),model=AutoModelForTokenClassification.from_pretrained(src,config=cfg),train_dataset=trainDS,eval_dataset=devDS)
trn.train()
trn.save_model(tgt)
tkz.save_pretrained(tgt)
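
For intuition, here is a sketch (comments only, not runnable training code) of the rows `UDgoeswithDataset` emits for a hypothetical three-token sentence:

```py
# Hypothetical 3-token sentence [t1,t2,t3]; li = "UPOS|FEATS|DEPREL" of token i.
#
# One plain tagging row:
#   input : [CLS] t1 t2 t3 [SEP]
#   labels: dep   l1 l2 l3 dep         # dep = "-|_|dep"
#
# One masked row per token (head prediction), shown here for t2:
#   input : [CLS] t1 [MASK] t3 [SEP] t2
#   labels: dep   d1 d2     d3 dep   dep
# where dj = lj if token j's head is t2, else "-|_|dep".
```
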
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a0969ea9aa315d89655c8baaaaa7a83baa30728b52615099059dd3a8366cf11e
size 1343666545
special_tokens_map.json ADDED
@@ -0,0 +1,9 @@
{
  "bos_token": "[CLS]",
  "cls_token": "[CLS]",
  "eos_token": "[SEP]",
  "mask_token": "[MASK]",
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "unk_token": "[UNK]"
}
tokenizer.json ADDED
The diff for this file is too large to render.
tokenizer_config.json ADDED
@@ -0,0 +1,16 @@
{
  "bos_token": "[CLS]",
  "cls_token": "[CLS]",
  "do_basic_tokenize": true,
  "do_lower_case": false,
  "eos_token": "[SEP]",
  "mask_token": "[MASK]",
  "model_max_length": 512,
  "never_split": null,
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "BertTokenizerFast",
  "unk_token": "[UNK]"
}
ud.py ADDED
@@ -0,0 +1,60 @@
from transformers import TokenClassificationPipeline

class UniversalDependenciesPipeline(TokenClassificationPipeline):
  def _forward(self,model_input):
    import torch
    v=model_input["input_ids"][0].tolist()
    # one copy of the sentence per token: mask the token and append it after [SEP]
    with torch.no_grad():
      e=self.model(input_ids=torch.tensor([v[0:i]+[self.tokenizer.mask_token_id]+v[i+1:]+[j] for i,j in enumerate(v[1:-1],1)]))
    return {"logits":e.logits[:,1:-2,:],**model_input}
  def postprocess(self,model_output,**kwargs):
    import numpy
    e=model_output["logits"].numpy()
    # |root labels are only allowed on the diagonal (token as its own head)
    r=[1 if i==0 else -1 if j.endswith("|root") else 0 for i,j in sorted(self.model.config.id2label.items())]
    e+=numpy.where(numpy.add.outer(numpy.identity(e.shape[0]),r)==0,0,numpy.nan)
    # goeswith may only attach a token to the token immediately before it
    g=self.model.config.label2id["X|_|goeswith"]
    r=numpy.tri(e.shape[0])
    for i in range(e.shape[0]):
      for j in range(i+2,e.shape[1]):
        r[i,j]=r[i,j-1] if numpy.nanargmax(e[i,j-1])==g else 1
    e[:,:,g]+=numpy.where(r==0,0,numpy.nan)
    m,p=numpy.nanmax(e,axis=2),numpy.nanargmax(e,axis=2)
    h=self.chu_liu_edmonds(m)
    # if several tokens claim root, keep only the best one and re-decode
    z=[i for i,j in enumerate(h) if i==j]
    if len(z)>1:
      k,h=z[numpy.nanargmax(m[z,z])],numpy.nanmin(m)-numpy.nanmax(m)
      m[:,z]+=[[0 if j in z and (i!=j or i==k) else h for i in z] for j in range(m.shape[0])]
      h=self.chu_liu_edmonds(m)
    v=[(s,e) for s,e in model_output["offset_mapping"][0].tolist() if s<e]
    q=[self.model.config.id2label[p[j,i]].split("|") for i,j in enumerate(h)]
    # merge goeswith subwords back into whole words unless aggregation is "none"
    if "aggregation_strategy" in kwargs and kwargs["aggregation_strategy"]!="none":
      for i,j in reversed(list(enumerate(q[1:],1))):
        if j[-1]=="goeswith" and set([t[-1] for t in q[h[i]+1:i+1]])=={"goeswith"}:
          h=[b if i>b else b-1 for a,b in enumerate(h) if i!=a]
          v[i-1]=(v[i-1][0],v.pop(i)[1])
          q.pop(i)
    t=model_output["sentence"].replace("\n"," ")
    u="# text = "+t+"\n"
    for i,(s,e) in enumerate(v):
      u+="\t".join([str(i+1),t[s:e],"_",q[i][0],"_","|".join(q[i][1:-1]),str(0 if h[i]==i else h[i]+1),q[i][-1],"_","_" if i+1<len(v) and e<v[i+1][0] else "SpaceAfter=No"])+"\n"
    return u+"\n"
  def chu_liu_edmonds(self,matrix):
    import numpy
    h=numpy.nanargmax(matrix,axis=0)
    x=[-1 if i==j else j for i,j in enumerate(h)]
    for b in [lambda x,i,j:-1 if i not in x else x[i],lambda x,i,j:-1 if j<0 else x[j]]:
      y=[]
      while x!=y:
        y=list(x)
        for i,j in enumerate(x):
          x[i]=b(x,i,j)
    if max(x)<0:
      return h
    # contract the detected cycle and recurse on the reduced score matrix
    y,x=[i for i,j in enumerate(x) if j==max(x)],[i for i,j in enumerate(x) if j<max(x)]
    z=matrix-numpy.nanmax(matrix,axis=0)
    m=numpy.block([[z[x,:][:,x],numpy.nanmax(z[x,:][:,y],axis=1).reshape(len(x),1)],[numpy.nanmax(z[y,:][:,x],axis=0),numpy.nanmax(z[y,y])]])
    k=[j if i==len(x) else x[j] if j<len(x) else y[numpy.nanargmax(z[y,x[i]])] for i,j in enumerate(self.chu_liu_edmonds(m))]
    h=[j if i in y else k[x.index(i)] for i,j in enumerate(h)]
    i=y[numpy.nanargmax(z[x[k[-1]],y] if k[-1]<len(x) else z[y,y])]
    h[i]=x[k[-1]] if k[-1]<len(x) else i
    return h
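
The pipeline class can also be instantiated directly from this file instead of via `trust_remote_code` — a sketch, assuming `ud.py` has been downloaded next to the calling script and that the standard `TokenClassificationPipeline` constructor arguments apply:

```py
from transformers import AutoTokenizer,AutoModelForTokenClassification
from ud import UniversalDependenciesPipeline  # the file above, saved locally
mdl="KoichiYasuoka/roberta-large-korean-ud-goeswith"
nlp=UniversalDependenciesPipeline(model=AutoModelForTokenClassification.from_pretrained(mdl),tokenizer=AutoTokenizer.from_pretrained(mdl),aggregation_strategy="simple")
print(nlp("홍시 맛이 나서 홍시라 생각한다."))
```
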
vocab.txt ADDED
The diff for this file is too large to render.