KoichiYasuoka committed on
Commit
f643899
•
1 Parent(s): 6d719ed

initial release

Files changed (9)
  1. README.md +74 -0
  2. config.json +651 -0
  3. maker.py +53 -0
  4. pytorch_model.bin +3 -0
  5. special_tokens_map.json +7 -0
  6. tokenizer.json +0 -0
  7. tokenizer_config.json +14 -0
  8. ud.py +61 -0
  9. vocab.txt +0 -0
README.md ADDED
@@ -0,0 +1,80 @@
+ ---
+ language:
+ - "zh"
+ tags:
+ - "chinese"
+ - "token-classification"
+ - "pos"
+ - "dependency-parsing"
+ datasets:
+ - "universal_dependencies"
+ license: "apache-2.0"
+ pipeline_tag: "token-classification"
+ ---
+
+ # roberta-base-chinese-ud-goeswith
+
+ ## Model Description
+
+ This is a RoBERTa model pre-trained on Chinese Wikipedia texts (both simplified and traditional) and fine-tuned for POS-tagging and dependency-parsing (using `goeswith` for subwords), derived from [chinese-roberta-base-upos](https://huggingface.co/KoichiYasuoka/chinese-roberta-base-upos).
+
+ ## How to Use
+
+ ```py
+ class UDgoeswith(object):
+   def __init__(self,bert):
+     from transformers import AutoTokenizer,AutoModelForTokenClassification
+     self.tokenizer=AutoTokenizer.from_pretrained(bert)
+     self.model=AutoModelForTokenClassification.from_pretrained(bert)
+   def __call__(self,text):
+     import numpy,torch,ufal.chu_liu_edmonds
+     w=self.tokenizer(text,return_offsets_mapping=True)
+     v=w["input_ids"]
+     # one input per token: mask the token and append one candidate head
+     x=[v[0:i]+[self.tokenizer.mask_token_id]+v[i+1:]+[j] for i,j in enumerate(v[1:-1],1)]
+     with torch.no_grad():
+       e=self.model(input_ids=torch.tensor(x)).logits.numpy()[:,1:-2,:]
+     # allow root labels only on the diagonal (a token headed by itself)
+     r=[1 if i==0 else -1 if j.endswith("|root") else 0 for i,j in sorted(self.model.config.id2label.items())]
+     e+=numpy.where(numpy.add.outer(numpy.identity(e.shape[0]),r)==0,0,numpy.nan)
+     # goeswith may only link a subword to the position right before it
+     g=self.model.config.label2id["X|_|goeswith"]
+     r=numpy.tri(e.shape[0])
+     for i in range(e.shape[0]):
+       for j in range(i+2,e.shape[1]):
+         r[i,j]=r[i,j-1] if numpy.nanargmax(e[i,j-1])==g else 1
+     e[:,:,g]+=numpy.where(r==0,0,numpy.nan)
+     # head-score matrix for Chu-Liu-Edmonds; row/column 0 is the root
+     m=numpy.full((e.shape[0]+1,e.shape[1]+1),numpy.nan)
+     m[1:,1:]=numpy.nanmax(e,axis=2).transpose()
+     p=numpy.zeros(m.shape)
+     p[1:,1:]=numpy.nanargmax(e,axis=2).transpose()
+     for i in range(1,m.shape[0]):
+       m[i,0],m[i,i],p[i,0]=m[i,i],numpy.nan,p[i,i]
+     h=ufal.chu_liu_edmonds.chu_liu_edmonds(m)[0]
+     # if more than one token attached to the root, keep the best and reparse
+     if [0 for i in h if i==0]!=[0]:
+       m[:,0]+=numpy.where(m[:,0]==numpy.nanmax(m[[i for i,j in enumerate(h) if j==0],0]),0,numpy.nan)
+       m[[i for i,j in enumerate(h) if j==0]]+=[0 if i==0 or j==0 else numpy.nan for i,j in enumerate(h)]
+       h=ufal.chu_liu_edmonds.chu_liu_edmonds(m)[0]
+     # render the parse as CoNLL-U
+     u="# text = "+text+"\n"
+     v=[(s,e) for s,e in w["offset_mapping"] if s<e]
+     for i,(s,e) in enumerate(v,1):
+       q=self.model.config.id2label[p[i,h[i]]].split("|")
+       u+="\t".join([str(i),text[s:e],"_",q[0],"_","|".join(q[1:-1]),str(h[i]),q[-1],"_","_" if i<len(v) and e<v[i][0] else "SpaceAfter=No"])+"\n"
+     return u+"\n"
+
+ nlp=UDgoeswith("KoichiYasuoka/roberta-base-chinese-ud-goeswith")
+ print(nlp("ζˆ‘ε«θ¨ζ‹‰οΌŒζˆ‘δ½εœ¨δΌ¦ζ•¦γ€‚"))
+ ```
+
+ The class above requires [ufal.chu-liu-edmonds](https://pypi.org/project/ufal.chu-liu-edmonds/).
+ Alternatively, the model can be used without ufal.chu-liu-edmonds:
+
+ ```py
+ from transformers import pipeline
+ nlp=pipeline("universal-dependencies","KoichiYasuoka/roberta-base-chinese-ud-goeswith",trust_remote_code=True,aggregation_strategy="simple")
+ print(nlp("ζˆ‘ε«θ¨ζ‹‰οΌŒζˆ‘δ½εœ¨δΌ¦ζ•¦γ€‚"))
+ ```
+
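As a hedged aside (not part of the model card itself): `nlp(...)` returns plain CoNLL-U text, one `# text = …` comment line followed by ten tab-separated columns per token, so the string can be split back into per-token fields. The column names below follow the CoNLL-U format:

```py
# illustration only: split the CoNLL-U string returned by nlp(...)
# into per-token dictionaries keyed by the standard CoNLL-U columns
def parse_conllu(conllu):
  fields=["ID","FORM","LEMMA","UPOS","XPOS","FEATS","HEAD","DEPREL","DEPS","MISC"]
  return [dict(zip(fields,line.split("\t"))) for line in conllu.splitlines()
          if line and not line.startswith("#")]

for t in parse_conllu(nlp("ζˆ‘ε«θ¨ζ‹‰οΌŒζˆ‘δ½εœ¨δΌ¦ζ•¦γ€‚")):
  print(t["FORM"],t["UPOS"],t["HEAD"],t["DEPREL"])
```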
config.json ADDED
@@ -0,0 +1,651 @@
+ {
+   "architectures": [
+     "BertForTokenClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "custom_pipelines": {
+     "universal-dependencies": {
+       "impl": "ud.UniversalDependenciesPipeline"
+     }
+   },
+   "directionality": "bidi",
+   "eos_token_id": 2,
+   "finetuning_task": "ner",
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "-|_|dep",
+     "1": "ADJ|_|acl",
+     "2": "ADJ|_|acl:relcl",
+     "3": "ADJ|_|advcl",
+     "4": "ADJ|_|advmod",
+     "5": "ADJ|_|amod",
+     "6": "ADJ|_|appos",
+     "7": "ADJ|_|case",
+     "8": "ADJ|_|ccomp",
+     "9": "ADJ|_|compound",
+     "10": "ADJ|_|conj",
+     "11": "ADJ|_|cop",
+     "12": "ADJ|_|csubj",
+     "13": "ADJ|_|dislocated",
+     "14": "ADJ|_|nmod",
+     "15": "ADJ|_|nsubj",
+     "16": "ADJ|_|nsubj:pass",
+     "17": "ADJ|_|nummod",
+     "18": "ADJ|_|obj",
+     "19": "ADJ|_|obl",
+     "20": "ADJ|_|parataxis",
+     "21": "ADJ|_|root",
+     "22": "ADJ|_|xcomp",
+     "23": "ADP|Case=Gen|acl",
+     "24": "ADP|Voice=Cau|case",
+     "25": "ADP|_|acl",
+     "26": "ADP|_|acl:relcl",
+     "27": "ADP|_|advcl",
+     "28": "ADP|_|advmod",
+     "29": "ADP|_|amod",
+     "30": "ADP|_|appos",
+     "31": "ADP|_|case",
+     "32": "ADP|_|cc",
+     "33": "ADP|_|ccomp",
+     "34": "ADP|_|conj",
+     "35": "ADP|_|csubj",
+     "36": "ADP|_|det",
+     "37": "ADP|_|mark",
+     "38": "ADP|_|nmod:tmod",
+     "39": "ADP|_|obl",
+     "40": "ADP|_|parataxis",
+     "41": "ADP|_|root",
+     "42": "ADP|_|xcomp",
+     "43": "ADV|Polarity=Neg|advmod",
+     "44": "ADV|_|acl",
+     "45": "ADV|_|advcl",
+     "46": "ADV|_|advmod",
+     "47": "ADV|_|amod",
+     "48": "ADV|_|cc",
+     "49": "ADV|_|ccomp",
+     "50": "ADV|_|conj",
+     "51": "ADV|_|mark",
+     "52": "ADV|_|nmod:tmod",
+     "53": "ADV|_|parataxis",
+     "54": "ADV|_|root",
+     "55": "ADV|_|xcomp",
+     "56": "AUX|Aspect=Perf|aux",
+     "57": "AUX|Aspect=Prog|aux",
+     "58": "AUX|Voice=Pass|aux:pass",
+     "59": "AUX|_|acl:relcl",
+     "60": "AUX|_|aux",
+     "61": "AUX|_|ccomp",
+     "62": "AUX|_|conj",
+     "63": "AUX|_|cop",
+     "64": "AUX|_|root",
+     "65": "AUX|_|xcomp",
+     "66": "CCONJ|_|cc",
+     "67": "DET|_|acl",
+     "68": "DET|_|advmod",
+     "69": "DET|_|amod",
+     "70": "DET|_|case",
+     "71": "DET|_|conj",
+     "72": "DET|_|det",
+     "73": "DET|_|nmod",
+     "74": "DET|_|nmod:tmod",
+     "75": "DET|_|nsubj",
+     "76": "DET|_|obl",
+     "77": "NOUN|Number=Plur|nmod",
+     "78": "NOUN|Number=Plur|nsubj",
+     "79": "NOUN|_|acl",
+     "80": "NOUN|_|acl:relcl",
+     "81": "NOUN|_|advcl",
+     "82": "NOUN|_|advmod",
+     "83": "NOUN|_|amod",
+     "84": "NOUN|_|appos",
+     "85": "NOUN|_|case",
+     "86": "NOUN|_|ccomp",
+     "87": "NOUN|_|clf",
+     "88": "NOUN|_|compound",
+     "89": "NOUN|_|conj",
+     "90": "NOUN|_|csubj",
+     "91": "NOUN|_|dislocated",
+     "92": "NOUN|_|iobj",
+     "93": "NOUN|_|mark",
+     "94": "NOUN|_|mark:rel",
+     "95": "NOUN|_|nmod",
+     "96": "NOUN|_|nmod:tmod",
+     "97": "NOUN|_|nsubj",
+     "98": "NOUN|_|nsubj:pass",
+     "99": "NOUN|_|nummod",
+     "100": "NOUN|_|obj",
+     "101": "NOUN|_|obl",
+     "102": "NOUN|_|obl:patient",
+     "103": "NOUN|_|orphan",
+     "104": "NOUN|_|parataxis",
+     "105": "NOUN|_|root",
+     "106": "NOUN|_|xcomp",
+     "107": "NUM|NumType=Card|acl",
+     "108": "NUM|NumType=Card|advcl",
+     "109": "NUM|NumType=Card|advmod",
+     "110": "NUM|NumType=Card|amod",
+     "111": "NUM|NumType=Card|appos",
+     "112": "NUM|NumType=Card|ccomp",
+     "113": "NUM|NumType=Card|compound",
+     "114": "NUM|NumType=Card|conj",
+     "115": "NUM|NumType=Card|nmod",
+     "116": "NUM|NumType=Card|nmod:tmod",
+     "117": "NUM|NumType=Card|nsubj",
+     "118": "NUM|NumType=Card|nsubj:pass",
+     "119": "NUM|NumType=Card|nummod",
+     "120": "NUM|NumType=Card|obj",
+     "121": "NUM|NumType=Card|obl",
+     "122": "NUM|NumType=Card|parataxis",
+     "123": "NUM|NumType=Card|root",
+     "124": "NUM|NumType=Card|xcomp",
+     "125": "NUM|NumType=Ord|advmod",
+     "126": "NUM|NumType=Ord|compound",
+     "127": "NUM|NumType=Ord|conj",
+     "128": "NUM|NumType=Ord|nmod",
+     "129": "NUM|NumType=Ord|nummod",
+     "130": "NUM|NumType=Ord|obj",
+     "131": "NUM|NumType=Ord|root",
+     "132": "NUM|_|nummod",
+     "133": "PART|Aspect=Perf|discourse",
+     "134": "PART|Aspect=Perf|parataxis",
+     "135": "PART|Case=Gen|case",
+     "136": "PART|Number=Plur|appos",
+     "137": "PART|Number=Plur|conj",
+     "138": "PART|Number=Plur|nmod",
+     "139": "PART|Number=Plur|nsubj",
+     "140": "PART|Number=Plur|obl",
+     "141": "PART|PartType=Int|discourse:sp",
+     "142": "PART|_|acl",
+     "143": "PART|_|acl:relcl",
+     "144": "PART|_|advcl",
+     "145": "PART|_|advmod",
+     "146": "PART|_|amod",
+     "147": "PART|_|appos",
+     "148": "PART|_|case",
+     "149": "PART|_|ccomp",
+     "150": "PART|_|compound",
+     "151": "PART|_|compound:ext",
+     "152": "PART|_|conj",
+     "153": "PART|_|csubj",
+     "154": "PART|_|discourse",
+     "155": "PART|_|iobj",
+     "156": "PART|_|mark:adv",
+     "157": "PART|_|mark:rel",
+     "158": "PART|_|nmod",
+     "159": "PART|_|nmod:tmod",
+     "160": "PART|_|nsubj",
+     "161": "PART|_|nsubj:pass",
+     "162": "PART|_|obj",
+     "163": "PART|_|obl",
+     "164": "PART|_|obl:patient",
+     "165": "PART|_|parataxis",
+     "166": "PART|_|root",
+     "167": "PART|_|xcomp",
+     "168": "PRON|Number=Plur|Person=1|det",
+     "169": "PRON|Number=Plur|Person=1|nmod",
+     "170": "PRON|Number=Plur|Person=1|nsubj",
+     "171": "PRON|Number=Plur|Person=1|obj",
+     "172": "PRON|Number=Plur|Person=1|obl",
+     "173": "PRON|Number=Plur|Person=3|appos",
+     "174": "PRON|Number=Plur|Person=3|det",
+     "175": "PRON|Number=Plur|Person=3|nmod",
+     "176": "PRON|Number=Plur|Person=3|nsubj",
+     "177": "PRON|Number=Plur|Person=3|nsubj:pass",
+     "178": "PRON|Number=Plur|Person=3|obj",
+     "179": "PRON|Number=Plur|Person=3|obl",
+     "180": "PRON|Number=Plur|Person=3|obl:patient",
+     "181": "PRON|Person=1|compound",
+     "182": "PRON|Person=1|det",
+     "183": "PRON|Person=1|nmod",
+     "184": "PRON|Person=1|nsubj",
+     "185": "PRON|Person=1|obj",
+     "186": "PRON|Person=2|det",
+     "187": "PRON|Person=2|iobj",
+     "188": "PRON|Person=2|nmod",
+     "189": "PRON|Person=2|nsubj",
+     "190": "PRON|Person=2|obj",
+     "191": "PRON|Person=2|obl",
+     "192": "PRON|Person=3|conj",
+     "193": "PRON|Person=3|det",
+     "194": "PRON|Person=3|iobj",
+     "195": "PRON|Person=3|nmod",
+     "196": "PRON|Person=3|nsubj",
+     "197": "PRON|Person=3|nsubj:pass",
+     "198": "PRON|Person=3|obj",
+     "199": "PRON|Person=3|obl",
+     "200": "PRON|Person=3|obl:patient",
+     "201": "PRON|Person=3|parataxis",
+     "202": "PRON|Person=3|xcomp",
+     "203": "PRON|_|advmod",
+     "204": "PRON|_|appos",
+     "205": "PRON|_|compound",
+     "206": "PRON|_|conj",
+     "207": "PRON|_|csubj",
+     "208": "PRON|_|det",
+     "209": "PRON|_|nmod",
+     "210": "PRON|_|nsubj",
+     "211": "PRON|_|nsubj:pass",
+     "212": "PRON|_|obj",
+     "213": "PRON|_|obl",
+     "214": "PRON|_|obl:patient",
+     "215": "PRON|_|parataxis",
+     "216": "PROPN|_|acl:relcl",
+     "217": "PROPN|_|advcl",
+     "218": "PROPN|_|advmod",
+     "219": "PROPN|_|appos",
+     "220": "PROPN|_|case",
+     "221": "PROPN|_|ccomp",
+     "222": "PROPN|_|compound",
+     "223": "PROPN|_|conj",
+     "224": "PROPN|_|dislocated",
+     "225": "PROPN|_|flat:foreign",
+     "226": "PROPN|_|flat:name",
+     "227": "PROPN|_|iobj",
+     "228": "PROPN|_|nmod",
+     "229": "PROPN|_|nmod:tmod",
+     "230": "PROPN|_|nsubj",
+     "231": "PROPN|_|nsubj:pass",
+     "232": "PROPN|_|nummod",
+     "233": "PROPN|_|obj",
+     "234": "PROPN|_|obl",
+     "235": "PROPN|_|obl:patient",
+     "236": "PROPN|_|parataxis",
+     "237": "PROPN|_|root",
+     "238": "PROPN|_|vocative",
+     "239": "PROPN|_|xcomp",
+     "240": "PUNCT|_|appos",
+     "241": "PUNCT|_|punct",
+     "242": "PUNCT|_|reparandum",
+     "243": "SYM|_|appos",
+     "244": "SYM|_|nsubj",
+     "245": "SYM|_|obj",
+     "246": "SYM|_|punct",
+     "247": "VERB|Voice=Cau|acl",
+     "248": "VERB|Voice=Cau|acl:relcl",
+     "249": "VERB|Voice=Cau|advcl",
+     "250": "VERB|Voice=Cau|amod",
+     "251": "VERB|Voice=Cau|aux:pass",
+     "252": "VERB|Voice=Cau|ccomp",
+     "253": "VERB|Voice=Cau|compound",
+     "254": "VERB|Voice=Cau|conj",
+     "255": "VERB|Voice=Cau|csubj",
+     "256": "VERB|Voice=Cau|mark",
+     "257": "VERB|Voice=Cau|parataxis",
+     "258": "VERB|Voice=Cau|root",
+     "259": "VERB|Voice=Cau|xcomp",
+     "260": "VERB|Voice=Pass|aux:pass",
+     "261": "VERB|_|acl",
+     "262": "VERB|_|acl:relcl",
+     "263": "VERB|_|advcl",
+     "264": "VERB|_|advmod",
+     "265": "VERB|_|amod",
+     "266": "VERB|_|appos",
+     "267": "VERB|_|ccomp",
+     "268": "VERB|_|compound",
+     "269": "VERB|_|conj",
+     "270": "VERB|_|csubj",
+     "271": "VERB|_|csubj:pass",
+     "272": "VERB|_|det",
+     "273": "VERB|_|discourse",
+     "274": "VERB|_|dislocated",
+     "275": "VERB|_|mark",
+     "276": "VERB|_|nmod",
+     "277": "VERB|_|nmod:tmod",
+     "278": "VERB|_|nsubj",
+     "279": "VERB|_|obj",
+     "280": "VERB|_|obl",
+     "281": "VERB|_|parataxis",
+     "282": "VERB|_|reparandum",
+     "283": "VERB|_|root",
+     "284": "VERB|_|xcomp",
+     "285": "X|_|acl",
+     "286": "X|_|advcl",
+     "287": "X|_|advmod",
+     "288": "X|_|amod",
+     "289": "X|_|appos",
+     "290": "X|_|ccomp",
+     "291": "X|_|compound",
+     "292": "X|_|conj",
+     "293": "X|_|det",
+     "294": "X|_|flat:foreign",
+     "295": "X|_|goeswith",
+     "296": "X|_|nmod",
+     "297": "X|_|nsubj",
+     "298": "X|_|nummod",
+     "299": "X|_|obj",
+     "300": "X|_|obl",
+     "301": "X|_|parataxis",
+     "302": "X|_|root"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "-|_|dep": 0,
+     "ADJ|_|acl": 1,
+     "ADJ|_|acl:relcl": 2,
+     "ADJ|_|advcl": 3,
+     "ADJ|_|advmod": 4,
+     "ADJ|_|amod": 5,
+     "ADJ|_|appos": 6,
+     "ADJ|_|case": 7,
+     "ADJ|_|ccomp": 8,
+     "ADJ|_|compound": 9,
+     "ADJ|_|conj": 10,
+     "ADJ|_|cop": 11,
+     "ADJ|_|csubj": 12,
+     "ADJ|_|dislocated": 13,
+     "ADJ|_|nmod": 14,
+     "ADJ|_|nsubj": 15,
+     "ADJ|_|nsubj:pass": 16,
+     "ADJ|_|nummod": 17,
+     "ADJ|_|obj": 18,
+     "ADJ|_|obl": 19,
+     "ADJ|_|parataxis": 20,
+     "ADJ|_|root": 21,
+     "ADJ|_|xcomp": 22,
+     "ADP|Case=Gen|acl": 23,
+     "ADP|Voice=Cau|case": 24,
+     "ADP|_|acl": 25,
+     "ADP|_|acl:relcl": 26,
+     "ADP|_|advcl": 27,
+     "ADP|_|advmod": 28,
+     "ADP|_|amod": 29,
+     "ADP|_|appos": 30,
+     "ADP|_|case": 31,
+     "ADP|_|cc": 32,
+     "ADP|_|ccomp": 33,
+     "ADP|_|conj": 34,
+     "ADP|_|csubj": 35,
+     "ADP|_|det": 36,
+     "ADP|_|mark": 37,
+     "ADP|_|nmod:tmod": 38,
+     "ADP|_|obl": 39,
+     "ADP|_|parataxis": 40,
+     "ADP|_|root": 41,
+     "ADP|_|xcomp": 42,
+     "ADV|Polarity=Neg|advmod": 43,
+     "ADV|_|acl": 44,
+     "ADV|_|advcl": 45,
+     "ADV|_|advmod": 46,
+     "ADV|_|amod": 47,
+     "ADV|_|cc": 48,
+     "ADV|_|ccomp": 49,
+     "ADV|_|conj": 50,
+     "ADV|_|mark": 51,
+     "ADV|_|nmod:tmod": 52,
+     "ADV|_|parataxis": 53,
+     "ADV|_|root": 54,
+     "ADV|_|xcomp": 55,
+     "AUX|Aspect=Perf|aux": 56,
+     "AUX|Aspect=Prog|aux": 57,
+     "AUX|Voice=Pass|aux:pass": 58,
+     "AUX|_|acl:relcl": 59,
+     "AUX|_|aux": 60,
+     "AUX|_|ccomp": 61,
+     "AUX|_|conj": 62,
+     "AUX|_|cop": 63,
+     "AUX|_|root": 64,
+     "AUX|_|xcomp": 65,
+     "CCONJ|_|cc": 66,
+     "DET|_|acl": 67,
+     "DET|_|advmod": 68,
+     "DET|_|amod": 69,
+     "DET|_|case": 70,
+     "DET|_|conj": 71,
+     "DET|_|det": 72,
+     "DET|_|nmod": 73,
+     "DET|_|nmod:tmod": 74,
+     "DET|_|nsubj": 75,
+     "DET|_|obl": 76,
+     "NOUN|Number=Plur|nmod": 77,
+     "NOUN|Number=Plur|nsubj": 78,
+     "NOUN|_|acl": 79,
+     "NOUN|_|acl:relcl": 80,
+     "NOUN|_|advcl": 81,
+     "NOUN|_|advmod": 82,
+     "NOUN|_|amod": 83,
+     "NOUN|_|appos": 84,
+     "NOUN|_|case": 85,
+     "NOUN|_|ccomp": 86,
+     "NOUN|_|clf": 87,
+     "NOUN|_|compound": 88,
+     "NOUN|_|conj": 89,
+     "NOUN|_|csubj": 90,
+     "NOUN|_|dislocated": 91,
+     "NOUN|_|iobj": 92,
+     "NOUN|_|mark": 93,
+     "NOUN|_|mark:rel": 94,
+     "NOUN|_|nmod": 95,
+     "NOUN|_|nmod:tmod": 96,
+     "NOUN|_|nsubj": 97,
+     "NOUN|_|nsubj:pass": 98,
+     "NOUN|_|nummod": 99,
+     "NOUN|_|obj": 100,
+     "NOUN|_|obl": 101,
+     "NOUN|_|obl:patient": 102,
+     "NOUN|_|orphan": 103,
+     "NOUN|_|parataxis": 104,
+     "NOUN|_|root": 105,
+     "NOUN|_|xcomp": 106,
+     "NUM|NumType=Card|acl": 107,
+     "NUM|NumType=Card|advcl": 108,
+     "NUM|NumType=Card|advmod": 109,
+     "NUM|NumType=Card|amod": 110,
+     "NUM|NumType=Card|appos": 111,
+     "NUM|NumType=Card|ccomp": 112,
+     "NUM|NumType=Card|compound": 113,
+     "NUM|NumType=Card|conj": 114,
+     "NUM|NumType=Card|nmod": 115,
+     "NUM|NumType=Card|nmod:tmod": 116,
+     "NUM|NumType=Card|nsubj": 117,
+     "NUM|NumType=Card|nsubj:pass": 118,
+     "NUM|NumType=Card|nummod": 119,
+     "NUM|NumType=Card|obj": 120,
+     "NUM|NumType=Card|obl": 121,
+     "NUM|NumType=Card|parataxis": 122,
+     "NUM|NumType=Card|root": 123,
+     "NUM|NumType=Card|xcomp": 124,
+     "NUM|NumType=Ord|advmod": 125,
+     "NUM|NumType=Ord|compound": 126,
+     "NUM|NumType=Ord|conj": 127,
+     "NUM|NumType=Ord|nmod": 128,
+     "NUM|NumType=Ord|nummod": 129,
+     "NUM|NumType=Ord|obj": 130,
+     "NUM|NumType=Ord|root": 131,
+     "NUM|_|nummod": 132,
+     "PART|Aspect=Perf|discourse": 133,
+     "PART|Aspect=Perf|parataxis": 134,
+     "PART|Case=Gen|case": 135,
+     "PART|Number=Plur|appos": 136,
+     "PART|Number=Plur|conj": 137,
+     "PART|Number=Plur|nmod": 138,
+     "PART|Number=Plur|nsubj": 139,
+     "PART|Number=Plur|obl": 140,
+     "PART|PartType=Int|discourse:sp": 141,
+     "PART|_|acl": 142,
+     "PART|_|acl:relcl": 143,
+     "PART|_|advcl": 144,
+     "PART|_|advmod": 145,
+     "PART|_|amod": 146,
+     "PART|_|appos": 147,
+     "PART|_|case": 148,
+     "PART|_|ccomp": 149,
+     "PART|_|compound": 150,
+     "PART|_|compound:ext": 151,
+     "PART|_|conj": 152,
+     "PART|_|csubj": 153,
+     "PART|_|discourse": 154,
+     "PART|_|iobj": 155,
+     "PART|_|mark:adv": 156,
+     "PART|_|mark:rel": 157,
+     "PART|_|nmod": 158,
+     "PART|_|nmod:tmod": 159,
+     "PART|_|nsubj": 160,
+     "PART|_|nsubj:pass": 161,
+     "PART|_|obj": 162,
+     "PART|_|obl": 163,
+     "PART|_|obl:patient": 164,
+     "PART|_|parataxis": 165,
+     "PART|_|root": 166,
+     "PART|_|xcomp": 167,
+     "PRON|Number=Plur|Person=1|det": 168,
+     "PRON|Number=Plur|Person=1|nmod": 169,
+     "PRON|Number=Plur|Person=1|nsubj": 170,
+     "PRON|Number=Plur|Person=1|obj": 171,
+     "PRON|Number=Plur|Person=1|obl": 172,
+     "PRON|Number=Plur|Person=3|appos": 173,
+     "PRON|Number=Plur|Person=3|det": 174,
+     "PRON|Number=Plur|Person=3|nmod": 175,
+     "PRON|Number=Plur|Person=3|nsubj": 176,
+     "PRON|Number=Plur|Person=3|nsubj:pass": 177,
+     "PRON|Number=Plur|Person=3|obj": 178,
+     "PRON|Number=Plur|Person=3|obl": 179,
+     "PRON|Number=Plur|Person=3|obl:patient": 180,
+     "PRON|Person=1|compound": 181,
+     "PRON|Person=1|det": 182,
+     "PRON|Person=1|nmod": 183,
+     "PRON|Person=1|nsubj": 184,
+     "PRON|Person=1|obj": 185,
+     "PRON|Person=2|det": 186,
+     "PRON|Person=2|iobj": 187,
+     "PRON|Person=2|nmod": 188,
+     "PRON|Person=2|nsubj": 189,
+     "PRON|Person=2|obj": 190,
+     "PRON|Person=2|obl": 191,
+     "PRON|Person=3|conj": 192,
+     "PRON|Person=3|det": 193,
+     "PRON|Person=3|iobj": 194,
+     "PRON|Person=3|nmod": 195,
+     "PRON|Person=3|nsubj": 196,
+     "PRON|Person=3|nsubj:pass": 197,
+     "PRON|Person=3|obj": 198,
+     "PRON|Person=3|obl": 199,
+     "PRON|Person=3|obl:patient": 200,
+     "PRON|Person=3|parataxis": 201,
+     "PRON|Person=3|xcomp": 202,
+     "PRON|_|advmod": 203,
+     "PRON|_|appos": 204,
+     "PRON|_|compound": 205,
+     "PRON|_|conj": 206,
+     "PRON|_|csubj": 207,
+     "PRON|_|det": 208,
+     "PRON|_|nmod": 209,
+     "PRON|_|nsubj": 210,
+     "PRON|_|nsubj:pass": 211,
+     "PRON|_|obj": 212,
+     "PRON|_|obl": 213,
+     "PRON|_|obl:patient": 214,
+     "PRON|_|parataxis": 215,
+     "PROPN|_|acl:relcl": 216,
+     "PROPN|_|advcl": 217,
+     "PROPN|_|advmod": 218,
+     "PROPN|_|appos": 219,
+     "PROPN|_|case": 220,
+     "PROPN|_|ccomp": 221,
+     "PROPN|_|compound": 222,
+     "PROPN|_|conj": 223,
+     "PROPN|_|dislocated": 224,
+     "PROPN|_|flat:foreign": 225,
+     "PROPN|_|flat:name": 226,
+     "PROPN|_|iobj": 227,
+     "PROPN|_|nmod": 228,
+     "PROPN|_|nmod:tmod": 229,
+     "PROPN|_|nsubj": 230,
+     "PROPN|_|nsubj:pass": 231,
+     "PROPN|_|nummod": 232,
+     "PROPN|_|obj": 233,
+     "PROPN|_|obl": 234,
+     "PROPN|_|obl:patient": 235,
+     "PROPN|_|parataxis": 236,
+     "PROPN|_|root": 237,
+     "PROPN|_|vocative": 238,
+     "PROPN|_|xcomp": 239,
+     "PUNCT|_|appos": 240,
+     "PUNCT|_|punct": 241,
+     "PUNCT|_|reparandum": 242,
+     "SYM|_|appos": 243,
+     "SYM|_|nsubj": 244,
+     "SYM|_|obj": 245,
+     "SYM|_|punct": 246,
+     "VERB|Voice=Cau|acl": 247,
+     "VERB|Voice=Cau|acl:relcl": 248,
+     "VERB|Voice=Cau|advcl": 249,
+     "VERB|Voice=Cau|amod": 250,
+     "VERB|Voice=Cau|aux:pass": 251,
+     "VERB|Voice=Cau|ccomp": 252,
+     "VERB|Voice=Cau|compound": 253,
+     "VERB|Voice=Cau|conj": 254,
+     "VERB|Voice=Cau|csubj": 255,
+     "VERB|Voice=Cau|mark": 256,
+     "VERB|Voice=Cau|parataxis": 257,
+     "VERB|Voice=Cau|root": 258,
+     "VERB|Voice=Cau|xcomp": 259,
+     "VERB|Voice=Pass|aux:pass": 260,
+     "VERB|_|acl": 261,
+     "VERB|_|acl:relcl": 262,
+     "VERB|_|advcl": 263,
+     "VERB|_|advmod": 264,
+     "VERB|_|amod": 265,
+     "VERB|_|appos": 266,
+     "VERB|_|ccomp": 267,
+     "VERB|_|compound": 268,
+     "VERB|_|conj": 269,
+     "VERB|_|csubj": 270,
+     "VERB|_|csubj:pass": 271,
+     "VERB|_|det": 272,
+     "VERB|_|discourse": 273,
+     "VERB|_|dislocated": 274,
+     "VERB|_|mark": 275,
+     "VERB|_|nmod": 276,
+     "VERB|_|nmod:tmod": 277,
+     "VERB|_|nsubj": 278,
+     "VERB|_|obj": 279,
+     "VERB|_|obl": 280,
+     "VERB|_|parataxis": 281,
+     "VERB|_|reparandum": 282,
+     "VERB|_|root": 283,
+     "VERB|_|xcomp": 284,
+     "X|_|acl": 285,
+     "X|_|advcl": 286,
+     "X|_|advmod": 287,
+     "X|_|amod": 288,
+     "X|_|appos": 289,
+     "X|_|ccomp": 290,
+     "X|_|compound": 291,
+     "X|_|conj": 292,
+     "X|_|det": 293,
+     "X|_|flat:foreign": 294,
+     "X|_|goeswith": 295,
+     "X|_|nmod": 296,
+     "X|_|nsubj": 297,
+     "X|_|nummod": 298,
+     "X|_|obj": 299,
+     "X|_|obl": 300,
+     "X|_|parataxis": 301,
+     "X|_|root": 302
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "output_past": true,
+   "pad_token_id": 1,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "position_embedding_type": "absolute",
+   "tokenizer_class": "BertTokenizerFast",
+   "torch_dtype": "float32",
+   "transformers_version": "4.22.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 21128
+ }
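Each entry in `id2label`/`label2id` above packs three CoNLL-U fields into one string: UPOS first, DEPREL last, and whatever sits between them (possibly several `Name=Value` features joined by `|`) is FEATS. A hedged sketch of how such a composite label decodes, mirroring the `split("|")` logic used in the README and in `ud.py`:

```py
# illustration only: decode a composite label from config.json into its
# CoNLL-U fields; q[0] is UPOS, q[-1] is DEPREL, the middle parts are FEATS
def decode_label(label):
  q=label.split("|")
  return {"UPOS":q[0],"FEATS":"|".join(q[1:-1]),"DEPREL":q[-1]}

print(decode_label("PRON|Number=Plur|Person=1|nsubj"))
# {'UPOS': 'PRON', 'FEATS': 'Number=Plur|Person=1', 'DEPREL': 'nsubj'}
```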
maker.py ADDED
@@ -0,0 +1,57 @@
+ #! /usr/bin/python3
+ src="KoichiYasuoka/chinese-roberta-base-upos"
+ tgt="KoichiYasuoka/roberta-base-chinese-ud-goeswith"
+ import os
+ for d in ["UD_Chinese-GSD","UD_Chinese-GSDSimp"]:
+   os.system("test -d "+d+" || git clone --depth=1 https://github.com/UniversalDependencies/"+d)
+ os.system("for F in train dev test ; do cat UD_Chinese-*/*-$F.conllu > $F.conllu ; done")
+ # build one plain sequence plus one masked copy per subword from a CoNLL-U file
+ class UDgoeswithDataset(object):
+   def __init__(self,conllu,tokenizer):
+     self.ids,self.tags,label=[],[],set()
+     with open(conllu,"r",encoding="utf-8") as r:
+       cls,sep,msk=tokenizer.cls_token_id,tokenizer.sep_token_id,tokenizer.mask_token_id
+       dep,c="-|_|dep",[]
+       for s in r:
+         t=s.split("\t")
+         if len(t)==10 and t[0].isdecimal():
+           c.append(t)
+         elif c!=[]:
+           v=tokenizer([t[1] for t in c],add_special_tokens=False)["input_ids"]
+           # multi-subword tokens get one X|_|goeswith row per extra subword
+           for i in range(len(v)-1,-1,-1):
+             for j in range(1,len(v[i])):
+               c.insert(i+1,[c[i][0],"_","_","X","_","_",c[i][0],"goeswith","_","_"])
+           y=["0"]+[t[0] for t in c]
+           h=[i if t[6]=="0" else y.index(t[6]) for i,t in enumerate(c,1)]
+           p,v=[t[3]+"|"+t[5]+"|"+t[7] for t in c],sum(v,[])
+           self.ids.append([cls]+v+[sep])
+           self.tags.append([dep]+p+[dep])
+           label=set(sum([self.tags[-1],list(label)],[]))
+           # one masked copy per subword: tag the tokens headed by the masked position
+           for i,k in enumerate(v):
+             self.ids.append([cls]+v[0:i]+[msk]+v[i+1:]+[sep,k])
+             self.tags.append([dep]+[t if h[j]==i+1 else dep for j,t in enumerate(p)]+[dep,dep])
+           c=[]
+     self.label2id={l:i for i,l in enumerate(sorted(label))}
+   # unify label2id across several datasets
+   def __call__(*args):
+     label=set(sum([list(t.label2id) for t in args],[]))
+     lid={l:i for i,l in enumerate(sorted(label))}
+     for t in args:
+       t.label2id=lid
+     return lid
+   __len__=lambda self:len(self.ids)
+   __getitem__=lambda self,i:{"input_ids":self.ids[i],"labels":[self.label2id[t] for t in self.tags[i]]}
+ from transformers import AutoTokenizer,AutoConfig,AutoModelForTokenClassification,DataCollatorForTokenClassification,TrainingArguments,Trainer
+ tkz=AutoTokenizer.from_pretrained(src)
+ trainDS=UDgoeswithDataset("train.conllu",tkz)
+ devDS=UDgoeswithDataset("dev.conllu",tkz)
+ testDS=UDgoeswithDataset("test.conllu",tkz)
+ lid=trainDS(devDS,testDS)
+ cfg=AutoConfig.from_pretrained(src,num_labels=len(lid),label2id=lid,id2label={i:l for l,i in lid.items()},ignore_mismatched_sizes=True)
+ arg=TrainingArguments(num_train_epochs=3,per_device_train_batch_size=32,output_dir="/tmp",overwrite_output_dir=True,save_total_limit=2,evaluation_strategy="epoch",learning_rate=5e-05,warmup_ratio=0.1)
+ trn=Trainer(args=arg,data_collator=DataCollatorForTokenClassification(tkz),model=AutoModelForTokenClassification.from_pretrained(src,config=cfg,ignore_mismatched_sizes=True),train_dataset=trainDS,eval_dataset=devDS)
+ trn.train()
+ trn.save_model(tgt)
+ tkz.save_pretrained(tgt)
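For every UD sentence, `UDgoeswithDataset` emits one plain sequence plus one sequence per subword in which that subword is masked and its token id is appended after `[SEP]`. A rough standalone sketch of that expansion, using hypothetical token ids (not values from the script):

```py
# toy sketch (hypothetical ids, not from maker.py): the sequence expansion
# that UDgoeswithDataset performs for one sentence of subword ids v
cls,sep,msk=101,102,103          # assumed special-token ids
v=[7,8,9]                        # subword ids of one toy sentence
seqs=[[cls]+v+[sep]]             # the plain sequence
for i,k in enumerate(v):         # one masked copy per subword, its id appended
  seqs.append([cls]+v[0:i]+[msk]+v[i+1:]+[sep,k])
for s in seqs:
  print(s)
# [101, 7, 8, 9, 102]
# [101, 103, 8, 9, 102, 7]
# [101, 7, 103, 9, 102, 8]
# [101, 7, 8, 103, 102, 9]
```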
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:803211fef51e6bd23e68f211c1396d8975f16e2aea0ac0469a612fd83ccb38ba
+ size 407710641
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizerFast",
+   "unk_token": "[UNK]"
+ }
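As a quick hedged check (assuming `transformers` is installed), these settings are what `AutoTokenizer.from_pretrained` restores:

```py
# illustration only: the tokenizer settings above are picked up automatically
from transformers import AutoTokenizer
tkz=AutoTokenizer.from_pretrained("KoichiYasuoka/roberta-base-chinese-ud-goeswith")
print(tkz.mask_token,tkz.model_max_length)  # expected: [MASK] 512
```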
ud.py ADDED
@@ -0,0 +1,68 @@
+ from transformers import TokenClassificationPipeline
+
+ class UniversalDependenciesPipeline(TokenClassificationPipeline):
+   def _forward(self,model_input):
+     import torch
+     v=model_input["input_ids"][0].tolist()
+     # one batch row per token: mask the token and append one candidate head
+     with torch.no_grad():
+       e=self.model(input_ids=torch.tensor([v[0:i]+[self.tokenizer.mask_token_id]+v[i+1:]+[j] for i,j in enumerate(v[1:-1],1)]))
+     return {"logits":e.logits[:,1:-2,:],**model_input}
+   def postprocess(self,model_output,**kwargs):
+     import numpy
+     e=model_output["logits"].numpy()
+     # allow root labels only on the diagonal (a token headed by itself)
+     r=[1 if i==0 else -1 if j.endswith("|root") else 0 for i,j in sorted(self.model.config.id2label.items())]
+     e+=numpy.where(numpy.add.outer(numpy.identity(e.shape[0]),r)==0,0,numpy.nan)
+     # goeswith may only link a subword to the position right before it
+     g=self.model.config.label2id["X|_|goeswith"]
+     r=numpy.tri(e.shape[0])
+     for i in range(e.shape[0]):
+       for j in range(i+2,e.shape[1]):
+         r[i,j]=r[i,j-1] if numpy.nanargmax(e[i,j-1])==g else 1
+     e[:,:,g]+=numpy.where(r==0,0,numpy.nan)
+     # best score and label per (head,dependent) pair, then maximum spanning tree
+     m,p=numpy.nanmax(e,axis=2),numpy.nanargmax(e,axis=2)
+     h=self.chu_liu_edmonds(m)
+     z=[i for i,j in enumerate(h) if i==j]
+     # several self-headed roots: keep the best-scoring one and reparse
+     if len(z)>1:
+       k,h=z[numpy.nanargmax(m[z,z])],numpy.nanmin(m)-numpy.nanmax(m)
+       m[:,z]+=[[0 if j in z and (i!=j or i==k) else h for i in z] for j in range(m.shape[0])]
+       h=self.chu_liu_edmonds(m)
+     v=[(s,e) for s,e in model_output["offset_mapping"][0].tolist() if s<e]
+     q=[self.model.config.id2label[p[j,i]].split("|") for i,j in enumerate(h)]
+     g="aggregation_strategy" in kwargs and kwargs["aggregation_strategy"]!="none"
+     if g:
+       # merge goeswith subwords back into single tokens
+       for i,j in reversed(list(enumerate(q[1:],1))):
+         if j[-1]=="goeswith" and set([t[-1] for t in q[h[i]+1:i+1]])=={"goeswith"}:
+           h=[b if i>b else b-1 for a,b in enumerate(h) if i!=a]
+           v[i-1]=(v[i-1][0],v.pop(i)[1])
+           q.pop(i)
+     t=model_output["sentence"].replace("\n"," ")
+     u="# text = "+t+"\n"
+     for i,(s,e) in enumerate(v):
+       u+="\t".join([str(i+1),t[s:e],t[s:e] if g else "_",q[i][0],"_","|".join(q[i][1:-1]),str(0 if h[i]==i else h[i]+1),q[i][-1],"_","_" if i+1<len(v) and e<v[i+1][0] else "SpaceAfter=No"])+"\n"
+     return u+"\n"
+   # recursive Chu-Liu-Edmonds: contract the strongest cycle and retry
+   def chu_liu_edmonds(self,matrix):
+     import numpy
+     h=numpy.nanargmax(matrix,axis=0)
+     x=[-1 if i==j else j for i,j in enumerate(h)]
+     for b in [lambda x,i,j:-1 if i not in x else x[i],lambda x,i,j:-1 if j<0 else x[j]]:
+       y=[]
+       while x!=y:
+         y=list(x)
+         for i,j in enumerate(x):
+           x[i]=b(x,i,j)
+       if max(x)<0:
+         return h
+     y,x=[i for i,j in enumerate(x) if j==max(x)],[i for i,j in enumerate(x) if j<max(x)]
+     z=matrix-numpy.nanmax(matrix,axis=0)
+     m=numpy.block([[z[x,:][:,x],numpy.nanmax(z[x,:][:,y],axis=1).reshape(len(x),1)],[numpy.nanmax(z[y,:][:,x],axis=0),numpy.nanmax(z[y,y])]])
+     k=[j if i==len(x) else x[j] if j<len(x) else y[numpy.nanargmax(z[y,x[i]])] for i,j in enumerate(self.chu_liu_edmonds(m))]
+     h=[j if i in y else k[x.index(i)] for i,j in enumerate(h)]
+     i=y[numpy.nanargmax(z[x[k[-1]],y] if k[-1]<len(x) else z[y,y])]
+     h[i]=x[k[-1]] if k[-1]<len(x) else i
+     return h
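The `chu_liu_edmonds` method exists because picking each token's best head independently can produce cycles. A toy sketch (not repository code) of the greedy step under the same matrix convention, where entry `[i,j]` scores "i is the head of j" and a token headed by itself is a root:

```py
# toy sketch: greedy head selection can yield a cycle that the recursive
# chu_liu_edmonds method must contract; matrix[i,j] scores "i heads j"
import numpy
m=numpy.array([[1.,9.,4.],
               [8.,2.,3.],
               [5.,7.,6.]])
print(numpy.nanargmax(m,axis=0))  # [1 0 2]: token 2 is a root, while tokens
                                  # 0 and 1 head each other, forming a cycle
```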
vocab.txt ADDED
The diff for this file is too large to render. See raw diff