KoichiYasuoka committed on
Commit d07e889
1 Parent(s): 616b039

initial release

Files changed (10)
  1. README.md +94 -0
  2. added_tokens.json +3 -0
  3. bpe.codes +0 -0
  4. config.json +771 -0
  5. maker.py +71 -0
  6. pytorch_model.bin +3 -0
  7. special_tokens_map.json +9 -0
  8. tokenizer_config.json +12 -0
  9. ud.py +84 -0
  10. vocab.txt +0 -0
README.md ADDED
@@ -0,0 +1,94 @@
+ ---
+ language:
+ - "vi"
+ tags:
+ - "vietnamese"
+ - "token-classification"
+ - "pos"
+ - "dependency-parsing"
+ datasets:
+ - "universal_dependencies"
+ license: "cc-by-sa-4.0"
+ pipeline_tag: "token-classification"
+ widget:
+ - text: "Hai cái đầu thì tốt hơn một"
+ ---
+
+ # phobert-large-vietnamese-ud-goeswith
+
+ ## Model Description
+
+ This is a PhoBERT model for POS-tagging and dependency-parsing (using `goeswith` for subwords), derived from [phobert-large](https://huggingface.co/vinai/phobert-large) and fine-tuned on VnDT and UD_Vietnamese-VTB.
+
+ ## How to Use
+
+ ```py
+ class UDgoeswithViNLP(object):
+   def __init__(self,bert):
+     from transformers import AutoTokenizer,AutoModelForTokenClassification
+     from ViNLP import word_tokenize
+     self.tokenizer=AutoTokenizer.from_pretrained(bert)
+     self.model=AutoModelForTokenClassification.from_pretrained(bert)
+     self.vinlp=word_tokenize
+   def __call__(self,text):
+     import numpy,torch,ufal.chu_liu_edmonds
+     t=self.vinlp(text)  # word segmentation with ViNLP
+     w=self.tokenizer(t,add_special_tokens=False)["input_ids"]
+     z=[]
+     for i,j in enumerate(t):
+       if j.find("_")>0 and [k for k in w[i] if k==self.tokenizer.unk_token_id]!=[]:
+         w[i]=self.tokenizer(j.replace("_"," "))["input_ids"][1:-1]
+       if [k for k in w[i] if k==self.tokenizer.unk_token_id]!=[]:
+         w[i]=[self.tokenizer.unk_token_id]
+         z.append(j)
+     v=[self.tokenizer.cls_token_id]+sum(w,[])+[self.tokenizer.sep_token_id]
+     # mask each subword in turn and append the candidate head subword
+     x=[v[0:i]+[self.tokenizer.mask_token_id]+v[i+1:]+[j] for i,j in enumerate(v[1:-1],1)]
+     with torch.no_grad():
+       e=self.model(input_ids=torch.tensor(x)).logits.numpy()[:,1:-2,:]
+     r=[1 if i==0 else -1 if j.endswith("|root") else 0 for i,j in sorted(self.model.config.id2label.items())]
+     e+=numpy.where(numpy.add.outer(numpy.identity(e.shape[0]),r)==0,0,numpy.nan)
+     g=self.model.config.label2id["X|_|goeswith"]
+     r=numpy.tri(e.shape[0])
+     for i in range(e.shape[0]):
+       for j in range(i+2,e.shape[1]):
+         r[i,j]=r[i,j-1] if numpy.nanargmax(e[i,j-1])==g else 1
+     e[:,:,g]+=numpy.where(r==0,0,numpy.nan)
+     m=numpy.full((e.shape[0]+1,e.shape[1]+1),numpy.nan)
+     m[1:,1:]=numpy.nanmax(e,axis=2).transpose()
+     p=numpy.zeros(m.shape)
+     p[1:,1:]=numpy.nanargmax(e,axis=2).transpose()
+     for i in range(1,m.shape[0]):
+       m[i,0],m[i,i],p[i,0]=m[i,i],numpy.nan,p[i,i]
+     # decode the dependency tree with the Chu-Liu-Edmonds algorithm
+     h=ufal.chu_liu_edmonds.chu_liu_edmonds(m)[0]
+     if [0 for i in h if i==0]!=[0]:
+       m[:,0]+=numpy.where(m[:,0]==numpy.nanmax(m[[i for i,j in enumerate(h) if j==0],0]),0,numpy.nan)
+       m[[i for i,j in enumerate(h) if j==0]]+=[0 if i==0 or j==0 else numpy.nan for i,j in enumerate(h)]
+       h=ufal.chu_liu_edmonds.chu_liu_edmonds(m)[0]
+     u="# text = "+text+"\n"
+     q=[self.model.config.id2label[p[i,j]].split("|") for i,j in enumerate(h)]
+     t=[i.replace("_"," ") for i in t]
+     if len(t)!=len(v)-2:
+       t=[z.pop(0) if i==self.tokenizer.unk_token else i.replace("_"," ") for i in self.tokenizer.convert_ids_to_tokens(v[1:-1])]
+     # merge subwords labeled goeswith back into single words
+     for i,j in reversed(list(enumerate(q[2:],2))):
+       if j[-1]=="goeswith" and set([k[-1] for k in q[h[i]+1:i+1]])=={"goeswith"}:
+         h=[b if i>b else b-1 for a,b in enumerate(h) if i!=a]
+         t[i-2]=(t[i-2][0:-2] if t[i-2].endswith("@@") else t[i-2]+" ")+t.pop(i-1)
+         q.pop(i)
+     t=[i[0:-2].strip() if i.endswith("@@") else i.strip() for i in t]
+     for i,j in enumerate(t,1):
+       u+="\t".join([str(i),j,"_",q[i][0],"_","|".join(q[i][1:-1]),str(h[i]),q[i][-1],"_","_"])+"\n"
+     return u+"\n"
+
+ nlp=UDgoeswithViNLP("KoichiYasuoka/phobert-large-vietnamese-ud-goeswith")
+ print(nlp("Hai cái đầu thì tốt hơn một."))
+ ```
+
+ The class above requires [ufal.chu-liu-edmonds](https://pypi.org/project/ufal.chu-liu-edmonds/) and [ViNLP](https://pypi.org/project/ViNLP/). Alternatively, without those two packages:
+
+ ```py
+ from transformers import pipeline
+ nlp=pipeline("universal-dependencies","KoichiYasuoka/phobert-large-vietnamese-ud-goeswith",trust_remote_code=True,aggregation_strategy="simple")
+ print(nlp("Hai cái đầu thì tốt hơn một."))
+ ```
+
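Both snippets return the parse as plain CoNLL-U text. As a minimal sketch of consuming that string downstream (an assumption: it relies on a recent version of the third-party `conllu` package, which is not a dependency of this repository), it can be loaded into structured token lists:

```py
from conllu import parse

conllu_text=nlp("Hai cái đầu thì tốt hơn một.")  # CoNLL-U string from either snippet above
for sentence in parse(conllu_text):
  for token in sentence:
    # ID, FORM, UPOS, HEAD and DEPREL columns filled in by the model
    print(token["id"],token["form"],token["upos"],token["head"],token["deprel"])
```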
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "<mask>": 64000
+ }
bpe.codes ADDED
The diff for this file is too large to render. See raw diff
 
config.json ADDED
@@ -0,0 +1,771 @@
1
+ {
2
+ "architectures": [
3
+ "RobertaForTokenClassification"
4
+ ],
5
+ "attention_probs_dropout_prob": 0.1,
6
+ "bos_token_id": 0,
7
+ "classifier_dropout": null,
8
+ "custom_pipelines": {
9
+ "universal-dependencies": {
10
+ "impl": "ud.UniversalDependenciesPipeline"
11
+ }
12
+ },
13
+ "eos_token_id": 2,
14
+ "gradient_checkpointing": false,
15
+ "hidden_act": "gelu",
16
+ "hidden_dropout_prob": 0.1,
17
+ "hidden_size": 1024,
18
+ "id2label": {
19
+ "0": "-|_|dep",
20
+ "1": "ADJ|_|acl",
21
+ "2": "ADJ|_|acl:subj",
22
+ "3": "ADJ|_|acl:tmod",
23
+ "4": "ADJ|_|acl:tonp",
24
+ "5": "ADJ|_|advcl",
25
+ "6": "ADJ|_|advcl:objective",
26
+ "7": "ADJ|_|advmod",
27
+ "8": "ADJ|_|advmod:adj",
28
+ "9": "ADJ|_|advmod:neg",
29
+ "10": "ADJ|_|amod",
30
+ "11": "ADJ|_|appos",
31
+ "12": "ADJ|_|appos:nmod",
32
+ "13": "ADJ|_|ccomp",
33
+ "14": "ADJ|_|compound",
34
+ "15": "ADJ|_|compound:adj",
35
+ "16": "ADJ|_|compound:amod",
36
+ "17": "ADJ|_|compound:apr",
37
+ "18": "ADJ|_|compound:atov",
38
+ "19": "ADJ|_|compound:dir",
39
+ "20": "ADJ|_|compound:prt",
40
+ "21": "ADJ|_|compound:svc",
41
+ "22": "ADJ|_|compound:verbnoun",
42
+ "23": "ADJ|_|compound:vmod",
43
+ "24": "ADJ|_|conj",
44
+ "25": "ADJ|_|csubj",
45
+ "26": "ADJ|_|csubj:asubj",
46
+ "27": "ADJ|_|dep",
47
+ "28": "ADJ|_|discourse",
48
+ "29": "ADJ|_|dislocated",
49
+ "30": "ADJ|_|fixed",
50
+ "31": "ADJ|_|flat",
51
+ "32": "ADJ|_|flat:name",
52
+ "33": "ADJ|_|nmod",
53
+ "34": "ADJ|_|nsubj",
54
+ "35": "ADJ|_|obj",
55
+ "36": "ADJ|_|obl",
56
+ "37": "ADJ|_|obl:about",
57
+ "38": "ADJ|_|obl:adj",
58
+ "39": "ADJ|_|obl:comp",
59
+ "40": "ADJ|_|obl:tmod",
60
+ "41": "ADJ|_|obl:with",
61
+ "42": "ADJ|_|parataxis",
62
+ "43": "ADJ|_|root",
63
+ "44": "ADJ|_|xcomp",
64
+ "45": "ADJ|_|xcomp:adj",
65
+ "46": "ADP|_|acl:tmod",
66
+ "47": "ADP|_|advcl",
67
+ "48": "ADP|_|case",
68
+ "49": "ADP|_|cc",
69
+ "50": "ADP|_|ccomp",
70
+ "51": "ADP|_|compound",
71
+ "52": "ADP|_|compound:atov",
72
+ "53": "ADP|_|compound:dir",
73
+ "54": "ADP|_|compound:prt",
74
+ "55": "ADP|_|compound:svc",
75
+ "56": "ADP|_|conj",
76
+ "57": "ADP|_|csubj",
77
+ "58": "ADP|_|dep",
78
+ "59": "ADP|_|discourse",
79
+ "60": "ADP|_|fixed",
80
+ "61": "ADP|_|mark",
81
+ "62": "ADP|_|mark:pcomp",
82
+ "63": "ADP|_|nmod",
83
+ "64": "ADP|_|obl",
84
+ "65": "ADP|_|obl:tmod",
85
+ "66": "ADP|_|parataxis",
86
+ "67": "ADP|_|root",
87
+ "68": "ADP|_|xcomp",
88
+ "69": "ADV|_|acl:subj",
89
+ "70": "ADV|_|advcl",
90
+ "71": "ADV|_|advcl:objective",
91
+ "72": "ADV|_|advmod",
92
+ "73": "ADV|_|advmod:adj",
93
+ "74": "ADV|_|advmod:dir",
94
+ "75": "ADV|_|advmod:neg",
95
+ "76": "ADV|_|appos:nmod",
96
+ "77": "ADV|_|case",
97
+ "78": "ADV|_|compound",
98
+ "79": "ADV|_|compound:apr",
99
+ "80": "ADV|_|compound:atov",
100
+ "81": "ADV|_|compound:dir",
101
+ "82": "ADV|_|compound:prt",
102
+ "83": "ADV|_|compound:redup",
103
+ "84": "ADV|_|compound:svc",
104
+ "85": "ADV|_|conj",
105
+ "86": "ADV|_|discourse",
106
+ "87": "ADV|_|fixed",
107
+ "88": "ADV|_|flat:redup",
108
+ "89": "ADV|_|mark",
109
+ "90": "ADV|_|nmod",
110
+ "91": "ADV|_|obj",
111
+ "92": "ADV|_|obl",
112
+ "93": "ADV|_|obl:adv",
113
+ "94": "ADV|_|obl:tmod",
114
+ "95": "ADV|_|root",
115
+ "96": "ADV|_|xcomp",
116
+ "97": "AUX|_|aux",
117
+ "98": "AUX|_|aux:pass",
118
+ "99": "AUX|_|compound",
119
+ "100": "AUX|_|cop",
120
+ "101": "AUX|_|discourse",
121
+ "102": "AUX|_|parataxis",
122
+ "103": "AUX|_|root",
123
+ "104": "AUX|_|xcomp",
124
+ "105": "CCONJ|_|case",
125
+ "106": "CCONJ|_|cc",
126
+ "107": "CCONJ|_|flat",
127
+ "108": "CCONJ|_|mark",
128
+ "109": "DET|_|advmod:adj",
129
+ "110": "DET|_|clf:det",
130
+ "111": "DET|_|det",
131
+ "112": "DET|_|discourse",
132
+ "113": "DET|_|nmod:poss",
133
+ "114": "DET|_|nsubj",
134
+ "115": "DET|_|obj",
135
+ "116": "DET|_|obl:tmod",
136
+ "117": "INTJ|_|discourse",
137
+ "118": "INTJ|_|root",
138
+ "119": "NOUN|_|acl",
139
+ "120": "NOUN|_|acl:subj",
140
+ "121": "NOUN|_|acl:tmod",
141
+ "122": "NOUN|_|advcl",
142
+ "123": "NOUN|_|advcl:objective",
143
+ "124": "NOUN|_|amod",
144
+ "125": "NOUN|_|appos",
145
+ "126": "NOUN|_|appos:nmod",
146
+ "127": "NOUN|_|case",
147
+ "128": "NOUN|_|ccomp",
148
+ "129": "NOUN|_|clf",
149
+ "130": "NOUN|_|clf:det",
150
+ "131": "NOUN|_|compound",
151
+ "132": "NOUN|_|compound:amod",
152
+ "133": "NOUN|_|compound:dir",
153
+ "134": "NOUN|_|compound:verbnoun",
154
+ "135": "NOUN|_|compound:vmod",
155
+ "136": "NOUN|_|conj",
156
+ "137": "NOUN|_|csubj",
157
+ "138": "NOUN|_|csubj:pass",
158
+ "139": "NOUN|_|csubj:vsubj",
159
+ "140": "NOUN|_|dep",
160
+ "141": "NOUN|_|discourse",
161
+ "142": "NOUN|_|dislocated",
162
+ "143": "NOUN|_|fixed",
163
+ "144": "NOUN|_|flat",
164
+ "145": "NOUN|_|flat:name",
165
+ "146": "NOUN|_|flat:number",
166
+ "147": "NOUN|_|flat:time",
167
+ "148": "NOUN|_|iobj",
168
+ "149": "NOUN|_|list",
169
+ "150": "NOUN|_|nmod",
170
+ "151": "NOUN|_|nmod:poss",
171
+ "152": "NOUN|_|nsubj",
172
+ "153": "NOUN|_|nsubj:nn",
173
+ "154": "NOUN|_|nsubj:pass",
174
+ "155": "NOUN|_|nsubj:xsubj",
175
+ "156": "NOUN|_|nummod",
176
+ "157": "NOUN|_|obj",
177
+ "158": "NOUN|_|obl",
178
+ "159": "NOUN|_|obl:about",
179
+ "160": "NOUN|_|obl:adj",
180
+ "161": "NOUN|_|obl:adv",
181
+ "162": "NOUN|_|obl:agent",
182
+ "163": "NOUN|_|obl:comp",
183
+ "164": "NOUN|_|obl:iobj",
184
+ "165": "NOUN|_|obl:tmod",
185
+ "166": "NOUN|_|obl:with",
186
+ "167": "NOUN|_|parataxis",
187
+ "168": "NOUN|_|root",
188
+ "169": "NOUN|_|vocative",
189
+ "170": "NOUN|_|xcomp",
190
+ "171": "NUM|_|amod",
191
+ "172": "NUM|_|appos",
192
+ "173": "NUM|_|appos:nmod",
193
+ "174": "NUM|_|clf",
194
+ "175": "NUM|_|clf:det",
195
+ "176": "NUM|_|compound",
196
+ "177": "NUM|_|compound:verbnoun",
197
+ "178": "NUM|_|conj",
198
+ "179": "NUM|_|flat:date",
199
+ "180": "NUM|_|flat:name",
200
+ "181": "NUM|_|flat:number",
201
+ "182": "NUM|_|flat:time",
202
+ "183": "NUM|_|nmod",
203
+ "184": "NUM|_|nsubj",
204
+ "185": "NUM|_|nummod",
205
+ "186": "NUM|_|obj",
206
+ "187": "NUM|_|obl",
207
+ "188": "NUM|_|obl:comp",
208
+ "189": "NUM|_|obl:tmod",
209
+ "190": "NUM|_|parataxis",
210
+ "191": "NUM|_|root",
211
+ "192": "PART|_|advcl",
212
+ "193": "PART|_|advmod",
213
+ "194": "PART|_|amod",
214
+ "195": "PART|_|case",
215
+ "196": "PART|_|clf:det",
216
+ "197": "PART|_|compound",
217
+ "198": "PART|_|compound:prt",
218
+ "199": "PART|_|discourse",
219
+ "200": "PART|_|fixed",
220
+ "201": "PART|_|mark",
221
+ "202": "PART|_|obl",
222
+ "203": "PART|_|parataxis",
223
+ "204": "PRON|_|acl:tmod",
224
+ "205": "PRON|_|advcl",
225
+ "206": "PRON|_|appos:nmod",
226
+ "207": "PRON|_|ccomp",
227
+ "208": "PRON|_|compound",
228
+ "209": "PRON|_|compound:pron",
229
+ "210": "PRON|_|compound:prt",
230
+ "211": "PRON|_|conj",
231
+ "212": "PRON|_|det",
232
+ "213": "PRON|_|det:pmod",
233
+ "214": "PRON|_|discourse",
234
+ "215": "PRON|_|expl",
235
+ "216": "PRON|_|fixed",
236
+ "217": "PRON|_|iobj",
237
+ "218": "PRON|_|nmod",
238
+ "219": "PRON|_|nmod:poss",
239
+ "220": "PRON|_|nsubj",
240
+ "221": "PRON|_|nsubj:nn",
241
+ "222": "PRON|_|nsubj:pass",
242
+ "223": "PRON|_|nsubj:xsubj",
243
+ "224": "PRON|_|obj",
244
+ "225": "PRON|_|obl",
245
+ "226": "PRON|_|obl:about",
246
+ "227": "PRON|_|obl:adj",
247
+ "228": "PRON|_|obl:comp",
248
+ "229": "PRON|_|obl:iobj",
249
+ "230": "PRON|_|obl:tmod",
250
+ "231": "PRON|_|obl:with",
251
+ "232": "PRON|_|parataxis",
252
+ "233": "PRON|_|root",
253
+ "234": "PROPN|_|acl:subj",
254
+ "235": "PROPN|_|advcl",
255
+ "236": "PROPN|_|appos",
256
+ "237": "PROPN|_|appos:nmod",
257
+ "238": "PROPN|_|ccomp",
258
+ "239": "PROPN|_|compound",
259
+ "240": "PROPN|_|compound:verbnoun",
260
+ "241": "PROPN|_|conj",
261
+ "242": "PROPN|_|csubj:pass",
262
+ "243": "PROPN|_|dep",
263
+ "244": "PROPN|_|flat",
264
+ "245": "PROPN|_|flat:name",
265
+ "246": "PROPN|_|iobj",
266
+ "247": "PROPN|_|list",
267
+ "248": "PROPN|_|nmod",
268
+ "249": "PROPN|_|nmod:poss",
269
+ "250": "PROPN|_|nsubj",
270
+ "251": "PROPN|_|nsubj:nn",
271
+ "252": "PROPN|_|nsubj:pass",
272
+ "253": "PROPN|_|nsubj:xsubj",
273
+ "254": "PROPN|_|obj",
274
+ "255": "PROPN|_|obl",
275
+ "256": "PROPN|_|obl:agent",
276
+ "257": "PROPN|_|obl:comp",
277
+ "258": "PROPN|_|obl:iobj",
278
+ "259": "PROPN|_|obl:with",
279
+ "260": "PROPN|_|parataxis",
280
+ "261": "PROPN|_|root",
281
+ "262": "PROPN|_|vocative",
282
+ "263": "PUNCT|_|punct",
283
+ "264": "SCONJ|_|advcl",
284
+ "265": "SCONJ|_|case",
285
+ "266": "SCONJ|_|cc",
286
+ "267": "SCONJ|_|compound",
287
+ "268": "SCONJ|_|compound:svc",
288
+ "269": "SCONJ|_|discourse",
289
+ "270": "SCONJ|_|fixed",
290
+ "271": "SCONJ|_|mark",
291
+ "272": "SCONJ|_|obl",
292
+ "273": "SCONJ|_|parataxis",
293
+ "274": "SCONJ|_|root",
294
+ "275": "SCONJ|_|vocative",
295
+ "276": "SYM|_|advcl",
296
+ "277": "SYM|_|appos:nmod",
297
+ "278": "SYM|_|compound",
298
+ "279": "SYM|_|compound:z",
299
+ "280": "SYM|_|discourse",
300
+ "281": "SYM|_|flat",
301
+ "282": "SYM|_|flat:date",
302
+ "283": "SYM|_|flat:name",
303
+ "284": "SYM|_|flat:number",
304
+ "285": "SYM|_|flat:time",
305
+ "286": "SYM|_|nmod",
306
+ "287": "SYM|_|nsubj",
307
+ "288": "SYM|_|obj",
308
+ "289": "VERB|_|acl",
309
+ "290": "VERB|_|acl:relcl",
310
+ "291": "VERB|_|acl:subj",
311
+ "292": "VERB|_|acl:tmod",
312
+ "293": "VERB|_|acl:tonp",
313
+ "294": "VERB|_|advcl",
314
+ "295": "VERB|_|advcl:objective",
315
+ "296": "VERB|_|advmod",
316
+ "297": "VERB|_|amod",
317
+ "298": "VERB|_|appos",
318
+ "299": "VERB|_|appos:nmod",
319
+ "300": "VERB|_|case",
320
+ "301": "VERB|_|ccomp",
321
+ "302": "VERB|_|compound",
322
+ "303": "VERB|_|compound:amod",
323
+ "304": "VERB|_|compound:atov",
324
+ "305": "VERB|_|compound:dir",
325
+ "306": "VERB|_|compound:prt",
326
+ "307": "VERB|_|compound:redup",
327
+ "308": "VERB|_|compound:svc",
328
+ "309": "VERB|_|compound:verbnoun",
329
+ "310": "VERB|_|compound:vmod",
330
+ "311": "VERB|_|conj",
331
+ "312": "VERB|_|csubj",
332
+ "313": "VERB|_|csubj:pass",
333
+ "314": "VERB|_|csubj:vsubj",
334
+ "315": "VERB|_|discourse",
335
+ "316": "VERB|_|fixed",
336
+ "317": "VERB|_|flat:redup",
337
+ "318": "VERB|_|iobj",
338
+ "319": "VERB|_|mark",
339
+ "320": "VERB|_|mark:pcomp",
340
+ "321": "VERB|_|nmod",
341
+ "322": "VERB|_|nmod:poss",
342
+ "323": "VERB|_|nsubj",
343
+ "324": "VERB|_|nsubj:pass",
344
+ "325": "VERB|_|nsubj:xsubj",
345
+ "326": "VERB|_|obj",
346
+ "327": "VERB|_|obl",
347
+ "328": "VERB|_|obl:about",
348
+ "329": "VERB|_|obl:comp",
349
+ "330": "VERB|_|obl:iobj",
350
+ "331": "VERB|_|obl:tmod",
351
+ "332": "VERB|_|parataxis",
352
+ "333": "VERB|_|root",
353
+ "334": "VERB|_|vocative",
354
+ "335": "VERB|_|xcomp",
355
+ "336": "VERB|_|xcomp:adj",
356
+ "337": "VERB|_|xcomp:vcomp",
357
+ "338": "X|_|acl",
358
+ "339": "X|_|acl:subj",
359
+ "340": "X|_|acl:tonp",
360
+ "341": "X|_|advcl",
361
+ "342": "X|_|amod",
362
+ "343": "X|_|case",
363
+ "344": "X|_|cc",
364
+ "345": "X|_|ccomp",
365
+ "346": "X|_|compound",
366
+ "347": "X|_|compound:adj",
367
+ "348": "X|_|compound:prt",
368
+ "349": "X|_|compound:vmod",
369
+ "350": "X|_|compound:z",
370
+ "351": "X|_|conj",
371
+ "352": "X|_|discourse",
372
+ "353": "X|_|dislocated",
373
+ "354": "X|_|goeswith",
374
+ "355": "X|_|mark",
375
+ "356": "X|_|nmod",
376
+ "357": "X|_|nmod:poss",
377
+ "358": "X|_|nsubj",
378
+ "359": "X|_|obj",
379
+ "360": "X|_|obl",
380
+ "361": "X|_|obl:about",
381
+ "362": "X|_|obl:comp",
382
+ "363": "X|_|obl:tmod",
383
+ "364": "X|_|parataxis",
384
+ "365": "X|_|root",
385
+ "366": "X|_|xcomp"
386
+ },
387
+ "initializer_range": 0.02,
388
+ "intermediate_size": 4096,
389
+ "label2id": {
390
+ "-|_|dep": 0,
391
+ "ADJ|_|acl": 1,
392
+ "ADJ|_|acl:subj": 2,
393
+ "ADJ|_|acl:tmod": 3,
394
+ "ADJ|_|acl:tonp": 4,
395
+ "ADJ|_|advcl": 5,
396
+ "ADJ|_|advcl:objective": 6,
397
+ "ADJ|_|advmod": 7,
398
+ "ADJ|_|advmod:adj": 8,
399
+ "ADJ|_|advmod:neg": 9,
400
+ "ADJ|_|amod": 10,
401
+ "ADJ|_|appos": 11,
402
+ "ADJ|_|appos:nmod": 12,
403
+ "ADJ|_|ccomp": 13,
404
+ "ADJ|_|compound": 14,
405
+ "ADJ|_|compound:adj": 15,
406
+ "ADJ|_|compound:amod": 16,
407
+ "ADJ|_|compound:apr": 17,
408
+ "ADJ|_|compound:atov": 18,
409
+ "ADJ|_|compound:dir": 19,
410
+ "ADJ|_|compound:prt": 20,
411
+ "ADJ|_|compound:svc": 21,
412
+ "ADJ|_|compound:verbnoun": 22,
413
+ "ADJ|_|compound:vmod": 23,
414
+ "ADJ|_|conj": 24,
415
+ "ADJ|_|csubj": 25,
416
+ "ADJ|_|csubj:asubj": 26,
417
+ "ADJ|_|dep": 27,
418
+ "ADJ|_|discourse": 28,
419
+ "ADJ|_|dislocated": 29,
420
+ "ADJ|_|fixed": 30,
421
+ "ADJ|_|flat": 31,
422
+ "ADJ|_|flat:name": 32,
423
+ "ADJ|_|nmod": 33,
424
+ "ADJ|_|nsubj": 34,
425
+ "ADJ|_|obj": 35,
426
+ "ADJ|_|obl": 36,
427
+ "ADJ|_|obl:about": 37,
428
+ "ADJ|_|obl:adj": 38,
429
+ "ADJ|_|obl:comp": 39,
430
+ "ADJ|_|obl:tmod": 40,
431
+ "ADJ|_|obl:with": 41,
432
+ "ADJ|_|parataxis": 42,
433
+ "ADJ|_|root": 43,
434
+ "ADJ|_|xcomp": 44,
435
+ "ADJ|_|xcomp:adj": 45,
436
+ "ADP|_|acl:tmod": 46,
437
+ "ADP|_|advcl": 47,
438
+ "ADP|_|case": 48,
439
+ "ADP|_|cc": 49,
440
+ "ADP|_|ccomp": 50,
441
+ "ADP|_|compound": 51,
442
+ "ADP|_|compound:atov": 52,
443
+ "ADP|_|compound:dir": 53,
444
+ "ADP|_|compound:prt": 54,
445
+ "ADP|_|compound:svc": 55,
446
+ "ADP|_|conj": 56,
447
+ "ADP|_|csubj": 57,
448
+ "ADP|_|dep": 58,
449
+ "ADP|_|discourse": 59,
450
+ "ADP|_|fixed": 60,
451
+ "ADP|_|mark": 61,
452
+ "ADP|_|mark:pcomp": 62,
453
+ "ADP|_|nmod": 63,
454
+ "ADP|_|obl": 64,
455
+ "ADP|_|obl:tmod": 65,
456
+ "ADP|_|parataxis": 66,
457
+ "ADP|_|root": 67,
458
+ "ADP|_|xcomp": 68,
459
+ "ADV|_|acl:subj": 69,
460
+ "ADV|_|advcl": 70,
461
+ "ADV|_|advcl:objective": 71,
462
+ "ADV|_|advmod": 72,
463
+ "ADV|_|advmod:adj": 73,
464
+ "ADV|_|advmod:dir": 74,
465
+ "ADV|_|advmod:neg": 75,
466
+ "ADV|_|appos:nmod": 76,
467
+ "ADV|_|case": 77,
468
+ "ADV|_|compound": 78,
469
+ "ADV|_|compound:apr": 79,
470
+ "ADV|_|compound:atov": 80,
471
+ "ADV|_|compound:dir": 81,
472
+ "ADV|_|compound:prt": 82,
473
+ "ADV|_|compound:redup": 83,
474
+ "ADV|_|compound:svc": 84,
475
+ "ADV|_|conj": 85,
476
+ "ADV|_|discourse": 86,
477
+ "ADV|_|fixed": 87,
478
+ "ADV|_|flat:redup": 88,
479
+ "ADV|_|mark": 89,
480
+ "ADV|_|nmod": 90,
481
+ "ADV|_|obj": 91,
482
+ "ADV|_|obl": 92,
483
+ "ADV|_|obl:adv": 93,
484
+ "ADV|_|obl:tmod": 94,
485
+ "ADV|_|root": 95,
486
+ "ADV|_|xcomp": 96,
487
+ "AUX|_|aux": 97,
488
+ "AUX|_|aux:pass": 98,
489
+ "AUX|_|compound": 99,
490
+ "AUX|_|cop": 100,
491
+ "AUX|_|discourse": 101,
492
+ "AUX|_|parataxis": 102,
493
+ "AUX|_|root": 103,
494
+ "AUX|_|xcomp": 104,
495
+ "CCONJ|_|case": 105,
496
+ "CCONJ|_|cc": 106,
497
+ "CCONJ|_|flat": 107,
498
+ "CCONJ|_|mark": 108,
499
+ "DET|_|advmod:adj": 109,
500
+ "DET|_|clf:det": 110,
501
+ "DET|_|det": 111,
502
+ "DET|_|discourse": 112,
503
+ "DET|_|nmod:poss": 113,
504
+ "DET|_|nsubj": 114,
505
+ "DET|_|obj": 115,
506
+ "DET|_|obl:tmod": 116,
507
+ "INTJ|_|discourse": 117,
508
+ "INTJ|_|root": 118,
509
+ "NOUN|_|acl": 119,
510
+ "NOUN|_|acl:subj": 120,
511
+ "NOUN|_|acl:tmod": 121,
512
+ "NOUN|_|advcl": 122,
513
+ "NOUN|_|advcl:objective": 123,
514
+ "NOUN|_|amod": 124,
515
+ "NOUN|_|appos": 125,
516
+ "NOUN|_|appos:nmod": 126,
517
+ "NOUN|_|case": 127,
518
+ "NOUN|_|ccomp": 128,
519
+ "NOUN|_|clf": 129,
520
+ "NOUN|_|clf:det": 130,
521
+ "NOUN|_|compound": 131,
522
+ "NOUN|_|compound:amod": 132,
523
+ "NOUN|_|compound:dir": 133,
524
+ "NOUN|_|compound:verbnoun": 134,
525
+ "NOUN|_|compound:vmod": 135,
526
+ "NOUN|_|conj": 136,
527
+ "NOUN|_|csubj": 137,
528
+ "NOUN|_|csubj:pass": 138,
529
+ "NOUN|_|csubj:vsubj": 139,
530
+ "NOUN|_|dep": 140,
531
+ "NOUN|_|discourse": 141,
532
+ "NOUN|_|dislocated": 142,
533
+ "NOUN|_|fixed": 143,
534
+ "NOUN|_|flat": 144,
535
+ "NOUN|_|flat:name": 145,
536
+ "NOUN|_|flat:number": 146,
537
+ "NOUN|_|flat:time": 147,
538
+ "NOUN|_|iobj": 148,
539
+ "NOUN|_|list": 149,
540
+ "NOUN|_|nmod": 150,
541
+ "NOUN|_|nmod:poss": 151,
542
+ "NOUN|_|nsubj": 152,
543
+ "NOUN|_|nsubj:nn": 153,
544
+ "NOUN|_|nsubj:pass": 154,
545
+ "NOUN|_|nsubj:xsubj": 155,
546
+ "NOUN|_|nummod": 156,
547
+ "NOUN|_|obj": 157,
548
+ "NOUN|_|obl": 158,
549
+ "NOUN|_|obl:about": 159,
550
+ "NOUN|_|obl:adj": 160,
551
+ "NOUN|_|obl:adv": 161,
552
+ "NOUN|_|obl:agent": 162,
553
+ "NOUN|_|obl:comp": 163,
554
+ "NOUN|_|obl:iobj": 164,
555
+ "NOUN|_|obl:tmod": 165,
556
+ "NOUN|_|obl:with": 166,
557
+ "NOUN|_|parataxis": 167,
558
+ "NOUN|_|root": 168,
559
+ "NOUN|_|vocative": 169,
560
+ "NOUN|_|xcomp": 170,
561
+ "NUM|_|amod": 171,
562
+ "NUM|_|appos": 172,
563
+ "NUM|_|appos:nmod": 173,
564
+ "NUM|_|clf": 174,
565
+ "NUM|_|clf:det": 175,
566
+ "NUM|_|compound": 176,
567
+ "NUM|_|compound:verbnoun": 177,
568
+ "NUM|_|conj": 178,
569
+ "NUM|_|flat:date": 179,
570
+ "NUM|_|flat:name": 180,
571
+ "NUM|_|flat:number": 181,
572
+ "NUM|_|flat:time": 182,
573
+ "NUM|_|nmod": 183,
574
+ "NUM|_|nsubj": 184,
575
+ "NUM|_|nummod": 185,
576
+ "NUM|_|obj": 186,
577
+ "NUM|_|obl": 187,
578
+ "NUM|_|obl:comp": 188,
579
+ "NUM|_|obl:tmod": 189,
580
+ "NUM|_|parataxis": 190,
581
+ "NUM|_|root": 191,
582
+ "PART|_|advcl": 192,
583
+ "PART|_|advmod": 193,
584
+ "PART|_|amod": 194,
585
+ "PART|_|case": 195,
586
+ "PART|_|clf:det": 196,
587
+ "PART|_|compound": 197,
588
+ "PART|_|compound:prt": 198,
589
+ "PART|_|discourse": 199,
590
+ "PART|_|fixed": 200,
591
+ "PART|_|mark": 201,
592
+ "PART|_|obl": 202,
593
+ "PART|_|parataxis": 203,
594
+ "PRON|_|acl:tmod": 204,
595
+ "PRON|_|advcl": 205,
596
+ "PRON|_|appos:nmod": 206,
597
+ "PRON|_|ccomp": 207,
598
+ "PRON|_|compound": 208,
599
+ "PRON|_|compound:pron": 209,
600
+ "PRON|_|compound:prt": 210,
601
+ "PRON|_|conj": 211,
602
+ "PRON|_|det": 212,
603
+ "PRON|_|det:pmod": 213,
604
+ "PRON|_|discourse": 214,
605
+ "PRON|_|expl": 215,
606
+ "PRON|_|fixed": 216,
607
+ "PRON|_|iobj": 217,
608
+ "PRON|_|nmod": 218,
609
+ "PRON|_|nmod:poss": 219,
610
+ "PRON|_|nsubj": 220,
611
+ "PRON|_|nsubj:nn": 221,
612
+ "PRON|_|nsubj:pass": 222,
613
+ "PRON|_|nsubj:xsubj": 223,
614
+ "PRON|_|obj": 224,
615
+ "PRON|_|obl": 225,
616
+ "PRON|_|obl:about": 226,
617
+ "PRON|_|obl:adj": 227,
618
+ "PRON|_|obl:comp": 228,
619
+ "PRON|_|obl:iobj": 229,
620
+ "PRON|_|obl:tmod": 230,
621
+ "PRON|_|obl:with": 231,
622
+ "PRON|_|parataxis": 232,
623
+ "PRON|_|root": 233,
624
+ "PROPN|_|acl:subj": 234,
625
+ "PROPN|_|advcl": 235,
626
+ "PROPN|_|appos": 236,
627
+ "PROPN|_|appos:nmod": 237,
628
+ "PROPN|_|ccomp": 238,
629
+ "PROPN|_|compound": 239,
630
+ "PROPN|_|compound:verbnoun": 240,
631
+ "PROPN|_|conj": 241,
632
+ "PROPN|_|csubj:pass": 242,
633
+ "PROPN|_|dep": 243,
634
+ "PROPN|_|flat": 244,
635
+ "PROPN|_|flat:name": 245,
636
+ "PROPN|_|iobj": 246,
637
+ "PROPN|_|list": 247,
638
+ "PROPN|_|nmod": 248,
639
+ "PROPN|_|nmod:poss": 249,
640
+ "PROPN|_|nsubj": 250,
641
+ "PROPN|_|nsubj:nn": 251,
642
+ "PROPN|_|nsubj:pass": 252,
643
+ "PROPN|_|nsubj:xsubj": 253,
644
+ "PROPN|_|obj": 254,
645
+ "PROPN|_|obl": 255,
646
+ "PROPN|_|obl:agent": 256,
647
+ "PROPN|_|obl:comp": 257,
648
+ "PROPN|_|obl:iobj": 258,
649
+ "PROPN|_|obl:with": 259,
650
+ "PROPN|_|parataxis": 260,
651
+ "PROPN|_|root": 261,
652
+ "PROPN|_|vocative": 262,
653
+ "PUNCT|_|punct": 263,
654
+ "SCONJ|_|advcl": 264,
655
+ "SCONJ|_|case": 265,
656
+ "SCONJ|_|cc": 266,
657
+ "SCONJ|_|compound": 267,
658
+ "SCONJ|_|compound:svc": 268,
659
+ "SCONJ|_|discourse": 269,
660
+ "SCONJ|_|fixed": 270,
661
+ "SCONJ|_|mark": 271,
662
+ "SCONJ|_|obl": 272,
663
+ "SCONJ|_|parataxis": 273,
664
+ "SCONJ|_|root": 274,
665
+ "SCONJ|_|vocative": 275,
666
+ "SYM|_|advcl": 276,
667
+ "SYM|_|appos:nmod": 277,
668
+ "SYM|_|compound": 278,
669
+ "SYM|_|compound:z": 279,
670
+ "SYM|_|discourse": 280,
671
+ "SYM|_|flat": 281,
672
+ "SYM|_|flat:date": 282,
673
+ "SYM|_|flat:name": 283,
674
+ "SYM|_|flat:number": 284,
675
+ "SYM|_|flat:time": 285,
676
+ "SYM|_|nmod": 286,
677
+ "SYM|_|nsubj": 287,
678
+ "SYM|_|obj": 288,
679
+ "VERB|_|acl": 289,
680
+ "VERB|_|acl:relcl": 290,
681
+ "VERB|_|acl:subj": 291,
682
+ "VERB|_|acl:tmod": 292,
683
+ "VERB|_|acl:tonp": 293,
684
+ "VERB|_|advcl": 294,
685
+ "VERB|_|advcl:objective": 295,
686
+ "VERB|_|advmod": 296,
687
+ "VERB|_|amod": 297,
688
+ "VERB|_|appos": 298,
689
+ "VERB|_|appos:nmod": 299,
690
+ "VERB|_|case": 300,
691
+ "VERB|_|ccomp": 301,
692
+ "VERB|_|compound": 302,
693
+ "VERB|_|compound:amod": 303,
694
+ "VERB|_|compound:atov": 304,
695
+ "VERB|_|compound:dir": 305,
696
+ "VERB|_|compound:prt": 306,
697
+ "VERB|_|compound:redup": 307,
698
+ "VERB|_|compound:svc": 308,
699
+ "VERB|_|compound:verbnoun": 309,
700
+ "VERB|_|compound:vmod": 310,
701
+ "VERB|_|conj": 311,
702
+ "VERB|_|csubj": 312,
703
+ "VERB|_|csubj:pass": 313,
704
+ "VERB|_|csubj:vsubj": 314,
705
+ "VERB|_|discourse": 315,
706
+ "VERB|_|fixed": 316,
707
+ "VERB|_|flat:redup": 317,
708
+ "VERB|_|iobj": 318,
709
+ "VERB|_|mark": 319,
710
+ "VERB|_|mark:pcomp": 320,
711
+ "VERB|_|nmod": 321,
712
+ "VERB|_|nmod:poss": 322,
713
+ "VERB|_|nsubj": 323,
714
+ "VERB|_|nsubj:pass": 324,
715
+ "VERB|_|nsubj:xsubj": 325,
716
+ "VERB|_|obj": 326,
717
+ "VERB|_|obl": 327,
718
+ "VERB|_|obl:about": 328,
719
+ "VERB|_|obl:comp": 329,
720
+ "VERB|_|obl:iobj": 330,
721
+ "VERB|_|obl:tmod": 331,
722
+ "VERB|_|parataxis": 332,
723
+ "VERB|_|root": 333,
724
+ "VERB|_|vocative": 334,
725
+ "VERB|_|xcomp": 335,
726
+ "VERB|_|xcomp:adj": 336,
727
+ "VERB|_|xcomp:vcomp": 337,
728
+ "X|_|acl": 338,
729
+ "X|_|acl:subj": 339,
730
+ "X|_|acl:tonp": 340,
731
+ "X|_|advcl": 341,
732
+ "X|_|amod": 342,
733
+ "X|_|case": 343,
734
+ "X|_|cc": 344,
735
+ "X|_|ccomp": 345,
736
+ "X|_|compound": 346,
737
+ "X|_|compound:adj": 347,
738
+ "X|_|compound:prt": 348,
739
+ "X|_|compound:vmod": 349,
740
+ "X|_|compound:z": 350,
741
+ "X|_|conj": 351,
742
+ "X|_|discourse": 352,
743
+ "X|_|dislocated": 353,
744
+ "X|_|goeswith": 354,
745
+ "X|_|mark": 355,
746
+ "X|_|nmod": 356,
747
+ "X|_|nmod:poss": 357,
748
+ "X|_|nsubj": 358,
749
+ "X|_|obj": 359,
750
+ "X|_|obl": 360,
751
+ "X|_|obl:about": 361,
752
+ "X|_|obl:comp": 362,
753
+ "X|_|obl:tmod": 363,
754
+ "X|_|parataxis": 364,
755
+ "X|_|root": 365,
756
+ "X|_|xcomp": 366
757
+ },
758
+ "layer_norm_eps": 1e-05,
759
+ "max_position_embeddings": 258,
760
+ "model_type": "roberta",
761
+ "num_attention_heads": 16,
762
+ "num_hidden_layers": 24,
763
+ "pad_token_id": 1,
764
+ "position_embedding_type": "absolute",
765
+ "tokenizer_class": "PhobertTokenizer",
766
+ "torch_dtype": "float32",
767
+ "transformers_version": "4.22.1",
768
+ "type_vocab_size": 1,
769
+ "use_cache": true,
770
+ "vocab_size": 64001
771
+ }
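Each entry of `id2label`/`label2id` above packs three CoNLL-U columns into one string: UPOS, FEATS (always `_` in this model) and DEPREL, separated by `|`. A small illustration of how `ud.py` (further below) takes such a label apart; the example label is taken from the table above:

```py
label="NOUN|_|nsubj"            # one of the labels listed in id2label
fields=label.split("|")
upos,deprel=fields[0],fields[-1]
feats="|".join(fields[1:-1])    # joined back, since FEATS itself may contain "|"
print(upos,feats,deprel)        # NOUN _ nsubj
```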
maker.py ADDED
@@ -0,0 +1,71 @@
+ #! /usr/bin/python3
+ src="vinai/phobert-large"
+ tgt="KoichiYasuoka/phobert-large-vietnamese-ud-goeswith"
+ import os
+ # fetch the two Vietnamese treebanks used for fine-tuning
+ url="https://github.com/UniversalDependencies/UD_Vietnamese-VTB"
+ d=os.path.basename(url)
+ os.system("test -d "+d+" || git clone --depth=1 "+url)
+ os.system("for F in train dev test ; do cp "+d+"/*-$F.conllu $F.conllu ; done")
+ url="https://github.com/datquocnguyen/VnDT"
+ d=os.path.basename(url)
+ os.system("test -d "+d+" || git clone --depth=1 "+url)
+ os.system("for F in train dev test ; do cp "+d+"/*-gold-*-$F.conll pre-$F.conll ; done")
+ class UDgoeswithDataset(object):
+   def __init__(self,conllu,tokenizer):
+     self.ids,self.tags,label=[],[],set()
+     with open(conllu,"r",encoding="utf-8") as r:
+       cls,sep,msk=tokenizer.cls_token_id,tokenizer.sep_token_id,tokenizer.mask_token_id
+       dep,c="-|_|dep",[]
+       for s in r:
+         t=s.split("\t")
+         if len(t)==10 and t[0].isdecimal():
+           c.append(t)
+         elif c!=[]:
+           for x in [lambda i:i.replace(" ","_"),lambda i:i.replace("_"," ")]:
+             d=list(c)
+             v=tokenizer([x(t[1]) for t in d],add_special_tokens=False)["input_ids"]
+             # insert goeswith rows for every extra subword of a multi-subword token
+             for i in range(len(v)-1,-1,-1):
+               for j in range(1,len(v[i])):
+                 d.insert(i+1,[d[i][0],"_","_","X","_","_",d[i][0],"goeswith","_","_"])
+             y=["0"]+[t[0] for t in d]
+             h=[i if t[6]=="0" else y.index(t[6]) for i,t in enumerate(d,1)]
+             p,v=[t[3]+"|"+t[5]+"|"+t[7] for t in d],sum(v,[])
+             if len(v)<tokenizer.model_max_length-3:
+               self.ids.append([cls]+v+[sep])
+               self.tags.append([dep]+p+[dep])
+               label=set(sum([self.tags[-1],list(label)],[]))
+               # one extra example per subword: mask it and append the unmasked subword as candidate head
+               for i,k in enumerate(v):
+                 self.ids.append([cls]+v[0:i]+[msk]+v[i+1:]+[sep,k])
+                 self.tags.append([dep]+[t if h[j]==i+1 else dep for j,t in enumerate(p)]+[dep,dep])
+           c=[]
+     self.label2id={l:i for i,l in enumerate(sorted(label))}
+   def __call__(*args):
+     label=set(sum([list(t.label2id) for t in args],[]))
+     lid={l:i for i,l in enumerate(sorted(label))}
+     for t in args:
+       t.label2id=lid
+     return lid
+   __len__=lambda self:len(self.ids)
+   __getitem__=lambda self,i:{"input_ids":self.ids[i],"labels":[self.label2id[t] for t in self.tags[i]]}
+ from transformers import AutoTokenizer,AutoConfig,AutoModelForTokenClassification,DataCollatorForTokenClassification,TrainingArguments,Trainer
+ tkz=AutoTokenizer.from_pretrained(src)
+ # first pass: fine-tune on VnDT
+ trainDS=UDgoeswithDataset("pre-train.conll",tkz)
+ devDS=UDgoeswithDataset("pre-dev.conll",tkz)
+ testDS=UDgoeswithDataset("pre-test.conll",tkz)
+ lid=trainDS(devDS,testDS)
+ cfg=AutoConfig.from_pretrained(src,num_labels=len(lid),label2id=lid,id2label={i:l for l,i in lid.items()},ignore_mismatched_sizes=True)
+ arg=TrainingArguments(num_train_epochs=3,per_device_train_batch_size=32,output_dir="/tmp",overwrite_output_dir=True,save_total_limit=2,evaluation_strategy="epoch",learning_rate=5e-05,warmup_ratio=0.1)
+ trn=Trainer(args=arg,data_collator=DataCollatorForTokenClassification(tkz),model=AutoModelForTokenClassification.from_pretrained(src,config=cfg,ignore_mismatched_sizes=True),train_dataset=trainDS,eval_dataset=devDS)
+ trn.train()
+ trn.save_model("tmpdir")
+ tkz.save_pretrained("tmpdir")
+ # second pass: continue fine-tuning on UD_Vietnamese-VTB
+ trainDS=UDgoeswithDataset("train.conllu",tkz)
+ devDS=UDgoeswithDataset("dev.conllu",tkz)
+ testDS=UDgoeswithDataset("test.conllu",tkz)
+ lid=trainDS(devDS,testDS)
+ cfg=AutoConfig.from_pretrained("tmpdir",num_labels=len(lid),label2id=lid,id2label={i:l for l,i in lid.items()},ignore_mismatched_sizes=True)
+ arg=TrainingArguments(num_train_epochs=3,per_device_train_batch_size=32,output_dir="/tmp",overwrite_output_dir=True,save_total_limit=2,evaluation_strategy="epoch",learning_rate=5e-05,warmup_ratio=0.1)
+ trn=Trainer(args=arg,data_collator=DataCollatorForTokenClassification(tkz),model=AutoModelForTokenClassification.from_pretrained("tmpdir",config=cfg,ignore_mismatched_sizes=True),train_dataset=trainDS,eval_dataset=devDS)
+ trn.train()
+ trn.save_model(tgt)
+ tkz.save_pretrained(tgt)
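The central trick in `maker.py` is how `UDgoeswithDataset` turns dependency arcs into token-classification examples: besides one plain copy of the sentence, it emits one extra example per subword in which that subword is replaced by `<mask>` and the unmasked subword is appended after the separator as the candidate head; labels then mark which positions depend on the masked one. A toy sketch of that construction, where the special-token ids come from `config.json`/`added_tokens.json` but the subword ids 11..14 are made up rather than real PhoBERT vocabulary ids:

```py
# 0=<s>, 2=</s>, 64000=<mask>; 11..14 stand in for the subwords of a 4-token sentence
cls,sep,msk=0,2,64000
v=[11,12,13,14]
examples=[]
for i,k in enumerate(v):
  # mask position i and append the original subword k as the candidate head
  examples.append([cls]+v[0:i]+[msk]+v[i+1:]+[sep,k])
print(examples[0])  # [0, 64000, 12, 13, 14, 2, 11]
```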
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b3a891a499af4bbbdc656ff8e134ffdf0bb2fdc5c116fb49304e220bc6e1b10b
+ size 1474093809
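The file above is only a Git LFS pointer, so a plain `git clone` without LFS yields this three-line stub instead of the ~1.4 GB weights. `from_pretrained` in the README snippets downloads the real file automatically; as an alternative minimal sketch (assuming the `huggingface_hub` package is installed), the weights can also be fetched explicitly:

```py
from huggingface_hub import hf_hub_download

# resolves the LFS pointer and caches the actual pytorch_model.bin locally
path=hf_hub_download(repo_id="KoichiYasuoka/phobert-large-vietnamese-ud-goeswith",filename="pytorch_model.bin")
print(path)
```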
special_tokens_map.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": "<mask>",
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "unk_token": "<unk>"
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": "<mask>",
+   "model_max_length": 256,
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "special_tokens_map_file": null,
+   "tokenizer_class": "PhobertTokenizer",
+   "unk_token": "<unk>"
+ }
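A quick check that the tokenizer files above are picked up as configured; a sketch assuming only the `transformers` library:

```py
from transformers import AutoTokenizer

tkz=AutoTokenizer.from_pretrained("KoichiYasuoka/phobert-large-vietnamese-ud-goeswith")
print(type(tkz).__name__)    # PhobertTokenizer, per "tokenizer_class"
print(tkz.model_max_length)  # 256, per tokenizer_config.json
print(tkz.mask_token_id)     # 64000, the id registered in added_tokens.json
```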
ud.py ADDED
@@ -0,0 +1,84 @@
+ from transformers import TokenClassificationPipeline
+
+ class UniversalDependenciesPipeline(TokenClassificationPipeline):
+   def preprocess(self,sentence,offset_mapping=None):
+     import torch
+     from tokenizers.pre_tokenizers import Whitespace
+     # re-join whitespace-split tokens with "_" when the joined form is in the vocabulary
+     v=Whitespace().pre_tokenize_str(sentence)
+     t=[v[0]]
+     for k,(s,e) in v[1:]:
+       j=t[-1][0]+"_"+k
+       if self.tokenizer.convert_tokens_to_ids(j)!=self.tokenizer.unk_token_id:
+         t[-1]=(j,(t[-1][1][0],e))
+       else:
+         t.append((k,(s,e)))
+     m=[(0,0)]+[j for i,j in t]+[(0,0)]
+     r=super().preprocess(sentence=" ".join(i for i,j in t))
+     w=self.tokenizer.convert_ids_to_tokens(r["input_ids"][0])
+     if len(m)!=len(w):
+       # split offsets for words that BPE broke into several "@@" pieces
+       for i,j in enumerate(w):
+         if j.endswith("@@"):
+           s,e=m[i]
+           m.insert(i+1,(s+len(j)-2,e))
+           m[i]=(s,s+len(j)-2)
+     r["offset_mapping"]=torch.tensor([m])
+     r["sentence"]=sentence
+     return r
+   def _forward(self,model_inputs):
+     import torch
+     v=model_inputs["input_ids"][0].tolist()
+     # mask each token position in turn and append the candidate head token
+     with torch.no_grad():
+       e=self.model(input_ids=torch.tensor([v[0:i]+[self.tokenizer.mask_token_id]+v[i+1:]+[j] for i,j in enumerate(v[1:-1],1)]))
+     return {"logits":e.logits[:,1:-2,:],**model_inputs}
+   def postprocess(self,model_outputs,**kwargs):
+     import numpy
+     e=model_outputs["logits"].numpy()
+     # allow the root label only on the diagonal (a token that heads itself)
+     r=[1 if i==0 else -1 if j.endswith("|root") else 0 for i,j in sorted(self.model.config.id2label.items())]
+     e+=numpy.where(numpy.add.outer(numpy.identity(e.shape[0]),r)==0,0,numpy.nan)
+     g=self.model.config.label2id["X|_|goeswith"]
+     r=numpy.tri(e.shape[0])
+     for i in range(e.shape[0]):
+       for j in range(i+2,e.shape[1]):
+         r[i,j]=r[i,j-1] if numpy.nanargmax(e[i,j-1])==g else 1
+     e[:,:,g]+=numpy.where(r==0,0,numpy.nan)
+     m,p=numpy.nanmax(e,axis=2),numpy.nanargmax(e,axis=2)
+     # maximum spanning tree via Chu-Liu-Edmonds; retry with a single root if several were chosen
+     h=self.chu_liu_edmonds(m)
+     z=[i for i,j in enumerate(h) if i==j]
+     if len(z)>1:
+       k,h=z[numpy.nanargmax(m[z,z])],numpy.nanmin(m)-numpy.nanmax(m)
+       m[:,z]+=[[0 if j in z and (i!=j or i==k) else h for i in z] for j in range(m.shape[0])]
+       h=self.chu_liu_edmonds(m)
+     v=[(s,e) for s,e in model_outputs["offset_mapping"][0].tolist() if s<e]
+     q=[self.model.config.id2label[p[j,i]].split("|") for i,j in enumerate(h)]
+     g="aggregation_strategy" in kwargs and kwargs["aggregation_strategy"]!="none"
+     if g:
+       # merge goeswith subwords back into single words
+       for i,j in reversed(list(enumerate(q[1:],1))):
+         if j[-1]=="goeswith" and set([t[-1] for t in q[h[i]+1:i+1]])=={"goeswith"}:
+           h=[b if i>b else b-1 for a,b in enumerate(h) if i!=a]
+           v[i-1]=(v[i-1][0],v.pop(i)[1])
+           q.pop(i)
+     t=model_outputs["sentence"].replace("\n"," ")
+     u="# text = "+t+"\n"
+     for i,(s,e) in enumerate(v):
+       u+="\t".join([str(i+1),t[s:e],t[s:e] if g else "_",q[i][0],"_","|".join(q[i][1:-1]),str(0 if h[i]==i else h[i]+1),q[i][-1],"_","_" if i+1<len(v) and e<v[i+1][0] else "SpaceAfter=No"])+"\n"
+     return u+"\n"
+   def chu_liu_edmonds(self,matrix):
+     import numpy
+     h=numpy.nanargmax(matrix,axis=0)
+     x=[-1 if i==j else j for i,j in enumerate(h)]
+     for b in [lambda x,i,j:-1 if i not in x else x[i],lambda x,i,j:-1 if j<0 else x[j]]:
+       y=[]
+       while x!=y:
+         y=list(x)
+         for i,j in enumerate(x):
+           x[i]=b(x,i,j)
+     if max(x)<0:
+       return h
+     # contract the detected cycle and recurse
+     y,x=[i for i,j in enumerate(x) if j==max(x)],[i for i,j in enumerate(x) if j<max(x)]
+     z=matrix-numpy.nanmax(matrix,axis=0)
+     m=numpy.block([[z[x,:][:,x],numpy.nanmax(z[x,:][:,y],axis=1).reshape(len(x),1)],[numpy.nanmax(z[y,:][:,x],axis=0),numpy.nanmax(z[y,y])]])
+     k=[j if i==len(x) else x[j] if j<len(x) else y[numpy.nanargmax(z[y,x[i]])] for i,j in enumerate(self.chu_liu_edmonds(m))]
+     h=[j if i in y else k[x.index(i)] for i,j in enumerate(h)]
+     i=y[numpy.nanargmax(z[x[k[-1]],y] if k[-1]<len(x) else z[y,y])]
+     h[i]=x[k[-1]] if k[-1]<len(x) else i
+     return h
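Since `config.json` registers this class under `custom_pipelines`, the `pipeline("universal-dependencies", ..., trust_remote_code=True)` call in the README resolves to `UniversalDependenciesPipeline`. A minimal sketch of instantiating the class directly from a local copy of `ud.py` instead (an assumption: the model card only documents the `pipeline(...)` route):

```py
from transformers import AutoTokenizer,AutoModelForTokenClassification
from ud import UniversalDependenciesPipeline  # local copy of the file above

mdl="KoichiYasuoka/phobert-large-vietnamese-ud-goeswith"
nlp=UniversalDependenciesPipeline(model=AutoModelForTokenClassification.from_pretrained(mdl),
                                  tokenizer=AutoTokenizer.from_pretrained(mdl),
                                  aggregation_strategy="simple")
print(nlp("Hai cái đầu thì tốt hơn một."))  # prints a CoNLL-U parse
```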
vocab.txt ADDED
The diff for this file is too large to render. See raw diff