KoichiYasuoka committed on
Commit
a3a1ef7
1 Parent(s): 194763f

release after the tokenizer was refined

README.md ADDED
@@ -0,0 +1,30 @@
+ ---
+ language:
+ - "ja"
+ tags:
+ - "japanese"
+ - "pos"
+ - "dependency-parsing"
+ base_model: cyberagent/open-calm-1b
+ datasets:
+ - "universal_dependencies"
+ license: "cc-by-sa-4.0"
+ pipeline_tag: "token-classification"
+ widget:
+ - text: "全学年にわたって小学校の国語の教科書に挿し絵が用いられている"
+ ---
+
+ # open-calm-1b-ud-causal
+
+ ## Model Description
+
+ This is a GPT-NeoX model for POS-tagging and dependency-parsing, derived from [open-calm-1b](https://huggingface.co/cyberagent/open-calm-1b) and refined with [UD_Japanese-GSDLUW](https://github.com/UniversalDependencies/UD_Japanese-GSDLUW).
+
+ ## How to Use
+
+ ```
+ from transformers import pipeline
+ nlp=pipeline("universal-dependencies","KoichiYasuoka/open-calm-1b-ud-causal",trust_remote_code=True)
+ print(nlp("全学年にわたって小学校の国語の教科書に挿し絵が用いられている"))
+ ```
+
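Besides the `universal-dependencies` pipeline shown in the README, the config.json added next also registers a `upos` custom pipeline for plain POS-tagging. A minimal sketch of calling it under the same setup (this usage is inferred from the `custom_pipelines` entry, not stated in the README):

```
from transformers import pipeline
nlp=pipeline("upos","KoichiYasuoka/open-calm-1b-ud-causal",trust_remote_code=True,aggregation_strategy="simple")
print(nlp("全学年にわたって小学校の国語の教科書に挿し絵が用いられている"))
```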
config.json ADDED
@@ -0,0 +1,374 @@
1
+ {
2
+ "architectures": [
3
+ "GPTNeoXForTokenClassification"
4
+ ],
5
+ "attention_bias": true,
6
+ "attention_dropout": 0.0,
7
+ "bos_token_id": 0,
8
+ "classifier_dropout": 0.1,
9
+ "custom_pipelines": {
10
+ "upos": {
11
+ "impl": "ud.BellmanFordTokenClassificationPipeline",
12
+ "pt": "AutoModelForTokenClassification"
13
+ },
14
+ "universal-dependencies": {
15
+ "impl": "ud.UniversalDependenciesCausalPipeline",
16
+ "pt": "AutoModelForTokenClassification"
17
+ }
18
+ },
19
+ "eos_token_id": 0,
20
+ "hidden_act": "gelu",
21
+ "hidden_dropout": 0.0,
22
+ "hidden_size": 2048,
23
+ "id2label": {
24
+ "0": "ADJ",
25
+ "1": "ADJ|l-acl",
26
+ "2": "ADJ|l-advcl",
27
+ "3": "ADJ|l-amod",
28
+ "4": "ADJ|l-ccomp",
29
+ "5": "ADJ|l-csubj",
30
+ "6": "ADJ|l-csubj:outer",
31
+ "7": "ADJ|l-nmod",
32
+ "8": "ADJ|l-nsubj",
33
+ "9": "ADJ|l-obj",
34
+ "10": "ADJ|l-obl",
35
+ "11": "ADJ|r-acl",
36
+ "12": "ADJ|r-amod",
37
+ "13": "ADJ|r-dep",
38
+ "14": "ADJ|root",
39
+ "15": "ADP",
40
+ "16": "ADP|l-case",
41
+ "17": "ADP|r-case",
42
+ "18": "ADP|r-fixed",
43
+ "19": "ADV",
44
+ "20": "ADV|l-advcl",
45
+ "21": "ADV|l-advmod",
46
+ "22": "ADV|l-obj",
47
+ "23": "ADV|r-dep",
48
+ "24": "ADV|root",
49
+ "25": "AUX",
50
+ "26": "AUX|Polarity=Neg",
51
+ "27": "AUX|Polarity=Neg|r-aux",
52
+ "28": "AUX|Polarity=Neg|r-fixed",
53
+ "29": "AUX|r-aux",
54
+ "30": "AUX|r-cop",
55
+ "31": "AUX|r-fixed",
56
+ "32": "AUX|root",
57
+ "33": "B-ADJ",
58
+ "34": "B-ADP",
59
+ "35": "B-ADV",
60
+ "36": "B-AUX",
61
+ "37": "B-AUX|Polarity=Neg",
62
+ "38": "B-CCONJ",
63
+ "39": "B-DET",
64
+ "40": "B-INTJ",
65
+ "41": "B-NOUN",
66
+ "42": "B-NOUN|Polarity=Neg",
67
+ "43": "B-NUM",
68
+ "44": "B-PART",
69
+ "45": "B-PRON",
70
+ "46": "B-PROPN",
71
+ "47": "B-PUNCT",
72
+ "48": "B-SCONJ",
73
+ "49": "B-SYM",
74
+ "50": "B-VERB",
75
+ "51": "B-X",
76
+ "52": "CCONJ",
77
+ "53": "CCONJ|l-cc",
78
+ "54": "CCONJ|r-cc",
79
+ "55": "DET",
80
+ "56": "DET|l-det",
81
+ "57": "I-ADJ",
82
+ "58": "I-ADP",
83
+ "59": "I-ADV",
84
+ "60": "I-AUX",
85
+ "61": "I-AUX|Polarity=Neg",
86
+ "62": "I-CCONJ",
87
+ "63": "I-DET",
88
+ "64": "I-INTJ",
89
+ "65": "I-NOUN",
90
+ "66": "I-NOUN|Polarity=Neg",
91
+ "67": "I-NUM",
92
+ "68": "I-PART",
93
+ "69": "I-PRON",
94
+ "70": "I-PROPN",
95
+ "71": "I-PUNCT",
96
+ "72": "I-SCONJ",
97
+ "73": "I-SYM",
98
+ "74": "I-VERB",
99
+ "75": "I-X",
100
+ "76": "INTJ",
101
+ "77": "INTJ|l-discourse",
102
+ "78": "INTJ|r-discourse",
103
+ "79": "INTJ|root",
104
+ "80": "NOUN",
105
+ "81": "NOUN|Polarity=Neg",
106
+ "82": "NOUN|Polarity=Neg|l-obl",
107
+ "83": "NOUN|Polarity=Neg|root",
108
+ "84": "NOUN|l-acl",
109
+ "85": "NOUN|l-advcl",
110
+ "86": "NOUN|l-ccomp",
111
+ "87": "NOUN|l-compound",
112
+ "88": "NOUN|l-csubj",
113
+ "89": "NOUN|l-csubj:outer",
114
+ "90": "NOUN|l-nmod",
115
+ "91": "NOUN|l-nsubj",
116
+ "92": "NOUN|l-nsubj:outer",
117
+ "93": "NOUN|l-obj",
118
+ "94": "NOUN|l-obl",
119
+ "95": "NOUN|r-compound",
120
+ "96": "NOUN|r-nmod",
121
+ "97": "NOUN|r-nsubj",
122
+ "98": "NOUN|root",
123
+ "99": "NUM",
124
+ "100": "NUM|l-advcl",
125
+ "101": "NUM|l-compound",
126
+ "102": "NUM|l-nmod",
127
+ "103": "NUM|l-nsubj",
128
+ "104": "NUM|l-nsubj:outer",
129
+ "105": "NUM|l-nummod",
130
+ "106": "NUM|l-obj",
131
+ "107": "NUM|l-obl",
132
+ "108": "NUM|r-compound",
133
+ "109": "NUM|root",
134
+ "110": "PART",
135
+ "111": "PART|l-mark",
136
+ "112": "PART|r-mark",
137
+ "113": "PRON",
138
+ "114": "PRON|l-acl",
139
+ "115": "PRON|l-advcl",
140
+ "116": "PRON|l-nmod",
141
+ "117": "PRON|l-nsubj",
142
+ "118": "PRON|l-nsubj:outer",
143
+ "119": "PRON|l-obj",
144
+ "120": "PRON|l-obl",
145
+ "121": "PRON|root",
146
+ "122": "PROPN",
147
+ "123": "PROPN|l-acl",
148
+ "124": "PROPN|l-advcl",
149
+ "125": "PROPN|l-compound",
150
+ "126": "PROPN|l-nmod",
151
+ "127": "PROPN|l-nsubj",
152
+ "128": "PROPN|l-nsubj:outer",
153
+ "129": "PROPN|l-obj",
154
+ "130": "PROPN|l-obl",
155
+ "131": "PROPN|r-compound",
156
+ "132": "PROPN|r-nmod",
157
+ "133": "PROPN|root",
158
+ "134": "PUNCT",
159
+ "135": "PUNCT|l-punct",
160
+ "136": "PUNCT|r-punct",
161
+ "137": "SCONJ",
162
+ "138": "SCONJ|l-dep",
163
+ "139": "SCONJ|r-fixed",
164
+ "140": "SCONJ|r-mark",
165
+ "141": "SYM",
166
+ "142": "SYM|l-compound",
167
+ "143": "SYM|l-dep",
168
+ "144": "SYM|l-nmod",
169
+ "145": "SYM|l-obl",
170
+ "146": "SYM|r-compound",
171
+ "147": "SYM|r-dep",
172
+ "148": "VERB",
173
+ "149": "VERB|l-acl",
174
+ "150": "VERB|l-advcl",
175
+ "151": "VERB|l-ccomp",
176
+ "152": "VERB|l-compound",
177
+ "153": "VERB|l-csubj",
178
+ "154": "VERB|l-csubj:outer",
179
+ "155": "VERB|l-nmod",
180
+ "156": "VERB|l-obj",
181
+ "157": "VERB|l-obl",
182
+ "158": "VERB|r-acl",
183
+ "159": "VERB|r-advcl",
184
+ "160": "VERB|r-compound",
185
+ "161": "VERB|root",
186
+ "162": "X",
187
+ "163": "X|l-nmod",
188
+ "164": "X|r-dep"
189
+ },
190
+ "initializer_range": 0.02,
191
+ "intermediate_size": 8192,
192
+ "label2id": {
193
+ "ADJ": 0,
194
+ "ADJ|l-acl": 1,
195
+ "ADJ|l-advcl": 2,
196
+ "ADJ|l-amod": 3,
197
+ "ADJ|l-ccomp": 4,
198
+ "ADJ|l-csubj": 5,
199
+ "ADJ|l-csubj:outer": 6,
200
+ "ADJ|l-nmod": 7,
201
+ "ADJ|l-nsubj": 8,
202
+ "ADJ|l-obj": 9,
203
+ "ADJ|l-obl": 10,
204
+ "ADJ|r-acl": 11,
205
+ "ADJ|r-amod": 12,
206
+ "ADJ|r-dep": 13,
207
+ "ADJ|root": 14,
208
+ "ADP": 15,
209
+ "ADP|l-case": 16,
210
+ "ADP|r-case": 17,
211
+ "ADP|r-fixed": 18,
212
+ "ADV": 19,
213
+ "ADV|l-advcl": 20,
214
+ "ADV|l-advmod": 21,
215
+ "ADV|l-obj": 22,
216
+ "ADV|r-dep": 23,
217
+ "ADV|root": 24,
218
+ "AUX": 25,
219
+ "AUX|Polarity=Neg": 26,
220
+ "AUX|Polarity=Neg|r-aux": 27,
221
+ "AUX|Polarity=Neg|r-fixed": 28,
222
+ "AUX|r-aux": 29,
223
+ "AUX|r-cop": 30,
224
+ "AUX|r-fixed": 31,
225
+ "AUX|root": 32,
226
+ "B-ADJ": 33,
227
+ "B-ADP": 34,
228
+ "B-ADV": 35,
229
+ "B-AUX": 36,
230
+ "B-AUX|Polarity=Neg": 37,
231
+ "B-CCONJ": 38,
232
+ "B-DET": 39,
233
+ "B-INTJ": 40,
234
+ "B-NOUN": 41,
235
+ "B-NOUN|Polarity=Neg": 42,
236
+ "B-NUM": 43,
237
+ "B-PART": 44,
238
+ "B-PRON": 45,
239
+ "B-PROPN": 46,
240
+ "B-PUNCT": 47,
241
+ "B-SCONJ": 48,
242
+ "B-SYM": 49,
243
+ "B-VERB": 50,
244
+ "B-X": 51,
245
+ "CCONJ": 52,
246
+ "CCONJ|l-cc": 53,
247
+ "CCONJ|r-cc": 54,
248
+ "DET": 55,
249
+ "DET|l-det": 56,
250
+ "I-ADJ": 57,
251
+ "I-ADP": 58,
252
+ "I-ADV": 59,
253
+ "I-AUX": 60,
254
+ "I-AUX|Polarity=Neg": 61,
255
+ "I-CCONJ": 62,
256
+ "I-DET": 63,
257
+ "I-INTJ": 64,
258
+ "I-NOUN": 65,
259
+ "I-NOUN|Polarity=Neg": 66,
260
+ "I-NUM": 67,
261
+ "I-PART": 68,
262
+ "I-PRON": 69,
263
+ "I-PROPN": 70,
264
+ "I-PUNCT": 71,
265
+ "I-SCONJ": 72,
266
+ "I-SYM": 73,
267
+ "I-VERB": 74,
268
+ "I-X": 75,
269
+ "INTJ": 76,
270
+ "INTJ|l-discourse": 77,
271
+ "INTJ|r-discourse": 78,
272
+ "INTJ|root": 79,
273
+ "NOUN": 80,
274
+ "NOUN|Polarity=Neg": 81,
275
+ "NOUN|Polarity=Neg|l-obl": 82,
276
+ "NOUN|Polarity=Neg|root": 83,
277
+ "NOUN|l-acl": 84,
278
+ "NOUN|l-advcl": 85,
279
+ "NOUN|l-ccomp": 86,
280
+ "NOUN|l-compound": 87,
281
+ "NOUN|l-csubj": 88,
282
+ "NOUN|l-csubj:outer": 89,
283
+ "NOUN|l-nmod": 90,
284
+ "NOUN|l-nsubj": 91,
285
+ "NOUN|l-nsubj:outer": 92,
286
+ "NOUN|l-obj": 93,
287
+ "NOUN|l-obl": 94,
288
+ "NOUN|r-compound": 95,
289
+ "NOUN|r-nmod": 96,
290
+ "NOUN|r-nsubj": 97,
291
+ "NOUN|root": 98,
292
+ "NUM": 99,
293
+ "NUM|l-advcl": 100,
294
+ "NUM|l-compound": 101,
295
+ "NUM|l-nmod": 102,
296
+ "NUM|l-nsubj": 103,
297
+ "NUM|l-nsubj:outer": 104,
298
+ "NUM|l-nummod": 105,
299
+ "NUM|l-obj": 106,
300
+ "NUM|l-obl": 107,
301
+ "NUM|r-compound": 108,
302
+ "NUM|root": 109,
303
+ "PART": 110,
304
+ "PART|l-mark": 111,
305
+ "PART|r-mark": 112,
306
+ "PRON": 113,
307
+ "PRON|l-acl": 114,
308
+ "PRON|l-advcl": 115,
309
+ "PRON|l-nmod": 116,
310
+ "PRON|l-nsubj": 117,
311
+ "PRON|l-nsubj:outer": 118,
312
+ "PRON|l-obj": 119,
313
+ "PRON|l-obl": 120,
314
+ "PRON|root": 121,
315
+ "PROPN": 122,
316
+ "PROPN|l-acl": 123,
317
+ "PROPN|l-advcl": 124,
318
+ "PROPN|l-compound": 125,
319
+ "PROPN|l-nmod": 126,
320
+ "PROPN|l-nsubj": 127,
321
+ "PROPN|l-nsubj:outer": 128,
322
+ "PROPN|l-obj": 129,
323
+ "PROPN|l-obl": 130,
324
+ "PROPN|r-compound": 131,
325
+ "PROPN|r-nmod": 132,
326
+ "PROPN|root": 133,
327
+ "PUNCT": 134,
328
+ "PUNCT|l-punct": 135,
329
+ "PUNCT|r-punct": 136,
330
+ "SCONJ": 137,
331
+ "SCONJ|l-dep": 138,
332
+ "SCONJ|r-fixed": 139,
333
+ "SCONJ|r-mark": 140,
334
+ "SYM": 141,
335
+ "SYM|l-compound": 142,
336
+ "SYM|l-dep": 143,
337
+ "SYM|l-nmod": 144,
338
+ "SYM|l-obl": 145,
339
+ "SYM|r-compound": 146,
340
+ "SYM|r-dep": 147,
341
+ "VERB": 148,
342
+ "VERB|l-acl": 149,
343
+ "VERB|l-advcl": 150,
344
+ "VERB|l-ccomp": 151,
345
+ "VERB|l-compound": 152,
346
+ "VERB|l-csubj": 153,
347
+ "VERB|l-csubj:outer": 154,
348
+ "VERB|l-nmod": 155,
349
+ "VERB|l-obj": 156,
350
+ "VERB|l-obl": 157,
351
+ "VERB|r-acl": 158,
352
+ "VERB|r-advcl": 159,
353
+ "VERB|r-compound": 160,
354
+ "VERB|root": 161,
355
+ "X": 162,
356
+ "X|l-nmod": 163,
357
+ "X|r-dep": 164
358
+ },
359
+ "layer_norm_eps": 1e-05,
360
+ "max_position_embeddings": 2048,
361
+ "model_type": "gpt_neox",
362
+ "num_attention_heads": 16,
363
+ "num_hidden_layers": 24,
364
+ "rope_scaling": null,
365
+ "rotary_emb_base": 10000,
366
+ "rotary_pct": 1.0,
367
+ "tie_word_embeddings": false,
368
+ "tokenizer_class": "PreTrainedTokenizerFast",
369
+ "torch_dtype": "float32",
370
+ "transformers_version": "4.43.1",
371
+ "use_cache": true,
372
+ "use_parallel_residual": false,
373
+ "vocab_size": 52096
374
+ }
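The `id2label` entries above are composite labels: a UPOS tag, an optional FEATS string such as `Polarity=Neg`, and either nothing, `root`, or an `l-`/`r-`-prefixed dependency relation marking the arc direction. A small sketch of how such a label decomposes (the example label is taken from the map above; the splitting mirrors what ud.py, added below, does):

```
label="NOUN|l-nsubj"                      # one entry from id2label above
parts=label.split("|")
upos=parts[0]                             # "NOUN"
deprel=parts[-1]                          # "root", or "l-"/"r-" plus a relation
direction="left" if deprel.startswith("l-") else "right" if deprel.startswith("r-") else None
print(upos,deprel[2:] if direction else deprel,direction)
```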
maker.sh ADDED
@@ -0,0 +1,152 @@
+ #! /bin/sh
+ S=cyberagent/open-calm-1b
+ T=KoichiYasuoka/open-calm-1b-ud-causal
+ U=https://github.com/UniversalDependencies/UD_Japanese-GSDLUW
+ D=`basename $U`
+ test -d $D || git clone --depth=1 $U
+ for F in train dev test
+ do cp $D/*-$F.conllu $F.conllu
+ done
+
+ TMPA=./maker$$a.py
+ ( echo '#! /usr/bin/python3'
+ echo 'src="'$S'"'
+ cat << 'EOF'
+ import json,unicodedata
+ from transformers import AutoTokenizer
+ tkz=AutoTokenizer.from_pretrained(src,cls_token="<|endoftext|>",sep_token="<|endoftext|>",mask_token="<|endoftext|>",model_max_length=2048)
+ tkz.save_pretrained("tmpdir")
+ d=json.loads(tkz.backend_tokenizer.to_str())
+ form=set()
+ with open("train.conllu","r",encoding="utf-8") as r:
+   for s in r:
+     w=s.split("\t")
+     if len(w)==10 and w[0].isdecimal():
+       form.add(w[1])
+ m=[t for t in d["model"]["merges"] if len(t)<5]
+ for i in range(len(tkz)):
+   w=tkz.decode(i)
+   if len(w)==2 and w in form and not unicodedata.name(w[0]).startswith("HIRAGANA"):
+     k=tkz([w[0],w[1]],add_special_tokens=False)["input_ids"]
+     if len(k[0])==1 and len(k[1])==1:
+       m.append(" ".join(tkz.convert_ids_to_tokens([k[0][0],k[1][0]])))
+ d["model"]["merges"]=m
+ tkz.backend_tokenizer.from_str(json.dumps(d)).save("tmpdir/tokenizer.json")
+ EOF
+ ) > $TMPA
+ chmod 755 $TMPA
+ $TMPA
+
+ TMPB=./maker$$b.py
+ ( echo '#! /usr/bin/env deepspeed'
+ echo 'src="'$S'"'
+ echo 'tgt="'$T'"'
+ cat << 'EOF'
+ from transformers import PreTrainedTokenizerFast,AutoConfig,GPTNeoXForTokenClassification,DefaultDataCollator,TrainingArguments,Trainer
+
+ class UDCausalDataset(object):
+   def __init__(self,conllu,tokenizer,embeddings=None):
+     self.conllu=open(conllu,"r",encoding="utf-8")
+     self.tokenizer=tokenizer
+     self.embeddings=embeddings
+     self.max_tokens=3
+     self.seeks=[(0,0)]
+     label=set(["SYM"])
+     dep=set()
+     s=self.conllu.readline()
+     while s!="":
+       if s=="\n":
+         self.seeks.append((self.conllu.tell(),0))
+       else:
+         w=s.split("\t")
+         if len(w)==10:
+           if w[0].isdecimal():
+             p=w[3] if w[5]=="_" else w[3]+"|"+w[5]
+             label.add(p)
+             dep.add(p+("|" if w[6]=="0" else "|l-" if int(w[0])<int(w[6]) else "|r-")+w[7])
+             self.seeks.append((self.seeks[-1][0],int(w[0])))
+             self.max_tokens=max(self.max_tokens,int(w[0])*2+1)
+       s=self.conllu.readline()
+     lid={}
+     for i,l in enumerate(sorted(label)):
+       lid[l],lid["B-"+l],lid["I-"+l]=i*3,i*3+1,i*3+2
+     for i,d in enumerate(sorted(dep),len(lid)):
+       lid[d]=i
+     self.label2id=lid
+   def __call__(*args):
+     lid={l:i for i,l in enumerate(sorted(set(sum([list(t.label2id) for t in args],[]))))}
+     for t in args:
+       t.label2id=lid
+     return lid
+   def __del__(self):
+     self.conllu.close()
+   __len__=lambda self:len(self.seeks)-1
+   def __getitem__(self,i):
+     s,t=self.seeks[i]
+     self.conllu.seek(s)
+     form,upos,deps,w=[],[],[],[""]
+     while w[0]!="\n":
+       w=self.conllu.readline().split("\t")
+       if len(w)==10:
+         form.append(w[1])
+         if w[0].isdecimal():
+           upos.append(w[3] if w[5]=="_" else w[3]+"|"+w[5])
+           deps.append((int(w[6]),w[7]))
+     v=self.tokenizer(form,add_special_tokens=False)
+     if t==0:
+       i,u=[],[]
+       for j,(x,y) in enumerate(zip(v["input_ids"],upos)):
+         if x!=[]:
+           i+=x
+           u+=[y] if len(x)==1 else ["B-"+y]+["I-"+y]*(len(x)-1)
+       emb=self.embeddings
+       pad=self.tokenizer.pad_token_id
+     else:
+       import torch
+       m=[]
+       for x in v["input_ids"]:
+         if x==[]:
+           m.append(self.embeddings[self.tokenizer.unk_token_id,:])
+         else:
+           m.append(self.embeddings[x,:].sum(axis=0))
+       m.append(self.embeddings[self.tokenizer.sep_token_id,:])
+       m.append(self.embeddings[self.tokenizer.pad_token_id,:])
+       emb=torch.stack(m)
+       i,u=list(range(len(upos)+1)),upos+["SYM"]
+       i.append(t-1)
+       k,d=deps[t-1]
+       u.append(upos[t-1]+"|"+d if k==0 else upos[t-1])
+       for j in range(t,len(upos)):
+         i.append(j)
+         a,b=deps[j]
+         u.append(upos[j]+"|r-"+b if a==t else upos[t-1]+"|l-"+d if j+1==k else upos[j])
+       pad=-1
+     j=self.max_tokens-len(i)
+     if j>0:
+       ids=i+[pad]*j
+       upos=u+["SYM"]*j
+     else:
+       ids=i[0:self.max_tokens]
+       upos=u[0:self.max_tokens]
+     return {"inputs_embeds":emb[ids,:],"labels":[self.label2id[p] for p in upos]}
+
+ tkz=PreTrainedTokenizerFast.from_pretrained("tmpdir")
+ trainDS=UDCausalDataset("train.conllu",tkz)
+ devDS=UDCausalDataset("dev.conllu",tkz)
+ testDS=UDCausalDataset("test.conllu",tkz)
+ lid=trainDS(devDS,testDS)
+ cfg=AutoConfig.from_pretrained(src,num_labels=len(lid),label2id=lid,id2label={i:l for l,i in lid.items()},ignore_mismatched_sizes=True)
+ mdl=GPTNeoXForTokenClassification.from_pretrained(src,config=cfg,ignore_mismatched_sizes=True)
+ trainDS.embeddings=mdl.get_input_embeddings().weight
+ trainDS.max_tokens=min(trainDS.max_tokens,cfg.max_position_embeddings)
+ dsp={"fp16":{"enabled":"auto"},"optimizer":{"type":"AdamW"},"scheduler":{"type":"WarmupLR","params":{}},"train_batch_size":"auto","train_micro_batch_size_per_gpu":"auto","zero_optimization":{"stage":3,"offload_optimizer":{"device":"cpu","pin_memory":True},"offload_param":{"device":"cpu","pin_memory":True},"overlap_comm":True,"contiguous_gradients":True,"reduce_bucket_size":"auto","stage3_prefetch_bucket_size":"auto","stage3_param_persistence_threshold":"auto","stage3_gather_16bit_weights_on_model_save":True}}
+ arg=TrainingArguments(num_train_epochs=3,per_device_train_batch_size=16,deepspeed=dsp,output_dir=tgt,overwrite_output_dir=True,save_total_limit=2,learning_rate=5e-05,warmup_ratio=0.1,save_safetensors=False)
+ trn=Trainer(args=arg,data_collator=DefaultDataCollator(),model=mdl,train_dataset=trainDS)
+ trn.train()
+ trn.save_model(tgt)
+ tkz.save_pretrained(tgt)
+ EOF
+ ) > $TMPB
+ chmod 755 $TMPB
+ $TMPB
+ exit
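The first embedded script above rewrites the tokenizer's BPE merge list so that two-character non-hiragana word forms found in train.conllu stay single tokens, and saves the result to ./tmpdir; the second script then fine-tunes GPTNeoXForTokenClassification on the composite POS/dependency labels with DeepSpeed. A quick sanity check of the refined tokenizer might look like the sketch below (the example words are only illustrative and it assumes maker.sh has already been run):

```
from transformers import PreTrainedTokenizerFast
tkz=PreTrainedTokenizerFast.from_pretrained("tmpdir")
for w in ["挿絵","教科書"]:
    print(w,tkz.tokenize(w))  # two-character forms seen in train.conllu should come back as one token
```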
pytorch_model-00001-of-00002.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b22c53a8187d1b793b822a1cb54a0b770fd3c89210db9a1e39a03e7ab03b23fa
+ size 4992712735
pytorch_model-00002-of-00002.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:10dae3f446d16f3c38d6d7aafbc8160a310b17e2d58adffa7521ba18646dbad7
+ size 269925742
pytorch_model.bin.index.json ADDED
@@ -0,0 +1,300 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 5262533268
4
+ },
5
+ "weight_map": {
6
+ "classifier.bias": "pytorch_model-00002-of-00002.bin",
7
+ "classifier.weight": "pytorch_model-00002-of-00002.bin",
8
+ "gpt_neox.embed_in.weight": "pytorch_model-00001-of-00002.bin",
9
+ "gpt_neox.final_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
10
+ "gpt_neox.final_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
11
+ "gpt_neox.layers.0.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
12
+ "gpt_neox.layers.0.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
13
+ "gpt_neox.layers.0.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
14
+ "gpt_neox.layers.0.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
15
+ "gpt_neox.layers.0.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
16
+ "gpt_neox.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
17
+ "gpt_neox.layers.0.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
18
+ "gpt_neox.layers.0.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
19
+ "gpt_neox.layers.0.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
20
+ "gpt_neox.layers.0.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
21
+ "gpt_neox.layers.0.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
22
+ "gpt_neox.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
23
+ "gpt_neox.layers.1.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
24
+ "gpt_neox.layers.1.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
25
+ "gpt_neox.layers.1.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
26
+ "gpt_neox.layers.1.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
27
+ "gpt_neox.layers.1.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
28
+ "gpt_neox.layers.1.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
29
+ "gpt_neox.layers.1.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
30
+ "gpt_neox.layers.1.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
31
+ "gpt_neox.layers.1.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
32
+ "gpt_neox.layers.1.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
33
+ "gpt_neox.layers.1.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
34
+ "gpt_neox.layers.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
35
+ "gpt_neox.layers.10.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
36
+ "gpt_neox.layers.10.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
37
+ "gpt_neox.layers.10.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
38
+ "gpt_neox.layers.10.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
39
+ "gpt_neox.layers.10.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
40
+ "gpt_neox.layers.10.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
41
+ "gpt_neox.layers.10.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
42
+ "gpt_neox.layers.10.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
43
+ "gpt_neox.layers.10.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
44
+ "gpt_neox.layers.10.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
45
+ "gpt_neox.layers.10.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
46
+ "gpt_neox.layers.10.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
47
+ "gpt_neox.layers.11.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
48
+ "gpt_neox.layers.11.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
49
+ "gpt_neox.layers.11.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
50
+ "gpt_neox.layers.11.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
51
+ "gpt_neox.layers.11.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
52
+ "gpt_neox.layers.11.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
53
+ "gpt_neox.layers.11.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
54
+ "gpt_neox.layers.11.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
55
+ "gpt_neox.layers.11.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
56
+ "gpt_neox.layers.11.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
57
+ "gpt_neox.layers.11.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
58
+ "gpt_neox.layers.11.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
59
+ "gpt_neox.layers.12.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
60
+ "gpt_neox.layers.12.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
61
+ "gpt_neox.layers.12.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
62
+ "gpt_neox.layers.12.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
63
+ "gpt_neox.layers.12.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
64
+ "gpt_neox.layers.12.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
65
+ "gpt_neox.layers.12.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
66
+ "gpt_neox.layers.12.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
67
+ "gpt_neox.layers.12.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
68
+ "gpt_neox.layers.12.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
69
+ "gpt_neox.layers.12.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
70
+ "gpt_neox.layers.12.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
71
+ "gpt_neox.layers.13.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
72
+ "gpt_neox.layers.13.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
73
+ "gpt_neox.layers.13.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
74
+ "gpt_neox.layers.13.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
75
+ "gpt_neox.layers.13.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
76
+ "gpt_neox.layers.13.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
77
+ "gpt_neox.layers.13.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
78
+ "gpt_neox.layers.13.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
79
+ "gpt_neox.layers.13.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
80
+ "gpt_neox.layers.13.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
81
+ "gpt_neox.layers.13.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
82
+ "gpt_neox.layers.13.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
83
+ "gpt_neox.layers.14.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
84
+ "gpt_neox.layers.14.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
85
+ "gpt_neox.layers.14.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
86
+ "gpt_neox.layers.14.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
87
+ "gpt_neox.layers.14.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
88
+ "gpt_neox.layers.14.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
89
+ "gpt_neox.layers.14.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
90
+ "gpt_neox.layers.14.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
91
+ "gpt_neox.layers.14.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
92
+ "gpt_neox.layers.14.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
93
+ "gpt_neox.layers.14.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
94
+ "gpt_neox.layers.14.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
95
+ "gpt_neox.layers.15.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
96
+ "gpt_neox.layers.15.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
97
+ "gpt_neox.layers.15.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
98
+ "gpt_neox.layers.15.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
99
+ "gpt_neox.layers.15.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
100
+ "gpt_neox.layers.15.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
101
+ "gpt_neox.layers.15.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
102
+ "gpt_neox.layers.15.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
103
+ "gpt_neox.layers.15.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
104
+ "gpt_neox.layers.15.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
105
+ "gpt_neox.layers.15.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
106
+ "gpt_neox.layers.15.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
107
+ "gpt_neox.layers.16.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
108
+ "gpt_neox.layers.16.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
109
+ "gpt_neox.layers.16.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
110
+ "gpt_neox.layers.16.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
111
+ "gpt_neox.layers.16.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
112
+ "gpt_neox.layers.16.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
113
+ "gpt_neox.layers.16.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
114
+ "gpt_neox.layers.16.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
115
+ "gpt_neox.layers.16.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
116
+ "gpt_neox.layers.16.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
117
+ "gpt_neox.layers.16.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
118
+ "gpt_neox.layers.16.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
119
+ "gpt_neox.layers.17.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
120
+ "gpt_neox.layers.17.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
121
+ "gpt_neox.layers.17.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
122
+ "gpt_neox.layers.17.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
123
+ "gpt_neox.layers.17.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
124
+ "gpt_neox.layers.17.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
125
+ "gpt_neox.layers.17.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
126
+ "gpt_neox.layers.17.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
127
+ "gpt_neox.layers.17.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
128
+ "gpt_neox.layers.17.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
129
+ "gpt_neox.layers.17.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
130
+ "gpt_neox.layers.17.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
131
+ "gpt_neox.layers.18.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
132
+ "gpt_neox.layers.18.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
133
+ "gpt_neox.layers.18.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
134
+ "gpt_neox.layers.18.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
135
+ "gpt_neox.layers.18.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
136
+ "gpt_neox.layers.18.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
137
+ "gpt_neox.layers.18.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
138
+ "gpt_neox.layers.18.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
139
+ "gpt_neox.layers.18.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
140
+ "gpt_neox.layers.18.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
141
+ "gpt_neox.layers.18.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
142
+ "gpt_neox.layers.18.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
143
+ "gpt_neox.layers.19.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
144
+ "gpt_neox.layers.19.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
145
+ "gpt_neox.layers.19.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
146
+ "gpt_neox.layers.19.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
147
+ "gpt_neox.layers.19.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
148
+ "gpt_neox.layers.19.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
149
+ "gpt_neox.layers.19.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
150
+ "gpt_neox.layers.19.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
151
+ "gpt_neox.layers.19.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
152
+ "gpt_neox.layers.19.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
153
+ "gpt_neox.layers.19.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
154
+ "gpt_neox.layers.19.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
155
+ "gpt_neox.layers.2.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
156
+ "gpt_neox.layers.2.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
157
+ "gpt_neox.layers.2.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
158
+ "gpt_neox.layers.2.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
159
+ "gpt_neox.layers.2.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
160
+ "gpt_neox.layers.2.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
161
+ "gpt_neox.layers.2.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
162
+ "gpt_neox.layers.2.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
163
+ "gpt_neox.layers.2.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
164
+ "gpt_neox.layers.2.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
165
+ "gpt_neox.layers.2.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
166
+ "gpt_neox.layers.2.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
167
+ "gpt_neox.layers.20.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
168
+ "gpt_neox.layers.20.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
169
+ "gpt_neox.layers.20.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
170
+ "gpt_neox.layers.20.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
171
+ "gpt_neox.layers.20.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
172
+ "gpt_neox.layers.20.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
173
+ "gpt_neox.layers.20.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
174
+ "gpt_neox.layers.20.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
175
+ "gpt_neox.layers.20.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
176
+ "gpt_neox.layers.20.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
177
+ "gpt_neox.layers.20.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
178
+ "gpt_neox.layers.20.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
179
+ "gpt_neox.layers.21.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
180
+ "gpt_neox.layers.21.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
181
+ "gpt_neox.layers.21.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
182
+ "gpt_neox.layers.21.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
183
+ "gpt_neox.layers.21.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
184
+ "gpt_neox.layers.21.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
185
+ "gpt_neox.layers.21.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
186
+ "gpt_neox.layers.21.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
187
+ "gpt_neox.layers.21.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
188
+ "gpt_neox.layers.21.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
189
+ "gpt_neox.layers.21.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
190
+ "gpt_neox.layers.21.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
191
+ "gpt_neox.layers.22.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
192
+ "gpt_neox.layers.22.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
193
+ "gpt_neox.layers.22.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
194
+ "gpt_neox.layers.22.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
195
+ "gpt_neox.layers.22.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
196
+ "gpt_neox.layers.22.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
197
+ "gpt_neox.layers.22.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00002.bin",
198
+ "gpt_neox.layers.22.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00002.bin",
199
+ "gpt_neox.layers.22.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
200
+ "gpt_neox.layers.22.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
201
+ "gpt_neox.layers.22.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
202
+ "gpt_neox.layers.22.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
203
+ "gpt_neox.layers.23.attention.dense.bias": "pytorch_model-00002-of-00002.bin",
204
+ "gpt_neox.layers.23.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
205
+ "gpt_neox.layers.23.attention.query_key_value.bias": "pytorch_model-00002-of-00002.bin",
206
+ "gpt_neox.layers.23.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
207
+ "gpt_neox.layers.23.input_layernorm.bias": "pytorch_model-00002-of-00002.bin",
208
+ "gpt_neox.layers.23.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
209
+ "gpt_neox.layers.23.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00002.bin",
210
+ "gpt_neox.layers.23.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00002.bin",
211
+ "gpt_neox.layers.23.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00002.bin",
212
+ "gpt_neox.layers.23.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00002.bin",
213
+ "gpt_neox.layers.23.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
214
+ "gpt_neox.layers.23.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
215
+ "gpt_neox.layers.3.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
216
+ "gpt_neox.layers.3.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
217
+ "gpt_neox.layers.3.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
218
+ "gpt_neox.layers.3.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
219
+ "gpt_neox.layers.3.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
220
+ "gpt_neox.layers.3.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
221
+ "gpt_neox.layers.3.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
222
+ "gpt_neox.layers.3.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
223
+ "gpt_neox.layers.3.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
224
+ "gpt_neox.layers.3.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
225
+ "gpt_neox.layers.3.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
226
+ "gpt_neox.layers.3.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
227
+ "gpt_neox.layers.4.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
228
+ "gpt_neox.layers.4.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
229
+ "gpt_neox.layers.4.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
230
+ "gpt_neox.layers.4.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
231
+ "gpt_neox.layers.4.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
232
+ "gpt_neox.layers.4.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
233
+ "gpt_neox.layers.4.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
234
+ "gpt_neox.layers.4.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
235
+ "gpt_neox.layers.4.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
236
+ "gpt_neox.layers.4.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
237
+ "gpt_neox.layers.4.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
238
+ "gpt_neox.layers.4.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
239
+ "gpt_neox.layers.5.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
240
+ "gpt_neox.layers.5.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
241
+ "gpt_neox.layers.5.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
242
+ "gpt_neox.layers.5.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
243
+ "gpt_neox.layers.5.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
244
+ "gpt_neox.layers.5.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
245
+ "gpt_neox.layers.5.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
246
+ "gpt_neox.layers.5.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
247
+ "gpt_neox.layers.5.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
248
+ "gpt_neox.layers.5.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
249
+ "gpt_neox.layers.5.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
250
+ "gpt_neox.layers.5.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
251
+ "gpt_neox.layers.6.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
252
+ "gpt_neox.layers.6.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
253
+ "gpt_neox.layers.6.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
254
+ "gpt_neox.layers.6.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
255
+ "gpt_neox.layers.6.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
256
+ "gpt_neox.layers.6.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
257
+ "gpt_neox.layers.6.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
258
+ "gpt_neox.layers.6.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
259
+ "gpt_neox.layers.6.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
260
+ "gpt_neox.layers.6.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
261
+ "gpt_neox.layers.6.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
262
+ "gpt_neox.layers.6.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
263
+ "gpt_neox.layers.7.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
264
+ "gpt_neox.layers.7.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
265
+ "gpt_neox.layers.7.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
266
+ "gpt_neox.layers.7.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
267
+ "gpt_neox.layers.7.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
268
+ "gpt_neox.layers.7.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
269
+ "gpt_neox.layers.7.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
270
+ "gpt_neox.layers.7.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
271
+ "gpt_neox.layers.7.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
272
+ "gpt_neox.layers.7.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
273
+ "gpt_neox.layers.7.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
274
+ "gpt_neox.layers.7.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
275
+ "gpt_neox.layers.8.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
276
+ "gpt_neox.layers.8.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
277
+ "gpt_neox.layers.8.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
278
+ "gpt_neox.layers.8.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
279
+ "gpt_neox.layers.8.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
280
+ "gpt_neox.layers.8.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
281
+ "gpt_neox.layers.8.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
282
+ "gpt_neox.layers.8.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
283
+ "gpt_neox.layers.8.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
284
+ "gpt_neox.layers.8.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
285
+ "gpt_neox.layers.8.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
286
+ "gpt_neox.layers.8.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
287
+ "gpt_neox.layers.9.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
288
+ "gpt_neox.layers.9.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
289
+ "gpt_neox.layers.9.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
290
+ "gpt_neox.layers.9.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
291
+ "gpt_neox.layers.9.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
292
+ "gpt_neox.layers.9.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
293
+ "gpt_neox.layers.9.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
294
+ "gpt_neox.layers.9.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
295
+ "gpt_neox.layers.9.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
296
+ "gpt_neox.layers.9.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
297
+ "gpt_neox.layers.9.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
298
+ "gpt_neox.layers.9.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin"
299
+ }
300
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
+ {
+   "bos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "cls_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|padding|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "add_bos_token": false,
+   "add_eos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<|padding|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "mask_token": "<|endoftext|>",
+   "model_max_length": 2048,
+   "pad_token": "<|padding|>",
+   "sep_token": "<|endoftext|>",
+   "tokenizer_class": "PreTrainedTokenizerFast",
+   "unk_token": "<|endoftext|>"
+ }
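The maps above pin `sep_token` to `<|endoftext|>` (id 0) and `pad_token` to `<|padding|>` (id 1); ud.py, added next, relies on those two ids when it splices word-level embeddings. A quick check, assuming the files in this commit:

```
from transformers import AutoTokenizer
tkz=AutoTokenizer.from_pretrained("KoichiYasuoka/open-calm-1b-ud-causal")
print(tkz.sep_token_id,tkz.pad_token_id)  # expected: 0 1
```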
ud.py ADDED
@@ -0,0 +1,141 @@
+ import numpy
+ from transformers import TokenClassificationPipeline
+
+ class BellmanFordTokenClassificationPipeline(TokenClassificationPipeline):
+   def __init__(self,**kwargs):
+     super().__init__(**kwargs)
+     x=self.model.config.label2id
+     y=[k for k in x if k.startswith("B-") or not (k.startswith("I-") or k.endswith("|root") or k.find("|l-")>0 or k.find("|r-")>0)]
+     self.transition=numpy.full((len(x),len(x)),numpy.nan)
+     for k,v in x.items():
+       for j in ["I-"+k[2:]] if k.startswith("B-") else [k]+y if k.startswith("I-") else y:
+         self.transition[v,x[j]]=0
+   def check_model_type(self,supported_models):
+     pass
+   def postprocess(self,model_outputs,**kwargs):
+     if "logits" not in model_outputs:
+       return self.postprocess(model_outputs[0],**kwargs)
+     m=model_outputs["logits"][0].numpy()
+     e=numpy.exp(m-numpy.max(m,axis=-1,keepdims=True))
+     z=e/e.sum(axis=-1,keepdims=True)
+     for i in range(m.shape[0]-1,0,-1):
+       m[i-1]+=numpy.nanmax(m[i]+self.transition,axis=1)
+     k=[numpy.nanargmax(m[0]+self.transition[0])]
+     for i in range(1,m.shape[0]):
+       k.append(numpy.nanargmax(m[i]+self.transition[k[-1]]))
+     w=[{"entity":self.model.config.id2label[j],"start":s,"end":e,"score":z[i,j]} for i,((s,e),j) in enumerate(zip(model_outputs["offset_mapping"][0].tolist(),k)) if s<e]
+     if "aggregation_strategy" in kwargs and kwargs["aggregation_strategy"]!="none":
+       for i,t in reversed(list(enumerate(w))):
+         p=t.pop("entity")
+         if p.startswith("I-"):
+           w[i-1]["score"]=min(w[i-1]["score"],t["score"])
+           w[i-1]["end"]=w.pop(i)["end"]
+         elif p.startswith("B-"):
+           t["entity_group"]=p[2:]
+         else:
+           t["entity_group"]=p
+     for t in w:
+       t["text"]=model_outputs["sentence"][t["start"]:t["end"]]
+     return w
+
+ class UniversalDependenciesCausalPipeline(BellmanFordTokenClassificationPipeline):
+   def __init__(self,**kwargs):
+     kwargs["aggregation_strategy"]="simple"
+     super().__init__(**kwargs)
+     x=self.model.config.label2id
+     self.root=numpy.full((len(x)),numpy.nan)
+     self.left_arc=numpy.full((len(x)),numpy.nan)
+     self.right_arc=numpy.full((len(x)),numpy.nan)
+     for k,v in x.items():
+       if k.endswith("|root"):
+         self.root[v]=0
+       elif k.find("|l-")>0:
+         self.left_arc[v]=0
+       elif k.find("|r-")>0:
+         self.right_arc[v]=0
+   def postprocess(self,model_outputs,**kwargs):
+     import torch
+     if "logits" not in model_outputs:
+       return self.postprocess(model_outputs[0],**kwargs)
+     m=model_outputs["logits"][0].numpy()
+     for i in range(m.shape[0]-1,0,-1):
+       m[i-1]+=numpy.nanmax(m[i]+self.transition,axis=1)
+     k=[numpy.nanargmax(m[0]+self.transition[0])]
+     for i in range(1,m.shape[0]):
+       k.append(numpy.nanargmax(m[i]+self.transition[k[-1]]))
+     w=[{"entity":self.model.config.id2label[j],"start":s,"end":e} for i,((s,e),j) in enumerate(zip(model_outputs["offset_mapping"][0].tolist(),k)) if s<e]
+     for i,t in reversed(list(enumerate(w))):
+       p=t.pop("entity")
+       if p.startswith("I-"):
+         w[i-1]["end"]=max(w.pop(i)["end"],w[i-1]["end"])
+       elif i>0 and w[i-1]["end"]>w[i]["start"]:
+         w[i-1]["end"]=max(w.pop(i)["end"],w[i-1]["end"])
+       elif p.startswith("B-"):
+         t["entity_group"]=p[2:]
+       else:
+         t["entity_group"]=p
+     d=[model_outputs["sentence"][t["start"]:t["end"]] for t in w]
+     for i in range(len(d)-1,-1,-1):
+       if d[i].startswith(" "):
+         j=len(d[i])-len(d[i].lstrip())
+         d[i]=d[i].lstrip()
+         w[i]["start"]+=j
+       if d[i].endswith(" "):
+         j=len(d[i])-len(d[i].rstrip())
+         d[i]=d[i].rstrip()
+         w[i]["end"]-=j
+       if d[i].strip()=="":
+         d.pop(i)
+         w.pop(i)
+     v=self.tokenizer(d,add_special_tokens=False)
+     e=self.model.get_input_embeddings().weight
+     m=[]
+     for x in v["input_ids"]:
+       if x==[]:
+         x=[self.tokenizer.unk_token_id]
+       m.append(e[x,:].sum(axis=0))
+     m.append(e[self.tokenizer.sep_token_id,:])
+     m.append(e[self.tokenizer.pad_token_id,:])
+     m=torch.stack(m).to(self.device)
+     k=list(range(len(d)+1))
+     e=[]
+     with torch.no_grad():
+       for i in range(len(d)):
+         e.append(self.model(inputs_embeds=torch.unsqueeze(m[k+list(range(i,len(d)))+[-1]*i,:],0)).logits[0,-len(d):,:])
+     e=torch.stack(e).cpu().numpy()
+     for i in range(len(d)):
+       for j in range(i):
+         e[-j-1,-i-1],e[-i-1,-j-1]=e[-i-1,i-j]+self.left_arc,e[-i-1,i-j]+self.right_arc
+       e[-i-1,-i-1]=e[-i-1,0]+self.root
+     m,p=numpy.nanmax(e,axis=2),numpy.nanargmax(e,axis=2)
+     h=self.chu_liu_edmonds(m)
+     z=[i for i,j in enumerate(h) if i==j]
+     if len(z)>1:
+       k,h=z[numpy.nanargmax(m[z,z])],numpy.nanmin(m)-numpy.nanmax(m)
+       m[:,z]+=[[0 if j in z and (i!=j or i==k) else h for i in z] for j in range(m.shape[0])]
+       h=self.chu_liu_edmonds(m)
+     q=[self.model.config.id2label[p[j,i]].split("|") for i,j in enumerate(h)]
+     t=model_outputs["sentence"].replace("\n"," ")
+     u="# text = "+t+"\n"
+     for i,j in enumerate(d):
+       u+="\t".join([str(i+1),j,"_",q[i][0],"_","_" if len(q[i])<3 else "|".join(q[i][1:-1]),str(0 if h[i]==i else h[i]+1),"root" if q[i][-1]=="root" else q[i][-1][2:],"_","_" if i+1<len(d) and w[i]["end"]<w[i+1]["start"] else "SpaceAfter=No"])+"\n"
+     return u+"\n"
+   def chu_liu_edmonds(self,matrix):
+     h=numpy.nanargmax(matrix,axis=0)
+     x=[-1 if i==j else j for i,j in enumerate(h)]
+     for b in [lambda x,i,j:-1 if i not in x else x[i],lambda x,i,j:-1 if j<0 else x[j]]:
+       y=[]
+       while x!=y:
+         y=list(x)
+         for i,j in enumerate(x):
+           x[i]=b(x,i,j)
+       if max(x)<0:
+         return h
+     y,x=[i for i,j in enumerate(x) if j==max(x)],[i for i,j in enumerate(x) if j<max(x)]
+     z=matrix-numpy.nanmax(matrix,axis=0)
+     m=numpy.block([[z[x,:][:,x],numpy.nanmax(z[x,:][:,y],axis=1).reshape(len(x),1)],[numpy.nanmax(z[y,:][:,x],axis=0),numpy.nanmax(z[y,y])]])
+     k=[j if i==len(x) else x[j] if j<len(x) else y[numpy.nanargmax(z[y,x[i]])] for i,j in enumerate(self.chu_liu_edmonds(m))]
+     h=[j if i in y else k[x.index(i)] for i,j in enumerate(h)]
+     i=y[numpy.nanargmax(z[x[k[-1]],y] if k[-1]<len(x) else z[y,y])]
+     h[i]=x[k[-1]] if k[-1]<len(x) else i
+     return h
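`UniversalDependenciesCausalPipeline.postprocess` returns plain CoNLL-U text: a `# text = ...` comment line followed by ten tab-separated columns per word. The output can therefore be consumed without extra dependencies; a minimal sketch, assuming the files in this commit are available as KoichiYasuoka/open-calm-1b-ud-causal:

```
from transformers import pipeline
nlp=pipeline("universal-dependencies","KoichiYasuoka/open-calm-1b-ud-causal",trust_remote_code=True)
conllu=nlp("全学年にわたって小学校の国語の教科書に挿し絵が用いられている")
for line in conllu.splitlines():
    if line and not line.startswith("#"):
        idx,form,lemma,upos,xpos,feats,head,deprel,deps,misc=line.split("\t")
        print(idx,form,upos,head,deprel)  # ID, FORM, UPOS, HEAD, DEPREL
```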