KoichiYasuoka committed on
Commit 3b01181
1 Parent(s): eb987b2

initial release

README.md ADDED
---
language:
- "ja"
tags:
- "japanese"
- "pos"
- "dependency-parsing"
datasets:
- "universal_dependencies"
license: "cc-by-sa-4.0"
pipeline_tag: "token-classification"
widget:
- text: "全学年にわたって小学校の国語の教科書に挿し絵が用いられている"
---

# roberta-base-japanese-aozora-ud-goeswith

## Model Description

This is a RoBERTa model pretrained on 青空文庫 (Aozora Bunko) texts for POS-tagging and dependency parsing (using `goeswith` for subwords), derived from [roberta-base-japanese-aozora](https://huggingface.co/KoichiYasuoka/roberta-base-japanese-aozora) and [UD_Japanese-GSDLUW](https://github.com/UniversalDependencies/UD_Japanese-GSDLUW).

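Each classification label packs UPOS, FEATS and DEPREL into a single string (see `id2label` in `config.json`); non-initial subwords are tagged `X|_|goeswith`. A minimal sketch for peeking at the label set:

```py
from transformers import AutoConfig
# a quick look at the packed UPOS|FEATS|DEPREL labels of this model
cfg=AutoConfig.from_pretrained("KoichiYasuoka/roberta-base-japanese-aozora-ud-goeswith")
print(cfg.id2label[84])  # VERB|_|root
print(cfg.id2label[86])  # X|_|goeswith (non-initial subword)
```
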
## How to Use

```py
class UDgoeswith(object):
  def __init__(self,bert):
    from transformers import AutoTokenizer,AutoModelForTokenClassification
    self.tokenizer=AutoTokenizer.from_pretrained(bert)
    self.model=AutoModelForTokenClassification.from_pretrained(bert)
  def __call__(self,text):
    import numpy,torch,ufal.chu_liu_edmonds
    w=self.tokenizer(text,return_offsets_mapping=True)
    v=w["input_ids"]
    n=len(v)-1
    with torch.no_grad():
      # one copy of the sentence per token: that token is masked in place and appended at the end,
      # so position j of copy i scores token j's label if its head were token i
      d=self.model(input_ids=torch.tensor([v[0:i]+[self.tokenizer.mask_token_id]+v[i+1:]+[v[i]] for i in range(1,n)]))
    e=d.logits.numpy()[:,1:n,:]
    e[:,:,0]=numpy.nan
    m=numpy.full((n,n),numpy.nan)
    m[1:,1:]=numpy.nanmax(e,axis=2).transpose()
    p=numpy.zeros((n,n))
    p[1:,1:]=numpy.nanargmax(e,axis=2).transpose()
    for i in range(1,n):
      # the diagonal holds each token's "self-head" (root) score; move it to column 0
      m[i,0],m[i,i],p[i,0]=m[i,i],numpy.nan,p[i,i]
    h=ufal.chu_liu_edmonds.chu_liu_edmonds(m)[0]  # maximum spanning tree over the head-score matrix
    u="# text = "+text+"\n"
    v=[(s,e) for s,e in w["offset_mapping"] if s<e]
    for i,(s,e) in enumerate(v,1):
      q=self.model.config.id2label[p[i,h[i]]].split("|")  # UPOS|FEATS|DEPREL
      u+="\t".join([str(i),text[s:e],"_",q[0],"_","|".join(q[1:-1]),str(h[i]),q[-1],"_","_" if i<len(v) and e<v[i][0] else "SpaceAfter=No"])+"\n"
    return u+"\n"

nlp=UDgoeswith("KoichiYasuoka/roberta-base-japanese-aozora-ud-goeswith")
print(nlp("全学年にわたって小学校の国語の教科書に挿し絵が用いられている"))
```

[ufal.chu-liu-edmonds](https://pypi.org/project/ufal.chu-liu-edmonds/) is required (`pip install ufal.chu-liu-edmonds`), together with `transformers`, `torch`, and `numpy`.
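
The parser returns plain CoNLL-U text (ten tab-separated columns per token), so its output can be consumed with simple string splitting. A minimal sketch, assuming the `nlp` object created above:

```py
# minimal sketch: extract (ID, FORM, UPOS, HEAD, DEPREL) from the CoNLL-U output
conllu=nlp("全学年にわたって小学校の国語の教科書に挿し絵が用いられている")
for line in conllu.splitlines():
  if line and not line.startswith("#"):
    t=line.split("\t")  # ID FORM LEMMA UPOS XPOS FEATS HEAD DEPREL DEPS MISC
    print(t[0],t[1],t[3],t[6],t[7])
```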
config.json ADDED
{
  "architectures": [
    "RobertaForTokenClassification"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 2,
  "classifier_dropout": null,
  "eos_token_id": 3,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "id2label": {
    "0": "-|_|dep",
    "1": "ADJ|_|acl",
    "2": "ADJ|_|advcl",
    "3": "ADJ|_|amod",
    "4": "ADJ|_|ccomp",
    "5": "ADJ|_|csubj",
    "6": "ADJ|_|dep",
    "7": "ADJ|_|dislocated",
    "8": "ADJ|_|nmod",
    "9": "ADJ|_|nsubj",
    "10": "ADJ|_|obj",
    "11": "ADJ|_|obl",
    "12": "ADJ|_|root",
    "13": "ADP|_|case",
    "14": "ADP|_|fixed",
    "15": "ADV|_|advcl",
    "16": "ADV|_|advmod",
    "17": "ADV|_|dep",
    "18": "ADV|_|obj",
    "19": "ADV|_|root",
    "20": "AUX|Polarity=Neg|aux",
    "21": "AUX|_|aux",
    "22": "AUX|_|cop",
    "23": "AUX|_|fixed",
    "24": "AUX|_|root",
    "25": "CCONJ|_|cc",
    "26": "DET|_|det",
    "27": "INTJ|_|discourse",
    "28": "INTJ|_|root",
    "29": "NOUN|Polarity=Neg|obl",
    "30": "NOUN|Polarity=Neg|root",
    "31": "NOUN|_|acl",
    "32": "NOUN|_|advcl",
    "33": "NOUN|_|ccomp",
    "34": "NOUN|_|compound",
    "35": "NOUN|_|csubj",
    "36": "NOUN|_|dislocated",
    "37": "NOUN|_|nmod",
    "38": "NOUN|_|nsubj",
    "39": "NOUN|_|obj",
    "40": "NOUN|_|obl",
    "41": "NOUN|_|root",
    "42": "NUM|_|advcl",
    "43": "NUM|_|compound",
    "44": "NUM|_|dislocated",
    "45": "NUM|_|nmod",
    "46": "NUM|_|nsubj",
    "47": "NUM|_|nummod",
    "48": "NUM|_|obj",
    "49": "NUM|_|obl",
    "50": "NUM|_|root",
    "51": "PART|_|mark",
    "52": "PRON|_|acl",
    "53": "PRON|_|advcl",
    "54": "PRON|_|dislocated",
    "55": "PRON|_|nmod",
    "56": "PRON|_|nsubj",
    "57": "PRON|_|obj",
    "58": "PRON|_|obl",
    "59": "PRON|_|root",
    "60": "PROPN|_|acl",
    "61": "PROPN|_|advcl",
    "62": "PROPN|_|compound",
    "63": "PROPN|_|dislocated",
    "64": "PROPN|_|nmod",
    "65": "PROPN|_|nsubj",
    "66": "PROPN|_|obj",
    "67": "PROPN|_|obl",
    "68": "PROPN|_|root",
    "69": "PUNCT|_|punct",
    "70": "SCONJ|_|mark",
    "71": "SYM|_|compound",
    "72": "SYM|_|dep",
    "73": "SYM|_|nmod",
    "74": "SYM|_|obl",
    "75": "VERB|_|acl",
    "76": "VERB|_|advcl",
    "77": "VERB|_|ccomp",
    "78": "VERB|_|compound",
    "79": "VERB|_|csubj",
    "80": "VERB|_|dislocated",
    "81": "VERB|_|nmod",
    "82": "VERB|_|obj",
    "83": "VERB|_|obl",
    "84": "VERB|_|root",
    "85": "X|_|dep",
    "86": "X|_|goeswith",
    "87": "X|_|nmod"
  },
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "label2id": {
    "-|_|dep": 0,
    "ADJ|_|acl": 1,
    "ADJ|_|advcl": 2,
    "ADJ|_|amod": 3,
    "ADJ|_|ccomp": 4,
    "ADJ|_|csubj": 5,
    "ADJ|_|dep": 6,
    "ADJ|_|dislocated": 7,
    "ADJ|_|nmod": 8,
    "ADJ|_|nsubj": 9,
    "ADJ|_|obj": 10,
    "ADJ|_|obl": 11,
    "ADJ|_|root": 12,
    "ADP|_|case": 13,
    "ADP|_|fixed": 14,
    "ADV|_|advcl": 15,
    "ADV|_|advmod": 16,
    "ADV|_|dep": 17,
    "ADV|_|obj": 18,
    "ADV|_|root": 19,
    "AUX|Polarity=Neg|aux": 20,
    "AUX|_|aux": 21,
    "AUX|_|cop": 22,
    "AUX|_|fixed": 23,
    "AUX|_|root": 24,
    "CCONJ|_|cc": 25,
    "DET|_|det": 26,
    "INTJ|_|discourse": 27,
    "INTJ|_|root": 28,
    "NOUN|Polarity=Neg|obl": 29,
    "NOUN|Polarity=Neg|root": 30,
    "NOUN|_|acl": 31,
    "NOUN|_|advcl": 32,
    "NOUN|_|ccomp": 33,
    "NOUN|_|compound": 34,
    "NOUN|_|csubj": 35,
    "NOUN|_|dislocated": 36,
    "NOUN|_|nmod": 37,
    "NOUN|_|nsubj": 38,
    "NOUN|_|obj": 39,
    "NOUN|_|obl": 40,
    "NOUN|_|root": 41,
    "NUM|_|advcl": 42,
    "NUM|_|compound": 43,
    "NUM|_|dislocated": 44,
    "NUM|_|nmod": 45,
    "NUM|_|nsubj": 46,
    "NUM|_|nummod": 47,
    "NUM|_|obj": 48,
    "NUM|_|obl": 49,
    "NUM|_|root": 50,
    "PART|_|mark": 51,
    "PRON|_|acl": 52,
    "PRON|_|advcl": 53,
    "PRON|_|dislocated": 54,
    "PRON|_|nmod": 55,
    "PRON|_|nsubj": 56,
    "PRON|_|obj": 57,
    "PRON|_|obl": 58,
    "PRON|_|root": 59,
    "PROPN|_|acl": 60,
    "PROPN|_|advcl": 61,
    "PROPN|_|compound": 62,
    "PROPN|_|dislocated": 63,
    "PROPN|_|nmod": 64,
    "PROPN|_|nsubj": 65,
    "PROPN|_|obj": 66,
    "PROPN|_|obl": 67,
    "PROPN|_|root": 68,
    "PUNCT|_|punct": 69,
    "SCONJ|_|mark": 70,
    "SYM|_|compound": 71,
    "SYM|_|dep": 72,
    "SYM|_|nmod": 73,
    "SYM|_|obl": 74,
    "VERB|_|acl": 75,
    "VERB|_|advcl": 76,
    "VERB|_|ccomp": 77,
    "VERB|_|compound": 78,
    "VERB|_|csubj": 79,
    "VERB|_|dislocated": 80,
    "VERB|_|nmod": 81,
    "VERB|_|obj": 82,
    "VERB|_|obl": 83,
    "VERB|_|root": 84,
    "X|_|dep": 85,
    "X|_|goeswith": 86,
    "X|_|nmod": 87
  },
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "roberta",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 0,
  "position_embedding_type": "absolute",
  "tokenizer_class": "RemBertTokenizerFast",
  "torch_dtype": "float32",
  "transformers_version": "4.22.1",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 250315
}
maker.py ADDED
#! /usr/bin/python3
# fine-tuning script: derives the token-classification model from the base model and UD_Japanese-GSDLUW
src="KoichiYasuoka/roberta-base-japanese-aozora"
tgt="KoichiYasuoka/roberta-base-japanese-aozora-ud-goeswith"
url="https://github.com/UniversalDependencies/UD_Japanese-GSDLUW"
import os
d=os.path.basename(url)
os.system("test -d "+d+" || git clone --depth=1 "+url)
os.system("for F in train dev test ; do cp "+d+"/*-$F.conllu $F.conllu ; done")
class UDgoeswithDataset(object):
  def __init__(self,conllu,tokenizer):
    self.ids,self.tags,label=[],[],set()
    with open(conllu,"r",encoding="utf-8") as r:
      cls,sep,msk=tokenizer.cls_token_id,tokenizer.sep_token_id,tokenizer.mask_token_id
      dep,c="-|_|dep",[]
      for s in r:
        t=s.split("\t")
        if len(t)==10 and t[0].isdecimal():
          c.append(t)
        elif c!=[]:
          # retokenize each word; extra subwords become X|_|goeswith rows headed by the first subword
          v=tokenizer([t[1] for t in c],add_special_tokens=False)["input_ids"]
          for i in range(len(v)-1,-1,-1):
            for j in range(1,len(v[i])):
              c.insert(i+1,[c[i][0],"_","_","X","_","_",c[i][0],"goeswith","_","_"])
          y=["0"]+[t[0] for t in c]
          h=[i if t[6]=="0" else y.index(t[6]) for i,t in enumerate(c,1)]
          p,v=[t[3]+"|"+t[5]+"|"+t[7] for t in c],sum(v,[])
          self.ids.append([cls]+v+[sep])
          self.tags.append([dep]+p+[dep])
          label=set(sum([self.tags[-1],list(label)],[]))
          # one extra example per token: that token is masked and appended at the end;
          # only the tokens headed by it keep their UPOS|FEATS|DEPREL tags
          for i,k in enumerate(v):
            self.ids.append([cls]+v[0:i]+[msk]+v[i+1:]+[sep,k])
            self.tags.append([dep]+[t if h[j]==i+1 else dep for j,t in enumerate(p)]+[dep,dep])
          c=[]
    self.label2id={l:i for i,l in enumerate(sorted(label))}
  def __call__(*args):
    # merge the label sets of this and the given datasets into one shared label2id
    label=set(sum([list(t.label2id) for t in args],[]))
    lid={l:i for i,l in enumerate(sorted(label))}
    for t in args:
      t.label2id=lid
    return lid
  __len__=lambda self:len(self.ids)
  __getitem__=lambda self,i:{"input_ids":self.ids[i],"labels":[self.label2id[t] for t in self.tags[i]]}
from transformers import AutoTokenizer,AutoConfig,AutoModelForTokenClassification,DataCollatorForTokenClassification,TrainingArguments,Trainer
tkz=AutoTokenizer.from_pretrained(src)
trainDS=UDgoeswithDataset("train.conllu",tkz)
devDS=UDgoeswithDataset("dev.conllu",tkz)
testDS=UDgoeswithDataset("test.conllu",tkz)
lid=trainDS(devDS,testDS)
cfg=AutoConfig.from_pretrained(src,num_labels=len(lid),label2id=lid,id2label={i:l for l,i in lid.items()})
arg=TrainingArguments(num_train_epochs=3,per_device_train_batch_size=32,output_dir="/tmp",overwrite_output_dir=True,save_total_limit=2,evaluation_strategy="epoch",learning_rate=5e-05,warmup_ratio=0.1)
trn=Trainer(args=arg,data_collator=DataCollatorForTokenClassification(tkz),model=AutoModelForTokenClassification.from_pretrained(src,config=cfg),train_dataset=trainDS,eval_dataset=devDS)
trn.train()
trn.save_model(tgt)
tkz.save_pretrained(tgt)
pytorch_model.bin ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:176379b7215aabd78754609ce40e1658ee4b1a11a3855d24a51d307b401d3814
size 1111112369
sentencepiece.model ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b
size 1
special_tokens_map.json ADDED
{
  "bos_token": "[CLS]",
  "cls_token": "[CLS]",
  "eos_token": "[SEP]",
  "mask_token": {
    "content": "[MASK]",
    "lstrip": true,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "unk_token": "[UNK]"
}
tokenizer.json ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:423c0eb777bc9f4b469d414f432e844020bc0db9da97a6c82a8fd89cf4caf178
size 16599144
tokenizer_config.json ADDED
{
  "bos_token": "[CLS]",
  "cls_token": "[CLS]",
  "do_lower_case": false,
  "eos_token": "[SEP]",
  "keep_accents": true,
  "mask_token": {
    "__type": "AddedToken",
    "content": "[MASK]",
    "lstrip": true,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "model_max_length": 512,
  "pad_token": "[PAD]",
  "remove_space": true,
  "sep_token": "[SEP]",
  "tokenizer_class": "RemBertTokenizerFast",
  "unk_token": "[UNK]"
}