KoichiYasuoka committed
Commit f55d1bb
1 Parent(s): 199a96a

initial release

README.md ADDED
@@ -0,0 +1,98 @@
+ ---
+ language:
+ - "th"
+ tags:
+ - "thai"
+ - "question-answering"
+ - "dependency-parsing"
+ datasets:
+ - "universal_dependencies"
+ license: "apache-2.0"
+ pipeline_tag: "question-answering"
+ widget:
+ - text: "กว่า"
+   context: "หลายหัวดีกว่าหัวเดียว"
+ - text: "หลาย"
+   context: "หลายหัวดีกว่าหัวเดียว"
+ - text: "หัว"
+   context: "หลาย[MASK]ดีกว่าหัวเดียว"
+ ---
+
+ # deberta-base-thai-ud-head
+
+ ## Model Description
+
+ This is a DeBERTa(V2) model pretrained on Thai Wikipedia texts and fine-tuned for dependency parsing (head detection on Universal Dependencies) cast as question answering, derived from [deberta-base-thai](https://huggingface.co/KoichiYasuoka/deberta-base-thai). Give the word whose head you want as `question`; if that word occurs more than once in the sentence, replace the occurrence you mean with [MASK] inside `context` to avoid ambiguity.
+
+ ## How to Use
+
+ ```py
+ import torch
+ from transformers import AutoTokenizer,AutoModelForQuestionAnswering
+ tokenizer=AutoTokenizer.from_pretrained("KoichiYasuoka/deberta-base-thai-ud-head")
+ model=AutoModelForQuestionAnswering.from_pretrained("KoichiYasuoka/deberta-base-thai-ud-head")
+ question="กว่า"
+ context="หลายหัวดีกว่าหัวเดียว"
+ inputs=tokenizer(question,context,return_tensors="pt",return_offsets_mapping=True)
+ offsets=inputs.pop("offset_mapping").tolist()[0]
+ outputs=model(**inputs)
+ # the predicted answer span is the syntactic head of `question` within `context`
+ start,end=torch.argmax(outputs.start_logits),torch.argmax(outputs.end_logits)
+ print(context[offsets[start][0]:offsets[end][-1]])
+ ```
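+
+ When the `question` word appears more than once in the sentence, replace the occurrence you are asking about with [MASK] in `context`, exactly as in the widget examples above. A minimal variant of the snippet above (only `question` and `context` change; `tokenizer` and `model` are reused):
+
+ ```py
+ # head of the first "หัว" in "หลายหัวดีกว่าหัวเดียว"
+ question="หัว"
+ context="หลาย[MASK]ดีกว่าหัวเดียว"
+ inputs=tokenizer(question,context,return_tensors="pt",return_offsets_mapping=True)
+ offsets=inputs.pop("offset_mapping").tolist()[0]
+ outputs=model(**inputs)
+ start,end=torch.argmax(outputs.start_logits),torch.argmax(outputs.end_logits)
+ print(context[offsets[start][0]:offsets[end][-1]])
+ ```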
+
+ or, to parse a whole sentence into a Universal Dependencies tree in CoNLL-U format (with [ufal.chu-liu-edmonds](https://pypi.org/project/ufal.chu-liu-edmonds/)):
+
+ ```py
+ class TransformersUD(object):
+   def __init__(self,bert):
+     import os
+     from transformers import (AutoTokenizer,AutoModelForQuestionAnswering,
+       AutoModelForTokenClassification,AutoConfig,TokenClassificationPipeline)
+     self.tokenizer=AutoTokenizer.from_pretrained(bert)
+     self.model=AutoModelForQuestionAnswering.from_pretrained(bert)
+     x=AutoModelForTokenClassification.from_pretrained
+     if os.path.isdir(bert):
+       d,t=x(os.path.join(bert,"deprel")),x(os.path.join(bert,"tagger"))
+     else:
+       # hf_bucket_url comes from the transformers 4.19.x generation this card targets
+       from transformers.file_utils import hf_bucket_url
+       c=AutoConfig.from_pretrained(hf_bucket_url(bert,"deprel/config.json"))
+       d=x(hf_bucket_url(bert,"deprel/pytorch_model.bin"),config=c)
+       s=AutoConfig.from_pretrained(hf_bucket_url(bert,"tagger/config.json"))
+       t=x(hf_bucket_url(bert,"tagger/pytorch_model.bin"),config=s)
+     # deprel: word-level dependency relations (aggregated); tagger: token-level UPOS|FEATS
+     self.deprel=TokenClassificationPipeline(model=d,tokenizer=self.tokenizer,
+       aggregation_strategy="simple")
+     self.tagger=TokenClassificationPipeline(model=t,tokenizer=self.tokenizer)
+   def __call__(self,text):
+     import numpy,torch,ufal.chu_liu_edmonds
+     # w: (start,end,deprel) per word; z: UPOS/FEATS labels keyed by start offset
+     w=[(t["start"],t["end"],t["entity_group"]) for t in self.deprel(text)]
+     z,n={t["start"]:t["entity"].split("|") for t in self.tagger(text)},len(w)
+     # m[i+1,j+1]: score of word j being the head of word i; column 0 is the root
+     r,m=[text[s:e] for s,e,p in w],numpy.full((n+1,n+1),numpy.nan)
+     v,c=self.tokenizer(r,add_special_tokens=False)["input_ids"],[]
+     # one QA input per word: [CLS] word [SEP] sentence-with-that-word-masked [SEP]
+     for i,t in enumerate(v):
+       q=[self.tokenizer.cls_token_id]+t+[self.tokenizer.sep_token_id]
+       c.append([q]+v[0:i]+[[self.tokenizer.mask_token_id]]+v[i+1:]+[[q[-1]]])
+     b=[[len(sum(x[0:j+1],[])) for j in range(len(x))] for x in c]
+     with torch.no_grad():
+       d=self.model(input_ids=torch.tensor([sum(x,[]) for x in c]),
+         token_type_ids=torch.tensor([[0]*x[0]+[1]*(x[-1]-x[0]) for x in b]))
+     # fill the score matrix from start/end logits, then decode a spanning tree
+     s,e=d.start_logits.tolist(),d.end_logits.tolist()
+     for i in range(n):
+       for j in range(n):
+         m[i+1,0 if i==j else j+1]=s[i][b[i][j]]+e[i][b[i][j+1]-1]
+     h=ufal.chu_liu_edmonds.chu_liu_edmonds(m)[0]
+     # if the tree does not have exactly one root, keep the most plausible root and re-decode
+     if [0 for i in h if i==0]!=[0]:
+       i=([p for s,e,p in w]+["root"]).index("root")
+       j=i+1 if i<n else numpy.nanargmax(m[:,0])
+       m[0:j,0]=m[j+1:,0]=numpy.nan
+       h=ufal.chu_liu_edmonds.chu_liu_edmonds(m)[0]
+     # emit CoNLL-U: ID FORM LEMMA UPOS XPOS FEATS HEAD DEPREL DEPS MISC
+     u="# text = "+text.replace("\n"," ")+"\n"
+     for i,(s,e,p) in enumerate(w,1):
+       p="root" if h[i]==0 else "dep" if p=="root" else p
+       u+="\t".join([str(i),r[i-1],"_",z[s][0][2:],"_","|".join(z[s][1:]),
+         str(h[i]),p,"_","_" if i<n and w[i][0]<e else "SpaceAfter=No"])+"\n"
+     return u+"\n"
+
+ nlp=TransformersUD("KoichiYasuoka/deberta-base-thai-ud-head")
+ print(nlp("หลายหัวดีกว่าหัวเดียว"))
+ ```
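+
+ The string returned by `TransformersUD` is plain CoNLL-U, so any CoNLL-U reader can consume it. A minimal sketch, assuming the third-party [conllu](https://pypi.org/project/conllu/) package is installed (it is not required by the model itself):
+
+ ```py
+ from conllu import parse
+ # parse the CoNLL-U text produced above into token lists
+ doc=parse(nlp("หลายหัวดีกว่าหัวเดียว"))
+ for token in doc[0]:
+   print(token["id"],token["form"],token["head"],token["deprel"])
+ ```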
+
config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "architectures": [
+     "DebertaV2ForQuestionAnswering"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-07,
+   "max_position_embeddings": 512,
+   "max_relative_positions": -1,
+   "model_type": "deberta-v2",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "pooler_dropout": 0,
+   "pooler_hidden_act": "gelu",
+   "pooler_hidden_size": 768,
+   "pos_att_type": null,
+   "position_biased_input": true,
+   "relative_attention": false,
+   "tokenizer_class": "DebertaV2TokenizerFast",
+   "torch_dtype": "float32",
+   "transformers_version": "4.19.4",
+   "type_vocab_size": 0,
+   "vocab_size": 3000
+ }
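config.json registers the `DebertaV2ForQuestionAnswering` head together with `DebertaV2TokenizerFast`, so the model also works with the stock `question-answering` pipeline, as the `pipeline_tag` in README.md indicates. A minimal sketch:

```py
from transformers import pipeline
# head detection through the standard question-answering pipeline
qap=pipeline("question-answering",model="KoichiYasuoka/deberta-base-thai-ud-head")
print(qap(question="กว่า",context="หลายหัวดีกว่าหัวเดียว"))
```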
deprel/config.json ADDED
@@ -0,0 +1,189 @@
+ {
+   "architectures": [
+     "DebertaV2ForTokenClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "B-acl",
+     "1": "B-acl:relcl",
+     "2": "B-advcl",
+     "3": "B-advmod",
+     "4": "B-appos",
+     "5": "B-aux",
+     "6": "B-aux:pass",
+     "7": "B-case",
+     "8": "B-cc",
+     "9": "B-cc:preconj",
+     "10": "B-ccomp",
+     "11": "B-clf",
+     "12": "B-compound",
+     "13": "B-compound:prt",
+     "14": "B-conj",
+     "15": "B-cop",
+     "16": "B-csubj",
+     "17": "B-det",
+     "18": "B-det:predet",
+     "19": "B-discourse",
+     "20": "B-dislocated",
+     "21": "B-fixed",
+     "22": "B-flat:name",
+     "23": "B-goeswith",
+     "24": "B-iobj",
+     "25": "B-mark",
+     "26": "B-nmod",
+     "27": "B-nmod:poss",
+     "28": "B-nsubj",
+     "29": "B-nsubj:pass",
+     "30": "B-nummod",
+     "31": "B-obj",
+     "32": "B-obl",
+     "33": "B-obl:poss",
+     "34": "B-obl:tmod",
+     "35": "B-parataxis",
+     "36": "B-punct",
+     "37": "B-reparandum",
+     "38": "B-root",
+     "39": "B-vocative",
+     "40": "B-xcomp",
+     "41": "I-acl",
+     "42": "I-acl:relcl",
+     "43": "I-advcl",
+     "44": "I-advmod",
+     "45": "I-appos",
+     "46": "I-aux",
+     "47": "I-aux:pass",
+     "48": "I-case",
+     "49": "I-cc",
+     "50": "I-ccomp",
+     "51": "I-clf",
+     "52": "I-compound",
+     "53": "I-conj",
+     "54": "I-cop",
+     "55": "I-csubj",
+     "56": "I-det",
+     "57": "I-det:predet",
+     "58": "I-discourse",
+     "59": "I-dislocated",
+     "60": "I-fixed",
+     "61": "I-flat:name",
+     "62": "I-goeswith",
+     "63": "I-mark",
+     "64": "I-nmod",
+     "65": "I-nmod:poss",
+     "66": "I-nsubj",
+     "67": "I-nsubj:pass",
+     "68": "I-nummod",
+     "69": "I-obj",
+     "70": "I-obl",
+     "71": "I-obl:poss",
+     "72": "I-obl:tmod",
+     "73": "I-parataxis",
+     "74": "I-punct",
+     "75": "I-root",
+     "76": "I-vocative",
+     "77": "I-xcomp"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "B-acl": 0,
+     "B-acl:relcl": 1,
+     "B-advcl": 2,
+     "B-advmod": 3,
+     "B-appos": 4,
+     "B-aux": 5,
+     "B-aux:pass": 6,
+     "B-case": 7,
+     "B-cc": 8,
+     "B-cc:preconj": 9,
+     "B-ccomp": 10,
+     "B-clf": 11,
+     "B-compound": 12,
+     "B-compound:prt": 13,
+     "B-conj": 14,
+     "B-cop": 15,
+     "B-csubj": 16,
+     "B-det": 17,
+     "B-det:predet": 18,
+     "B-discourse": 19,
+     "B-dislocated": 20,
+     "B-fixed": 21,
+     "B-flat:name": 22,
+     "B-goeswith": 23,
+     "B-iobj": 24,
+     "B-mark": 25,
+     "B-nmod": 26,
+     "B-nmod:poss": 27,
+     "B-nsubj": 28,
+     "B-nsubj:pass": 29,
+     "B-nummod": 30,
+     "B-obj": 31,
+     "B-obl": 32,
+     "B-obl:poss": 33,
+     "B-obl:tmod": 34,
+     "B-parataxis": 35,
+     "B-punct": 36,
+     "B-reparandum": 37,
+     "B-root": 38,
+     "B-vocative": 39,
+     "B-xcomp": 40,
+     "I-acl": 41,
+     "I-acl:relcl": 42,
+     "I-advcl": 43,
+     "I-advmod": 44,
+     "I-appos": 45,
+     "I-aux": 46,
+     "I-aux:pass": 47,
+     "I-case": 48,
+     "I-cc": 49,
+     "I-ccomp": 50,
+     "I-clf": 51,
+     "I-compound": 52,
+     "I-conj": 53,
+     "I-cop": 54,
+     "I-csubj": 55,
+     "I-det": 56,
+     "I-det:predet": 57,
+     "I-discourse": 58,
+     "I-dislocated": 59,
+     "I-fixed": 60,
+     "I-flat:name": 61,
+     "I-goeswith": 62,
+     "I-mark": 63,
+     "I-nmod": 64,
+     "I-nmod:poss": 65,
+     "I-nsubj": 66,
+     "I-nsubj:pass": 67,
+     "I-nummod": 68,
+     "I-obj": 69,
+     "I-obl": 70,
+     "I-obl:poss": 71,
+     "I-obl:tmod": 72,
+     "I-parataxis": 73,
+     "I-punct": 74,
+     "I-root": 75,
+     "I-vocative": 76,
+     "I-xcomp": 77
+   },
+   "layer_norm_eps": 1e-07,
+   "max_position_embeddings": 512,
+   "max_relative_positions": -1,
+   "model_type": "deberta-v2",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "pooler_dropout": 0,
+   "pooler_hidden_act": "gelu",
+   "pooler_hidden_size": 768,
+   "pos_att_type": null,
+   "position_biased_input": true,
+   "relative_attention": false,
+   "tokenizer_class": "DebertaV2TokenizerFast",
+   "torch_dtype": "float32",
+   "transformers_version": "4.19.4",
+   "type_vocab_size": 0,
+   "vocab_size": 3000
+ }
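deprel/config.json above and tagger/config.json below describe the two auxiliary token-classification heads used by `TransformersUD` in README.md: `deprel` segments the sentence into words and labels each word's dependency relation (BIO tags over UD relations), while `tagger` assigns UPOS and FEATS labels. A minimal sketch of loading one of them on its own, assuming a transformers version whose `from_pretrained` accepts the `subfolder` argument (newer than the 4.19.x API that `hf_bucket_url` in README.md targets):

```py
from transformers import (AutoTokenizer,AutoModelForTokenClassification,
  TokenClassificationPipeline)
repo="KoichiYasuoka/deberta-base-thai-ud-head"
tokenizer=AutoTokenizer.from_pretrained(repo)
# load the dependency-relation head from the deprel/ subfolder of this repository
deprel=AutoModelForTokenClassification.from_pretrained(repo,subfolder="deprel")
nlp=TokenClassificationPipeline(model=deprel,tokenizer=tokenizer,
  aggregation_strategy="simple")
print(nlp("หลายหัวดีกว่าหัวเดียว"))
```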
deprel/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cb969274f09436f6db74d4ca0173472ef4ab8d17110ea8833f5d4ff591160087
+ size 351323763
deprel/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "[CLS]", "eos_token": "[SEP]", "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
deprel/spm.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b
+ size 1
deprel/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
deprel/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": false, "bos_token": "[CLS]", "eos_token": "[SEP]", "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "split_by_punct": true, "keep_accents": true, "model_max_length": 512, "tokenizer_class": "DebertaV2TokenizerFast"}
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eb8b1499345afa306337ede4327ea2ca987070c5595181917bda6b139dd367d7
+ size 351090035
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "[CLS]", "eos_token": "[SEP]", "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
spm.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b
+ size 1
tagger/config.json ADDED
@@ -0,0 +1,135 @@
+ {
+   "architectures": [
+     "DebertaV2ForTokenClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "B-ADP|_",
+     "1": "B-ADV|PronType=Int",
+     "2": "B-ADV|_",
+     "3": "B-AUX|_",
+     "4": "B-CCONJ|_",
+     "5": "B-DET|PronType=Int",
+     "6": "B-DET|_",
+     "7": "B-INTJ|_",
+     "8": "B-NOUN|_",
+     "9": "B-NUM|_",
+     "10": "B-PART|Aspect=Perf",
+     "11": "B-PART|Aspect=Prog",
+     "12": "B-PART|Polarity=Neg",
+     "13": "B-PART|PronType=Int",
+     "14": "B-PART|_",
+     "15": "B-PRON|Person=1",
+     "16": "B-PRON|Person=2",
+     "17": "B-PRON|Person=3",
+     "18": "B-PRON|PronType=Int",
+     "19": "B-PRON|_",
+     "20": "B-PROPN|_",
+     "21": "B-PUNCT|_",
+     "22": "B-SCONJ|_",
+     "23": "B-SYM|_",
+     "24": "B-VERB|Mood=Imp",
+     "25": "B-VERB|Voice=Pass",
+     "26": "B-VERB|_",
+     "27": "B-X|_",
+     "28": "I-ADP|_",
+     "29": "I-ADV|PronType=Int",
+     "30": "I-ADV|_",
+     "31": "I-AUX|_",
+     "32": "I-CCONJ|_",
+     "33": "I-DET|_",
+     "34": "I-INTJ|_",
+     "35": "I-NOUN|_",
+     "36": "I-NUM|_",
+     "37": "I-PART|Aspect=Perf",
+     "38": "I-PART|Aspect=Prog",
+     "39": "I-PART|Polarity=Neg",
+     "40": "I-PART|PronType=Int",
+     "41": "I-PART|_",
+     "42": "I-PRON|Person=1",
+     "43": "I-PRON|Person=2",
+     "44": "I-PRON|Person=3",
+     "45": "I-PRON|_",
+     "46": "I-PROPN|_",
+     "47": "I-PUNCT|_",
+     "48": "I-VERB|Voice=Pass",
+     "49": "I-VERB|_",
+     "50": "I-X|_"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "B-ADP|_": 0,
+     "B-ADV|PronType=Int": 1,
+     "B-ADV|_": 2,
+     "B-AUX|_": 3,
+     "B-CCONJ|_": 4,
+     "B-DET|PronType=Int": 5,
+     "B-DET|_": 6,
+     "B-INTJ|_": 7,
+     "B-NOUN|_": 8,
+     "B-NUM|_": 9,
+     "B-PART|Aspect=Perf": 10,
+     "B-PART|Aspect=Prog": 11,
+     "B-PART|Polarity=Neg": 12,
+     "B-PART|PronType=Int": 13,
+     "B-PART|_": 14,
+     "B-PRON|Person=1": 15,
+     "B-PRON|Person=2": 16,
+     "B-PRON|Person=3": 17,
+     "B-PRON|PronType=Int": 18,
+     "B-PRON|_": 19,
+     "B-PROPN|_": 20,
+     "B-PUNCT|_": 21,
+     "B-SCONJ|_": 22,
+     "B-SYM|_": 23,
+     "B-VERB|Mood=Imp": 24,
+     "B-VERB|Voice=Pass": 25,
+     "B-VERB|_": 26,
+     "B-X|_": 27,
+     "I-ADP|_": 28,
+     "I-ADV|PronType=Int": 29,
+     "I-ADV|_": 30,
+     "I-AUX|_": 31,
+     "I-CCONJ|_": 32,
+     "I-DET|_": 33,
+     "I-INTJ|_": 34,
+     "I-NOUN|_": 35,
+     "I-NUM|_": 36,
+     "I-PART|Aspect=Perf": 37,
+     "I-PART|Aspect=Prog": 38,
+     "I-PART|Polarity=Neg": 39,
+     "I-PART|PronType=Int": 40,
+     "I-PART|_": 41,
+     "I-PRON|Person=1": 42,
+     "I-PRON|Person=2": 43,
+     "I-PRON|Person=3": 44,
+     "I-PRON|_": 45,
+     "I-PROPN|_": 46,
+     "I-PUNCT|_": 47,
+     "I-VERB|Voice=Pass": 48,
+     "I-VERB|_": 49,
+     "I-X|_": 50
+   },
+   "layer_norm_eps": 1e-07,
+   "max_position_embeddings": 512,
+   "max_relative_positions": -1,
+   "model_type": "deberta-v2",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "pooler_dropout": 0,
+   "pooler_hidden_act": "gelu",
+   "pooler_hidden_size": 768,
+   "pos_att_type": null,
+   "position_biased_input": true,
+   "relative_attention": false,
+   "tokenizer_class": "DebertaV2TokenizerFast",
+   "torch_dtype": "float32",
+   "transformers_version": "4.19.4",
+   "type_vocab_size": 0,
+   "vocab_size": 3000
+ }
tagger/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c04364f9ff1092028f45c20225a1e8af96bcc95df57e50ffa7049ec7e7a6d186
+ size 351240755
tagger/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "[CLS]", "eos_token": "[SEP]", "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tagger/spm.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b
+ size 1
tagger/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tagger/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": false, "bos_token": "[CLS]", "eos_token": "[SEP]", "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "split_by_punct": true, "keep_accents": true, "model_max_length": 512, "tokenizer_class": "DebertaV2TokenizerFast"}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": false, "bos_token": "[CLS]", "eos_token": "[SEP]", "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "split_by_punct": true, "keep_accents": true, "model_max_length": 512, "tokenizer_class": "DebertaV2TokenizerFast"}