KoichiYasuoka committed on
Commit
b352d69
1 Parent(s): 1e70cce

model improved

Browse files
config.json CHANGED
@@ -68,7 +68,8 @@
68
  "56": "SCONJ",
69
  "57": "SYM",
70
  "58": "VERB",
71
- "59": "VERB+SCONJ"
 
72
  },
73
  "initializer_range": 0.02,
74
  "intermediate_size": 4096,
@@ -132,7 +133,8 @@
132
  "SCONJ": 56,
133
  "SYM": 57,
134
  "VERB": 58,
135
- "VERB+SCONJ": 59
 
136
  },
137
  "layer_norm_eps": 1e-07,
138
  "max_position_embeddings": 512,
@@ -144,9 +146,12 @@
144
  "pooler_dropout": 0,
145
  "pooler_hidden_act": "gelu",
146
  "pooler_hidden_size": 1024,
147
- "pos_att_type": null,
148
- "position_biased_input": true,
149
- "relative_attention": false,
 
 
 
150
  "task_specific_params": {
151
  "upos_multiword": {
152
  "ADJ+AUX": {
@@ -391,7 +396,7 @@
391
  },
392
  "tokenizer_class": "DebertaV2TokenizerFast",
393
  "torch_dtype": "float32",
394
- "transformers_version": "4.19.2",
395
  "type_vocab_size": 0,
396
  "vocab_size": 32000
397
  }
68
  "56": "SCONJ",
69
  "57": "SYM",
70
  "58": "VERB",
71
+ "59": "VERB+SCONJ",
72
+ "60": "X"
73
  },
74
  "initializer_range": 0.02,
75
  "intermediate_size": 4096,
133
  "SCONJ": 56,
134
  "SYM": 57,
135
  "VERB": 58,
136
+ "VERB+SCONJ": 59,
137
+ "X": 60
138
  },
139
  "layer_norm_eps": 1e-07,
140
  "max_position_embeddings": 512,
146
  "pooler_dropout": 0,
147
  "pooler_hidden_act": "gelu",
148
  "pooler_hidden_size": 1024,
149
+ "pos_att_type": [
150
+ "p2c",
151
+ "c2p"
152
+ ],
153
+ "position_biased_input": false,
154
+ "relative_attention": true,
155
  "task_specific_params": {
156
  "upos_multiword": {
157
  "ADJ+AUX": {
396
  },
397
  "tokenizer_class": "DebertaV2TokenizerFast",
398
  "torch_dtype": "float32",
399
+ "transformers_version": "4.22.1",
400
  "type_vocab_size": 0,
401
  "vocab_size": 32000
402
  }
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:6e9c374541f717f9eabb6064a95a89766bb56a75bb226f7c8d4f45453338f77b
3
- size 1342797683
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:af614bebfbf0fab5e29ed2172e9031435558bde227326e7c9171ed33c2731b6f
3
+ size 1546455731
special_tokens_map.json CHANGED
@@ -1 +1,9 @@
1
- {"bos_token": "[CLS]", "eos_token": "[SEP]", "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "[CLS]",
3
+ "cls_token": "[CLS]",
4
+ "eos_token": "[SEP]",
5
+ "mask_token": "[MASK]",
6
+ "pad_token": "[PAD]",
7
+ "sep_token": "[SEP]",
8
+ "unk_token": "[UNK]"
9
+ }
supar.model CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:08261816ec7da11116a145547b46d89ea5e58b8fbcb3732cb99613ac6f38bc53
3
- size 1391495275
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:82fd730d5d73950432dd8134a490dde302913700cb885148ae9f68f674d3b4b6
3
+ size 1595136555
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
tokenizer_config.json CHANGED
@@ -1 +1,14 @@
1
- {"do_lower_case": false, "bos_token": "[CLS]", "eos_token": "[SEP]", "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "split_by_punct": true, "keep_accents": true, "model_max_length": 512, "tokenizer_class": "DebertaV2TokenizerFast"}
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "[CLS]",
3
+ "cls_token": "[CLS]",
4
+ "do_lower_case": false,
5
+ "eos_token": "[SEP]",
6
+ "keep_accents": true,
7
+ "mask_token": "[MASK]",
8
+ "model_max_length": 512,
9
+ "pad_token": "[PAD]",
10
+ "sep_token": "[SEP]",
11
+ "split_by_punct": true,
12
+ "tokenizer_class": "DebertaV2TokenizerFast",
13
+ "unk_token": "[UNK]"
14
+ }