KoichiYasuoka committed on
Commit
eb97827
1 Parent(s): ef7ff3a

initial release

README.md CHANGED
@@ -1,3 +1,40 @@
- ---
- license: cc-by-sa-4.0
- ---
+ ---
+ language:
+ - "be"
+ tags:
+ - "belarusian"
+ - "token-classification"
+ - "pos"
+ - "dependency-parsing"
+ datasets:
+ - "universal_dependencies"
+ license: "cc-by-sa-4.0"
+ pipeline_tag: "token-classification"
+ ---
+
+ # roberta-small-belarusian-upos
+
+ ## Model Description
+
+ This is a RoBERTa model pre-trained on [UD_Belarusian](https://universaldependencies.org/be/) for POS-tagging and dependency-parsing, derived from [roberta-small-belarusian](https://huggingface.co/KoichiYasuoka/roberta-small-belarusian). Every word is tagged with its [UPOS](https://universaldependencies.org/u/pos/) (Universal Part-Of-Speech) tag.
+
+ ## How to Use
+
+ ```py
+ import torch
+ from transformers import AutoTokenizer, AutoModelForTokenClassification
+ tokenizer = AutoTokenizer.from_pretrained("KoichiYasuoka/roberta-small-belarusian-upos")
+ model = AutoModelForTokenClassification.from_pretrained("KoichiYasuoka/roberta-small-belarusian-upos")
+ ```
+
+ or
+
+ ```py
+ import esupar
+ nlp = esupar.load("KoichiYasuoka/roberta-small-belarusian-upos")
+ ```
+
+ ## See Also
+
+ [esupar](https://github.com/KoichiYasuoka/esupar): Tokenizer, POS-tagger, and Dependency-parser with BERT/RoBERTa models
+
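The transformers snippet in the README above only loads the tokenizer and model. As a hedged illustration (not part of this commit; the sentence is an arbitrary Belarusian example), inference could look like the sketch below, mapping each predicted label id back to its UPOS tag through `id2label` from `config.json`:

```py
# Minimal inference sketch (illustrative, not part of the commit).
import torch
from transformers import AutoTokenizer, AutoModelForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("KoichiYasuoka/roberta-small-belarusian-upos")
model = AutoModelForTokenClassification.from_pretrained("KoichiYasuoka/roberta-small-belarusian-upos")

text = "Прывітанне, свет"  # arbitrary example sentence
enc = tokenizer(text, return_tensors="pt")
with torch.no_grad():
    logits = model(**enc).logits               # shape: (1, seq_len, num_labels)
pred = logits.argmax(dim=-1)[0].tolist()        # best label id per subword token
tokens = tokenizer.convert_ids_to_tokens(enc["input_ids"][0].tolist())
for token, label_id in zip(tokens, pred):
    print(token, model.config.id2label[label_id])  # UPOS or B-/I- prefixed tag
```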
config.json ADDED
@@ -0,0 +1,133 @@
+ {
+   "architectures": [
+     "RobertaForTokenClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 2,
+   "classifier_dropout": null,
+   "eos_token_id": 3,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 256,
+   "id2label": {
+     "0": "ADJ",
+     "1": "ADP",
+     "2": "ADV",
+     "3": "AUX",
+     "4": "B-ADJ",
+     "5": "B-ADP",
+     "6": "B-ADV",
+     "7": "B-AUX",
+     "8": "B-CCONJ",
+     "9": "B-DET",
+     "10": "B-INTJ",
+     "11": "B-NOUN",
+     "12": "B-NUM",
+     "13": "B-PART",
+     "14": "B-PRON",
+     "15": "B-PROPN",
+     "16": "B-PUNCT",
+     "17": "B-SCONJ",
+     "18": "B-SYM",
+     "19": "B-VERB",
+     "20": "B-X",
+     "21": "CCONJ",
+     "22": "DET",
+     "23": "I-ADJ",
+     "24": "I-ADP",
+     "25": "I-ADV",
+     "26": "I-AUX",
+     "27": "I-CCONJ",
+     "28": "I-DET",
+     "29": "I-INTJ",
+     "30": "I-NOUN",
+     "31": "I-NUM",
+     "32": "I-PART",
+     "33": "I-PRON",
+     "34": "I-PROPN",
+     "35": "I-PUNCT",
+     "36": "I-SCONJ",
+     "37": "I-SYM",
+     "38": "I-VERB",
+     "39": "I-X",
+     "40": "INTJ",
+     "41": "NOUN",
+     "42": "NUM",
+     "43": "PART",
+     "44": "PRON",
+     "45": "PROPN",
+     "46": "PUNCT",
+     "47": "SCONJ",
+     "48": "SYM",
+     "49": "VERB",
+     "50": "X"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 768,
+   "label2id": {
+     "ADJ": 0,
+     "ADP": 1,
+     "ADV": 2,
+     "AUX": 3,
+     "B-ADJ": 4,
+     "B-ADP": 5,
+     "B-ADV": 6,
+     "B-AUX": 7,
+     "B-CCONJ": 8,
+     "B-DET": 9,
+     "B-INTJ": 10,
+     "B-NOUN": 11,
+     "B-NUM": 12,
+     "B-PART": 13,
+     "B-PRON": 14,
+     "B-PROPN": 15,
+     "B-PUNCT": 16,
+     "B-SCONJ": 17,
+     "B-SYM": 18,
+     "B-VERB": 19,
+     "B-X": 20,
+     "CCONJ": 21,
+     "DET": 22,
+     "I-ADJ": 23,
+     "I-ADP": 24,
+     "I-ADV": 25,
+     "I-AUX": 26,
+     "I-CCONJ": 27,
+     "I-DET": 28,
+     "I-INTJ": 29,
+     "I-NOUN": 30,
+     "I-NUM": 31,
+     "I-PART": 32,
+     "I-PRON": 33,
+     "I-PROPN": 34,
+     "I-PUNCT": 35,
+     "I-SCONJ": 36,
+     "I-SYM": 37,
+     "I-VERB": 38,
+     "I-X": 39,
+     "INTJ": 40,
+     "NOUN": 41,
+     "NUM": 42,
+     "PART": 43,
+     "PRON": 44,
+     "PROPN": 45,
+     "PUNCT": 46,
+     "SCONJ": 47,
+     "SYM": 48,
+     "VERB": 49,
+     "X": 50
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 128,
+   "model_type": "roberta",
+   "num_attention_heads": 4,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "tokenizer_class": "BertTokenizerFast",
+   "torch_dtype": "float32",
+   "transformers_version": "4.17.0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30000
+ }
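In the `id2label` map above, plain UPOS tags (e.g. `NOUN`) label words covered by a single subword token, while `B-`/`I-` prefixed tags mark the first and following pieces of words split across several subword tokens. A hedged sketch of collapsing such a per-token label sequence back to one UPOS tag per word (the helper name is illustrative, not from the repository):

```py
# Illustrative helper (not from the repository): collapse B-/I- subword
# labels into one UPOS tag per word; plain labels are kept as-is.
def merge_bi_labels(labels):
    word_tags = []
    for label in labels:
        if label.startswith("I-") and word_tags:
            continue  # continuation of the current word, already tagged
        word_tags.append(label[2:] if label.startswith(("B-", "I-")) else label)
    return word_tags

print(merge_bi_labels(["B-NOUN", "I-NOUN", "VERB", "PUNCT"]))
# ['NOUN', 'VERB', 'PUNCT']
```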
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:34bc0dc5b64ff4fda738470268c2dc17f9267a6b58e14d260abcf8d68ef914dc
+ size 62598033
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
supar.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c70647136e8bce1ad7f73a3a1624f1046a1834a6a85bbef5c464e73eb1337f56
+ size 117684157
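`supar.model` holds the dependency-parser weights used by the esupar loading snippet in the README. A hedged usage sketch (the sentence is an arbitrary example; the printed result should be a CoNLL-U-style table with UPOS tags and dependency heads):

```py
# Hedged usage sketch for the esupar path shown in the README.
import esupar

nlp = esupar.load("KoichiYasuoka/roberta-small-belarusian-upos")
doc = nlp("Прывітанне, свет")  # arbitrary example sentence
print(doc)                     # expected: CoNLL-U-style tokens, UPOS tags, heads
```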
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": false, "do_lowercase": false, "never_split": ["[CLS]", "[PAD]", "[SEP]", "[UNK]", "[MASK]"], "do_basic_tokenize": true, "model_max_length": 128, "tokenizer_class": "BertTokenizerFast"}
vocab.txt ADDED
The diff for this file is too large to render. See raw diff