KoichiYasuoka committed
Commit 6c98742
1 Parent(s): f490b32

initial release

README.md ADDED
@@ -0,0 +1,44 @@
+ ---
+ language:
+ - "vi"
+ tags:
+ - "vietnamese"
+ - "token-classification"
+ - "pos"
+ - "dependency-parsing"
+ datasets:
+ - "universal_dependencies"
+ license: "cc-by-sa-4.0"
+ pipeline_tag: "token-classification"
+ widget:
+ - text: "Hai cái đầu thì tốt hơn một."
+ ---
+
+ # roberta-base-vietnamese-upos
+
+ ## Model Description
+
+ This is a RoBERTa model pre-trained on Vietnamese texts for POS-tagging and dependency-parsing, derived from [roberta-base-vietnamese](https://huggingface.co/KoichiYasuoka/roberta-base-vietnamese). Every word is tagged with its [UPOS](https://universaldependencies.org/u/pos/) (Universal Part-Of-Speech) tag.
+
+ ## How to Use
+
+ ```py
+ from transformers import AutoTokenizer, AutoModelForTokenClassification, TokenClassificationPipeline
+ tokenizer = AutoTokenizer.from_pretrained("KoichiYasuoka/roberta-base-vietnamese-upos")
+ model = AutoModelForTokenClassification.from_pretrained("KoichiYasuoka/roberta-base-vietnamese-upos")
+ pipeline = TokenClassificationPipeline(tokenizer=tokenizer, model=model, aggregation_strategy="simple")
+ nlp = lambda x: [(x[t["start"]:t["end"]], t["entity_group"]) for t in pipeline(x)]
+ print(nlp("Hai cái đầu thì tốt hơn một."))
+ ```
+
+ or
+
+ ```py
+ import esupar
+ nlp = esupar.load("KoichiYasuoka/roberta-base-vietnamese-upos")
+ print(nlp("Hai cái đầu thì tốt hơn một."))
+ ```
+
+ ## See Also
+
+ [esupar](https://github.com/KoichiYasuoka/esupar): Tokenizer, POS-tagger and Dependency-parser with BERT/RoBERTa/DeBERTa models
config.json ADDED
@@ -0,0 +1,125 @@
+ {
+   "architectures": [
+     "RobertaForTokenClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "ADJ",
+     "1": "ADP",
+     "2": "ADV",
+     "3": "AUX",
+     "4": "B-ADJ",
+     "5": "B-ADP",
+     "6": "B-ADV",
+     "7": "B-AUX",
+     "8": "B-DET",
+     "9": "B-INTJ",
+     "10": "B-NOUN",
+     "11": "B-NUM",
+     "12": "B-PART",
+     "13": "B-PRON",
+     "14": "B-PROPN",
+     "15": "B-PUNCT",
+     "16": "B-SCONJ",
+     "17": "B-VERB",
+     "18": "B-X",
+     "19": "CCONJ",
+     "20": "DET",
+     "21": "I-ADJ",
+     "22": "I-ADP",
+     "23": "I-ADV",
+     "24": "I-AUX",
+     "25": "I-DET",
+     "26": "I-INTJ",
+     "27": "I-NOUN",
+     "28": "I-NUM",
+     "29": "I-PART",
+     "30": "I-PRON",
+     "31": "I-PROPN",
+     "32": "I-PUNCT",
+     "33": "I-SCONJ",
+     "34": "I-VERB",
+     "35": "I-X",
+     "36": "INTJ",
+     "37": "NOUN",
+     "38": "NUM",
+     "39": "PART",
+     "40": "PRON",
+     "41": "PROPN",
+     "42": "PUNCT",
+     "43": "SCONJ",
+     "44": "SYM",
+     "45": "VERB",
+     "46": "X"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "ADJ": 0,
+     "ADP": 1,
+     "ADV": 2,
+     "AUX": 3,
+     "B-ADJ": 4,
+     "B-ADP": 5,
+     "B-ADV": 6,
+     "B-AUX": 7,
+     "B-DET": 8,
+     "B-INTJ": 9,
+     "B-NOUN": 10,
+     "B-NUM": 11,
+     "B-PART": 12,
+     "B-PRON": 13,
+     "B-PROPN": 14,
+     "B-PUNCT": 15,
+     "B-SCONJ": 16,
+     "B-VERB": 17,
+     "B-X": 18,
+     "CCONJ": 19,
+     "DET": 20,
+     "I-ADJ": 21,
+     "I-ADP": 22,
+     "I-ADV": 23,
+     "I-AUX": 24,
+     "I-DET": 25,
+     "I-INTJ": 26,
+     "I-NOUN": 27,
+     "I-NUM": 28,
+     "I-PART": 29,
+     "I-PRON": 30,
+     "I-PROPN": 31,
+     "I-PUNCT": 32,
+     "I-SCONJ": 33,
+     "I-VERB": 34,
+     "I-X": 35,
+     "INTJ": 36,
+     "NOUN": 37,
+     "NUM": 38,
+     "PART": 39,
+     "PRON": 40,
+     "PROPN": 41,
+     "PUNCT": 42,
+     "SCONJ": 43,
+     "SYM": 44,
+     "VERB": 45,
+     "X": 46
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "tokenizer_class": "BertTokenizerFast",
+   "torch_dtype": "float32",
+   "transformers_version": "4.22.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 32000
+ }
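The label inventory above pairs each bare UPOS tag with `B-`/`I-` variants marking the first and subsequent pieces of a word that the tokenizer splits apart, which is what lets the pipeline's `aggregation_strategy="simple"` reassemble whole words. A minimal sketch (not part of the repository, assuming only `transformers` is installed) that inspects this mapping:

```py
from transformers import AutoConfig

config = AutoConfig.from_pretrained("KoichiYasuoka/roberta-base-vietnamese-upos")
# 47 labels in total: 17 bare UPOS tags plus their B-/I- variants
print(len(config.id2label))
bare = sorted(l for l in config.label2id if not l.startswith(("B-", "I-")))
print(bare)  # ['ADJ', 'ADP', 'ADV', 'AUX', 'CCONJ', ..., 'VERB', 'X']
```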
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7c89687de3aa86007f8fd46be8cf7dbf71e23f757d2bda03bb861ffc3bd83e5
+ size 440322545
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
supar.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:614afd98426748985a7cdfd6291d5edc0c19630f9fdd4129225f32c0349d99da
+ size 490825509
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": true,
+   "do_lowercase": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "never_split": [
+     "[CLS]",
+     "[PAD]",
+     "[SEP]",
+     "[UNK]",
+     "[MASK]"
+   ],
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": false,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizerFast",
+   "unk_token": "[UNK]"
+ }
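Although the model itself is a RoBERTa architecture, `tokenizer_class` is `BertTokenizerFast`, with lowercasing enabled and `strip_accents` disabled so that Vietnamese diacritics survive intact. A quick sanity check (a sketch, assuming only `transformers` is installed):

```py
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("KoichiYasuoka/roberta-base-vietnamese-upos")
print(type(tokenizer).__name__)    # BertTokenizerFast
print(tokenizer.model_max_length)  # 512
# Input is lowercased, but diacritics should be preserved:
print(tokenizer.tokenize("Hai cái đầu thì tốt hơn một."))
```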
vocab.txt ADDED
The diff for this file is too large to render. See raw diff