---
license: cc-by-nc-sa-4.0
datasets:
- wikipedia
- cc100
language:
- ja
library_name: transformers
pipeline_tag: fill-mask
---

BERT-base (MeCab + WordPiece)
===

## How to load the tokenizer
Please download the dictionary file for MeCab + WordPiece from [our GitHub repository](https://github.com/hitachi-nlp/compare-ja-tokenizer/blob/public/data/dict/mecab_wordpiece.json).
You can then load the tokenizer by passing the path to the dictionary file as `dict_path`.

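If you prefer to fetch the dictionary file from a script, here is a minimal sketch using only Python's standard library; the URL is the assumed raw-file counterpart of the repository link above.

```python
from urllib.request import urlretrieve

# Assumed raw-file URL for the GitHub blob link above.
url = ("https://raw.githubusercontent.com/hitachi-nlp/"
       "compare-ja-tokenizer/public/data/dict/mecab_wordpiece.json")
urlretrieve(url, "mecab_wordpiece.json")
```
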
```python
from typing import Optional

from tokenizers import Tokenizer, NormalizedString, PreTokenizedString
from tokenizers.processors import BertProcessing
from tokenizers.pre_tokenizers import PreTokenizer
from transformers import PreTrainedTokenizerFast

from MeCab import Tagger
import textspan

class MecabPreTokenizer:
    def __init__(self, mecab_dict_path: Optional[str] = None):
        # Run MeCab in wakati (word-segmentation) mode.
        mecab_option = (f"-Owakati -d {mecab_dict_path}"
                        if mecab_dict_path is not None
                        else "-Owakati")
        self.mecab = Tagger(mecab_option)

    def tokenize(self, sequence: str) -> list[str]:
        return self.mecab.parse(sequence).strip().split(" ")

    def custom_split(self, i: int, normalized_string: NormalizedString) -> list[NormalizedString]:
        # Segment with MeCab, then map each token back to its character
        # span in the original string.
        text = str(normalized_string)
        tokens = self.tokenize(text)
        tokens_spans = textspan.get_original_spans(tokens, text)
        return [normalized_string[st:ed] for char_spans in tokens_spans for st, ed in char_spans]

    def pre_tokenize(self, pretok: PreTokenizedString):
        pretok.split(self.custom_split)

# load the tokenizer from the dictionary file
dict_path = "/path/to/mecab_wordpiece.json"
tokenizer = Tokenizer.from_file(dict_path)
# instantiate the MeCab pre-tokenizer
pre_tokenizer = MecabPreTokenizer()
# add [CLS]/[SEP] post-processing
tokenizer.post_processor = BertProcessing(
    cls=("[CLS]", tokenizer.token_to_id('[CLS]')),
    sep=("[SEP]", tokenizer.token_to_id('[SEP]'))
)
# convert to PreTrainedTokenizerFast
tokenizer = PreTrainedTokenizerFast(
    tokenizer_object=tokenizer,
    unk_token='[UNK]',
    cls_token='[CLS]',
    sep_token='[SEP]',
    pad_token='[PAD]',
    mask_token='[MASK]'
)
# set the custom pre-tokenizer
tokenizer._tokenizer.pre_tokenizer = PreTokenizer.custom(pre_tokenizer)
```

```python
# Test: "Hello. I am researching morphological analyzers."
test_str = "γ“γ‚“γ«γ‘γ―γ€‚η§γ―ε½’ζ…‹η΄ θ§£ζžε™¨γ«γ€γ„γ¦η ”η©Άγ‚’γ—γ¦γ„γΎγ™γ€‚"
tokenizer.convert_ids_to_tokens(tokenizer(test_str).input_ids)
# -> ['[CLS]', 'こ', '##γ‚“', '##に', '##け', '##は', '。', '私', 'は', 'ε½’ζ…‹', '##η΄ ', '解', '##析', '器', 'に぀いて', 'η ”η©Ά', 'γ‚’', 'し', 'て', 'い', 'ます', '。', '[SEP]']
```

## How to load the model
```python
from transformers import AutoModelForMaskedLM

model = AutoModelForMaskedLM.from_pretrained("hitachi-nlp/bert-base_mecab-wordpiece")
```
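
As an end-to-end check, here is a minimal fill-mask sketch that combines the model with the tokenizer built in the previous section. The example sentence and variable names are our own, the predicted tokens depend on the checkpoint, and we assume `[MASK]` is registered as a special token in the dictionary file so that it is split off before MeCab pre-tokenization.

```python
import torch

# Mask one word in a sentence (hypothetical example).
masked_str = "η§γ―[MASK]γ«γ€γ„γ¦η ”η©Άγ‚’γ—γ¦γ„γΎγ™γ€‚"
inputs = tokenizer(masked_str, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

# Take the highest-scoring vocabulary entry at each masked position.
mask_pos = (inputs.input_ids[0] == tokenizer.mask_token_id).nonzero(as_tuple=True)[0]
pred_ids = logits[0, mask_pos].argmax(dim=-1)
print(tokenizer.convert_ids_to_tokens(pred_ids.tolist()))
```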

**See [our repository](https://github.com/hitachi-nlp/compare-ja-tokenizer) for more details!**