---
# WordPiece tokenizer preprocessor configuration.
name: wordpiece_tokenizer
config_type: preprocessor

# Sequence-length handling: truncate/pad behavior applied to encoded batches.
max_length: 512
truncation: longest_first
truncation_side: right
stride: 0
padding: longest
padding_side: right
pad_to_multiple_of: 0  # 0 presumably disables multiple-of padding — confirm with consumer
pad_token_type_id: 0

# Special tokens. Quoting is required: '[' would otherwise open a flow
# sequence and '#' would start a comment.
unk_token: '[UNK]'
sep_token: '[SEP]'
pad_token: '[PAD]'
cls_token: '[CLS]'
mask_token: '[MASK]'
wordpieces_prefix: '##'

# Vocabulary / trainer parameters.
vocab_size: 30000
min_frequency: 2
limit_alphabet: 1000
initial_alphabet: []  # explicit empty list (bare key would parse as null)
show_progress: true