from datasets import load_dataset
from tokenizers import ByteLevelBPETokenizer
from transformers import AutoConfig
from pythainlp.tokenize import word_tokenize

# Re-use the roberta-base architecture and hyperparameters for the Thai model,
# and write the config into the output directory for later pretraining.
language = "th"
model_config = "roberta-base"
model_dir = model_config + f"-pretrained-{language}"
config = AutoConfig.from_pretrained(model_config)
config.save_pretrained(model_dir)

# Load the dataset.
# Only the train split is needed for training the tokenizer.
raw_dataset = load_dataset(
    "oscar", f"unshuffled_deduplicated_{language}", split="train"
)
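# Optional sanity check (not part of the original script): confirm the dataset
# loaded correctly before committing to the long tokenizer-training run.
print(raw_dataset)                    # row count and column names
print(raw_dataset[0]["text"][:200])   # first 200 characters of the first document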

# Instantiate a byte-level BPE tokenizer (the same scheme RoBERTa uses)
tokenizer = ByteLevelBPETokenizer()

# Pre-segment Thai text into space-joined words with PyThaiNLP's newmm engine so
# the BPE tokenizer sees word boundaries. For the Thai NLP library's tokenize API,
# see https://pythainlp.github.io/docs/2.3/api/tokenize.html
def th_tokenize(text):
    result = " ".join(word_tokenize(text, engine="newmm", keep_whitespace=False))
    return result
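# A quick look at the pre-tokenizer (not part of the original script): print the
# space-joined segmentation of a sample sentence; the exact split depends on
# PyThaiNLP's newmm dictionary.
print(th_tokenize("ภาษาไทยเป็นภาษาราชการของประเทศไทย"))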


def batch_iterator(batch_size=10000):
    # Yield pre-segmented texts in batches so the whole corpus never has to be
    # materialized in memory at once.
    for i in range(0, len(raw_dataset), batch_size):
        yield [th_tokenize(text) for text in raw_dataset[i : i + batch_size]["text"]]


# Train the tokenizer on the pre-segmented corpus.
# vocab_size=50265 matches roberta-base, so the saved config needs no changes.
tokenizer.train_from_iterator(
    batch_iterator(),
    vocab_size=50265,
    min_frequency=2,
    special_tokens=["<s>", "<pad>", "</s>", "<unk>", "<mask>"],
)

# Save the trained tokenizer to disk
tokenizer.save("./tokenizer.json")
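# A minimal reload sketch (not part of the original script, and assuming the
# generic fast-tokenizer wrapper from transformers): load the trained tokenizer
# back and tokenize a sample sentence to verify the vocabulary and special tokens.
from transformers import PreTrainedTokenizerFast

reloaded = PreTrainedTokenizerFast(
    tokenizer_file="./tokenizer.json",
    bos_token="<s>",
    eos_token="</s>",
    unk_token="<unk>",
    pad_token="<pad>",
    mask_token="<mask>",
)
print(reloaded.tokenize(th_tokenize("ภาษาไทย")))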