from datasets import load_dataset
from tokenizers import ByteLevelBPETokenizer
|
|
# Output directory for the trained tokenizer.
model_dir = "./"
|
|
# Load the training corpus: the Norwegian Colossal Corpus validation split
# plus a small file of special characters, both as JSON files with a "text" field.
dataset = load_dataset(
    "json",
    data_files=[
        "/mnt/disks/flaxdisk/corpus/norwegian_colossal_corpus_validation.json",
        "/mnt/disks/flaxdisk/corpus/special_chars.json",
    ],
    split="train",
)
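
# Illustrative addition (not part of the original script): confirm the
# expected "text" column is present before training starts.
assert "text" in dataset.column_names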
|
|
# Instantiate a byte-level BPE tokenizer (the scheme used by RoBERTa and GPT-2).
tokenizer = ByteLevelBPETokenizer()
|
|
def batch_iterator(batch_size=1000):
    # Yield the corpus in chunks so the full text column never has to be
    # materialized in memory at once.
    for i in range(0, len(dataset), batch_size):
        yield dataset[i : i + batch_size]["text"]
|
|
# Train the tokenizer from the streaming iterator. vocab_size=50265 matches
# RoBERTa's vocabulary size; pairs must occur at least twice to be merged.
tokenizer.train_from_iterator(
    batch_iterator(),
    vocab_size=50265,
    min_frequency=2,
    special_tokens=[
        "<s>",
        "<pad>",
        "</s>",
        "<unk>",
        "<mask>",
    ],
)
|
|
# Save the trained tokenizer to disk as a single JSON file.
tokenizer.save(f"{model_dir}/tokenizer.json")
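
# Optional sanity check (an illustrative addition, not part of the original
# script): reload the saved file and encode a sample sentence. The Norwegian
# example text is made up for demonstration.
from tokenizers import Tokenizer

reloaded = Tokenizer.from_file(f"{model_dir}/tokenizer.json")
print(reloaded.encode("Dette er en norsk setning.").tokens)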
|
|