from datasets import load_from_disk
from tokenizers import trainers, Tokenizer, normalizers, ByteLevelBPETokenizer

# load dataset (training split of the preprocessed LM dataset)
dataset = load_from_disk("/researchdisk/lm_training_dataset_full")["train"]

# Instantiate tokenizer (byte-level BPE, RoBERTa-style)
tokenizer = ByteLevelBPETokenizer()

def batch_iterator(batch_size=5000):
    # yield the raw text in batches so the whole dataset never has to fit in memory
    for i in range(0, len(dataset), batch_size):
        yield dataset[i : i + batch_size]["text"]

# Customized training
tokenizer.train_from_iterator(
    batch_iterator(),
    vocab_size=50265,
    min_frequency=2,
    special_tokens=[
        "<s>",
        "<pad>",
        "</s>",
        "<unk>",
        "<mask>",
    ],
)

# Save files to disk
tokenizer.save("./tokenizer.json")
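
# Optional sanity check (a minimal sketch; the example sentence is only
# illustrative): reload the saved tokenizer.json and encode a sample string
# to confirm the trained vocabulary and special tokens were written correctly.
reloaded = Tokenizer.from_file("./tokenizer.json")
print(reloaded.encode("This is an example sentence.").tokens)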