Commit ada97aa by dustalov
Parent: c9d9e0d

Upload wikitext-wordlevel.py

Files changed (1):
  wikitext-wordlevel.py (+44, -0)
wikitext-wordlevel.py ADDED
@@ -0,0 +1,44 @@
+ #!/usr/bin/env python3
+
+ import argparse
+ from collections.abc import Iterator
+
+ from datasets import load_dataset
+ from tokenizers import Tokenizer
+ from tokenizers.models import WordLevel
+ from tokenizers.normalizers import Sequence, NFC, Strip, Lowercase
+ from tokenizers.pre_tokenizers import Whitespace
+ from tokenizers.trainers import WordLevelTrainer
+ from tqdm.auto import tqdm
+
+
+ def main() -> None:
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--vocabulary', type=int, default=75000, help='Vocabulary size')
+     parser.add_argument('--batch', type=int, default=1024, help='Batch size')
+     args = parser.parse_args()
+
+     # Train on the full WikiText-103 corpus: train, validation, and test splits combined.
+     dataset = load_dataset('wikitext', 'wikitext-103-raw-v1', split='train+validation+test')
+
+     # Word-level model: each whitespace-delimited token is a single vocabulary entry,
+     # with NFC normalization, stripping, and lowercasing applied first.
+     tokenizer = Tokenizer(WordLevel(unk_token='<unk>'))
+     tokenizer.normalizer = Sequence([NFC(), Strip(), Lowercase()])
+     tokenizer.pre_tokenizer = Whitespace()
+
+     # Stream the corpus in batches so the whole text never sits in memory at once.
+     def batches(batch_size: int) -> Iterator[list[str]]:
+         for batch in tqdm(dataset.iter(batch_size=batch_size), desc='Tokenization'):
+             yield batch['text']
+
+     trainer = WordLevelTrainer(vocab_size=args.vocabulary,
+                                special_tokens=['<s>', '</s>', '<unk>'])
+
+     tokenizer.train_from_iterator(batches(args.batch), trainer=trainer, length=len(dataset))
+
+     tokenizer.save('tokenizer.json', pretty=True)
+
+
+ if __name__ == '__main__':
+     main()
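
Once the script finishes (e.g. python wikitext-wordlevel.py --vocabulary 75000 --batch 1024; both flags are optional and shown here with their defaults), the serialized tokenizer.json can be loaded back with the same tokenizers library. A minimal sketch; the sample sentence is illustrative:

    from tokenizers import Tokenizer

    # Load the word-level tokenizer produced by wikitext-wordlevel.py.
    tokenizer = Tokenizer.from_file('tokenizer.json')

    # Normalization (NFC, strip, lowercase) and whitespace pre-tokenization
    # are applied automatically before lookup in the trained vocabulary.
    encoding = tokenizer.encode('The quick brown fox jumps over the lazy dog.')
    print(encoding.tokens)  # word-level tokens, e.g. ['the', 'quick', ...]
    print(encoding.ids)     # vocabulary IDs; out-of-vocabulary words map to '<unk>'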