teragron committed
Commit
051df9a
1 Parent(s): e49d9f3

Create tinystories_turkish_char.py

Files changed (1)
  1. tinystories_turkish_char.py +165 -0
tinystories_turkish_char.py ADDED
@@ -0,0 +1,165 @@
import argparse
import glob
import json
import os
import random
from typing import List
from concurrent.futures import ProcessPoolExecutor
from functools import partial

import numpy as np
import requests
import sentencepiece as spm
import torch
import torch.distributed as dist
from tqdm import tqdm

from tokenizer import Tokenizer

DATA_CACHE_DIR = "data"

def process_shard(args, vocab_size):
    shard_id, shard = args
    tokenizer_model = get_tokenizer_model_path(vocab_size)
    enc = Tokenizer(tokenizer_model)

    try:
        print(f"Processing shard {shard_id} - {shard}")

        with open(shard, "r", encoding="utf-8") as f:
            data = json.load(f)

        all_tokens = []
        for example in tqdm(data, position=shard_id):
            text = example["poet"] + ":" + example["poem"]
            text = text.strip()  # get rid of leading/trailing whitespace
            tokens = enc.encode(text, bos=True, eos=False)  # encode the text, use BOS
            all_tokens.extend(tokens)

        # convert to uint16 nparray
        all_tokens = np.array(all_tokens, dtype=np.uint16)

        if vocab_size == 0:
            # if we're using Llama 2, just save the tokenized file in the same dir
            tokenized_filename = shard.replace(".json", ".bin")
        else:
            # save .bin files into a new tok{N} directory
            bin_dir = os.path.join(DATA_CACHE_DIR, f"tok{vocab_size}")
            shard_basename = os.path.basename(shard)
            bin_basename = shard_basename.replace(".json", ".bin")
            tokenized_filename = os.path.join(bin_dir, bin_basename)

        # write the bytes
        with open(tokenized_filename, "wb") as f:
            f.write(all_tokens.tobytes())

        # calculate the average sequence length (they are separated by BOS=1)
        avg_seq_len = all_tokens.size / ((all_tokens == 1).sum())
        print(f"Saved {tokenized_filename}, average seqlen: {avg_seq_len:.2f}")

    except Exception as e:
        print(f"Error processing shard {shard_id}: {str(e)}")

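# For reference, each JSON shard is assumed to be a list of objects carrying
# "poet" and "poem" keys (inferred from the loop above, not documented in this
# commit). A minimal hand-made shard would look like:
#
#   [
#     {"poet": "Some Poet", "poem": "first line\nsecond line"},
#     {"poet": "Another Poet", "poem": "..."}
#   ]
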
def pretokenize(vocab_size):
    # iterate the shards and tokenize all of them one by one
    data_dir = os.path.join(DATA_CACHE_DIR, "TinyStories_all_data")
    shard_filenames = sorted(glob.glob(os.path.join(data_dir, "*.json")))

    if vocab_size > 0:
        # .bin files will be saved into tok{N} directory, create it once here
        bin_dir = os.path.join(DATA_CACHE_DIR, f"tok{vocab_size}")
        os.makedirs(bin_dir, exist_ok=True)

    # process all the shards in a process pool
    fun = partial(process_shard, vocab_size=vocab_size)
    with ProcessPoolExecutor() as executor:
        executor.map(fun, enumerate(shard_filenames))

    print("Done.")

# Call pretokenize with your desired vocab_size
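# A minimal usage sketch (illustrative; this commit ships no tokenizer-training
# step, so a custom vocab assumes a sentencepiece model already exists at
# data/tok{N}.model):
#
#   pretokenize(vocab_size=4096)  # assumes data/tok4096.model is present
#   pretokenize(vocab_size=0)     # vocab_size=0 selects the default Llama 2 tokenizer
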
class PretokDataset(torch.utils.data.IterableDataset):
    """Loads pretokenized examples from disk and yields them as PyTorch tensors."""

    def __init__(self, split, max_seq_len, vocab_size, vocab_source):
        super().__init__()
        self.split = split
        self.max_seq_len = max_seq_len
        self.vocab_size = vocab_size
        self.vocab_source = vocab_source

    def __iter__(self):
        # get worker info within a DataLoader
        worker_info = torch.utils.data.get_worker_info()
        worker_id = worker_info.id if worker_info else 0
        # get DDP rank info
        rank = dist.get_rank() if dist.is_initialized() else 0
        # combine the worker_id and worker_rank to create a unique seed for rng
        seed = 42 + worker_id + 1337 * rank
        rng = random.Random(seed)
        print(f"Created a PretokDataset with rng seed {seed}")
        if self.vocab_source == "llama2":
            # the .bin files are right along the .json files
            bin_dir = os.path.join(DATA_CACHE_DIR, "TinyStories_all_data")
            shard_filenames = sorted(glob.glob(os.path.join(bin_dir, "*.bin")))
        elif self.vocab_source == "custom":
            # the .bin files are in tok{N} directory
            bin_dir = os.path.join(DATA_CACHE_DIR, f"tok{self.vocab_size}")
            shard_filenames = sorted(glob.glob(os.path.join(bin_dir, "*.bin")))
        # train/test split. let's use only shard 0 for test split, rest train
        shard_filenames = shard_filenames[1:] if self.split == "train" else shard_filenames[:1]
        assert len(shard_filenames) > 0, f"No bin files found in {bin_dir}"
        while True:
            rng.shuffle(shard_filenames)
            for shard in shard_filenames:
                # open the dataset for reading but keep it on disk with memmap
                m = np.memmap(shard, dtype=np.uint16, mode="r")
                num_batches = len(m) // self.max_seq_len
                num_batches -= 1  # drop the last partial batch
                assert num_batches > 0, "this shard is way too small? investigate."
                ixs = list(range(num_batches))
                rng.shuffle(ixs)
                for ix in ixs:
                    start = ix * self.max_seq_len
                    end = start + self.max_seq_len + 1
                    # calling .astype will copy the data into a new numpy array, now in RAM
                    chunk = torch.from_numpy((m[start:end]).astype(np.int64))
                    x = chunk[:-1]
                    y = chunk[1:]
                    yield x, y

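# A small sanity-check sketch (hypothetical values, assuming the .bin shards
# produced by pretokenize() above already exist on disk):
#
#   ds = PretokDataset(split="train", max_seq_len=256, vocab_size=0, vocab_source="llama2")
#   x, y = next(iter(ds))
#   assert torch.equal(x[1:], y[:-1])  # y is x shifted by one: next-token targets
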
# -----------------------------------------------------------------------------
# public interface functions

def get_tokenizer_model_path(vocab_size):
    """
    Returns the path to the sentencepiece tokenizer model for a given vocab size.
    vocab_size = 0 designates the default Llama 2 tokenizer; in that case
    None is returned.
    """
    if vocab_size == 0:
        return None
    else:
        return os.path.join(DATA_CACHE_DIR, f"tok{vocab_size}.model")

class Task:

    @staticmethod
    def iter_batches(batch_size, device, num_workers=0, **dataset_kwargs):
        ds = PretokDataset(**dataset_kwargs)
        dl = torch.utils.data.DataLoader(
            ds, batch_size=batch_size, pin_memory=True, num_workers=num_workers
        )
        for x, y in dl:
            x = x.to(device, non_blocking=True)
            y = y.to(device, non_blocking=True)
            yield x, y

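# A hedged usage sketch for the consumer side (batch size, device, and sequence
# length here are illustrative, not part of this commit):
#
#   batches = Task.iter_batches(
#       batch_size=32, device="cuda",
#       split="train", max_seq_len=256, vocab_size=0, vocab_source="llama2",
#   )
#   x, y = next(batches)  # two (32, 256) int64 tensors, already moved to the device
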
if __name__ == '__main__':
    pretokenize(vocab_size=0)