db committed on
Commit 2494b0a
1 Parent(s): da674fb
Files changed (2)
  1. .idea/workspace.xml +14 -1
  2. app.py +74 -66
.idea/workspace.xml CHANGED
@@ -6,6 +6,7 @@
   <component name="ChangeListManager">
     <list default="true" id="6f3a79aa-e3bd-440d-b0d9-38be2ab06fa3" name="Changes" comment="init">
       <change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" />
+      <change beforePath="$PROJECT_DIR$/app.py" beforeDir="false" afterPath="$PROJECT_DIR$/app.py" afterDir="false" />
     </list>
     <option name="SHOW_DIALOG" value="false" />
     <option name="HIGHLIGHT_CONFLICTS" value="true" />
@@ -28,6 +29,11 @@
     <property name="RunOnceActivity.ShowReadmeOnStart" value="true" />
     <property name="last_opened_file_path" value="$PROJECT_DIR$" />
   </component>
+  <component name="RecentsManager">
+    <key name="CopyFile.RECENT_KEYS">
+      <recent name="$PROJECT_DIR$" />
+    </key>
+  </component>
   <component name="SpellCheckerSettings" RuntimeDictionaries="0" Folders="0" CustomDictionaries="0" DefaultDictionary="application-level" UseSingleDictionary="true" transferred="true" />
   <component name="TaskManager">
     <task active="true" id="Default" summary="Default task">
@@ -135,7 +141,14 @@
       <option name="project" value="LOCAL" />
       <updated>1683459236060</updated>
     </task>
-    <option name="localTasksCounter" value="15" />
+    <task id="LOCAL-00015" summary="init">
+      <created>1683459694836</created>
+      <option name="number" value="00015" />
+      <option name="presentableId" value="LOCAL-00015" />
+      <option name="project" value="LOCAL" />
+      <updated>1683459694836</updated>
+    </task>
+    <option name="localTasksCounter" value="16" />
     <servers />
   </component>
   <component name="Vcs.Log.Tabs.Properties">
app.py CHANGED
@@ -1,72 +1,80 @@
-"""
-Prepare the Shakespeare dataset for character-level language modeling.
-So instead of encoding with GPT-2 BPE tokens, we just map characters to ints.
-Will save train.bin, val.bin containing the ids, and meta.pkl containing the
-encoder and decoder and some other related info.
-"""
+# saves the openwebtext dataset to a binary file for training. following was helpful:
+# https://github.com/HazyResearch/flash-attention/blob/main/training/src/datamodules/language_modeling_hf.py
+
 import os
-import pickle
-import requests
+from tqdm import tqdm
 import numpy as np
+import tiktoken
+from datasets import load_dataset # huggingface datasets
+
+# number of workers in .map() call
+# good number to use is ~order number of cpu cores // 2
+num_proc = 8
+
+# takes 54GB in huggingface .cache dir, about 8M documents (8,013,769)
+dataset = load_dataset("openwebtext")
+
+# owt by default only contains the 'train' split, so create a test split
+split_dataset = dataset["train"].train_test_split(test_size=0.0005, seed=2357, shuffle=True)
+split_dataset['val'] = split_dataset.pop('test') # rename the test split to val
+
+# this results in:
+# >>> split_dataset
+# DatasetDict({
+#     train: Dataset({
+#         features: ['text'],
+#         num_rows: 8009762
+#     })
+#     val: Dataset({
+#         features: ['text'],
+#         num_rows: 4007
+#     })
+# })
+
+# we now want to tokenize the dataset. first define the encoding function (gpt2 bpe)
+enc = tiktoken.get_encoding("gpt2")
+def process(example):
+    ids = enc.encode_ordinary(example['text']) # encode_ordinary ignores any special tokens
+    ids.append(enc.eot_token) # add the end of text token, e.g. 50256 for gpt2 bpe
+    # note: I think eot should be prepended not appended... hmm. it's called "eot" though...
+    out = {'ids': ids, 'len': len(ids)}
+    return out
 
-# download the tiny shakespeare dataset
-input_file_path = os.path.join(os.path.dirname(__file__), 'input.txt')
-if not os.path.exists(input_file_path):
-    data_url = 'https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt'
-    with open(input_file_path, 'w') as f:
-        f.write(requests.get(data_url).text)
-
-with open(input_file_path, 'r') as f:
-    data = f.read()
-print(f"length of dataset in characters: {len(data):,}")
-
-# get all the unique characters that occur in this text
-chars = sorted(list(set(data)))
-vocab_size = len(chars)
-print("all the unique characters:", ''.join(chars))
-print(f"vocab size: {vocab_size:,}")
-
-# create a mapping from characters to integers
-stoi = { ch:i for i,ch in enumerate(chars) }
-itos = { i:ch for i,ch in enumerate(chars) }
-def encode(s):
-    return [stoi[c] for c in s] # encoder: take a string, output a list of integers
-def decode(l):
-    return ''.join([itos[i] for i in l]) # decoder: take a list of integers, output a string
-
-# create the train and test splits
-n = len(data)
-train_data = data[:int(n*0.9)]
-val_data = data[int(n*0.9):]
-
-# encode both to integers
-train_ids = encode(train_data)
-val_ids = encode(val_data)
-print(f"train has {len(train_ids):,} tokens")
-print(f"val has {len(val_ids):,} tokens")
-
-# export to bin files
-train_ids = np.array(train_ids, dtype=np.uint16)
-val_ids = np.array(val_ids, dtype=np.uint16)
-train_ids.tofile(os.path.join(os.path.dirname(__file__), 'train.bin'))
-val_ids.tofile(os.path.join(os.path.dirname(__file__), 'val.bin'))
-
-# save the meta information as well, to help us encode/decode later
-meta = {
-    'vocab_size': vocab_size,
-    'itos': itos,
-    'stoi': stoi,
-}
-with open(os.path.join(os.path.dirname(__file__), 'meta.pkl'), 'wb') as f:
-    pickle.dump(meta, f)
-
-# length of dataset in characters: 1115394
-# all the unique characters:
-# !$&',-.3:;?ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz
-# vocab size: 65
-# train has 1003854 tokens
-# val has 111540 tokens
-
+# tokenize the dataset
+tokenized = split_dataset.map(
+    process,
+    remove_columns=['text'],
+    desc="tokenizing the splits",
+    num_proc=num_proc,
+)
+
+# concatenate all the ids in each dataset into one large file we can use for training
+for split, dset in tokenized.items():
+    arr_len = np.sum(dset['len'])
+    filename = os.path.join(os.path.dirname(__file__), f'{split}.bin')
+    dtype = np.uint16 # (can do since enc.max_token_value == 50256 is < 2**16)
+    arr = np.memmap(filename, dtype=dtype, mode='w+', shape=(arr_len,))
+    total_batches = 1024
+
+    idx = 0
+    for batch_idx in tqdm(range(total_batches), desc=f'writing {filename}'):
+        # Batch together samples for faster write
+        batch = dset.shard(num_shards=total_batches, index=batch_idx, contiguous=True).with_format('numpy')
+        arr_batch = np.concatenate(batch['ids'])
+        # Write into mmap
+        arr[idx : idx + len(arr_batch)] = arr_batch
+        idx += len(arr_batch)
+    arr.flush()
+
+# train.bin is ~17GB, val.bin ~8.5MB
+# train has ~9B tokens (9,035,582,198)
+# val has ~4M tokens (4,434,897)
+
+# to read the bin files later, e.g. with numpy:
+# m = np.memmap('train.bin', dtype=np.uint16, mode='r')
+
+
+##########################################################################################
 
 """
 This training script can be run both on a single gpu in debug mode,
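
The closing comment of the new app.py shows how the written files are meant to be opened later with np.memmap. As a minimal sketch of what consuming train.bin/val.bin for training could look like, the snippet below memory-maps the uint16 token ids and samples a random batch of input/target sequences. The block_size and batch_size values and the get_batch helper are illustrative assumptions, not part of this commit.

import numpy as np

# sketch: read back the uint16 token ids without loading the whole ~17GB file into RAM
# (assumes train.bin was produced by the script above and sits in the current directory)
train_data = np.memmap('train.bin', dtype=np.uint16, mode='r')

block_size = 1024   # context length per training example (assumed value)
batch_size = 8      # sequences per batch (assumed value)

def get_batch(data, rng=np.random.default_rng(0)):
    # pick random start offsets, then slice block_size tokens for inputs
    # and the same window shifted by one for next-token targets
    ix = rng.integers(0, len(data) - block_size - 1, size=batch_size)
    x = np.stack([data[i : i + block_size].astype(np.int64) for i in ix])
    y = np.stack([data[i + 1 : i + 1 + block_size].astype(np.int64) for i in ix])
    return x, y

x, y = get_batch(train_data)
print(x.shape, y.shape)  # -> (8, 1024) (8, 1024)

Keeping the ids as uint16 on disk and widening to int64 only per sampled batch keeps memory use proportional to the batch size rather than the dataset size, which is the point of the memmap layout the script writes.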