sjzhao commited on
Commit
0b7f300
1 Parent(s): a7267c6

Create dataset.py

Files changed (1)
  1. dataset.py +220 -0
dataset.py ADDED
@@ -0,0 +1,220 @@
+ import torchdata.datapipes as dp
+ import json
+ from PIL import Image
+ import functools
+ import numpy as np
+ import torch
+ import pickle
+ import random
+ import os
+ from braceexpand import braceexpand
+ from torch.utils.data.datapipes.iter.sharding import SHARDING_PRIORITIES
+ from torch.utils.data.dataloader import default_collate
+ import base64
+ import io
+ import tarfile
+ from torchdata.datapipes.iter import TarArchiveLoader
+ from typing import cast, IO, Iterable, Iterator, Optional, Tuple, Dict
+ from torchdata.datapipes import functional_datapipe
+ from io import BufferedIOBase
+ from torchdata.datapipes.utils import StreamWrapper
+ from torchdata.datapipes.utils.common import validate_pathname_binary_tuple
+ import warnings
+ from torchdata.datapipes.iter import IterDataPipe
+ import hydra
+ from omegaconf import OmegaConf
+ import pyrootutils
+
+ pyrootutils.setup_root(__file__, indicator='.project-root', pythonpath=True)
+
+ BOI_TOKEN = '<img>'
+ EOI_TOKEN = '</img>'
+ IMG_TOKEN = '<img_{:05d}>'
+
+
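+ # Drop-in replacement for torchdata's TarArchiveLoader that warns and skips a
+ # corrupted tar shard instead of raising, so one bad shard cannot kill a long
+ # training run.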
+ @functional_datapipe("load_from_tar_wo_exception")
+ class TarArchiveLoaderWoException(TarArchiveLoader):
+     def __iter__(self) -> Iterator[Tuple[str, BufferedIOBase]]:
+         for data in self.datapipe:
+             validate_pathname_binary_tuple(data)
+             pathname, data_stream = data
+             try:
+                 if isinstance(data_stream, StreamWrapper) and isinstance(data_stream.file_obj, tarfile.TarFile):
+                     tar = data_stream.file_obj
+                 else:
+                     reading_mode = (self.mode
+                                     if hasattr(data_stream, "seekable") and data_stream.seekable()
+                                     else self.mode.replace(":", "|"))
+                     # typing.cast is used here to silence mypy's type checker
+                     tar = tarfile.open(fileobj=cast(Optional[IO[bytes]], data_stream), mode=reading_mode)
+                 for tarinfo in tar:
+                     if not tarinfo.isfile():
+                         continue
+                     extracted_fobj = tar.extractfile(tarinfo)
+                     if extracted_fobj is None:
+                         warnings.warn(f"failed to extract file {tarinfo.name} from source tarfile {pathname}")
+                         raise tarfile.ExtractError
+                     inner_pathname = os.path.normpath(os.path.join(pathname, tarinfo.name))
+                     yield inner_pathname, StreamWrapper(extracted_fobj, data_stream, name=inner_pathname)  # type: ignore[misc]
+             except Exception as e:
+                 warnings.warn(f"Unable to extract files from corrupted tarfile stream {pathname} due to: {e}, abort!")
+                 # raise e
+             finally:
+                 if isinstance(data_stream, StreamWrapper):
+                     data_stream.autoclose()
+
+
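+ # Decode one archive member: for ".pkl" members, rebuild the interleaved
+ # image/text sequence as a single token string and tokenize it for LM
+ # training; all other members pass through as (key, None).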
+ def decode_obelisc_data_for_llm(item, tokenizer=None, max_length=512, reverse_ratio=0.5, max_images=None):
+     key, value = item
+
+     sample = {}
+     if key.endswith(".pkl"):
+         try:
+             value = pickle.load(value)
+         except Exception as e:
+             print(f'Error occurred when loading pkl: {e}')
+             return key, sample
+
+         image_list = value['image_ids']
+         text_list = value['texts']
+         metadata = value['metadata']
+
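+         # With probability `reverse_ratio`, swap each image with the text that
+         # follows it, turning image-to-text samples into text-to-image ones.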
+         reverse_flag = np.random.uniform(0, 1) < reverse_ratio
+         if reverse_flag:
+             idx = 0
+             while idx < len(image_list) - 1:
+                 if image_list[idx] is not None:
+                     image_list[idx], image_list[idx + 1] = image_list[idx + 1], image_list[idx]
+                     text_list[idx], text_list[idx + 1] = text_list[idx + 1], text_list[idx]
+                     idx += 2
+                 else:
+                     idx += 1
+
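+         # Assemble a single token stream: text chunks verbatim, images as
+         # <img><img_00000>...</img> blocks; exactly one of (image_id, text)
+         # must be set at each position.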
+         unified_tokens = tokenizer.bos_token
+         cur_images = 0
+         for image_id, text in zip(image_list, text_list):
+             if (image_id is None) + (text is None) != 1:
+                 print('Incorrect data format, skip.')
+                 return key, {}
+             if image_id is not None:
+                 if max_images is not None and cur_images >= max_images:
+                     break
+                 image_tokens = BOI_TOKEN + ''.join(
+                     [IMG_TOKEN.format(int(token_id)) for token_id in image_id]) + EOI_TOKEN
+                 unified_tokens += image_tokens
+                 cur_images += 1
+             else:
+                 unified_tokens += text
+                 if max_images is not None and cur_images >= max_images:
+                     break
+         unified_tokens += tokenizer.eos_token
+
+         tokenized = tokenizer(unified_tokens,
+                               max_length=max_length,
+                               add_special_tokens=False,
+                               truncation=True,
+                               padding='max_length',
+                               return_tensors='pt')
+
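+         # Mask padding positions with -100 so they are ignored by the LM loss.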
+         input_ids = tokenized['input_ids'][0]
+         attention_mask = tokenized['attention_mask'][0]
+         labels = torch.clone(input_ids)
+         labels[labels == tokenizer.pad_token_id] = -100
+         filter_flag = True
+
+         return key, {
+             'input_ids': input_ids,
+             'attention_mask': attention_mask,
+             'labels': labels,
+             'filter_flag': filter_flag,
+         }
+     else:
+         return key, None
+
+
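+ # Flatten a webdataset-grouped sample ({member_name: decoded_value, ...}) into
+ # one flat dict, keeping a basename key and a default metadata field.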
+ def unwrap_data(item):
+     unwrapped = {}
+     for key, value in item.items():
+         if isinstance(value, dict):
+             unwrapped.update(value)
+         elif value is not None:
+             unwrapped[key] = value
+     if 'metadata' not in unwrapped:
+         unwrapped['metadata'] = '{}'
+     if '__key__' in unwrapped:
+         unwrapped['__key__'] = unwrapped['__key__'].split('/')[-1]
+     return unwrapped
+
+
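+ # Keep only samples that decoded successfully, i.e. carry input_ids and were
+ # not flagged for filtering.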
+ def filter_data_for_llm(item):
+     if 'input_ids' in item and item.get('filter_flag', True):
+         return True
+     else:
+         print('A sample has been filtered out.')
+         return False
+
+
+ def build_obelisc_datapipes_for_llm(data_dir,
+                                     tokenizer=None,
+                                     max_length=512,
+                                     reverse_ratio=0.5,
+                                     max_images=None,
+                                     recursive=True,
+                                     batch_size=None,
+                                     cycle_count=None):
+     """
+     Build a datapipe over an image-text interleaved dataset (such as MMC4)
+     stored in WebDataset-format tar shards.
+     """
+
+     decode_partial = functools.partial(decode_obelisc_data_for_llm,
+                                        tokenizer=tokenizer,
+                                        max_length=max_length,
+                                        reverse_ratio=reverse_ratio,
+                                        max_images=max_images)
+
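+     # Pipeline: list tar shards -> cycle/shuffle -> shard across workers ->
+     # stream tar members -> decode -> regroup by key (webdataset) -> flatten ->
+     # drop bad samples -> shuffle buffer -> optional batch + collate.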
+     if isinstance(data_dir, str):
+         data_dir = list(braceexpand(data_dir))
+     datapipe = dp.iter.FileLister(root=data_dir, masks='*.tar', recursive=recursive)
+     datapipe = datapipe.cycle(count=cycle_count)
+     datapipe = datapipe.shuffle()
+     datapipe = datapipe.sharding_filter()
+     datapipe = datapipe.open_files(mode='b')
+     datapipe = datapipe.load_from_tar_wo_exception()
+     datapipe = datapipe.map(decode_partial)
+     datapipe = datapipe.webdataset()
+     datapipe = datapipe.map(unwrap_data)
+     datapipe = datapipe.filter(filter_data_for_llm)
+     datapipe = datapipe.shuffle(buffer_size=4096)
+     if batch_size is not None:
+         datapipe = datapipe.batch(batch_size)
+         datapipe = datapipe.collate()
+     return datapipe
+
+
+ if __name__ == '__main__':
+     from torchdata.dataloader2 import DataLoader2, MultiProcessingReadingService, DistributedReadingService, SequentialReadingService
+     data_dir = './obelisc'
+     seed_tokenizer_cfg_path = ''
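+     # NOTE: seed_tokenizer_cfg_path is left empty in this commit; point it at
+     # a Hydra/OmegaConf config for the SEED tokenizer before running.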
+     dataloader_num_workers = 2
+
+     seed_tokenizer_cfg = OmegaConf.load(seed_tokenizer_cfg_path)
+     seed_tokenizer = hydra.utils.instantiate(seed_tokenizer_cfg)
+
+     dataset = build_obelisc_datapipes_for_llm(
+         data_dir=data_dir, tokenizer=seed_tokenizer, max_length=1024, cycle_count=1)
+
+     mp_rs = MultiProcessingReadingService(num_workers=dataloader_num_workers)
+     dist_rs = DistributedReadingService()
+     rs = SequentialReadingService(dist_rs, mp_rs)
+
+     dataloader = DataLoader2(dataset, reading_service=rs)
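+     # Iterating the DataLoader2 yields flat sample dicts with input_ids,
+     # attention_mask and labels (collated batches when batch_size is set).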