SuperCS committed on
Commit e051419 · verified · 1 Parent(s): e31e7b4

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. dataset_code/sft_sftnews/offload/dataset_tool/AIP_dataset.py +309 -0
  2. dataset_code/sft_sftnews/offload/dataset_tool/__init__.py +5 -0
  3. dataset_code/sft_sftnews/offload/dataset_tool/__pycache__/AIP_dataset.cpython-310.pyc +0 -0
  4. dataset_code/sft_sftnews/offload/dataset_tool/__pycache__/AIP_dataset.cpython-311.pyc +0 -0
  5. dataset_code/sft_sftnews/offload/dataset_tool/__pycache__/__init__.cpython-310.pyc +0 -0
  6. dataset_code/sft_sftnews/offload/dataset_tool/__pycache__/__init__.cpython-311.pyc +0 -0
  7. dataset_code/sft_sftnews/offload/dataset_tool/__pycache__/__init__.cpython-313.pyc +0 -0
  8. dataset_code/sft_sftnews/offload/dataset_tool/__pycache__/collection_dataset.cpython-310.pyc +0 -0
  9. dataset_code/sft_sftnews/offload/dataset_tool/__pycache__/collection_dataset.cpython-311.pyc +0 -0
  10. dataset_code/sft_sftnews/offload/dataset_tool/__pycache__/dataset_hdfs.cpython-310.pyc +0 -0
  11. dataset_code/sft_sftnews/offload/dataset_tool/__pycache__/dataset_hdfs.cpython-311.pyc +0 -0
  12. dataset_code/sft_sftnews/offload/dataset_tool/__pycache__/dataset_hdfs.cpython-313.pyc +0 -0
  13. dataset_code/sft_sftnews/offload/dataset_tool/__pycache__/image_dataset.cpython-310.pyc +0 -0
  14. dataset_code/sft_sftnews/offload/dataset_tool/__pycache__/image_dataset.cpython-311.pyc +0 -0
  15. dataset_code/sft_sftnews/offload/dataset_tool/collection_dataset.py +672 -0
  16. dataset_code/sft_sftnews/offload/dataset_tool/dataset_hdfs.py +198 -0
  17. dataset_code/sft_sftnews/offload/dataset_tool/image_dataset.py +929 -0
  18. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/__init__.py +0 -0
  19. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/__pycache__/__init__.cpython-310.pyc +0 -0
  20. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/__pycache__/__init__.cpython-311.pyc +0 -0
  21. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/__pycache__/base_parquet.cpython-310.pyc +0 -0
  22. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/__pycache__/base_parquet.cpython-311.pyc +0 -0
  23. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/__pycache__/parquet_utils.cpython-310.pyc +0 -0
  24. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/__pycache__/parquet_utils.cpython-311.pyc +0 -0
  25. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/__pycache__/tos_client.cpython-310.pyc +0 -0
  26. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/__pycache__/tos_client.cpython-311.pyc +0 -0
  27. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/__pycache__/video_parquet.cpython-310.pyc +0 -0
  28. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/__pycache__/video_parquet.cpython-311.pyc +0 -0
  29. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/base_parquet.py +289 -0
  30. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/parquet_utils.py +142 -0
  31. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/samplers/__init__.py +0 -0
  32. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/samplers/__pycache__/__init__.cpython-310.pyc +0 -0
  33. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/samplers/__pycache__/__init__.cpython-311.pyc +0 -0
  34. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/samplers/__pycache__/frame_sampler.cpython-310.pyc +0 -0
  35. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/samplers/__pycache__/frame_sampler.cpython-311.pyc +0 -0
  36. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/samplers/__pycache__/text_sampler.cpython-310.pyc +0 -0
  37. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/samplers/__pycache__/text_sampler.cpython-311.pyc +0 -0
  38. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/samplers/__pycache__/utils.cpython-310.pyc +0 -0
  39. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/samplers/__pycache__/utils.cpython-311.pyc +0 -0
  40. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/samplers/frame_sampler.py +375 -0
  41. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/samplers/text_sampler.py +332 -0
  42. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/samplers/utils.py +42 -0
  43. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/tos_client.py +192 -0
  44. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/utils/__pycache__/distributed_utils.cpython-310.pyc +0 -0
  45. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/utils/__pycache__/distributed_utils.cpython-311.pyc +0 -0
  46. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/utils/__pycache__/hdfs_utils.cpython-310.pyc +0 -0
  47. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/utils/__pycache__/hdfs_utils.cpython-311.pyc +0 -0
  48. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/utils/__pycache__/partition_utils.cpython-310.pyc +0 -0
  49. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/utils/__pycache__/partition_utils.cpython-311.pyc +0 -0
  50. dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/utils/distributed_utils.py +149 -0
dataset_code/sft_sftnews/offload/dataset_tool/AIP_dataset.py ADDED
@@ -0,0 +1,309 @@
import torch
from torch.utils.data.dataset import Dataset
from torchvision.transforms.functional import to_tensor
from nebudata import refds
from .parquet_dataset.utils import hdfs_utils
from .parquet_dataset.parquet_utils import get_random_for_rank_and_worker, get_portion_for_rank_and_worker, get_worker_id, get_worker_count
from .parquet_dataset.utils.distributed_utils import get_data_parallel_rank, get_data_parallel_world_size
import random
import io  # stdlib; imported unconditionally since the 'image_bytes' backend also needs it
from PIL import Image
import copy
import numpy as np
import json
from multiprocessing import Pool
import traceback


try:
    import av
    pyav_enabled = True
except ImportError:
    pyav_enabled = False

try:
    import imageio.v3 as iio
    imageio_enabled = True
except ImportError:
    imageio_enabled = False


def get_length(path, ignore_prefixes):
    dataset = refds.RefDataset(
        path, ignore_prefixes=ignore_prefixes)
    return dataset.rank_total


def get_length_subprocess(path, ignore_prefixes):
    # Count in a short-lived subprocess so the RefDataset handle is not kept in the parent.
    with Pool(1) as pool:
        counts = pool.apply(
            get_length, args=(path, ignore_prefixes))
    return counts


def sampling(video_length, sample_n_frames, sample_stride, skip_start_end=10):
    # A clip spans (sample_n_frames - 1) * sample_stride + 1 source frames.
    # Jacob Sep 17th: If sample frames > video frames, we drop this video
    if (sample_n_frames - 1) * sample_stride + 1 > (video_length - skip_start_end * 2):
        return None
    clip_length = min(
        video_length, (sample_n_frames - 1) * sample_stride + 1)
    start_idx = random.randint(
        skip_start_end, video_length - clip_length - skip_start_end)
    batch_index = np.linspace(
        start_idx, start_idx + clip_length - 1, sample_n_frames, dtype=int)
    return batch_index


class AIPVideoDataset(Dataset):
    def __init__(self,
                 path,
                 sample_size=256,
                 sample_stride=4,
                 sample_n_frames=16,
                 caption_key='caption',
                 caption_path="",
                 fps=24,
                 shuffle=True,
                 infinite=True,
                 parquet_batch=128,
                 video_toskey='clip_toskey',
                 bytes_key='bytes',
                 ignore_prefixes=None,
                 decode_backend='pyav',
                 force_partition=False,
                 data_world_size=10000,  # TODO: can be dynamic
                 local_cache_prefix='',
                 ):
        self.sample_size = sample_size
        assert self.sample_size == -1, \
            "only support original size, consider using sample_size==-1 for bucketing"
        self.sample_stride = sample_stride
        self.sample_n_frames = sample_n_frames
        self.shuffle = shuffle
        self.infinite = infinite  # this doesn't work, the dataset is always infinite
        self.fps = fps
        self.force_partition = force_partition
        self.data_world_size = data_world_size
        self.state_dict = {'data_world_size': self.data_world_size,
                           'seen_times': [0 for _ in range(self.data_world_size)]}
        self.remaining_ranks = []
        self.local_cache_prefix = local_cache_prefix

        self.path = path
        self.parquet_batch = parquet_batch
        self.video_toskey = video_toskey
        self.caption_key = caption_key  # the key used to store the caption
        self.bytes_key = bytes_key  # the key used to store the real bytes
        self.ignore_prefixes = ignore_prefixes
        self.decode_backend = decode_backend

        self.total_length = None
        # read the caption json file from caption_path separately, for the Seed V2 dataset
        self.caption_data = None
        if caption_path != "":
            if caption_path.startswith("hdfs"):
                caption_path = hdfs_utils.download(caption_path, './')
            with open(caption_path, 'r') as f:
                caption_data = json.load(f)
            self.total_length = len(caption_data)
            self.caption_data = {item['uttid']: item[self.caption_key]
                                 for item in caption_data}

        if self.decode_backend == 'imageio':
            assert imageio_enabled, 'failed to install imageio'
        elif self.decode_backend == 'pyav':
            assert pyav_enabled, 'failed to install pyav'

    def __iter__(self):
        rank = get_data_parallel_rank()
        world_size = get_data_parallel_world_size()
        worker_id = get_worker_id()
        worker_count = get_worker_count()
        overall_workers = world_size * worker_count

        self.local_cache_path = f'{self.local_cache_prefix}_{rank}_{worker_id}.txt'
        refs = [(self.video_toskey, self.bytes_key)
                ] if self.video_toskey != '' else []

        worker_ranks = get_portion_for_rank_and_worker(self.remaining_ranks, allow_empty=True)

        while True:
            if self.shuffle:
                get_random_for_rank_and_worker(None).shuffle(worker_ranks)

            for rank in worker_ranks:
                # log each visited data rank so state_dict() can reconstruct progress
                with open(self.local_cache_path, 'a') as f:
                    f.write(f'{rank}\n')
                filereader = refds.RefDataset(self.path, ignore_prefixes=self.ignore_prefixes,
                                              world_size=self.data_world_size, rank=rank)
                for batch in filereader.iter_batches(batch_size=self.parquet_batch, refs=refs):
                    actual_size = len(batch[self.bytes_key])
                    columns = [col for col in batch.column_names]
                    for i in range(actual_size):
                        params_dict = {col: batch[col]
                                       [i].as_py() for col in columns}
                        if self.caption_data is not None:
                            # if we have caption_data, use it to replace the caption
                            uttid = params_dict['uttid']
                            if uttid not in self.caption_data:
                                continue
                            params_dict[self.caption_key] = self.caption_data[uttid]
                        frames, metadata = self._data_process(params_dict)
                        if frames is None:
                            continue
                        yield self._pack_frames(frames, metadata)

            overall_ranks = []
            while len(overall_ranks) < overall_workers:
                overall_ranks += list(range(self.data_world_size))
            worker_ranks = get_portion_for_rank_and_worker(overall_ranks, force=True)

    def _pack_frames(self, frames, metadata):
        tensor_frames = []
        for frame in frames:
            frame = to_tensor(frame)
            tensor_frames.append(frame)
        tensor_frames = torch.stack(tensor_frames)
        # scale values from [0, 1] to [-1.0, 1.0]
        pixel_values = tensor_frames * 2.0 - 1.0
        item = dict(
            mp4=pixel_values,
            txt=metadata[self.caption_key],
            num_frames=self.sample_n_frames,
            fps=metadata.get('fps', self.fps),
        )
        return item

    def _data_process(self, params):
        tosbytes = params[self.bytes_key]
        del params[self.bytes_key]  # remove the bytes key
        metadata = copy.deepcopy(params)
        try:
            frames = self._bytes_to_PILs(tosbytes)
        except Exception:
            print("data error: ", metadata)
            traceback.print_exc()
            return None, None
        if frames is None:
            return None, None
        return frames, metadata

    def _bytes_to_PILs(self, video_bytes):
        if self.decode_backend == 'imageio':
            raw_frames = iio.imread(
                video_bytes, index=None, format_hint=".mp4")
            video_length = raw_frames.shape[0]
            video_idxs = sampling(
                video_length, self.sample_n_frames, self.sample_stride)
            if video_idxs is None:
                return None
            frames = []
            for i in video_idxs:
                frames.append(Image.fromarray(raw_frames[i], 'RGB'))

        elif self.decode_backend[:4] == 'pyav':
            file_io = io.BytesIO(video_bytes)
            container = av.open(file_io)
            stream = container.streams.video[0]
            video_length = container.streams.video[0].frames
            video_idxs = sampling(
                video_length, self.sample_n_frames, self.sample_stride)
            if video_idxs is None:
                return None
            frames_sorted = []
            key_frame_idxs = []

            # Collect keyframe indices without decoding
            stream.codec_context.skip_frame = "NONKEY"
            for packet in container.demux(stream):
                if packet.is_keyframe:
                    frame_idx = int(
                        packet.pts * stream.time_base * stream.average_rate + 1e-6)
                    key_frame_idxs.append(frame_idx)

            # Reset so that any frame can be decoded
            stream.codec_context.skip_frame = "DEFAULT"

            # Sort the requested indices in case they are unsorted
            video_idxs_sort_idx = np.argsort(np.array(video_idxs))
            video_idxs_sorted = np.array(video_idxs)[video_idxs_sort_idx]

            # The keyframe assignment for each requested frame
            keyframe_assignment = np.clip(((np.array(video_idxs_sorted)[
                None] - np.array(key_frame_idxs)[:, None]) > 0).sum(0) - 1, 0, None)

            time_base = container.streams.video[0].time_base
            framerate = container.streams.video[0].average_rate

            previous_keyframe_assignment = -1
            for ii, frame_num in enumerate(video_idxs_sorted):
                this_assignment = keyframe_assignment[ii]

                # Re-seek only when the assigned keyframe changes, to avoid re-decoding frames
                if this_assignment != previous_keyframe_assignment:
                    # Calculate the timestamp for the desired frame
                    frame_container_pts = int(
                        ((key_frame_idxs[this_assignment] + 1) / framerate) / time_base)

                    # Seek to the closest keyframe before the desired timestamp
                    container.seek(frame_container_pts, backward=True,
                                   stream=container.streams.video[0])
                    previous_keyframe_assignment = this_assignment

                    # Record where we start, for debug only
                    # start_idx = key_frame_idxs[this_assignment]

                previous_frame_idx = -1
                while previous_frame_idx < frame_num:
                    frame = next(container.decode(video=0))
                    previous_frame_idx = int(
                        frame.pts * stream.time_base * stream.average_rate + 1e-6)
                # Debug code to check that we always get the desired frame
                # print(f"start={start_idx}, source={previous_frame_idx}, target={frame_num}, ")
                frames_sorted.append(frame.to_image())

            # Recollect into the original order => inverse sort
            frames = [None for _ in range(len(video_idxs))]
            for i, idx in enumerate(video_idxs_sort_idx):
                frames[idx] = frames_sorted[i]
        elif self.decode_backend == 'image_bytes':
            video_length = len(video_bytes)
            video_idxs = sampling(
                video_length, self.sample_n_frames, self.sample_stride)
            if video_idxs is None:
                return None
            frames = []
            for idx in video_idxs:
                frame_byte = video_bytes[idx]
                with Image.open(io.BytesIO(frame_byte)) as frame:
                    frame = frame.convert("RGB")
                    frames.append(frame)

        return frames

    def load_state_dict(self, state_dict):
        # validate the incoming state_dict before adopting it
        if 'data_world_size' not in state_dict:
            print('[AIP_dataset] no state_dict; init data loading')
        elif self.data_world_size != state_dict['data_world_size']:
            print('[AIP_dataset] inconsistent data_world_size, init data loading')
        elif state_dict['data_world_size'] != len(state_dict.get('seen_times', [])):
            print('[AIP_dataset] corrupted state_dict; init data loading')
        else:
            # this has to be the same across all workers
            self.state_dict = state_dict
            print('[AIP_dataset] resume data loading from state_dict')
        # get the remaining ranks: ranks seen fewer times than the max are revisited first
        max_times = max(self.state_dict['seen_times'])
        for rank, times in enumerate(self.state_dict['seen_times']):
            for _ in range(max_times - times):
                self.remaining_ranks.append(rank)

    def __len__(self):
        if self.total_length is None:
            counts = get_length_subprocess(self.path, self.ignore_prefixes)
            self.total_length = counts
        return self.total_length

    @classmethod
    def create_dataset_function(cls, data_path, args, **kwargs):
        return cls(path=data_path, **kwargs)
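
For orientation, a minimal sketch of how AIPVideoDataset might be driven. This is not taken from the repository's configs: the path, cache prefix, and parameter values are hypothetical placeholders. Note the class is consumed via __iter__, so it is iterated directly rather than indexed.

# Hypothetical usage sketch; all arguments are illustrative placeholders.
from dataset_tool.AIP_dataset import AIPVideoDataset

dataset = AIPVideoDataset(
    path='hdfs://example/parquet_root',       # placeholder parquet root
    sample_size=-1,                           # required: __init__ asserts sample_size == -1
    sample_stride=4,
    sample_n_frames=16,
    decode_backend='pyav',
    local_cache_prefix='/tmp/aip_cache/example',
)
for item in dataset:                          # the dataset iterates forever
    frames = item['mp4']                      # (num_frames, C, H, W), values in [-1, 1]
    caption = item['txt']
    break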
dataset_code/sft_sftnews/offload/dataset_tool/__init__.py ADDED
@@ -0,0 +1,5 @@
from .dataset_hdfs import *
from .image_dataset import T2IHDFSDataset, T2IHDFSDataset_dump
from .parquet_dataset.video_parquet import SeedV1Dataset, SeedV1Dataset_dump
from .AIP_dataset import AIPVideoDataset
from .collection_dataset import CollectionDataset, CollectionDataset_dump, collate_fn_map
dataset_code/sft_sftnews/offload/dataset_tool/__pycache__/AIP_dataset.cpython-310.pyc ADDED
Binary file (8.84 kB)

dataset_code/sft_sftnews/offload/dataset_tool/__pycache__/AIP_dataset.cpython-311.pyc ADDED
Binary file (16.7 kB)

dataset_code/sft_sftnews/offload/dataset_tool/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (541 Bytes)

dataset_code/sft_sftnews/offload/dataset_tool/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (653 Bytes)

dataset_code/sft_sftnews/offload/dataset_tool/__pycache__/__init__.cpython-313.pyc ADDED
Binary file (557 Bytes)

dataset_code/sft_sftnews/offload/dataset_tool/__pycache__/collection_dataset.cpython-310.pyc ADDED
Binary file (13.6 kB)

dataset_code/sft_sftnews/offload/dataset_tool/__pycache__/collection_dataset.cpython-311.pyc ADDED
Binary file (33.1 kB)

dataset_code/sft_sftnews/offload/dataset_tool/__pycache__/dataset_hdfs.cpython-310.pyc ADDED
Binary file (5.38 kB)

dataset_code/sft_sftnews/offload/dataset_tool/__pycache__/dataset_hdfs.cpython-311.pyc ADDED
Binary file (9.76 kB)

dataset_code/sft_sftnews/offload/dataset_tool/__pycache__/dataset_hdfs.cpython-313.pyc ADDED
Binary file (8.98 kB)

dataset_code/sft_sftnews/offload/dataset_tool/__pycache__/image_dataset.cpython-310.pyc ADDED
Binary file (24.8 kB)

dataset_code/sft_sftnews/offload/dataset_tool/__pycache__/image_dataset.cpython-311.pyc ADDED
Binary file (49.6 kB)
dataset_code/sft_sftnews/offload/dataset_tool/collection_dataset.py ADDED
@@ -0,0 +1,672 @@
import io
import os
import json
import glob
import torch
from copy import deepcopy
from typing import Dict, List
import importlib
import random
from torch.utils.data import ChainDataset, IterableDataset, Dataset
import torchvision.transforms as transforms
from torch.utils.data._utils.collate import default_collate
from torchvision.transforms import functional as F
import concurrent.futures

from dataset_tool.AIP_dataset import AIPVideoDataset

from PIL import Image
from diffusers.utils import export_to_video
from diffusers.training_utils import free_memory


def collate_fn_map(samples):
    """
    Custom collate function that processes a list of samples into a batch.
    """
    if type(samples) is list and type(samples[0]) is list:
        samples = samples[0]  # unwrap the outer batch dimension; the DataLoader batch size is always 1
        if isinstance(samples[0], dict):
            none_keys = []
            for key in samples[0]:
                values = [sample[key] for sample in samples]
                if any(value is None for value in values):
                    none_keys.append(key)

            if none_keys:
                print(f"Warning: Found None values in keys: {none_keys}")

            return {key: default_collate([sample[key] for sample in samples]) for key in samples[0]}
        raise NotImplementedError
    else:
        return default_collate(samples)


class CollectionDataset_dump(IterableDataset):
    def __init__(
        self,
        train_data: list[str],
        train_data_weights: list[int | float],
        dataset_collections: Dict[str, Dict],
        batch_size=1,
        image_batch_size=48,
        enable_bucket=False,
        infinite=True,
        shuffle=True,
        local_cache='',  # this should be a ByteNAS path
        data_cache_prefix={'AIPVideoDataset': 'aip_dataset_cache'},
    ):
        # prepare for bucketing
        self.enable_bucket = enable_bucket
        self.batch_size = batch_size
        self.image_batch_size = image_batch_size

        self.buckets = {}
        self.buckets_transform = {}
        self.resolutions = set()
        if not self.enable_bucket:
            assert batch_size == 1, "if not enable_bucket, batch_size must be 1"

        self.train_data_weights = train_data_weights

        self.dataset_list = []
        self.dataset_names = []
        self.image_dataset_names = []
        self.dataset_collections = dataset_collections
        self.dataset_to_aspect_ratios = {}
        self.init_state_dict = {}
        self.local_cache_prefix_list = []
        for data_name in train_data:
            if data_name not in dataset_collections:
                # fail fast instead of silently leaving the dataset half-initialized
                raise KeyError(f'{data_name} not in dataset collections')
            self.dataset_config = dataset_collections[data_name]
            aspect_ratios = self.dataset_config['aspect_ratios']
            self.dataset_to_aspect_ratios[data_name] = aspect_ratios
            self.add_aspect_ratios(aspect_ratios)

            module, cls = self.dataset_config['target'].rsplit(".", 1)
            data_class = getattr(
                importlib.import_module(module, package=None), cls)
            if cls == 'T2IHDFSDataset' or cls == 'T2IHDFSDataset_dump':
                self.image_dataset_names.append(data_name)

            if cls in data_cache_prefix:
                data_cache = os.path.join(local_cache, data_cache_prefix[cls])
                os.makedirs(data_cache, exist_ok=True)
                local_cache_prefix = os.path.join(data_cache, data_name)
                self.clean_cache(local_cache_prefix)
                self.dataset_config['params']['local_cache_prefix'] = local_cache_prefix
                self.local_cache_prefix_list.append(local_cache_prefix)
            else:
                self.local_cache_prefix_list.append('')
            dataset = data_class.create_dataset_function(
                self.dataset_config['path'], None, **self.dataset_config['params'])
            if cls == 'AIPVideoDataset':
                self.init_state_dict[data_name] = dataset.state_dict
            self.dataset_list.append(dataset)
            self.dataset_names.append(data_name)
        self.length = sum([len(dataset) for dataset in self.dataset_list])
        self.dataset_iter_list = [iter(dataset) for dataset in self.dataset_list]

    def add_aspect_ratios(self, aspect_ratios):
        for key in aspect_ratios.keys():
            self.buckets[key] = []

        for key, sample_size in aspect_ratios.items():
            sample_size = tuple(sample_size)
            self.buckets_transform[key] = transforms.Compose([
                transforms.Resize(min(sample_size[0], sample_size[1])),  # fix when height > width
                transforms.CenterCrop(sample_size),
            ])
        for h, w in aspect_ratios.values():
            self.resolutions.add((49, h, w))

    def get_bucket_id(self, item, dataset_name):
        """
        For large-resolution data, we may have multiple bucket ids.
        """
        _, _, _, H, W = item['mp4']['latent_256_size']
        H = H * 64
        W = W * 64
        ratio = float(H) / float(W)

        ratio_strategy = self.dataset_collections[dataset_name]['ratio_strategy']
        ratios = self.dataset_to_aspect_ratios[dataset_name]
        if ratio_strategy == 'random':
            bucket_id = random.choice(list(ratios.keys()))
        elif ratio_strategy == 'closest':
            bucket_id = min(ratios.items(),
                            key=lambda r: abs(float(r[1][0]) / float(r[1][1]) - ratio))[0]
        else:
            # raising a plain string is a TypeError in Python 3; raise a proper exception
            raise ValueError(f"ratio_strategy {ratio_strategy} not supported ...")

        return bucket_id

    def __len__(self):
        return self.length

    def crop_and_resize(self, image, h_prime, w_prime):
        """
        Crop and resize a 4D tensor image.

        Args:
            image: The input 4D tensor image of shape (frame, channel, h, w).
            h_prime: Desired height of the cropped image.
            w_prime: Desired width of the cropped image.

        Returns:
            The cropped and resized 4D tensor image.
        """
        frames, channels, h, w = image.shape
        aspect_ratio_original = h / w
        aspect_ratio_target = h_prime / w_prime

        if aspect_ratio_original >= aspect_ratio_target:
            new_h = int(w * aspect_ratio_target)
            top = (h - new_h) // 2
            bottom = top + new_h
            left = 0
            right = w
        else:
            new_w = int(h / aspect_ratio_target)
            left = (w - new_w) // 2
            right = left + new_w
            top = 0
            bottom = h
        # print(f"left {left}, right {right}, top {top}, bottom {bottom}")
        # Crop the image
        cropped_image = image[:, :, top:bottom, left:right]
        # Resize the cropped image
        resized_image = F.resize(cropped_image, (h_prime, w_prime))
        return resized_image

    def put_to_bucket(self, item, dataset_name):
        if len(item['latent'].shape) == 5:
            _, _, _, H, W = item['latent'].shape
        else:
            _, _, H, W = item['latent'].shape
        bucket_id = None  # resolved below by matching the latent size to an aspect-ratio bucket
        for key, value in self.dataset_to_aspect_ratios[dataset_name].items():
            if value == [H * 64, W * 64]:
                bucket_id = key
        ori_frames, ori_c, ori_H, ori_W = item['mp4'].shape
        ori_ratio = ori_H / ori_W
        bucket_h, bucket_w = self.dataset_to_aspect_ratios[dataset_name][bucket_id][0], \
            self.dataset_to_aspect_ratios[dataset_name][bucket_id][1]
        bucket_ratio = bucket_h / bucket_w
        # print(f"ori_H {ori_H}, ori_W {ori_W}, ori_ratio {ori_ratio}. bucket_h {bucket_h}, bucket_w {bucket_w}, bucket_ratio {bucket_ratio}")
        item['mp4'] = self.crop_and_resize(item['mp4'], bucket_h, bucket_w)

        # rewrite item to the same format as the original dataset
        new_item = {}
        new_item['videos'] = item['mp4']
        if len(item['latent'].shape) == 5:
            new_item['latent'] = item['latent'][0]
        else:
            new_item['latent'] = item['latent']
        new_item['prompts'] = item['txt'] if item['txt'] is not None else ""  # check text
        latent_tail = item.get('latent_tail')
        if latent_tail is not None:
            new_item['latent_tail'] = item['latent_tail']
        latent_flow = item.get('latent_flow')
        if latent_flow is not None:
            new_item['latent_flow'] = item['latent_flow']
        self.buckets[bucket_id].append(new_item)

        batch = None
        cur_batch_size = self.image_batch_size if bucket_id.startswith("i-") else self.batch_size
        if len(self.buckets[bucket_id]) >= cur_batch_size:
            batch = self.buckets[bucket_id]
            self.buckets[bucket_id] = []
        return batch

    def __iter__(self):
        def __native__iter():
            while True:
                dataset_idx = random.choices(
                    list(range(len(self.dataset_list))), weights=self.train_data_weights)[0]
                dataset = self.dataset_iter_list[dataset_idx]
                yield next(dataset)

        def __bucket__iter():
            def get_next_item(dataset):
                return next(dataset)
            while True:
                dataset_idx = random.choices(
                    list(range(len(self.dataset_list))), weights=self.train_data_weights)[0]
                dataset = self.dataset_iter_list[dataset_idx]
                dataset_name = self.dataset_names[dataset_idx]
                if dataset_name in self.image_dataset_names:
                    replicate_times = max(int(self.image_batch_size / self.batch_size), 1)
                    batch_data_list = []
                    while replicate_times > 0:
                        item = next(dataset)
                        batch_data = self.put_to_bucket(item, dataset_name)
                        if batch_data is not None:
                            batch_data_list.append(batch_data)
                        replicate_times -= 1
                    for batch_data in batch_data_list:
                        yield batch_data
                else:
                    # fetch with a timeout so one stuck dataset cannot stall the whole collection
                    with concurrent.futures.ThreadPoolExecutor() as executor:
                        future = executor.submit(get_next_item, dataset)
                        try:
                            item = future.result(timeout=10)
                        except concurrent.futures.TimeoutError:
                            print(f"timeout for get data from {dataset_name}")
                            continue
                    if item == "wtf_is_abnormal":
                        print(f"too much abnormal from {dataset_name}, continue")
                        continue
                    if item == "max_bad_file_count_reached":
                        print(f"{dataset_name} for this worker is corrupted, continue")
                        continue
                    batch_data = self.put_to_bucket(item, dataset_name)
                    if batch_data is not None:
                        yield batch_data

        if self.enable_bucket:
            return __bucket__iter()
        else:
            return __native__iter()

    def state_dict(self):
        output_state_dict = deepcopy(self.init_state_dict)
        for dataset_name, local_cache_prefix in zip(self.dataset_names, self.local_cache_prefix_list):
            if dataset_name not in self.init_state_dict:
                continue
            cache_list = glob.glob(f'{local_cache_prefix}*')
            for cache_path in cache_list:
                with open(cache_path, 'r') as f:
                    for l in f.readlines():
                        r = int(l.strip())
                        output_state_dict[dataset_name]['seen_times'][r] += 1
        return output_state_dict

    def load_state_dict(self, state_dict):
        for dataset_name, local_cache_prefix, dataset in zip(self.dataset_names, self.local_cache_prefix_list, self.dataset_list):
            if dataset_name not in state_dict:
                continue
            if dataset_name not in self.init_state_dict:
                continue
            self.clean_cache(local_cache_prefix)
            dataset.load_state_dict(state_dict[dataset_name])
            self.init_state_dict[dataset_name] = dataset.state_dict

    def clean_cache(self, local_cache_prefix):
        for fname in glob.glob(f'{local_cache_prefix}*'):
            try:
                os.remove(fname)
            except OSError:
                pass

    @classmethod
    def create_dataset_function(cls, data, data_weights, **kwargs):
        return cls(data, data_weights, **kwargs)


class CollectionDataset(IterableDataset):
    def __init__(
        self,
        train_data: list[str],
        train_data_weights: list[int | float],
        dataset_collections: Dict[str, Dict],
        batch_size=1,
        image_batch_size=48,
        enable_bucket=False,
        infinite=True,
        shuffle=True,
        local_cache='',  # this should be a ByteNAS path
        data_cache_prefix={'AIPVideoDataset': 'aip_dataset_cache'},
    ):
        # prepare for bucketing
        self.enable_bucket = enable_bucket
        self.batch_size = batch_size
        self.image_batch_size = image_batch_size

        self.buckets = {}
        self.buckets_transform = {}
        self.resolutions = set()
        if not self.enable_bucket:
            assert batch_size == 1, "if not enable_bucket, batch_size must be 1"

        self.train_data_weights = train_data_weights

        self.dataset_list = []
        self.dataset_names = []
        self.image_dataset_names = []
        self.dataset_collections = dataset_collections
        self.dataset_to_aspect_ratios = {}
        self.init_state_dict = {}
        self.local_cache_prefix_list = []
        for data_name in train_data:
            if data_name not in dataset_collections:
                # fail fast instead of silently leaving the dataset half-initialized
                raise KeyError(f'{data_name} not in dataset collections')
            self.dataset_config = dataset_collections[data_name]
            aspect_ratios = self.dataset_config['aspect_ratios']
            self.dataset_to_aspect_ratios[data_name] = aspect_ratios
            self.add_aspect_ratios(aspect_ratios)

            module, cls = self.dataset_config['target'].rsplit(".", 1)
            data_class = getattr(
                importlib.import_module(module, package=None), cls)
            if cls == 'T2IHDFSDataset':
                self.image_dataset_names.append(data_name)

            if cls in data_cache_prefix:
                data_cache = os.path.join(local_cache, data_cache_prefix[cls])
                os.makedirs(data_cache, exist_ok=True)
                local_cache_prefix = os.path.join(data_cache, data_name)
                self.clean_cache(local_cache_prefix)
                self.dataset_config['params']['local_cache_prefix'] = local_cache_prefix
                self.local_cache_prefix_list.append(local_cache_prefix)
            else:
                self.local_cache_prefix_list.append('')
            dataset = data_class.create_dataset_function(
                self.dataset_config['path'], None, **self.dataset_config['params'])
            if cls == 'AIPVideoDataset':
                self.init_state_dict[data_name] = dataset.state_dict
            self.dataset_list.append(dataset)
            self.dataset_names.append(data_name)

        self.length = sum([len(dataset) for dataset in self.dataset_list])
        self.dataset_iter_list = [iter(dataset) for dataset in self.dataset_list]

    def add_aspect_ratios(self, aspect_ratios):
        for key in aspect_ratios.keys():
            self.buckets[key] = []

        for key, sample_size in aspect_ratios.items():
            sample_size = tuple(sample_size)
            self.buckets_transform[key] = transforms.Compose([
                transforms.Resize(min(sample_size[0], sample_size[1])),  # fix when height > width
                transforms.CenterCrop(sample_size),
            ])
        for h, w in aspect_ratios.values():
            self.resolutions.add((49, h, w))

    def get_bucket_id(self, item, dataset_name):
        """
        For large-resolution data, we may have multiple bucket ids.
        """
        frames, c, H, W = item['mp4'].shape
        ratio = float(H) / float(W)

        ratio_strategy = self.dataset_collections[dataset_name]['ratio_strategy']
        ratios = self.dataset_to_aspect_ratios[dataset_name]
        if ratio_strategy == 'random':
            bucket_id = random.choice(list(ratios.keys()))
        elif ratio_strategy == 'closest':
            bucket_id = min(ratios.items(),
                            key=lambda r: abs(float(r[1][0]) / float(r[1][1]) - ratio))[0]
        else:
            # raising a plain string is a TypeError in Python 3; raise a proper exception
            raise ValueError(f"ratio_strategy {ratio_strategy} not supported ...")

        return bucket_id

    def __len__(self):
        return self.length

    def crop_and_resize(self, image, h_prime, w_prime):
        """
        Crop and resize a 4D tensor image.

        Args:
            image: The input 4D tensor image of shape (frame, channel, h, w).
            h_prime: Desired height of the cropped image.
            w_prime: Desired width of the cropped image.

        Returns:
            The cropped and resized 4D tensor image.
        """
        frames, channels, h, w = image.shape
        aspect_ratio_original = h / w
        aspect_ratio_target = h_prime / w_prime

        if aspect_ratio_original >= aspect_ratio_target:
            new_h = int(w * aspect_ratio_target)
            top = (h - new_h) // 2
            bottom = top + new_h
            left = 0
            right = w
        else:
            new_w = int(h / aspect_ratio_target)
            left = (w - new_w) // 2
            right = left + new_w
            top = 0
            bottom = h
        # print(f"left {left}, right {right}, top {top}, bottom {bottom}")
        # Crop the image
        cropped_image = image[:, :, top:bottom, left:right]
        # Resize the cropped image
        resized_image = F.resize(cropped_image, (h_prime, w_prime))
        return resized_image

    def _save_frames(self, frame_raw, uid, fps, stride=None, base_path="/mnt/bn/yufan-dev-my/ysh/Datasets/sft_sftnews_videos"):
        if stride:
            output_path = f"{base_path}/processed/stride{stride}"
        else:
            output_path = f"{base_path}/processed"
        os.makedirs(output_path, exist_ok=True)

        save_list = []
        frame_height, frame_width = None, None
        for frame in frame_raw:
            frame = (frame + 1) / 2 * 255
            frame = transforms.ToPILImage()(frame.to(torch.uint8)).convert("RGB")
            if frame_height is None:
                frame_height, frame_width = frame.height, frame.width
                video_path = f"{output_path}/{uid}_{len(frame_raw)}_{frame_height}_{frame_width}.mp4"
                if os.path.exists(video_path):
                    print(f"skip original video: {video_path}")
                    return
            save_list.append(frame)
            frame = None
            del frame

        if not save_list:
            return

        export_to_video(save_list, video_path, fps=fps)
        print(f"save to {video_path}")

        save_list = None
        del save_list

        free_memory()

    def put_to_bucket(self, item, dataset_name):
        bucket_id = self.get_bucket_id(item, dataset_name)
        ori_frames, ori_c, ori_H, ori_W = item['mp4'].shape
        ori_ratio = ori_H / ori_W
        bucket_h, bucket_w = self.dataset_to_aspect_ratios[dataset_name][bucket_id][0], \
            self.dataset_to_aspect_ratios[dataset_name][bucket_id][1]
        bucket_ratio = bucket_h / bucket_w
        # print(f"ori_H {ori_H}, ori_W {ori_W}, ori_ratio {ori_ratio}. bucket_h {bucket_h}, bucket_w {bucket_w}, bucket_ratio {bucket_ratio}")
        item['mp4'] = self.crop_and_resize(item['mp4'], bucket_h, bucket_w)

        # ----- save video -----
        try:
            if item["topk_avg_motion_scores_t"] >= 400:
                base_path = "/mnt/bn/yufan-dev-my/ysh/Datasets/sft_sftnews_videos/high_motion"
            else:
                base_path = "/mnt/bn/yufan-dev-my/ysh/Datasets/sft_sftnews_videos/low_motion"
            self._save_frames(item['mp4'], item["uttid"], item['fps'], base_path=base_path)

            item["stride_mp4"] = self.crop_and_resize(item["stride_mp4"], bucket_h, bucket_w)
            self._save_frames(item["stride_mp4"], item["uttid"], item['fps'], stride=item['stride'], base_path=base_path)
        except Exception:
            pass
        # ----- save video -----

        # ----- save meta -----
        if item["topk_avg_motion_scores_t"] >= 400:
            base_path = "/mnt/bn/yufan-dev-my/ysh/Datasets/sft_sftnews_videos/metadata/high_motion"
        else:
            base_path = "/mnt/bn/yufan-dev-my/ysh/Datasets/sft_sftnews_videos/metadata/low_motion"
        os.makedirs(base_path, exist_ok=True)
        output_path = os.path.join(base_path, f"{item['uttid']}.json")
        if not os.path.exists(output_path):
            meta = {
                "uttid": item["uttid"],
                "text": item['txt'],
                "ori_num_frames": item["ori_num_frames"],
                "ori_height": item["ori_height"],
                "ori_width": item["ori_width"],
                "cur_num_frames": item["cur_num_frames"],
                "cur_height": item['mp4'].shape[-2],
                "cur_width": item['mp4'].shape[-1],
                "topk_avg_motion_scores_t": item["topk_avg_motion_scores_t"],
            }
            with open(output_path, 'w') as f:
                json.dump(meta, f, indent=2)
            print(f"save json to {output_path}")
        # ----- save meta -----

        first_frame = item['mp4'][0]
        item["first_frames_images"] = (first_frame + 1) / 2 * 255

        frames, c, H, W = item['mp4'].shape
        # rewrite item to the same format as the original dataset
        new_item = {}
        new_item['videos'] = item['mp4']
        new_item['prompts'] = item['txt'] if item['txt'] is not None else ""  # check text
        new_item['video_metadata'] = {
            'num_frames': frames,
            'height': H,
            'width': W,
        }
        new_item["first_frames_images"] = item["first_frames_images"]
        new_item["uttid"] = item["uttid"]
        new_item['stride_videos'] = item["stride_mp4"]
        new_item["topk_avg_motion_scores_t"] = item["topk_avg_motion_scores_t"]
        self.buckets[bucket_id].append(new_item)

        batch = None
        cur_batch_size = self.image_batch_size if bucket_id.startswith("i-") else self.batch_size
        if len(self.buckets[bucket_id]) >= cur_batch_size:
            batch = self.buckets[bucket_id]
            self.buckets[bucket_id] = []

        new_item = None
        item = None
        meta = None
        del meta
        del item
        del new_item
        free_memory()

        return batch

    def __iter__(self):
        def __native__iter():
            while True:
                dataset_idx = random.choices(
                    list(range(len(self.dataset_list))), weights=self.train_data_weights)[0]
                dataset = self.dataset_iter_list[dataset_idx]
                yield next(dataset)

        def __bucket__iter():
            while True:
                dataset_idx = random.choices(
                    list(range(len(self.dataset_list))), weights=self.train_data_weights)[0]
                dataset = self.dataset_iter_list[dataset_idx]
                dataset_name = self.dataset_names[dataset_idx]
                if dataset_name in self.image_dataset_names:
                    replicate_times = max(int(self.image_batch_size / self.batch_size), 1)
                    batch_data_list = []
                    while replicate_times > 0:
                        item = next(dataset)
                        batch_data = self.put_to_bucket(item, dataset_name)
                        if batch_data is not None:
                            batch_data_list.append(batch_data)
                        replicate_times -= 1
                    for batch_data in batch_data_list:
                        yield batch_data
                else:
                    item = next(dataset)
                    batch_data = self.put_to_bucket(item, dataset_name)
                    if batch_data is not None:
                        yield batch_data

        if self.enable_bucket:
            return __bucket__iter()
        else:
            return __native__iter()

    def state_dict(self):
        output_state_dict = deepcopy(self.init_state_dict)
        for dataset_name, local_cache_prefix in zip(self.dataset_names, self.local_cache_prefix_list):
            if dataset_name not in self.init_state_dict:
                continue
            cache_list = glob.glob(f'{local_cache_prefix}*')
            for cache_path in cache_list:
                with open(cache_path, 'r') as f:
                    for l in f.readlines():
                        r = int(l.strip())
                        output_state_dict[dataset_name]['seen_times'][r] += 1
        return output_state_dict

    def load_state_dict(self, state_dict):
        for dataset_name, local_cache_prefix, dataset in zip(self.dataset_names, self.local_cache_prefix_list, self.dataset_list):
            if dataset_name not in state_dict:
                continue
            if dataset_name not in self.init_state_dict:
                continue
            self.clean_cache(local_cache_prefix)
            dataset.load_state_dict(state_dict[dataset_name])
            self.init_state_dict[dataset_name] = dataset.state_dict

    def clean_cache(self, local_cache_prefix):
        for fname in glob.glob(f'{local_cache_prefix}*'):
            try:
                os.remove(fname)
            except OSError:
                pass

    @classmethod
    def create_dataset_function(cls, data, data_weights, **kwargs):
        return cls(data, data_weights, **kwargs)
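
A minimal sketch of wiring CollectionDataset into a DataLoader with collate_fn_map. The collection entry, path, and aspect-ratio values are hypothetical placeholders; the real configs live outside this diff.

# Hypothetical collection config; names and values are illustrative only.
from torch.utils.data import DataLoader
from dataset_tool import CollectionDataset, collate_fn_map

dataset_collections = {
    'example_videos': {
        'target': 'dataset_tool.AIP_dataset.AIPVideoDataset',
        'path': 'hdfs://example/parquet_root',
        'aspect_ratios': {'9-16': [1024, 576]},
        'ratio_strategy': 'closest',
        'params': {'sample_size': -1, 'sample_n_frames': 49},
    },
}
dataset = CollectionDataset(
    train_data=['example_videos'],
    train_data_weights=[1.0],
    dataset_collections=dataset_collections,
    batch_size=4,
    enable_bucket=True,
)
# batch_size=1 here because __bucket__iter already yields full bucket batches;
# collate_fn_map unwraps that outer dimension.
loader = DataLoader(dataset, batch_size=1, collate_fn=collate_fn_map)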
dataset_code/sft_sftnews/offload/dataset_tool/dataset_hdfs.py ADDED
@@ -0,0 +1,198 @@
# -------------------------------------
# Modified by: Jacob Zhiyuan Fang
# Date: 2024/09/10
# Email: jacob.fang@bytedance.com
# Author: Xun Guo
# Email: guoxun.99@bytedance.com
# Date: 2024/05/29
# -------------------------------------

import os
import json
import time
import random
import subprocess

import torch
import numpy as np
import tensorflow as tf
import multiprocessing as mp
import torchvision.transforms as transforms

from .parquet_dataset.parquet_utils import get_random_for_rank_and_worker, get_portion_for_rank_and_worker
from typing import List, Tuple
from dataloader import KVReader
from torch.utils.data.dataset import Dataset
from torchvision.transforms.functional import to_pil_image

from diffusers.training_utils import free_memory


class T2VHDFSDataset(Dataset):
    def __init__(self,
                 json_path,
                 sample_size=256,
                 sample_stride=4,
                 sample_n_frames=16,
                 is_image=False,
                 pick=False,
                 fps=24,
                 shuffle=True,
                 infinite=True,
                 ):
        super().__init__()

        with open(json_path, 'r') as jsonfile:
            self.dataset = json.load(jsonfile)
        assert type(
            self.dataset) == list, "The annotation file should contain a list !!!"

        # IMPORTANT: Prevent tf from loading tensors onto the GPU.
        tf.config.set_visible_devices([], 'GPU')
        self._context_features = {
            'title': tf.io.FixedLenFeature([], dtype=tf.string)}
        self._sequence_features = {
            'data': tf.io.FixedLenSequenceFeature([], dtype=tf.string)}

        self.length = len(self.dataset)
        self.sample_n_frames = sample_n_frames
        self.sample_stride = sample_stride
        self.is_image = is_image
        self.pick = pick
        self.num_parallel_reader = 32
        self.shuffle = shuffle
        self.infinite = infinite
        if sample_size == -1:  # if sample_size is -1, use the identity transformation
            self.pixel_transforms = transforms.Compose([
                transforms.Lambda(lambda x: x)
            ])
        else:
            sample_size = tuple(sample_size) if not isinstance(
                sample_size, int) else (sample_size, sample_size)
            self.pixel_transforms = transforms.Compose([
                transforms.Resize(sample_size[0]),
                transforms.CenterCrop(sample_size),
            ])
        self.fps = fps

    def __iter__(self):
        if self.shuffle:
            get_random_for_rank_and_worker(None).shuffle(self.dataset)
        part_dataset = get_portion_for_rank_and_worker(self.dataset)
        while True:
            if self.shuffle:
                get_random_for_rank_and_worker(None).shuffle(part_dataset)
            for idx in range(len(part_dataset)):
                try:
                    to_return = self.__getitem_impl__(idx)
                    yield to_return
                except (RuntimeError, ValueError):
                    print('Appearing HDFS iops error setting src img \n' * 5)
                    # idx = random.sample(range(self.length), 1)[0]
            if not self.infinite:
                break

    def __len__(self):
        return len(self.dataset)

    def decode_image(self, raw_data):
        return tf.image.decode_jpeg(raw_data, channels=3, dct_method='INTEGER_ACCURATE').numpy()

    def get_batch(self, idx):
        video_dict = self.dataset[idx]
        video_name, index_file, caption = video_dict[
            'video_name'], video_dict['index_file'], video_dict['caption']
        reader = KVReader(index_file, self.num_parallel_reader)
        keys = reader.list_keys()
        assert video_name in keys, "video file not in this index file !!!"
        values = reader.read_many([video_name])[0]

        # Decode record
        contexts, sequences = tf.io.parse_single_sequence_example(
            serialized=values,
            context_features=self._context_features,
            sequence_features=self._sequence_features)

        # Raw frames data
        raw_frames = sequences['data']
        del reader
        video_length = len(raw_frames)

        # Sample frames
        if not self.is_image:
            # Jacob Sep 17th: If sample frames > video frames, we drop this video
            if (self.sample_n_frames - 1) * self.sample_stride + 1 > video_length:
                return None, None
            clip_length = min(
                video_length, (self.sample_n_frames - 1) * self.sample_stride + 1)
            start_idx = random.randint(0, video_length - clip_length)
            batch_index = np.linspace(
                start_idx, start_idx + clip_length - 1, self.sample_n_frames, dtype=int)
        else:
            batch_index = [random.randint(0, video_length - 1)]

        # Decode frames
        pixel_values = []
        for idx in batch_index:
            frame = raw_frames[idx]
            frame = self.decode_image(frame)
            frame = torch.as_tensor(frame).float().permute(2, 0, 1)
            frame = (frame - 127.5) / 127.5
            pixel_values.append(frame)

        if self.is_image:
            pixel_values = pixel_values[0]

        pixel_values = torch.stack(pixel_values, dim=0)
        return pixel_values, caption

    def __getitem_impl__(self, idx, candidate=None):
        # To avoid bad videos, we retry if there is an Exception.
        # By default the videos are all sized 512x910, so no filtering is needed.
        if candidate is None:
            candidate = list(range(self.length))
        while True:
            try:
                pixel_values, caption = self.get_batch(idx)

                if pixel_values is None:
                    # restart
                    idx = random.sample(candidate, 1)[0]
                else:
                    # end the iteration
                    break
            except Exception as e:
                print(f"VideoTextPairDataset got unexpected exception: {e}")
                idx = random.sample(candidate, 1)[0]
        pixel_values = self.pixel_transforms(pixel_values)

        # pixel_values in shape of Frames x channel x H x W
        sample = dict(
            mp4=pixel_values,
            txt=caption,
            num_frames=self.sample_n_frames,
            fps=self.fps,
        )

        return sample

    @classmethod
    def create_dataset_function(cls, json_path, args, **kwargs):
        return cls(json_path=json_path, **kwargs)


# Dataset unit test checking how many videos are not preferred
if __name__ == "__main__":
    dataset = T2VHDFSDataset(
        json_path="/mnt/bn/icvg/video_gen/captions/pond5_res/pond5_data_res_human.json",
        sample_size=512,
        sample_stride=4,
        sample_n_frames=49,
        is_image=False,
        pick=False,
    )
    dataloader = torch.utils.data.DataLoader(
        dataset, batch_size=1, num_workers=1)
    for idx, batch in enumerate(dataloader):
        if idx % 100 == 0:
            breakpoint()
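
As a quick sanity check on the clip-span arithmetic shared by get_batch() above and sampling() in AIP_dataset.py (the numbers below are illustrative, not from the repo's configs):

# A clip of sample_n_frames frames at sample_stride needs
# (sample_n_frames - 1) * sample_stride + 1 source frames.
sample_n_frames, sample_stride = 49, 4
span = (sample_n_frames - 1) * sample_stride + 1   # 193 source frames needed
# T2VHDFSDataset drops any video shorter than `span`; AIPVideoDataset also
# reserves skip_start_end=10 frames at each end, so it needs span + 20 frames.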
dataset_code/sft_sftnews/offload/dataset_tool/image_dataset.py ADDED
@@ -0,0 +1,929 @@
1
+ import os
2
+ import sys
3
+ import time
4
+ import torch
5
+ import random
6
+ import bson, json
7
+ from dataloader import KVReader, FalconReader
8
+ from dataclasses import dataclass, field
9
+ from typing import Any, Dict, List, Optional, Tuple
10
+ from torch.utils.data.dataset import Dataset
11
+ from torchvision.transforms import functional as TVF
12
+ from torchvision.transforms.functional import InterpolationMode
13
+ from torchvision.transforms import Compose, ToTensor, Normalize, RandomResizedCrop
14
+ from pyarrow import fs, Field
15
+ import pyarrow.parquet as pq
16
+ import numpy as np
17
+
18
+ ########## Utils ##########
19
+ def hlist_files(folders, postfix=".index"):
20
+ """
21
+ 罗列一些 hdfs 路径下的文件。
22
+ """
23
+ import subprocess
24
+ import os
25
+ if isinstance(folders, str):
26
+ folders = [folders]
27
+ files = []
28
+ for folder in folders:
29
+ if folder.startswith('hdfs'):
30
+ pipe = subprocess.Popen("hdfs dfs -ls -R {}".format(folder), shell=True,
31
+ stdout=subprocess.PIPE)
32
+ # output, _ = pipe.communicate()
33
+ for line in pipe.stdout: # type: ignore
34
+ line = line.strip()
35
+ # drwxr-xr-x - user group 4 file
36
+ if len(line.split()) < 5:
37
+ continue
38
+ filepath = line.split()[-1].decode("utf8")
39
+ if filepath.endswith(postfix):
40
+ files.append(filepath)
41
+ pipe.stdout.close() # type: ignore
42
+ pipe.wait()
43
+ else:
44
+ return []
45
+ files = sorted(files)
46
+ return files
47
+
48
+
49
+ def resize_crop(image, image_height, image_width, use_resize_random_crop=False):
50
+ aspect_ratio = image_width / image_height
51
+ if not use_resize_random_crop:
52
+ resize = RandomResizedCrop(
53
+ size=(image_height, image_width), # Crop to the target height and width.
54
+ scale=(1, 1), # Do not scale.
55
+ ratio=(aspect_ratio, aspect_ratio), # Keep target aspect ratio.
56
+ interpolation=InterpolationMode.LANCZOS # Use LANCZOS for downsampling.
57
+ )
58
+ crop_top_coord, crop_left_coord, _, _ = resize.get_params(image, scale=(1, 1), ratio=(
59
+ aspect_ratio, aspect_ratio))
60
+ crop_coords_top_left = torch.tensor([crop_top_coord, crop_left_coord])
61
+ image = resize(image)
62
+ else:
63
+ image_aspect_ratio = image.width / image.height
64
+ if image_aspect_ratio >= aspect_ratio:
65
+ image_resize_h = image_height
66
+ image_resize_w = int(round(image_height * (image.width / image.height)))
67
+ crop_top_coord = 0
68
+ crop_left_coord = random.randint(0, image_resize_w - image_width)
69
+ else:
70
+ image_resize_w = image_width
71
+ image_resize_h = int(round(image_width * (image.height / image.width)))
72
+ crop_top_coord = random.randint(0, image_resize_h - image_height)
73
+ crop_left_coord = 0
74
+ image = TVF.resize(image, size=[image_resize_h, image_resize_w],
75
+ interpolation=InterpolationMode.LANCZOS)
76
+ image = TVF.crop(image, crop_top_coord, crop_left_coord, image_height,
77
+ image_width)
78
+ crop_coords_top_left = torch.tensor([crop_top_coord, crop_left_coord])
79
+ return image, crop_coords_top_left
80
+
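+ # A minimal usage sketch of resize_crop (the input image and sizes are
+ # assumed for illustration). With use_resize_random_crop=True the image is
+ # resized so its short side meets the target, then randomly cropped along
+ # the long side:
+ #
+ # from PIL import Image
+ # img = Image.new("RGB", (1280, 720)) # hypothetical input
+ # out, top_left = resize_crop(img, 512, 512, use_resize_random_crop=True)
+ # assert out.size == (512, 512)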
81
+
82
+ def partition_by_size(data: List[Any], size: int) -> List[List[Any]]:
83
+ """
84
+ Partition a list by size.
85
+ When indivisible, the last group contains fewer items than the target size.
86
+
87
+ Examples:
88
+ - data: [1,2,3,4,5]
89
+ - size: 2
90
+ - return: [[1,2], [3,4], [5]]
91
+ """
92
+ return [data[i:i+size] for i in range(0, len(data), size)]
93
+
94
+
95
+ class timer:
96
+ def __init__(self, op, wait_seconds):
97
+ self.op = op
98
+ self.wait_seconds = wait_seconds
99
+
100
+ def __enter__(self):
101
+ self.start_time = time.time()
102
+
103
+ def __exit__(self, *exc_info):
104
+ self.stop_time = time.time()
105
+ self.elapsed_seconds = self.stop_time - self.start_time
106
+ if self.elapsed_seconds > self.wait_seconds:
107
+ print(f"Op: '{self.op}' took: {round(self.elapsed_seconds, 2)} seconds.", file=sys.stderr)
108
+
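+ # Usage sketch: `timer` only logs when the block exceeds wait_seconds,
+ # keeping logs quiet for fast operations. The op string is illustrative.
+ #
+ # with timer(op="read key batch", wait_seconds=2):
+ # value_batch = reader.read_many(key_batch) # hypothetical slow call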
109
+
110
+ ########## ImageDecoder ##########
111
+ import io
112
+ from PIL import Image
113
+ from base64 import b64decode
114
+ from abc import abstractmethod
115
+
116
+ class ImageDecoder:
117
+ """
118
+ Decode an image from a JSON dictionary.
119
+ Return None or raise an exception if the sample cannot be decoded, so it can be skipped.
120
+ """
121
+ @abstractmethod
122
+ def __call__(self, item: Dict[str, Any]) -> Optional[Image.Image]:
123
+ raise NotImplementedError()
124
+
125
+
126
+ class GeneralImageDecoder(ImageDecoder):
127
+ """
128
+ Read an image from an HDFS data entry, usually in bytes format
129
+ """
130
+ def __init__(self):
131
+ # Avoid image too large warning messages.
132
+ Image.MAX_IMAGE_PIXELS = 1000000000
133
+
134
+ def __call__(self, item: Dict[str, Any]) -> Optional[Image.Image]:
135
+ image_data = item.get("image_org") or item.get("image") or item.get("binary")
136
+ if image_data is None:
137
+ return None
138
+
139
+ if isinstance(image_data, bytes):
140
+ image_bytes = image_data
141
+ else:
142
+ image_bytes = b64decode(image_data)
143
+
144
+ with Image.open(io.BytesIO(image_bytes)) as image:
145
+ if image.mode == "RGBA" or image.info.get("transparency", None) is not None:
146
+ image = image.convert("RGBA")
147
+ white = Image.new(mode="RGB", size=image.size, color=(255, 255, 255))
148
+ white.paste(image, mask=image.split()[3])
149
+ image = white
150
+ else:
151
+ image = image.convert("RGB")
152
+ return image
153
+
154
+
155
+ ########## ImagePredicate ##########
156
+ class ImagePredicate:
157
+ """
158
+ Check if an image satisfies certain requirements.
159
+ Return False if the requirements are not satisfied and True if the check passes.
160
+
161
+ Be sure to pass keyword arguments when calling.
162
+ """
163
+ @abstractmethod
164
+ def __call__(self, image: Image.Image, **kwargs) -> bool:
165
+ raise NotImplementedError()
166
+
167
+
168
+ class ImageMultiPredicate(ImagePredicate):
169
+ def __init__(self, predicates: List[ImagePredicate]):
170
+ self.predicates = predicates
171
+
172
+ def __call__(self, image: Image.Image, **kwargs) -> bool:
173
+ for predicate in self.predicates:
174
+ if not predicate(image, **kwargs):
175
+ return False
176
+ return True
177
+
178
+
179
+ class ImageBucketResolutionPredicate(ImagePredicate):
180
+ def __call__(self, image: Image.Image, bucket: Any, **kwargs) -> bool:
181
+ if image.size[0] < bucket.image_width or image.size[1] < bucket.image_height:
182
+ return False
183
+ return True
184
+
185
+
186
+ class ImageAestheticPredicate(ImagePredicate):
187
+ def __init__(self, aes_thed=0):
188
+ self.aes_thed = aes_thed
189
+
190
+ def __call__(self, image: Image.Image, content: dict, **kwargs) -> bool:
191
+ return ("aesthetic" not in content) or (content["aesthetic"] >= self.aes_thed)
192
+
193
+
194
+ ########## TextCleaner ##########
195
+ import re
196
+ import ftfy
197
+ import html
198
+ import urllib.parse as ul
199
+ from bs4 import BeautifulSoup
200
+
201
+ class TextCleaner:
202
+ """
203
+ Clean up a caption with strange/improper content
204
+ """
205
+ bad_punct_regex = re.compile(
206
+ r'[' + '#®•©™&@·º½¾¿¡§~' + '\)' + '\(' + '\]' + '\[' + '\}' + '\{' + '\|' + '\\' + '\/' + '\*' + r']{1,}')
207
+
208
+ def __call__(self, text):
209
+ # The exact text cleaning as used at training time (applied twice intentionally):
210
+ text = self.clean_caption(text)
211
+ text = self.clean_caption(text)
212
+ return text
213
+
214
+ @staticmethod
215
+ def basic_clean(text):
216
+ text = ftfy.fix_text(text)
217
+ text = html.unescape(html.unescape(text))
218
+ return text.strip()
219
+
220
+ def clean_caption(self, caption):
221
+ caption = str(caption)
222
+ caption = ul.unquote_plus(caption)
223
+ caption = caption.strip().lower()
224
+ caption = re.sub('<person>', 'person', caption)
225
+ caption = re.sub('<br>', ' ', caption)
226
+ # urls:
227
+ caption = re.sub(
228
+ r'\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))',
229
+ # noqa
230
+ '', caption) # regex for urls
231
+ caption = re.sub(
232
+ r'\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))',
233
+ # noqa
234
+ '', caption) # regex for urls
235
+ # html:
236
+ caption = BeautifulSoup(caption, features='html.parser').text
237
+
238
+ # @<nickname>
239
+ caption = re.sub(r'@[\w\d]+\b', '', caption)
240
+
241
+ # 31C0—31EF CJK Strokes
242
+ # 31F0—31FF Katakana Phonetic Extensions
243
+ # 3200—32FF Enclosed CJK Letters and Months
244
+ # 3300—33FF CJK Compatibility
245
+ # 3400—4DBF CJK Unified Ideographs Extension A
246
+ # 4DC0—4DFF Yijing Hexagram Symbols
247
+ # 4E00—9FFF CJK Unified Ideographs
248
+ caption = re.sub(r'[\u31c0-\u31ef]+', '', caption)
249
+ caption = re.sub(r'[\u31f0-\u31ff]+', '', caption)
250
+ caption = re.sub(r'[\u3200-\u32ff]+', '', caption)
251
+ caption = re.sub(r'[\u3300-\u33ff]+', '', caption)
252
+ caption = re.sub(r'[\u3400-\u4dbf]+', '', caption)
253
+ caption = re.sub(r'[\u4dc0-\u4dff]+', '', caption)
254
+ caption = re.sub(r'[\u4e00-\u9fff]+', '', caption)
255
+ #######################################################
256
+
257
+ # all types of dash --> "-"
258
+ caption = re.sub(
259
+ r'[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+',
260
+ # noqa
261
+ '-', caption)
262
+
263
+ # normalize all quotation marks to one standard
264
+ caption = re.sub(r'[`´«»“”¨]', '"', caption)
265
+ caption = re.sub(r'[‘’]', "'", caption)
266
+
267
+ # &quot;
268
+ caption = re.sub(r'&quot;?', '', caption)
269
+ # &amp
270
+ caption = re.sub(r'&amp', '', caption)
271
+
272
+ # ip addresses:
273
+ caption = re.sub(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', ' ', caption)
274
+
275
+ # article ids:
276
+ caption = re.sub(r'\d:\d\d\s+$', '', caption)
277
+
278
+ # \n
279
+ caption = re.sub(r'\\n', ' ', caption)
280
+
281
+ # "#123"
282
+ caption = re.sub(r'#\d{1,3}\b', '', caption)
283
+ # "#12345.."
284
+ caption = re.sub(r'#\d{5,}\b', '', caption)
285
+ # "123456.."
286
+ caption = re.sub(r'\b\d{6,}\b', '', caption)
287
+ # filenames:
288
+ caption = re.sub(
289
+ r'[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)', '', caption)
290
+
291
+ #
292
+ caption = re.sub(r'[\"\']{2,}', r'"', caption) # """AUSVERKAUFT"""
293
+ caption = re.sub(r'[\.]{2,}', r' ', caption) # "..." runs of dots
294
+
295
+ # ***AUSVERKAUFT***, #AUSVERKAUFT
296
+ caption = re.sub(self.bad_punct_regex, r' ', caption)
297
+ caption = re.sub(r'\s+\.\s+', r' ', caption) # " . "
298
+
299
+ # this-is-my-cute-cat / this_is_my_cute_cat
300
+ regex2 = re.compile(r'(?:\-|\_)')
301
+ if len(re.findall(regex2, caption)) > 3:
302
+ caption = re.sub(regex2, ' ', caption)
303
+
304
+ caption = self.basic_clean(caption)
305
+
306
+ caption = re.sub(r'\b[a-zA-Z]{1,3}\d{3,15}\b', '', caption) # jc6640
307
+ caption = re.sub(r'\b[a-zA-Z]+\d+[a-zA-Z]+\b', '', caption) # jc6640vc
308
+ caption = re.sub(r'\b\d+[a-zA-Z]+\d+\b', '', caption) # 6640vc231
309
+
310
+ caption = re.sub(r'(worldwide\s+)?(free\s+)?shipping', '', caption)
311
+ caption = re.sub(r'(free\s)?download(\sfree)?', '', caption)
312
+ caption = re.sub(r'\bclick\b\s(?:for|on)\s\w+', '', caption)
313
+ caption = re.sub(
314
+ r'\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?', '', caption)
315
+ caption = re.sub(r'\bpage\s+\d+\b', '', caption)
316
+
317
+ # j2d1a2a...
318
+ caption = re.sub(
319
+ r'\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b', r' ', caption)
320
+
321
+ caption = re.sub(r'\b\d+\.?\d*[xх×]\d+\.?\d*\b', '', caption)
322
+
323
+ caption = re.sub(r'\b\s+\:\s+', r': ', caption)
324
+ caption = re.sub(r'(\D[,\./])\b', r'\1 ', caption)
325
+ caption = re.sub(r'\s+', ' ', caption)
326
+
327
+ caption.strip()
328
+
329
+ caption = re.sub(r'^[\"\']([\w\W]+)[\"\']$', r'\1', caption)
330
+ caption = re.sub(r'^[\'\_,\-\:;]', r'', caption)
331
+ caption = re.sub(r'[\'\_,\-\:\-\+]$', r'', caption)
332
+ caption = re.sub(r'^\.\S+$', '', caption)
333
+
334
+ return caption.strip()
335
+
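+ # Minimal usage sketch (the example string is illustrative):
+ #
+ # cleaner = TextCleaner()
+ # cleaned = cleaner("Check https://example.com for FREE shipping!!!")
+ # URLs, marketing phrases, and repeated punctuation are stripped by the
+ # regex cascade above; __call__ applies clean_caption twice on purpose.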
336
+
337
+ ########## T2IHDFSDataset ##########
338
+ @dataclass
339
+ class Bucket:
340
+ index_files: List[str] = field(default_factory=list) # the .index filenames
341
+ image_count: int = field(default=0) # the total number of images
342
+ image_height: int = field(default=0) # the image height
343
+ image_width: int = field(default=0) # the image width
344
+
345
+ class T2IHDFSDataset(Dataset):
346
+ def __init__(self,
347
+ hdfs_path,
348
+ resolution,
349
+ caption_key,
350
+ aspect_ratios,
351
+ debug=False,
352
+ use_resize_random_crop=False,
353
+ skip_caption_ratios=[0, 0.0655]):
354
+ super().__init__()
355
+
356
+ self.resolution = resolution
357
+ self.image_decoder = GeneralImageDecoder()
358
+ self.image_predicate = ImageMultiPredicate([
359
+ ImageAestheticPredicate(),
360
+ ImageBucketResolutionPredicate(),
361
+ ])
362
+ self.image_transform = Compose([
363
+ ToTensor(),
364
+ Normalize(mean=0.5, std=0.5),
365
+ ])
366
+ self.text_transform = TextCleaner()
367
+ self.caption_keys = caption_key
368
+ self.debug = debug
369
+ self.rank = 0 # mock value
370
+ self.use_resize_random_crop = use_resize_random_crop
371
+ self.skip_caption_ratios = skip_caption_ratios
372
+
373
+ self.buckets = dict()
374
+ self.bucket_override = list(map(lambda ratio: (ratio[0], ratio[1]), aspect_ratios.values())) # w, h
375
+
376
+ if isinstance(hdfs_path, str):
377
+ hdfs_path = [hdfs_path]
378
+ filepath_list = hlist_files(hdfs_path, postfix=".index")
379
+
380
+ for filepath in filepath_list:
381
+ # Parse name, example:
382
+ # filepath: "/laion5b_aesv2_512plus_buckets/2_19_256-896_00002_00196.index"
383
+ # filename: "/laion5b_aesv2_512plus_buckets/2_19_256-896_00002_00196"
384
+ # basename: "2_19_256-896_00002_00196"
385
+ # extension: ".index"
386
+ filename, extension = os.path.splitext(filepath)
387
+ basename = os.path.basename(filename)
388
+
389
+ # Parse basename, example:
390
+ # {id}_{image_count}_{image_height}-{image_width}_{other_info}
391
+ if extension in [".index", ".snappy"] and "tempstate" not in filename and 'tmp' not in filename:
392
+ image_count, image_height, image_width = basename.replace("_", "-").split("-")[1:4]
393
+ # skip invalid file.
394
+ try:
395
+ image_count = int(image_count)
396
+ image_height = int(image_height)
397
+ image_width = int(image_width)
398
+ except:
399
+ continue
400
+ if image_width <=0 or image_height<=0:
401
+ continue
402
+
403
+ image_ratio = image_width / image_height
404
+ override_image_width, override_image_height = self._override_resolution_if_needed_v1(image_width,
405
+ image_height)
406
+ override_image_ratio = override_image_width / override_image_height
407
+ # Omit buckets with unreasonable size ratio, such as (128, 1536)
408
+ if override_image_ratio / image_ratio > 1.5 or override_image_ratio / image_ratio < 0.7:
409
+ continue
410
+
411
+ bucket_key = (override_image_width, override_image_height)
412
+ bucket_entry = self.buckets.get(bucket_key, Bucket())
413
+ bucket_entry.index_files.append(filename)
414
+ bucket_entry.image_count += image_count
415
+ bucket_entry.image_height = override_image_height
416
+ bucket_entry.image_width = override_image_width
417
+ self.buckets[bucket_key] = bucket_entry
418
+
419
+ for i, bucket_entry in enumerate(self.buckets.values()):
420
+ print(
421
+ f"Bucket {i}: {bucket_entry.image_width}x{bucket_entry.image_height} " +
422
+ f"contains {bucket_entry.image_count} images."
423
+ )
424
+ print(f"Total samples: {sum([bucket_entry.image_count for bucket_entry in self.buckets.values()])}")
425
+
426
+ def _override_resolution_if_needed_v1(self, width: int, height: int) -> Tuple[int, int]:
427
+ """
428
+ Override the bucket resolution if configured:
429
+ Example:
430
+ - bucket override: [(1000, 200), (200, 1000)]
431
+ - current resolution: (300, 900)
432
+ - return (200, 1000) because it is the closest in aspect ratio.
433
+ """
434
+ if self.bucket_override is not None:
435
+ # If bucket override is defined, find a new resolution from the override list that best matches the aspect ratio.
436
+ assert len(self.bucket_override) > 0, "bucket_override must not be an empty list."
437
+ target_aspect_ratio = width / height
438
+ bucket_resolutions = self.bucket_override
439
+ bucket_aspect_ratios = torch.tensor([w / h for w, h in bucket_resolutions], dtype=torch.float64)
440
+ bucket_idx = bucket_aspect_ratios.sub(target_aspect_ratio).abs().argmin().item()
441
+ width, height = bucket_resolutions[bucket_idx]
442
+
443
+ if self.resolution != 512:
444
+ # The buckets are defined in 512 resolution. If target resolution is not 512, we need to scale it and make sure divisible by 64.
445
+ ratio = self.resolution / 512
446
+ width = (width * ratio) // 64 * 64
447
+ height = (height * ratio) // 64 * 64
448
+
449
+ return int(width), int(height)
450
+
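+ # Worked example (override list assumed): with bucket_override
+ # [(512, 512), (256, 896)], an input bucket of 300x900 (aspect ratio
+ # ~0.33) maps to (256, 896), the closest aspect ratio. At
+ # self.resolution == 768 the scale is 768 / 512 = 1.5, giving
+ # (384, 1344), both divisible by 64.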
451
+ def __len__(self):
452
+ return sum(bucket.image_count for bucket in self.buckets.values())
453
+
454
+ def __iter__(self):
455
+ bucket_entries = list(self.buckets.values())
456
+ bucket_weights = list(map(lambda bucket: bucket.image_count, bucket_entries))
457
+ bucket_iterators = list(map(lambda bucket: self._iterate_bucket(bucket), bucket_entries))
458
+
459
+ while True:
460
+ try:
461
+ bucket_iterator = random.choices(bucket_iterators, bucket_weights)[0]
462
+ bucket, index_file, key, content, image, original_size_as_tuple = next(bucket_iterator)
463
+ # get caption
464
+ text = self.get_caption(content)
465
+ # Skip sample if text returned None.
466
+ if text is None:
467
+ if self.debug: print("text is None")
468
+ continue
469
+
470
+ if self.debug:
471
+ print(f"Original_size_as_tuple {original_size_as_tuple}")
472
+ print(f"Image size: {image.size}")
473
+ print(f"Text length: {len(text)}")
474
+
475
+ # Resize and crop image
476
+ with timer(op=f"[Rank:{self.rank}] Resize image from {index_file}, key: {key}", wait_seconds=2):
477
+ image, crop_coords_top_left = resize_crop(image, bucket.image_height,
478
+ bucket.image_width, self.use_resize_random_crop)
479
+
480
+ # Transform image and text
481
+ with timer(op=f"[Rank:{self.rank}] Transform image and text from {index_file}, key: {key}",
482
+ wait_seconds=2):
483
+ if self.image_transform is not None:
484
+ image = self.image_transform(image)
485
+ image = image.unsqueeze(0) # Add temporal dim
486
+
487
+ # filter pure black image
488
+ if isinstance(image, torch.Tensor) and image.std() < 0.02 and image.mean() < -0.9:
489
+ if self.debug: print("image is too dark")
490
+ continue
491
+
492
+ if self.text_transform is not None:
493
+ text = self.text_transform(text)
494
+ if text == "":
495
+ if self.debug: print("text is empty")
496
+ continue
497
+
498
+ if self.debug:
499
+ print(f"dataset loading current text: en is {text}")
500
+
501
+ item = dict(
502
+ mp4=image,
503
+ txt=text,
504
+ num_frames=1
505
+ )
506
+ yield item
507
+ except Exception as ex:
508
+ # Errors are not expected here; re-raise so they surface immediately.
509
+ raise ex
513
+
514
+ def _iterate_bucket(self, bucket: Bucket):
515
+ # Copy the list.
516
+ index_files = list(bucket.index_files)
517
+ count_unsatisfy_image_predicor = 0
518
+ while True:
519
+ # Shuffle files
520
+ random.shuffle(index_files)
521
+ # Loop through all the .index files
522
+ for index_file in index_files:
523
+ try:
524
+ with timer(
525
+ op=f"[Rank:{self.rank}] KVReader opens and lists keys from index file {index_file}",
526
+ wait_seconds=3
527
+ ):
528
+ reader = FalconReader(index_file)
529
+ keys = reader.list_keys()
530
+
531
+ # We divide the keys into batches, then shuffle the batch order.
532
+ # Note that keys within a batch are still contiguous for faster data loading.
533
+ keys_batches = partition_by_size(keys, 64)
534
+ random.shuffle(keys_batches)
535
+
536
+ for key_batch in keys_batches:
537
+ with timer(
538
+ op=f"[Rank:{self.rank}] KVReader reads values from index file {index_file}, keys: {key_batch}",
539
+ wait_seconds=10,
540
+ ):
541
+ # Read values. The keys within this batch are contiguous for faster loading.
542
+ value_batch = reader.read_many(key_batch)
543
+
544
+ # Shuffle samples within this batch.
545
+ key_value_batch = list(zip(key_batch, value_batch))
546
+ random.shuffle(key_value_batch)
547
+
548
+ for key, value in key_value_batch:
549
+ # Decode json
550
+ with timer(op=f"[Rank:{self.rank}] Decoding bson/json from {index_file}, key: {key}",
551
+ wait_seconds=2):
552
+ try:
553
+ content = bson.loads(value)
554
+ except:
555
+ content = json.loads(value)
556
+
557
+ # Decode image
558
+ with timer(op=f"[Rank:{self.rank}] Decoding image from {index_file}, key: {key}",
559
+ wait_seconds=2):
560
+ image = self.image_decoder(content)
561
+ # check if image meets requirements, skip if not
562
+ if image is None:
563
+ if self.debug: print("find empty image")
564
+ continue
565
+ original_size_as_tuple = torch.tensor([image.height, image.width])
566
+ if self.image_predicate is not None and \
567
+ not self.image_predicate(image=image, content=content, bucket=bucket):
568
+ if self.debug: print("image does not satisfy image predicates", index_file)
569
+ count_unsatisfy_image_predicor += 1
570
+ # If 500 consecutive samples fail image_predicate, this kv file
571
+ # may leave the dataloader queue empty and stall the program,
572
+ # so skip the whole file.
573
+ if count_unsatisfy_image_predicor > 500:
574
+ count_unsatisfy_image_predicor = 0
575
+ raise RuntimeError("Find invalid kv file, skip!")
576
+ continue
577
+ else:
578
+ count_unsatisfy_image_predicor = 0
579
+ yield bucket, index_file, key, content, image, original_size_as_tuple
580
+
581
+ except Exception as ex:
582
+ # Errors may happen due to network issues when reading data from this file.
583
+ # Skip to the next index file regardless.
584
+ print(f"Bucket dataset reading data received unexpected exception at file: {index_file}", ex, file=sys.stderr)
585
+ continue
586
+
587
+ def get_caption(self, content):
588
+ text_key = None
589
+ if len(self.caption_keys) == 1: # only one key
590
+ res = content.get(self.caption_keys[0], None)
591
+ else: # 2 or more keys
592
+ for caption_key, skip_ratio in zip(self.caption_keys, self.skip_caption_ratios):
593
+ r1 = random.random()
594
+ if r1 >= skip_ratio and content.get(caption_key, None) is not None:
595
+ text_key = caption_key
596
+ break
597
+ # if all previous captions are skipped, use the last one (original caption)
598
+ if text_key is None:
599
+ if self.debug:
600
+ print("v1 {} v2 {} use original caption".format(self.caption_keys[0] in content, self.caption_keys[1] in content))
601
+ res = content.get(self.caption_keys[-1], None)
602
+ else:
603
+ if self.debug:
604
+ print("v1 {} v2 {} use {}".format(self.caption_keys[0] in content, self.caption_keys[1] in content, text_key))
605
+ res = content[text_key]
606
+ if res is None:
607
+ return None
608
+ else:
609
+ return res["text"]
610
+
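+ # Sketch of the skip-ratio logic (key names are hypothetical): with
+ # caption_key=["cap_v2", "cap_v1", "cap_orig"] and
+ # skip_caption_ratios=[0, 0.0655], "cap_v2" is always taken when present
+ # (skip ratio 0) and "cap_v1" is skipped ~6.55% of the time; if every
+ # earlier key is skipped or missing, the last key is the fallback.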
611
+ @classmethod
612
+ def create_dataset_function(cls, hdfs_path, args, **kwargs):
613
+ return cls(hdfs_path=hdfs_path, **kwargs)
614
+
615
+
616
+ class T2IHDFSDataset_dump(Dataset):
617
+ def __init__(self,
618
+ hdfs_path,
619
+ resolution,
620
+ caption_key,
621
+ aspect_ratios,
622
+ debug=False,
623
+ use_resize_random_crop=False,
624
+ skip_caption_ratios=[0, 0.0655]):
625
+ super().__init__()
627
+ self.resolution = resolution
628
+ self.image_decoder = GeneralImageDecoder()
629
+ self.image_predicate = ImageMultiPredicate([
630
+ ImageAestheticPredicate(),
631
+ ImageBucketResolutionPredicate(),
632
+ ])
633
+ self.image_transform = Compose([
634
+ ToTensor(),
635
+ Normalize(mean=0.5, std=0.5),
636
+ ])
637
+ self.text_transform = TextCleaner()
638
+ self.caption_keys = caption_key
639
+ self.debug = debug
640
+ self.rank = 0 # mock value
641
+ self.use_resize_random_crop = use_resize_random_crop
642
+ self.skip_caption_ratios = skip_caption_ratios
643
+
644
+ self.buckets = dict()
645
+ self.bucket_override = list(map(lambda ratio: (ratio[0], ratio[1]), aspect_ratios.values())) # w, h
646
+ if isinstance(hdfs_path, str):
647
+ hdfs_path = [hdfs_path]
648
+ filepath_list = hlist_files(hdfs_path, postfix=".parquet")
649
+
650
+ for filepath in filepath_list:
651
+ # Parse name, example:
652
+ # filepath: "/laion5b_aesv2_512plus_buckets/2_19_256-896_00002_00196.parquet"
653
+ # filename: "/laion5b_aesv2_512plus_buckets/2_19_256-896_00002_00196"
654
+ # basename: "2_19_256-896_00002_00196"
655
+ # extension: ".parquet"
656
+ filename, extension = os.path.splitext(filepath)
657
+ basename = os.path.basename(filename)
658
+
659
+ # Parse basename, example:
660
+ # {id}_{image_count}_{image_height}-{image_width}_{other_info}
661
+ if 'good' in filename and extension in [".parquet"]:
662
+ image_count, image_height, image_width = basename.replace("_", "-").split("-")[2:5]
663
+ elif extension in [".parquet"]:
664
+ image_count, image_height, image_width = basename.replace("_", "-").split("-")[1:4]
665
+ # skip invalid file.
666
+ try:
667
+ image_count = int(image_count)
668
+ image_height = int(image_height)
669
+ image_width = int(image_width)
670
+ except:
671
+ continue
672
+ if image_width <=0 or image_height<=0:
673
+ continue
674
+
675
+ image_ratio = image_width / image_height
676
+ override_image_width, override_image_height = self._override_resolution_if_needed_v1(image_width,
677
+ image_height)
678
+ override_image_ratio = override_image_width / override_image_height
679
+ # Omit buckets with unreasonable size ratio, such as (128, 1536)
680
+ if override_image_ratio / image_ratio > 1.5 or override_image_ratio / image_ratio < 0.7:
681
+ continue
682
+
683
+ bucket_key = (override_image_width, override_image_height)
684
+ bucket_entry = self.buckets.get(bucket_key, Bucket())
685
+ bucket_entry.index_files.append(filename)
686
+ bucket_entry.image_count += image_count
687
+ bucket_entry.image_height = override_image_height
688
+ bucket_entry.image_width = override_image_width
689
+ self.buckets[bucket_key] = bucket_entry
690
+
691
+ for i, bucket_entry in enumerate(self.buckets.values()):
692
+ print(
693
+ f"Bucket {i}: {bucket_entry.image_width}x{bucket_entry.image_height} " +
694
+ f"contains {bucket_entry.image_count} images."
695
+ )
696
+ print(f"Total samples: {sum([bucket_entry.image_count for bucket_entry in self.buckets.values()])}")
697
+
698
+ def _override_resolution_if_needed_v1(self, width: int, height: int) -> Tuple[int, int]:
699
+ """
700
+ Override the bucket resolution if configured:
701
+ Example:
702
+ - bucket override: [(1000, 200), (200, 1000)]
703
+ - current resolution: (300, 900)
704
+ - return (200, 1000) because it is the closest in aspect ratio.
705
+ """
706
+ if self.bucket_override is not None:
707
+ # If bucket override is defined, find a new resolution from the override list that best matches the aspect ratio.
708
+ assert len(self.bucket_override) > 0, "bucket_override must not be an empty list."
709
+ target_aspect_ratio = width / height
710
+ bucket_resolutions = self.bucket_override
711
+ bucket_aspect_ratios = torch.tensor([w / h for w, h in bucket_resolutions], dtype=torch.float64)
712
+ bucket_idx = bucket_aspect_ratios.sub(target_aspect_ratio).abs().argmin().item()
713
+ width, height = bucket_resolutions[bucket_idx]
714
+
715
+ if self.resolution != 512:
716
+ # The buckets are defined in 512 resolution. If target resolution is not 512, we need to scale it and make sure divisible by 64.
717
+ ratio = self.resolution / 512
718
+ width = (width * ratio) // 64 * 64
719
+ height = (height * ratio) // 64 * 64
720
+
721
+ return int(width), int(height)
722
+
723
+ def __len__(self):
724
+ return sum(bucket.image_count for bucket in self.buckets.values())
725
+
726
+ def __iter__(self):
727
+ bucket_entries = list(self.buckets.values())
728
+ bucket_weights = list(map(lambda bucket: bucket.image_count, bucket_entries))
729
+ bucket_iterators = list(map(lambda bucket: self._iterate_bucket(bucket), bucket_entries))
730
+
731
+ while True:
732
+ try:
733
+ bucket_iterator = random.choices(bucket_iterators, bucket_weights)[0]
734
+ bucket, content, image, original_size_as_tuple = next(bucket_iterator)
735
+
736
+ if self.resolution == 256:
737
+ latent = np.frombuffer(content['latent_256'], dtype=np.float32)
738
+ latent = latent.reshape(content['latent_256_size'])
739
+ latent = torch.from_numpy(latent).to(torch.bfloat16)
740
+ elif self.resolution == 512:
741
+ latent = np.frombuffer(content['latent_512'], dtype=np.float32)
742
+ latent = latent.reshape(content['latent_512_size'])
743
+ latent = torch.from_numpy(latent).to(torch.bfloat16)
744
+
745
+ image, crop_coords_top_left = resize_crop(image, bucket.image_height,
746
+ bucket.image_width, self.use_resize_random_crop)
747
+ if self.image_transform is not None:
748
+ image = self.image_transform(image)
749
+ image = image.unsqueeze(0) # Add temporal dim
750
+
751
+ # get caption
752
+ image_crop_256 = content.get('image_crop_256')
753
+ if image_crop_256 is not None:
754
+ text = self.get_caption_new(content)
755
+ else:
756
+ text = self.get_caption(content)
757
+ # Skip sample if text returned None.
758
+ if text is None:
759
+ if self.debug: print("text is None")
760
+ continue
761
+
762
+ # Transform image and text
763
+ if self.text_transform is not None:
764
+ text = self.text_transform(text)
765
+ if text == "" or text == 'none':
766
+ if self.debug: print("text is empty")
767
+ continue
768
+
769
+ if self.debug:
770
+ print(f"dataset loading current text: en is {text}")
771
+
772
+ item = dict(
773
+ mp4=image,
774
+ latent = latent,
775
+ txt=text,
776
+ num_frames=1
777
+ )
778
+ yield item
779
+ except Exception as ex:
780
+ # Errors are not expected here; re-raise so they surface immediately.
781
+ raise ex
785
+
786
+ def _iterate_bucket(self, bucket: Bucket):
787
+ # Copy the list.
788
+ index_files = list(bucket.index_files)
789
+ count_unsatisfy_image_predicor = 0
790
+ while True:
791
+ # Shuffle files
792
+ random.shuffle(index_files)
793
+ # Loop through all the .index files
794
+ for index_file in index_files:
795
+ try:
796
+ # Read the parquet file.
797
+ filesystem = fs.HadoopFileSystem('hdfs://harunasg', 0)
798
+ index_file = index_file + '.parquet'
799
+ with pq.ParquetFile(index_file, filesystem=filesystem) as fr:
800
+ # print(f'--- total: {fr.metadata.num_rows} ---- {fr.num_row_groups}')
801
+ # keys = []
802
+ # for i in range(fr.num_row_groups):
803
+ # # Read the current row group
804
+ # row_group = fr.read_row_group(i).to_pylist()
805
+ # keys += row_group
806
+ random_index = random.randint(0, fr.num_row_groups - 1)
807
+ keys = fr.read_row_group(random_index).to_pylist()
808
+
809
+ # We divide the keys into batches, then shuffle the batch order.
810
+ # Note that keys within a batch are still contiguous for faster data loading.
811
+ keys_batches = partition_by_size(keys, 64)
812
+ random.shuffle(keys_batches)
813
+
814
+ for key_batch in keys_batches:
815
+ random.shuffle(key_batch)
816
+
817
+ for content in key_batch:
818
+ if self.resolution == 256:
819
+ latent = content['latent_256']
820
+ else:
821
+ latent = content['latent_512']
822
+ if not latent:
823
+ count_unsatisfy_image_predicor += 1
824
+ # If 500 consecutive samples have no latent, this kv file
825
+ # may leave the dataloader queue empty and stall the program,
826
+ # so skip the whole file.
827
+ if count_unsatisfy_image_predicor > 500:
828
+ count_unsatisfy_image_predicor = 0
829
+ raise RuntimeError("Find invalid kv file, skip!")
830
+ continue
831
+ else:
832
+ count_unsatisfy_image_predicor = 0
833
+ image = self.image_decoder(content)
+ # Guard against undecodable images before touching attributes.
+ if image is None:
+ continue
834
+ original_size_as_tuple = torch.tensor([image.height, image.width])
835
+
836
+ yield bucket, content, image, original_size_as_tuple
837
+
838
+ except Exception as ex:
839
+ # Errors may happen due to network issues when reading data from this file.
840
+ # Skip to the next index file regardless.
841
+ print(f"Bucket dataset reading data received unexpected exception at file: {index_file}", ex, file=sys.stderr)
842
+ continue
843
+
844
+ def get_caption(self, content):
845
+ text_key = None
846
+ if len(self.caption_keys) == 1: # only one key
847
+ res = content.get(self.caption_keys[0], None)
848
+ else: # 2 or more keys
849
+ for caption_key, skip_ratio in zip(self.caption_keys, self.skip_caption_ratios):
850
+ r1 = random.random()
851
+ if r1 >= skip_ratio and content.get(caption_key, None) is not None:
852
+ text_key = caption_key
853
+ break
854
+ # if all previous captions are skipped, use the last one (original caption)
855
+ if text_key is None:
856
+ if self.debug:
857
+ print("v1 {} v2 {} use original caption".format(self.caption_keys[0] in content, self.caption_keys[1] in content))
858
+ res = content.get(self.caption_keys[-1], None)
859
+ else:
860
+ if self.debug:
861
+ print("v1 {} v2 {} use {}".format(self.caption_keys[0] in content, self.caption_keys[1] in content, text_key))
862
+ res = content[text_key]
863
+ if res is None:
864
+ return None
865
+ else:
866
+ return res
867
+ def get_caption_new(self, content):
868
+ caption_dict = json.loads(content['caption_dict'])
869
+ caption_list = []
870
+ for k, v in caption_dict.items():
871
+ if '_en_' in k and '_text' in k:
872
+ caption_list.append(v)
873
+ if len(caption_list) == 0:
874
+ return None
875
+ res = random.choice(caption_list)
876
+ return res
877
+
878
+ @classmethod
879
+ def create_dataset_function(cls, hdfs_path, args, **kwargs):
880
+ return cls(hdfs_path=hdfs_path, **kwargs)
881
+
882
+
883
+ if __name__ == "__main__":
884
+ from omegaconf import OmegaConf
885
+ from torch.utils.data import DataLoader
886
+ from torch.utils.data.distributed import DistributedSampler
887
+ from matplotlib import pyplot as plt
888
+ import numpy as np
889
+ from training.dataset_tool import CollectionDataset, collate_fn_map
890
+
891
+ hdfs_path = "hdfs://harunasg/home/byte_icvg_aigc_cp/user/seed_t2i/kexuanyi/data/train_data/pretrained_data/kv/v2.0/pretrained_en/v2.0_data_512_src_data"
892
+ config = "/mnt/bn/icvg/users/minxuan.lin/Workspace/video-factory/config/dataset_config/test_collection_config_sg.yaml"
893
+ seed = 0
894
+
895
+ # set seed
896
+ random.seed(seed)
897
+ np.random.seed(seed)
898
+ torch.manual_seed(seed)
899
+ torch.cuda.manual_seed(seed)
900
+ torch.cuda.manual_seed_all(seed)
901
+
902
+ configs = OmegaConf.load(config)
903
+ train_dataset = CollectionDataset.create_dataset_function(configs['train_data'],
904
+ configs['train_data_weights'],
905
+ **configs['data']['params'])
906
+ # train_dataset = T2IHDFSDataset.create_dataset_function(hdfs_path=hdfs_path, args=None, **configs['data']['params']['dataset_collections']['seedv2-t2i']['params'])
907
+
908
+ # sampler = DistributedSampler(train_dataset, rank=rank, num_replicas=world_size,)
909
+ train_dataloader = DataLoader(
910
+ train_dataset,
911
+ batch_size=1,
912
+ num_workers=1,
913
+ collate_fn=collate_fn_map,
914
+ pin_memory=False
915
+ )
916
+
917
+ output_dir = "outputs/test1"
918
+ os.makedirs(output_dir, exist_ok=True)
919
+
920
+ for i, batch in enumerate(train_dataloader):
921
+ print(batch.keys())
922
+ print(batch['prompts'])
923
+ print(batch['videos'].size())
924
+ print(batch['video_metadata'])
925
+ print(torch.min(batch['videos']), torch.max(batch['videos']))
926
+ for j in range(batch['videos'].size()[0]):
927
+ plt.imsave(f"{output_dir}/test_{i}_{j}.jpg", ((batch['videos'][j,0,...]+1)*127.5).permute(1,2,0).numpy().astype(np.uint8))
928
+ if i > 20:
929
+ break
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/__init__.py ADDED
File without changes
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (181 Bytes). View file
 
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (197 Bytes). View file
 
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/__pycache__/base_parquet.cpython-310.pyc ADDED
Binary file (8.39 kB). View file
 
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/__pycache__/base_parquet.cpython-311.pyc ADDED
Binary file (15.6 kB). View file
 
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/__pycache__/parquet_utils.cpython-310.pyc ADDED
Binary file (4.36 kB). View file
 
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/__pycache__/parquet_utils.cpython-311.pyc ADDED
Binary file (7.34 kB). View file
 
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/__pycache__/tos_client.cpython-310.pyc ADDED
Binary file (6.31 kB). View file
 
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/__pycache__/tos_client.cpython-311.pyc ADDED
Binary file (10.7 kB). View file
 
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/__pycache__/video_parquet.cpython-310.pyc ADDED
Binary file (15.5 kB). View file
 
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/__pycache__/video_parquet.cpython-311.pyc ADDED
Binary file (38.1 kB). View file
 
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/base_parquet.py ADDED
@@ -0,0 +1,289 @@
1
+ from itertools import chain
2
+ from multiprocessing import Pool
3
+ from pyarrow.parquet import ParquetFile
4
+ from torch.utils.data import IterableDataset
5
+ from typing import List, Literal, Optional, Union
6
+ from pyarrow.fs import HadoopFileSystem, LocalFileSystem
7
+
8
+ from .utils.hdfs_utils import listdir_with_metafile, exists
9
+ from .parquet_utils import (
10
+ get_portion_for_worker_only,
11
+ get_random_for_rank_and_worker,
12
+ get_portion_for_rank_and_worker,
13
+ )
14
+
15
+ def hack_s_data(filepath):
16
+ if "vae-1011" in filepath:
17
+ return filepath.replace("byte_data_tt_m/VGFM/data/packed/vae-1011", "byte_icvg_aigc_cp/user/video/temp/19900101/packed/vae-1011")
18
+ elif "dit-1126" in filepath:
19
+ return filepath.replace("byte_data_tt_m/user/sheng.bi/vgfm/packed/dit-1126", "byte_icvg_aigc_cp/user/video/temp/19900101/dit-1126")
20
+ else:
21
+ return filepath
22
+
23
+ def get_filesystem(path: str) -> Union[LocalFileSystem, HadoopFileSystem]:
24
+ """
25
+ Get filesystem based on the path.
26
+ """
27
+ if path.startswith("hdfs://"):
28
+ return HadoopFileSystem.from_uri(path)
29
+ else:
30
+ return LocalFileSystem()
31
+
32
+
33
+ def read_metadata(
34
+ path: str,
35
+ ):
36
+ fs = get_filesystem(path)
37
+ with ParquetFile(path, filesystem=fs) as file:
38
+ metadata = file.metadata
39
+ return metadata
40
+
41
+
42
+ class ParquetDataset(IterableDataset):
43
+ """
44
+ Parquet dataset.
45
+
46
+ Arguments:
47
+ path: a directory path that contains *.parquet files.
48
+ seed: seed for deterministic sampling. If None, just random.
49
+ partition: partition strategy. Split by *.parquet file or by row groups in each file.
50
+ force_partition: if True, raise error if partition is indivisible.
51
+ num_parallel_files: number of parallel files to read.
52
+ infinite: If True, data will be returned infinitely.
53
+ """
54
+
55
+ def __init__(
56
+ self,
57
+ path: Union[str, List[str]],
58
+ seed: Optional[int],
59
+ partition: Literal["file", "group", "dump"] = "file",
60
+ force_partition: bool = False,
61
+ num_parallel_files: int = 8,
62
+ infinite: bool = True,
63
+ path_mode: Literal["dir", "file"] = "dir",
64
+ shuffle: bool = True,
65
+ columns: Optional[List[str]] = None,
66
+ plugin_caption_path="",
67
+ dump_path = "",
68
+ ):
69
+ assert partition in ["file", "group", "dump"]
70
+ assert path_mode in ["dir", "file"]
71
+
72
+ # Save settings.
73
+ self.seed = seed
74
+ self.infinite = infinite
75
+ self.partition = partition
76
+ self.force_partition = force_partition
77
+ self.num_parallel_files = num_parallel_files
78
+ self.shuffle = shuffle
79
+ self.columns = columns
80
+
81
+ # List file paths.
82
+ filepaths = path if isinstance(path, list) else [path]
83
+ if path_mode == "dir":
84
+ filepaths = map(listdir_with_metafile, filepaths)
85
+ filepaths = chain(*filepaths)
86
+ filepaths = filter(lambda path: path.endswith(".parquet"), filepaths)
87
+ filepaths = [hack_s_data(path) for path in filepaths]
88
+ filepaths = sorted(filepaths)
89
+ assert len(filepaths) > 0
90
+
91
+ # Create file readers.
92
+ self.filereaders = [
93
+ ParquetFileReader(
94
+ path=path,
95
+ seed=seed,
96
+ partition=partition,
97
+ force_partition=force_partition,
98
+ shuffle=shuffle,
99
+ columns=columns,
100
+ plugin_caption_path=plugin_caption_path.rstrip(
101
+ '/')+"/"+path.split('/')[-1] if plugin_caption_path != "" else "",
102
+ dump_path = dump_path.rstrip(
103
+ '/')+"/"+path.split('/')[-1] if dump_path != "" else "",
104
+ )
105
+ for path in filepaths
106
+ ]
107
+
108
+ # Please don't make __len__ return a fake count! Prefer a separate method such as get_size() instead.
109
+ def __len__(self):
110
+ if not hasattr(self, "count"):
111
+ # Calculate an approximate dataset item count.
112
+ # We open 5 files and compute the average items per file.
113
+ # Then we use this to approximate total dataset item count.
114
+
115
+ with Pool(1) as pool:
116
+ counts = pool.map(len, self.filereaders[:5])
117
+ self.count = int(sum(counts) / len(counts) * len(self.filereaders))
118
+ return self.count
119
+
120
+ def __iter__(self):
121
+ epoch = 0
122
+ filereaders = self.filereaders
123
+ random = get_random_for_rank_and_worker(self.seed)
124
+
125
+ # Partition by files if needed.
126
+ if self.partition == "file":
127
+ filereaders = get_portion_for_rank_and_worker(
128
+ filereaders, self.force_partition)
129
+
130
+ while True:
131
+ # Initialize filereaders iterators.
132
+ if len(filereaders) == 0:
133
+ filereaders = get_portion_for_rank_and_worker(
134
+ self.filereaders, self.force_partition, resample = True)
135
+ iterators = [reader.__iter__(epoch=epoch)
136
+ for reader in filereaders]
137
+ if self.shuffle:
138
+ random.shuffle(iterators)
139
+
140
+ # Yield samples.
141
+ bad_file_count = 0
142
+ max_bad_file_count = len(iterators)
143
+ while any(iterators):
144
+ if self.shuffle:
145
+ iterator = random.choice(
146
+ iterators[: self.num_parallel_files])
147
+ else:
148
+ iterator = iterators[0]
149
+ try:
150
+ result = next(iterator)
151
+ if result == "invalid parquet file!":
152
+ print("encountered a data-caption file problem, removing iterator")
153
+ iterators.remove(iterator)
154
+ bad_file_count += 1
155
+ if bad_file_count >= max_bad_file_count:
156
+ bad_file_count = 0
157
+ yield "max_bad_file_count_reached"
158
+ continue
159
+ else:
160
+ yield result
161
+ except StopIteration:
162
+ iterators.remove(iterator)
163
+
164
+ # Break after the first epoch if not infinite.
165
+ if not self.infinite:
166
+ break
167
+
168
+ # Increment epoch.
169
+ epoch += 1
170
+
171
+
172
+ class ParquetFileReader:
173
+ """
174
+ Read a single *.parquet file.
175
+
176
+ Arguments:
177
+ path: a *.parquet file path.
178
+ seed: seed for deterministic sampling. If None, just random.
179
+ partition: partition strategy.
180
+ force_partition: if True, raise error if partition is indivisible.
181
+ """
182
+
183
+ def __init__(
184
+ self,
185
+ path: str,
186
+ seed: Optional[int],
187
+ partition: str,
188
+ force_partition: bool,
189
+ shuffle: bool,
190
+ columns: Optional[List[str]],
191
+ plugin_caption_path: str,
192
+ dump_path: str,
193
+ ):
194
+ self.path = path
195
+ self.seed = seed
196
+ self.partition = partition
197
+ self.force_partition = force_partition
198
+ self.shuffle = shuffle
199
+ self.columns = columns
200
+ self.plugin_caption_path = plugin_caption_path
201
+ self.dump_path = dump_path
202
+
203
+ def __len__(self):
204
+ fs = get_filesystem(self.path)
205
+ with ParquetFile(self.path, filesystem=fs) as file:
206
+ return file.metadata.num_rows
207
+
208
+ def __iter_parallel(self, epoch):
209
+ fs = get_filesystem(self.path)
210
+ print(self.path)
211
+ if not exists(self.path) or not exists(self.plugin_caption_path) or not exists(self.dump_path):
212
+ # Yield the sentinel and end the iterator.
213
+ print(f"parallel loading warning: {self.path} or {self.plugin_caption_path} does not exist, returning an empty iter")
214
+ yield "invalid parquet file!"
+ return
215
+ with ParquetFile(self.path, filesystem=fs) as file, \
216
+ ParquetFile(self.plugin_caption_path, filesystem=fs) as plugin_caption, \
217
+ ParquetFile(self.dump_path, filesystem=fs) as dump_file:
218
+ # List all groups.
219
+ groups = list(range(file.num_row_groups))
220
+
221
+ # Partition groups if needed.
222
+ if self.partition == "group":
223
+ groups = get_portion_for_rank_and_worker(
224
+ groups, self.force_partition)
225
+ elif self.partition == "dump":
226
+ groups = get_portion_for_worker_only(groups)
227
+
228
+ if self.shuffle:
229
+ # Shuffle groups
230
+ seed = (self.seed + epoch) if self.seed is not None else None
231
+ get_random_for_rank_and_worker(seed).shuffle(groups)
232
+
233
+ # Iteration over all samples from all row groups.
234
+ for group in groups:
235
+ print(group)
236
+ iter_main = file.iter_batches(
237
+ batch_size=1, row_groups=[group], columns=self.columns,
238
+ use_threads=False,)
239
+ iter_plugin_caption = plugin_caption.iter_batches(
240
+ batch_size=1, row_groups=[group], columns=None,
241
+ use_threads=False,)
242
+ iter_dump = dump_file.iter_batches(
243
+ batch_size=1, row_groups=[group], columns=None,
244
+ use_threads=False,)
245
+
246
+ # Zip the three iterators to read rows "in parallel"
247
+ for main_batch, caption_batch, dump_batch in zip(iter_main, iter_plugin_caption, iter_dump):
248
+ # Convert each single-row batch to a dict
249
+ main_batch_dict = main_batch.to_pandas().iloc[0].to_dict()
250
+ caption_batch_dict = caption_batch.to_pandas(
251
+ ).iloc[0].to_dict()
252
+ dump_batch_dict = dump_batch.to_pandas().iloc[0].to_dict()
253
+ assert caption_batch_dict['uttid'] == main_batch_dict[
254
+ 'uttid'] and caption_batch_dict['uttid'] == dump_batch_dict['uttid'], f"uttid not match {caption_batch_dict['uttid']} vs {main_batch_dict['uttid']}"
255
+ main_batch_dict.update(caption_batch_dict)
256
+ main_batch_dict.update(dump_batch_dict)
257
+ yield main_batch_dict
258
+
259
+ def __iter_normal(self, epoch):
260
+ fs = get_filesystem(self.path)
261
+ with ParquetFile(self.path, filesystem=fs) as file:
262
+ # List all groups.
263
+ groups = list(range(file.num_row_groups))
264
+
265
+ # Partition groups if needed.
266
+ if self.partition == "group":
267
+ groups = get_portion_for_rank_and_worker(
268
+ groups, self.force_partition)
269
+ elif self.partition == "dump":
270
+ groups = get_portion_for_worker_only(groups)
271
+
272
+ if self.shuffle:
273
+ # Shuffle groups
274
+ seed = (self.seed + epoch) if self.seed is not None else None
275
+ get_random_for_rank_and_worker(seed).shuffle(groups)
276
+
277
+ # Iteration over all samples from all row groups.
278
+ for group in groups:
279
+ for sample in file.iter_batches(
280
+ batch_size=1, row_groups=[group], columns=self.columns,
281
+ use_threads=False,
282
+ ):
283
+ yield sample.to_pandas().iloc[0].to_dict()
284
+
285
+ def __iter__(self, epoch=0):
286
+ if self.plugin_caption_path != "":
287
+ return self.__iter_parallel(epoch)
288
+ else:
289
+ return self.__iter_normal(epoch)
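+
+ # Minimal usage sketch (path and columns are illustrative):
+ #
+ # dataset = ParquetDataset(
+ # path="hdfs://some/dir/of/parquet", # hypothetical directory
+ # seed=0,
+ # partition="file", # each rank/worker reads disjoint files
+ # infinite=False,
+ # columns=["uttid", "caption"],
+ # )
+ # for sample in dataset:
+ # ... # each sample is one row rendered as a dict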
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/parquet_utils.py ADDED
@@ -0,0 +1,142 @@
1
+ import torch
2
+ import random
3
+ import importlib
4
+ import contextlib
5
+ import numpy as np
6
+
7
+ from typing import Any, Dict, List, Optional
8
+ from torch.utils.data import get_worker_info
9
+ from omegaconf import DictConfig, ListConfig
10
+
11
+ from .utils.partition_utils import partition_by_groups
12
+ from .utils.distributed_utils import get_data_parallel_rank, get_data_parallel_world_size
13
+
14
+
15
+ def get_worker_id() -> int:
16
+ """
17
+ Get the current dataloader worker id.
18
+ """
19
+ return get_worker_info().id if get_worker_info() is not None else 0
20
+
21
+
22
+ def get_worker_count() -> int:
23
+ """
24
+ Get the total dataloader worker count.
25
+ """
26
+ return get_worker_info().num_workers if get_worker_info() is not None else 1
27
+
28
+
29
+ def get_seed_for_rank_and_worker(seed: Optional[int]) -> Optional[int]:
30
+ """
31
+ Get seed for current rank and worker.
32
+ """
33
+ if seed is None:
34
+ return None
35
+ return seed + get_data_parallel_rank() * get_worker_count() + get_worker_id()
36
+
37
+
38
+ def get_random_for_rank_and_worker(seed: Optional[int]) -> random.Random:
39
+ """
40
+ Get random.Random for the current rank and worker.
41
+ """
42
+ return random.Random(get_seed_for_rank_and_worker(seed))
43
+
44
+
45
+ def get_random_for_all_ranks(seed: Optional[int]) -> random.Random:
46
+ """
47
+ Get random.Random that is the same for all ranks.
48
+ """
49
+ return random.Random(seed or 0)
50
+
51
+
52
+ def get_portion_for_rank_and_worker(items: List[Any], force: bool = False, allow_empty: bool = False, resample: bool = False) -> List[Any]:
53
+ """
54
+ Get the portion of items for current rank and worker.
55
+ """
56
+ rank = get_data_parallel_rank()
57
+ world_size = get_data_parallel_world_size()
58
+ worker_id = get_worker_id()
59
+ worker_count = get_worker_count()
60
+ if resample:
61
+ return random.sample(items, len(items)//(world_size*worker_count))
62
+
63
+ if world_size * worker_count <= len(items):
64
+ # If there are enough items to be divided, we divide the items
65
+ items = partition_by_groups(items, world_size)[rank]
66
+ items = partition_by_groups(items, worker_count)[worker_id]
67
+ elif allow_empty:
68
+ if rank * worker_count + worker_id < len(items):
69
+ items = [items[rank * worker_count + worker_id]]
70
+ else:
71
+ items = []
72
+ elif not force:
73
+ # If not enough items to be divided, all ranks and workers shuffle it
74
+ # with different seed.
75
+ items = list(items)
76
+ get_random_for_rank_and_worker(0).shuffle(items)
77
+ else:
78
+ raise ValueError("Items not divisible by world_size * worker_count")
79
+ return items
80
+
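+ # Worked sketch (assumed sizes): with 8 items, world_size=2 and 2 workers
+ # per rank, each (rank, worker) pair receives 2 items; the exact
+ # assignment depends on partition_by_groups. With fewer items than
+ # rank*worker slots, every worker gets a differently-shuffled full copy.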
81
+
82
+ def get_portion_for_worker_only(items: List[Any]) -> List[Any]:
83
+ """
84
+ Get the portion of items for current worker.
85
+ """
86
+ worker_id = get_worker_id()
87
+ worker_count = get_worker_count()
88
+
89
+ items = partition_by_groups(items, worker_count)[worker_id]
90
+ return items
91
+
92
+
93
+ @contextlib.contextmanager
94
+ def local_seed(seed: Optional[int]):
95
+ """
96
+ Create a local context with seed is set, but exit back to the original random state.
97
+ If seed is None, do nothing.
98
+ """
99
+ if seed is not None:
100
+ random_state = random.getstate()
101
+ np_state = np.random.get_state()
102
+ torch_state = torch.get_rng_state()
103
+ random.seed(seed)
104
+ np.random.seed(seed)
105
+ torch.manual_seed(seed)
106
+ try:
107
+ yield
108
+ finally:
109
+ random.setstate(random_state)
110
+ np.random.set_state(np_state)
111
+ torch.set_rng_state(torch_state)
112
+ else:
113
+ yield
114
+
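+ # Usage sketch: fix the Python, NumPy, and torch RNGs inside the block and
+ # restore the previous random state on exit.
+ #
+ # with local_seed(1234):
+ # idx = torch.randperm(10) # deterministic under the local seed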
115
+
116
+ def _as_list(datasets):
117
+ if isinstance(datasets, list):
118
+ return datasets
119
+ if isinstance(datasets, dict):
120
+ return [d for d in datasets.values() if d is not None]
121
+ raise ValueError
122
+
123
+
124
+ def import_item(path: str, name: str) -> Any:
125
+ """
126
+ Import a python item. Example: import_item("path.to.file", "MyClass") -> MyClass
127
+ """
128
+ return getattr(importlib.import_module(path), name)
129
+
130
+
131
+ def create_dataset(path: str, *args, **kwargs) -> Any:
132
+ """
133
+ Create a dataset. Requires the file to contain a "create_dataset" function.
134
+ """
135
+ return import_item(path, "create_dataset")(*args, **kwargs)
136
+
137
+
138
+ def shift_seed(seed: Optional[int], shift: int) -> Optional[int]:
139
+ """
140
+ Shift the seed by a given amount. Or return None if seed is None.
141
+ """
142
+ return (seed + shift) if seed is not None else None
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/samplers/__init__.py ADDED
File without changes
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/samplers/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (207 Bytes). View file
 
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/samplers/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (206 Bytes). View file
 
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/samplers/__pycache__/frame_sampler.cpython-310.pyc ADDED
Binary file (11.7 kB). View file
 
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/samplers/__pycache__/frame_sampler.cpython-311.pyc ADDED
Binary file (19.8 kB). View file
 
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/samplers/__pycache__/text_sampler.cpython-310.pyc ADDED
Binary file (8.57 kB). View file
 
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/samplers/__pycache__/text_sampler.cpython-311.pyc ADDED
Binary file (15 kB). View file
 
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/samplers/__pycache__/utils.cpython-310.pyc ADDED
Binary file (1.39 kB). View file
 
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/samplers/__pycache__/utils.cpython-311.pyc ADDED
Binary file (2.02 kB). View file
 
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/samplers/frame_sampler.py ADDED
@@ -0,0 +1,375 @@
1
+ """
2
+ Frame samplers.
3
+ """
4
+
5
+ import numpy as np
6
+
7
+ from dataclasses import dataclass
8
+ from abc import ABC, abstractmethod
9
+ from typing import Any, Dict, List, Literal, NamedTuple, Optional, Tuple, Union
10
+
11
+
12
+ class FrameSamplerOutput(NamedTuple):
13
+ """
14
+ Return indices for frame decoding,
15
+ and optionally additional information to return to user.
16
+ """
17
+
18
+ indices: List[int]
19
+ additional_info: Dict[str, Any] = {}
20
+
21
+
22
+ class FrameSampler(ABC):
23
+ """
24
+ Frame sampler base class.
25
+
26
+ Child class must implement __call__ method to return the decoding indices.
27
+ Or raise if the video cannot be sampled (e.g. too short, etc.)
28
+ """
29
+
30
+ @abstractmethod
31
+ def __call__(self, num_frames: int) -> FrameSamplerOutput:
32
+ raise NotImplementedError
33
+
34
+
35
+ class AllFrameSampler(FrameSampler):
36
+ """
37
+ All frame sampler. Returns all frames in a video.
38
+ """
39
+
40
+ def __call__(self, num_frames: int) -> FrameSamplerOutput:
41
+ return FrameSamplerOutput(list(range(num_frames)))
42
+
43
+
44
+ class AdaptiveFrameSampler(FrameSampler):
45
+ """
46
+ Adaptive frame sampler.
47
+
48
+ Arguments:
49
+ length: frame length to return.
50
+ For example, [5,10] denotes to always return 5 frames or 10 frames.
51
+ It will choose the longest length that fits the original video.
52
+ For example, if the video is 9 frames total, it will clip to 5 frames.
53
+ stride: frame skip.
54
+ For example, 1 denotes no skip. 2 denotes select every other frame. 3
55
+ denotes select every third frame. When a list is given, stride is randomly
56
+ chosen with even probability. However, user may set it to [1,1,2] to
57
+ denote 1 with 66% probability and 2 with 33% proability.
58
+ clip: clip location.
59
+ "center": clip video at the center.
60
+ "uniform": clip video uniformly at random.
61
+ jitter: jitter to the location.
62
+ Only applicable when clip is "center".
63
+ The value is the stdev of the normal distribution to shift the index.
64
+ """
65
+
66
+ def __init__(
67
+ self,
68
+ lengths: Union[int, List[int]],
69
+ strides: Union[int, List[int]] = 1,
70
+ clip: Literal["center", "uniform"] = "uniform",
71
+ jitter: float = 0.0,
72
+ ):
73
+ lengths = [lengths] if isinstance(lengths, int) else lengths
74
+ strides = [strides] if isinstance(strides, int) else strides
75
+ assert len(lengths) > 0
76
+ assert len(strides) > 0
77
+ assert clip in ["center", "uniform"]
78
+ assert jitter >= 0
79
+ self.lengths = np.array(lengths)
80
+ self.strides = np.array(strides)
81
+ self.clip = clip
82
+ self.jitter = jitter
83
+
84
+ def __call__(
85
+ self,
86
+ num_frames: int,
87
+ ) -> FrameSamplerOutput:
88
+ # Choose stride.
89
+ # Drop strides that are too long for this video.
90
+ # Then randomly choose a valid stride.
91
+ valid_strides = np.any(num_frames // self.strides >=
92
+ self.lengths.reshape(-1, 1), axis=0)
93
+ valid_strides = self.strides[valid_strides]
94
+ if valid_strides.size <= 0:
95
+ raise ValueError(f"Video is too short ({num_frames} frames).")
96
+ stride = np.random.choice(valid_strides)
97
+
98
+ # Choose length.
99
+ # Pick the max length that can fit the video under the current stride.
100
+ valid_lengths = self.lengths[num_frames // stride >= self.lengths]
101
+ length = np.max(valid_lengths)
102
+
103
+ # Choose start index.
104
+ min_start_index = 0
105
+ max_start_index = num_frames - 1 - stride * (length - 1)
106
+ mid_start_index = round((min_start_index + max_start_index) / 2)
107
+ jitter = round(np.random.normal(loc=0, scale=self.jitter))
108
+
109
+ if self.clip == "center":
110
+ start_index = mid_start_index + jitter
111
+ elif self.clip == "uniform":
112
+ start_index = np.random.randint(
113
+ min_start_index, max_start_index + 1)
114
+ else:
115
+ raise NotImplementedError
116
+
117
+ start_index = np.clip(start_index, min_start_index, max_start_index)
118
+
119
+ # Compute indices
120
+ indices = np.arange(start_index, start_index + length * stride, stride)
121
+
122
+ # Return indices and additional information to return to user.
123
+ return FrameSamplerOutput(
124
+ indices=indices.tolist(),
125
+ additional_info={
126
+ "stride": stride,
127
+ "start_frame": start_index,
128
+ "end_frame": start_index + length * stride,
129
+ "total_frames": num_frames,
130
+ },
131
+ )
132
+
133
+
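A minimal usage sketch of AdaptiveFrameSampler (editorial illustration, not part of the commit; the frame count and arguments are invented):

    sampler = AdaptiveFrameSampler(lengths=[5, 10], strides=[1, 2], clip="center", jitter=1.0)
    output = sampler(num_frames=37)
    len(output.indices)                # 10: the longest length that fits at the drawn stride
    output.additional_info["stride"]   # 1 or 2, chosen uniformly among feasible strides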
+ @dataclass
+ class AdaptiveAdvancedFrameSamplerStrategy:
+     stride: int
+     stride_prob: float
+     frame_lengths: List[int]
+     frame_lengths_prob: Union[Literal["uniform", "harmonic"], List[float]]
+
+
+ class AdaptiveAdvancedFrameSampler(FrameSampler):
+     """
+     Advanced adaptive frame sampler. Supports different frame lengths for different
+     strides, and supports probabilistic sampling of both the stride and the frame length.
+
+     Arguments:
+         strategies: a list of strategies to sample from.
+         clip: clip location.
+             "center": clip the video at the center.
+             "uniform": clip the video uniformly at random.
+             "simple": start from the beginning of the (aligned) window.
+         jitter: jitter applied to the clip location.
+             Only applicable when clip is "center".
+             The value is the stdev of the normal distribution used to shift the index.
+         aligned: when True, sample inside the [start_idxs, end_idxs) window passed
+             via frames_meta instead of the whole video.
+     """
+
+     def __init__(
+         self,
+         strategies: List[AdaptiveAdvancedFrameSamplerStrategy],
+         clip: Literal["center", "uniform", "simple"] = "uniform",
+         jitter: float = 0.0,
+         aligned: bool = False,
+     ):
+         assert len(strategies) > 0, "Strategies must not be empty."
+         assert len({s.stride for s in strategies}) == len(strategies), \
+             "Strides cannot duplicate."
+         assert clip in ["center", "uniform", "simple"]
+         assert jitter >= 0
+         self.aligned = aligned
+         self.clip = clip
+         self.jitter = jitter
+         self.strides = []
+         self.strides_prob = []
+         self.frame_lengths = []
+         self.frame_lengths_prob = []
+
+         for strategy in sorted(strategies, key=lambda s: s.stride):
+             # Validate strides.
+             assert isinstance(strategy.stride, int), "Stride must be an integer."
+             assert strategy.stride > 0, "Stride must be a positive integer."
+             self.strides.append(strategy.stride)
+
+             # Assign strides_prob.
+             assert isinstance(strategy.stride_prob, (int, float)), \
+                 "Stride prob is not int/float."
+             assert strategy.stride_prob >= 0, "Stride prob must be non-negative."
+             self.strides_prob.append(strategy.stride_prob)
+
+             # Assign frame lengths, sorted by value.
+             assert len(strategy.frame_lengths) > 0, "Frame lengths must not be empty."
+             frame_lengths = np.array(strategy.frame_lengths)
+             assert frame_lengths.dtype == int, "Frame lengths must be integers."
+             assert np.all(frame_lengths > 0), "Frame lengths must be positive integers."
+             frame_lengths_sorted_idx = np.argsort(frame_lengths)
+             frame_lengths = frame_lengths[frame_lengths_sorted_idx]
+             self.frame_lengths.append(frame_lengths)
+
+             # Assign frame lengths prob, applying the same sorting to the probs.
+             if strategy.frame_lengths_prob == "uniform":
+                 # e.g. [0.2, 0.2, 0.2, 0.2, 0.2]
+                 frame_lengths_prob = np.full(len(frame_lengths), 1.0 / len(frame_lengths))
+             elif strategy.frame_lengths_prob == "harmonic":
+                 # e.g. [0.2, 0.25, 0.33, 0.5, 1]
+                 frame_lengths_prob = np.flip(1 / np.arange(1, len(frame_lengths) + 1))
+             elif isinstance(strategy.frame_lengths_prob, list):
+                 frame_lengths_prob = np.array(strategy.frame_lengths_prob)
+                 frame_lengths_prob = frame_lengths_prob[frame_lengths_sorted_idx]
+             else:
+                 raise NotImplementedError
+             assert len(frame_lengths_prob) == len(frame_lengths), \
+                 "Frame lengths prob mismatch."
+             assert np.all(frame_lengths_prob >= 0), \
+                 "Frame lengths prob must not be negative."
+             assert frame_lengths_prob.sum() > 0, \
+                 "Frame lengths prob must not be all zeros."
+             frame_lengths_prob /= frame_lengths_prob.sum()
+             self.frame_lengths_prob.append(frame_lengths_prob)
+
+         self.strides = np.array(self.strides)
+         self.strides_prob = np.array(self.strides_prob)
+         assert self.strides_prob.sum() > 0, "Strides prob must not be all zeros."
+         self.strides_prob /= self.strides_prob.sum()
+
+     def __call__(self, num_frames: int, frames_meta=None):
+         global_start_idx, global_end_idx = 0, num_frames
+         if self.aligned:
+             assert frames_meta is not None
+             global_start_idx = frames_meta['start_idxs']
+             global_end_idx = frames_meta['end_idxs']
+             num_frames = global_end_idx - global_start_idx
+
+         if self.clip != "simple":
+             sample_result = adptive_sample_framelen_and_stride(
+                 num_frames=num_frames,
+                 strides=self.strides,
+                 strides_prob=self.strides_prob,
+                 frame_lengths=self.frame_lengths,
+                 frame_lengths_prob=self.frame_lengths_prob,
+             )
+             stride = sample_result["stride"]
+             length = sample_result["frame_length"]
+         else:
+             stride = self.strides[0]
+             length = self.frame_lengths[0][0]
+
+         # Choose start index.
+         min_start_index = 0
+         max_start_index = num_frames - 1 - stride * (length - 1)
+         mid_start_index = round((min_start_index + max_start_index) / 2)
+         jitter = round(np.random.normal(loc=0, scale=self.jitter))
+
+         if self.clip == "simple":
+             # Replay a dumped window verbatim from its first frame.
+             start_index = global_start_idx
+         else:
+             if self.clip == "center":
+                 start_index = mid_start_index + jitter
+             elif self.clip == "uniform":
+                 start_index = np.random.randint(min_start_index, max_start_index + 1)
+             else:
+                 raise NotImplementedError
+             # Shift back into the global frame range when sampling an aligned window.
+             start_index += global_start_idx
+             min_start_index += global_start_idx
+             max_start_index += global_start_idx
+             start_index = np.clip(start_index, min_start_index, max_start_index)
+
+         # Compute indices.
+         indices = np.arange(start_index, start_index + length * stride, stride)
+
+         # Return indices and additional information to return to the user.
+         return FrameSamplerOutput(
+             indices=indices.tolist(),
+             additional_info={
+                 "stride": stride,
+                 "start_frame": start_index,
+                 "end_frame": start_index + length * stride,
+                 "total_frames": num_frames,
+             },
+         )
+
+
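A usage sketch for the advanced sampler (editorial illustration; strategy values invented):

    strategies = [
        AdaptiveAdvancedFrameSamplerStrategy(
            stride=1, stride_prob=0.7, frame_lengths=[17, 33], frame_lengths_prob="uniform"),
        AdaptiveAdvancedFrameSamplerStrategy(
            stride=2, stride_prob=0.3, frame_lengths=[17], frame_lengths_prob=[1.0]),
    ]
    sampler = AdaptiveAdvancedFrameSampler(strategies, clip="uniform")
    output = sampler(num_frames=100)
    # 17 or 33 indices at stride 1, or 17 indices at stride 2; start chosen uniformly.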
+ def normalize_probabilities(
+     items: np.ndarray,
+     probs: np.ndarray,
+     masks: np.ndarray,
+ ) -> Tuple[np.ndarray, np.ndarray]:
+     """
+     Drop masked-out items and fold their probability mass into the last kept item.
+     """
+     assert len(items), "Items must not be empty."
+     assert len(items) == len(masks) == len(probs), "Lengths must match."
+     assert isinstance(items, np.ndarray), "Items must be an np.ndarray."
+     assert isinstance(probs, np.ndarray), "Probs must be an np.ndarray."
+     assert isinstance(masks, np.ndarray), "Masks must be an np.ndarray."
+     assert masks.dtype == bool, "Masks must be boolean."
+     assert np.any(masks), "Masks must not be all False."
+     assert np.all(np.diff(masks.astype("int")) <= 0), "Masks must not break monotonicity."
+
+     ret_items = items[masks]
+     ret_probs = probs[masks]
+
+     # Accumulate the probabilities of infeasible items onto the last feasible one.
+     ret_probs[-1] += probs[~masks].sum()
+
+     return ret_items, ret_probs
+
+
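A worked example of the renormalization (numbers invented for illustration):

    items = np.array([4, 8, 16])
    probs = np.array([0.2, 0.3, 0.5])
    masks = np.array([True, True, False])      # length 16 is infeasible for this video
    normalize_probabilities(items, probs, masks)
    # -> (array([4, 8]), array([0.2, 0.8])); the 0.5 mass folds onto length 8.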
+ def adptive_sample_framelen_and_stride(
+     num_frames: int,
+     strides: np.ndarray,
+     strides_prob: np.ndarray,
+     frame_lengths: List[np.ndarray],
+     frame_lengths_prob: List[Optional[np.ndarray]],
+ ) -> Dict[str, Any]:
+     """Adaptively sample a frame length and stride for a video.
+
+     Args:
+         num_frames: Number of frames in the current video.
+         strides: A list of strides.
+         strides_prob: The probability for each stride.
+         frame_lengths: The numbers of frames (sorted) to sample from at each stride.
+             For example, `frame_length=10` at `stride=2` means that we need to have
+             20 frames. When a frame length is infeasible, the feasible frame lengths
+             are kept and the probability is re-normalized over them. For example, if
+             `num_frames=10`, `frame_lengths[stride2]=[4, 5]`, and
+             `frame_lengths[stride3]=[1, 3, 5]`, we can sample frame lengths 4 and 5
+             at `stride=2` (8 and 10 frames) but only frame lengths 1 and 3 at
+             `stride=3`. In this case, the probability of `frame_length=5` at
+             `stride=3` is added to `frame_length=3` at `stride=3`, making it more
+             likely to be selected.
+         frame_lengths_prob: The probabilities to sample from the corresponding frame
+             lengths. Defaults to None for uniform sampling.
+     Returns:
+         dictionary: A dictionary containing the selected frame length and stride.
+             If no combination is feasible, an exception is raised.
+     """
+     assert len(strides) == len(strides_prob) == len(frame_lengths) == len(frame_lengths_prob)
+
+     # Prepare frame_lengths_mask for each stride.
+     frame_lengths_mask = [num_frames // s >= l for s, l in zip(strides, frame_lengths)]
+
+     # Prepare stride mask and prob.
+     strides_idxs = np.arange(len(strides))
+     strides_mask = np.array([np.any(mask) for mask in frame_lengths_mask])
+     assert np.any(strides_mask), (
+         f"Cannot sample frames={num_frames} "
+         + f"from strides={strides} and lengths={frame_lengths}"
+     )
+
+     # Drop infeasible strides and normalize probability.
+     strides_idxs, strides_prob = normalize_probabilities(
+         strides_idxs, strides_prob, strides_mask)
+
+     # Choose stride.
+     stride_idx = np.random.choice(strides_idxs, p=strides_prob)
+     stride = strides[stride_idx]
+
+     # Prepare frame_lengths mask and prob for the current stride.
+     lengths = frame_lengths[stride_idx]
+     lengths_mask = frame_lengths_mask[stride_idx]
+     lengths_prob = frame_lengths_prob[stride_idx]
+     if lengths_prob is None:
+         lengths_prob = np.full(len(lengths), 1.0 / len(lengths))
+
+     # Drop infeasible lengths and normalize probability.
+     lengths, lengths_prob = normalize_probabilities(lengths, lengths_prob, lengths_mask)
+
+     # Choose frame length.
+     length = np.random.choice(lengths, p=lengths_prob)
+     return dict(stride=stride, frame_length=length)
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/samplers/text_sampler.py ADDED
@@ -0,0 +1,332 @@
+ """
+ Text samplers.
+ """
+
+ from bs4 import BeautifulSoup
+ import urllib.parse as ul
+ import html
+ import ftfy
+ import re
+ import numpy as np
+
+ from abc import ABC, abstractmethod
+ from typing import Dict, List, NamedTuple, Optional, Union
+
+
+ class TextSamplerOutput(NamedTuple):
+     """
+     Return keys for text embedding,
+     and optionally additional information to return to the user.
+     """
+
+     keys: Union[str, List[str]]
+
+
+ class TextSampler(ABC):
+     """
+     Text sampler base class.
+
+     Child classes must implement the __call__ method to return the embedding keys,
+     or raise if the text cannot be sampled (e.g. the key does not exist).
+     """
+
+     @abstractmethod
+     def __call__(self, text: Dict[str, str]) -> TextSamplerOutput:
+         raise NotImplementedError
+
+
+ class TextAllSampler(TextSampler):
+     """
+     All text sampler. Returns all texts.
+
+     e.g.
+     text_sampler:
+         type: all
+     """
+
+     def __init__(
+         self,
+         all: Optional[List[str]] = None,
+         **kwargs,
+     ):
+         self.all = all
+
+     def __call__(self, text: Dict[str, str]) -> TextSamplerOutput:
+         assert len(text) > 0, "The input text does not exist."
+
+         # Get keys.
+         keys = list(text.keys())
+
+         if self.all is not None:
+             keys = [key for key in self.all if key in keys]
+             assert len(keys) > 0, f"No valid text sample found under keys: {text.keys()}."
+
+         return TextSamplerOutput(keys=keys)
+
+
+ class TextFrequencySampler(TextSampler):
+     """
+     Sample text based on frequency.
+
+     e.g.
+     text_sampler:
+         type: frequency
+         frequency:
+             no_title_qwen_caption_en_v2_text: 0.9
+             no_title_qwen_caption_en_text: 0.9
+             origin_caption: 0.1
+
+     # regular expressions are supported
+     -----
+     .+qwen_caption_en.+: 0.95
+     origin_caption: 0.05
+     -----
+     .+caption_qwen_recaption_cn_long_2_82_text: 0.9
+     .+caption_qwen_recaption_cn_2_95_text: 0.9
+     origin_caption: 0.1
+     -----
+     """
+
+     def __init__(
+         self,
+         frequency: Dict[str, float] = {},
+     ):
+         self.frequency = frequency
+         # Compile the regular expressions.
+         self.patterns = (
+             {k: re.compile(k) for k in frequency.keys()}
+             if frequency is not None else None
+         )
+
+     def __call__(self, text: Dict[str, str]) -> TextSamplerOutput:
+         assert len(text) > 0, "The input text does not exist."
+
+         # Get keys.
+         keys = list(text.keys())
+
+         # Get weights.
+         if self.frequency is None or len(self.frequency) == 0:
+             weights = np.array([1.0] * len(keys))
+         else:
+             matches = {k: (False, "") for k in text.keys()}
+             counter = {k: 0 for k in self.frequency.keys()}
+             for k in keys:
+                 for pstr, pat in self.patterns.items():
+                     if pat.match(k) is not None:
+                         matches[k] = (True, pstr)
+                         counter[pstr] += 1
+                         break
+             # Split each pattern's weight evenly across the keys it matched.
+             weights = np.array(
+                 [
+                     self.frequency[matches[k][1]] / counter[matches[k][1]]
+                     if matches[k][0] else 0.0
+                     for k in keys
+                 ]
+             )
+         weights_sum = weights.sum()
+         assert weights_sum > 0, f"No valid text sample found under keys: {keys}."
+         weights /= weights_sum
+
+         # Sample a key.
+         key = str(np.random.choice(keys, p=weights))
+         return TextSamplerOutput(keys=key)
+
+
+ class TextPrioritySampler(TextSampler):
+     """
+     Sample text based on priority.
+
+     e.g.
+     text_sampler:
+         type: priority
+         priority:
+             - no_title_qwen_caption_en_v2_text
+             - no_title_qwen_caption_en_text
+             - origin_caption
+     """
+
+     def __init__(
+         self,
+         priority: List[str] = [],
+     ):
+         self.priority = priority
+
+     def __call__(self, text: Dict[str, str]) -> TextSamplerOutput:
+         assert len(text) > 0, "The input text does not exist."
+
+         # Get keys.
+         keys = list(text.keys())
+
+         # Get priorities.
+         priorities = [key for key in self.priority if key in keys]
+
+         # Select the key: the highest-priority match, else a uniformly random key.
+         if priorities:
+             key = priorities[0]
+         else:
+             key = str(np.random.choice(keys))
+
+         return TextSamplerOutput(keys=key)
+
+
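A small sketch of how the frequency sampler behaves (caption strings and keys invented):

    text = {
        "no_title_qwen_caption_en_v2_text": "a dog runs on grass",
        "origin_caption": "dog video",
    }
    sampler = TextFrequencySampler(frequency={".+qwen_caption_en.+": 0.9, "origin_caption": 0.1})
    out = sampler(text)
    # out.keys is a single key name, drawn 90%/10% between the two matched keys.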
+ """
+ Text cleaner. Copied from DeepFloyd IF.
+ (https://github.com/deep-floyd/IF/blob/develop/deepfloyd_if/modules/t5.py#L125)
+ """
+
+
+ class TextCleaner:
+     """
+     Clean up a caption with strange/improper contents.
+     """
+
+     bad_punct_regex = re.compile(
+         r"["
+         + "#®•©™&@·º½¾¿¡§~"
+         + r"\)"
+         + r"\("
+         + r"\]"
+         + r"\["
+         + r"\}"
+         + r"\{"
+         + r"\|"
+         + "\\"
+         + r"\/"
+         + r"\*"
+         + r"]{1,}"
+     )
+
+     def __call__(self, text):
+         # The exact text cleaning as was used in the training stage:
+         text = self.clean_caption(text)
+         text = self.clean_caption(text)
+         return text
+
+     @staticmethod
+     def basic_clean(text):
+         text = ftfy.fix_text(text)
+         text = html.unescape(html.unescape(text))
+         return text.strip()
+
+     def clean_caption(self, caption):
+         caption = str(caption)
+         caption = ul.unquote_plus(caption)
+         caption = caption.strip().lower()
+         caption = re.sub("<person>", "person", caption)
+         caption = re.sub("<br>", " ", caption)
+         # urls:
+         caption = re.sub(
+             r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))",  # noqa: E501
+             "",
+             caption,
+         )  # regex for urls
+         caption = re.sub(
+             r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))",  # noqa: E501
+             "",
+             caption,
+         )  # regex for urls
+         # html:
+         caption = BeautifulSoup(caption, features="html.parser").text
+
+         # @<nickname>
+         caption = re.sub(r"@[\w\d]+\b", "", caption)
+
+         # 31C0—31EF CJK Strokes
+         # 31F0—31FF Katakana Phonetic Extensions
+         # 3200—32FF Enclosed CJK Letters and Months
+         # 3300—33FF CJK Compatibility
+         # 3400—4DBF CJK Unified Ideographs Extension A
+         # 4DC0—4DFF Yijing Hexagram Symbols
+         # 4E00—9FFF CJK Unified Ideographs
+         caption = re.sub(r"[\u31c0-\u31ef]+", "", caption)
+         caption = re.sub(r"[\u31f0-\u31ff]+", "", caption)
+         caption = re.sub(r"[\u3200-\u32ff]+", "", caption)
+         caption = re.sub(r"[\u3300-\u33ff]+", "", caption)
+         caption = re.sub(r"[\u3400-\u4dbf]+", "", caption)
+         caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption)
+         caption = re.sub(r"[\u4e00-\u9fff]+", "", caption)
+         #######################################################
+
+         # all types of dash --> "-"
+         caption = re.sub(
+             r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+",  # noqa: E501
+             "-",
+             caption,
+         )
+
+         # normalize quotation marks to one standard
+         caption = re.sub(r"[`´«»“”¨]", '"', caption)
+         caption = re.sub(r"[‘’]", "'", caption)
+
+         # &quot;
+         caption = re.sub(r"&quot;?", "", caption)
+         # &amp
+         caption = re.sub(r"&amp", "", caption)
+
+         # IP addresses:
+         caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption)
+
+         # article ids:
+         caption = re.sub(r"\d:\d\d\s+$", "", caption)
+
+         # \n
+         caption = re.sub(r"\\n", " ", caption)
+
+         # "#123"
+         caption = re.sub(r"#\d{1,3}\b", "", caption)
+         # "#12345.."
+         caption = re.sub(r"#\d{5,}\b", "", caption)
+         # "123456.."
+         caption = re.sub(r"\b\d{6,}\b", "", caption)
+         # filenames:
+         caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption)
+
+         caption = re.sub(r"[\"\']{2,}", r'"', caption)  # """AUSVERKAUFT"""
+         caption = re.sub(r"[\.]{2,}", r" ", caption)  # "AUSVERKAUFT.."
+
+         # ***AUSVERKAUFT***, #AUSVERKAUFT
+         caption = re.sub(self.bad_punct_regex, r" ", caption)
+         caption = re.sub(r"\s+\.\s+", r" ", caption)  # " . "
+
+         # this-is-my-cute-cat / this_is_my_cute_cat
+         regex2 = re.compile(r"(?:\-|\_)")
+         if len(re.findall(regex2, caption)) > 3:
+             caption = re.sub(regex2, " ", caption)
+
+         caption = self.basic_clean(caption)
+
+         caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption)  # jc6640
+         caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption)  # jc6640vc
+         caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption)  # 6640vc231
+
+         caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption)
+         caption = re.sub(r"(free\s)?download(\sfree)?", "", caption)
+         caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption)
+         caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption)
+         caption = re.sub(r"\bpage\s+\d+\b", "", caption)
+
+         # j2d1a2a...
+         caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption)
+
+         caption = re.sub(r"\b\d+\.?\d*[xх×]\d+\.?\d*\b", "", caption)
+
+         caption = re.sub(r"\b\s+\:\s+", r": ", caption)
+         caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption)
+         caption = re.sub(r"\s+", " ", caption)
+
+         caption = caption.strip()
+
+         caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption)
+         caption = re.sub(r"^[\'\_,\-\:;]", r"", caption)
+         caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption)
+         caption = re.sub(r"^\.\S+$", "", caption)
+
+         return caption.strip()
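A quick illustration of TextCleaner (input string invented; output is what the rules above should produce):

    cleaner = TextCleaner()
    cleaner("A Cute Cat — photo #123 https://example.com")
    # -> "a cute cat - photo"
    # (lowercased, URL and "#123" removed, em dash normalized to "-")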
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/samplers/utils.py ADDED
@@ -0,0 +1,42 @@
+ from omegaconf import DictConfig, OmegaConf
+
+ from .text_sampler import (TextAllSampler,
+                            TextFrequencySampler,
+                            TextPrioritySampler,
+                            TextSampler,
+                            )
+ from .frame_sampler import (AdaptiveAdvancedFrameSampler,
+                             AdaptiveAdvancedFrameSamplerStrategy,
+                             AdaptiveFrameSampler,
+                             AllFrameSampler,
+                             FrameSampler,
+                             )
+
+ TEXT_SAMPLER_TYPES = {
+     "all": TextAllSampler,
+     "frequency": TextFrequencySampler,
+     "priority": TextPrioritySampler,
+ }
+
+
+ def create_text_sampler(config: DictConfig) -> TextSampler:
+     config = OmegaConf.to_object(config)
+     sampler_type = config.pop("type")
+     return TEXT_SAMPLER_TYPES[sampler_type](**config)
+
+
+ FRAME_SAMPLER_TYPES = {
+     "all": AllFrameSampler,
+     "adaptive": AdaptiveFrameSampler,
+     "adaptive_advanced": AdaptiveAdvancedFrameSampler,
+ }
+
+
+ def create_frame_sampler(config: DictConfig) -> FrameSampler:
+     config = OmegaConf.to_object(config)
+     sampler_type = config.pop("type")
+     if sampler_type == "adaptive_advanced":
+         config["strategies"] = [
+             AdaptiveAdvancedFrameSamplerStrategy(**s) for s in config["strategies"]
+         ]
+     return FRAME_SAMPLER_TYPES[sampler_type](**config)
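For instance, an OmegaConf config like the following (key values illustrative) maps onto create_frame_sampler:

    from omegaconf import OmegaConf

    cfg = OmegaConf.create({
        "type": "adaptive_advanced",
        "strategies": [
            {"stride": 1, "stride_prob": 1.0, "frame_lengths": [17], "frame_lengths_prob": "uniform"},
        ],
        "clip": "uniform",
    })
    sampler = create_frame_sampler(cfg)   # -> AdaptiveAdvancedFrameSampler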
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/tos_client.py ADDED
@@ -0,0 +1,192 @@
+ import logging
+ import os
+ import yaml
+ import hashlib
+ import traceback
+ from typing import Union, List, Optional
+ import bytedtos
+ import io
+ import decord
+ import torch
+ from pyarrow import fs
+
+
+ def hdfs_read(file_path) -> bytes:
+     fp = str(file_path)
+     filesystem = resolve_fs(fp)
+
+     with filesystem.open_input_stream(fp) as f:
+         content = f.readall()
+     return content
+
+
+ def sha256_hashs(b: bytes, nbytes=32, bit_len=128) -> str:
+     m = hashlib.sha256()
+     m.update(b)
+     mb = m.digest()
+     bb = mb[:nbytes]
+     truncated_hashs = bb[: bit_len // 8]
+     return truncated_hashs.hex().lower()
+
+
+ def retry(func, retry=3):
+     if retry == 0:
+         return func
+
+     def wrapper(*args, **kwargs):
+         error = ''
+         for i in range(retry):
+             try:
+                 return func(*args, **kwargs)
+             except KeyboardInterrupt:
+                 raise
+             except Exception:
+                 print(f"In {__file__}, retry {i + 1} times!")
+                 error = traceback.format_exc()
+         # Raise only after all attempts have failed.
+         raise Exception(f"Traceback: {error}")
+
+     return wrapper
+
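A usage sketch for the retry wrapper (flaky_read and the path are hypothetical):

    def flaky_read(path):
        return hdfs_read(path)

    safe_read = retry(flaky_read, retry=3)   # re-invokes up to 3 times before raising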
+ def resolve_fs(paths: Union[str, List[str]]) -> fs.FileSystem:
+     _p: str = paths  # type: ignore
+     if isinstance(paths, list):
+         _p = paths[0]
+     _p = "/".join(_p.split("/")[:3])
+     filesystem, _ = fs._resolve_filesystem_and_path(_p)
+
+     return filesystem
+
+
+ class BaseClient:
+     def __init__(self, retry=0, **kwargs):
+         self.retry = retry
+
+     def __call__(self, keys: Union[str, List[str]], hashs: Optional[Union[str, List[str]]] = None) -> Union[bytes, List[bytes]]:
+         """
+         Read bytes from a remote data source.
+         Args:
+             keys (str or list[str]): TOS keys, HDFS URIs, etc.
+             hashs (str or list[str]): hashes of the data.
+         Returns:
+             bytes (or list[bytes]): bytes read from the remote data source.
+         """
+         if isinstance(keys, str):
+             assert hashs is None or isinstance(hashs, str)
+             keys = [keys]
+             hashs = [hashs] if hashs is not None else None
+             return_list = False
+         else:
+             return_list = True
+
+         if hashs is not None:
+             bytes_get = retry(self.get_bytes_and_check, retry=3)(keys, hashs)
+         else:
+             bytes_get = retry(self.get_bytes, retry=self.retry)(keys)
+
+         if return_list:
+             return bytes_get
+         else:
+             return bytes_get[0]
+
+     def get_bytes_and_check(self, keys: List[str], hashs: List[str]) -> List[bytes]:
+         bytes_get = self.get_bytes(keys)
+         for k, b, h in zip(keys, bytes_get, hashs):
+             if sha256_hashs(b) != h:
+                 raise Exception(f"hash check failed on key {k}, {sha256_hashs(b)} != {h}!")
+         return bytes_get
+
+     def get_bytes(self, keys: List[str]) -> List[bytes]:
+         """
+         Read bytes from a remote data source.
+         Args:
+             keys: TOS keys, HDFS URIs, etc.
+         Returns:
+             bytes: bytes read from the remote data source.
+         """
+         raise NotImplementedError
+
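A sketch of how a concrete client plugs into BaseClient (LocalClient and the path are hypothetical, for illustration only):

    class LocalClient(BaseClient):
        def get_bytes(self, keys):
            # Treat each "key" as a local file path.
            return [open(k, 'rb').read() for k in keys]

    client = LocalClient(retry=2)
    data = client('some/file.bin')    # single key in, bytes out, with retries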
+ class TosClient(BaseClient):
+     def __init__(
+         self,
+         ak,
+         bucket,
+         idc,
+         timeout=10,
+         **kwargs,
+     ):
+         super().__init__(**kwargs)
+         self.tos_client = bytedtos.Client(bucket, ak, timeout=timeout, idc=idc)
+
+     # Input => TOS keys
+     def get_bytes(self, keys: List[str]) -> List[bytes]:
+         """
+         Read bytes from TOS keys.
+         Args:
+             keys (list[str]): TOS keys.
+         Returns:
+             list[bytes]: bytes read from TOS.
+         """
+         return [self.tos_client.get_object(key).data for key in keys]
+
+
+ class NebuTosClient(TosClient):
+     default_config = {
+         "nebudata-us": "hdfs://harunava/home/byte_icaip_nebudata/proj/nebudata/conf/nebuconfig_va_20240925.yaml",
+         "nebudata-sg": "hdfs://harunasg/home/byte_icaip_nebudata_sg/proj/nebudata/conf/nebuconfig_sg_20240925.yaml",  # Default
+     }
+
+     def __init__(
+         self,
+         ref_tos_bucket: Union[str, None] = None,
+         idc: Union[str, None] = None,
+         **kwargs,
+     ):
+         logging.info(f"NebuTos config: {ref_tos_bucket=} {idc=}")
+         if idc is None:
+             idc = os.environ.get("RUNTIME_IDC_NAME", "my2")
+
+         if ref_tos_bucket is not None:
+             assert ref_tos_bucket in self.default_config, \
+                 f"Unknown tos_bucket {ref_tos_bucket}, please use one of {self.default_config.keys()}."
+             nebuconfig_file = self.default_config.get(ref_tos_bucket)
+         else:
+             arnold_base_dir = os.environ.get("ARNOLD_BASE_DIR", "hdfs://harunasg")
+             for ref_tos_bucket, nebuconfig_file in self.default_config.items():
+                 if arnold_base_dir in nebuconfig_file:
+                     break
+
+         nebuconfig = yaml.safe_load(hdfs_read(nebuconfig_file).decode("utf-8"))
+         default_access_key = nebuconfig['tos_user_access_key']
+         tos_ak = os.environ.get("TOS_USER_ACCESS_key", default_access_key)
+
+         super().__init__(tos_ak, ref_tos_bucket, idc, **kwargs)
+
+
+ if __name__ == "__main__":
+     client = NebuTosClient(ref_tos_bucket="nebudata-sg", idc="my2")
+     # toskey = 'cas/596ccf6d8de5d16e0ca5a91c0610d9bd'
+     toskey = 'cas/0c862903f94897a08bde81ee10104c48'
+     # The content-addressed key doubles as the expected hash.
+     results = [client(toskey, hashs=toskey.split('cas/')[-1])]
+     # with open('output_video.mp4', 'wb') as f:
+     #     f.write(results[0])
+
+     # Decode the downloaded clip and normalize frames to [-1, 1].
+     file_io = io.BytesIO(results[0])
+     reader = decord.VideoReader(file_io, ctx=decord.cpu(0))
+     video_length = len(reader)
+     # Sampler-based frame selection is omitted here; decode the first 121 frames.
+     valid_indices = list(range(121))
+     frames_batch = reader.get_batch(valid_indices).asnumpy()
+     frames_tensor = torch.from_numpy(frames_batch).float()
+     frames_tensor = (frames_tensor / 127.5) - 1
+     frames_tensor = frames_tensor.permute(0, 3, 1, 2)  # (T, C, H, W)
+     del reader
+
+     # 'clip_toskey': 'cas/596ccf6d8de5d16e0ca5a91c0610d9bd'
+     # 'clip_tosurl': 'https://tosv.byted.org/obj/nebudata-sg/cas/596ccf6d8de5d16e0ca5a91c0610d9bd'
+     # 'clip_url': 'https://tosv-sg.tiktok-row.org/obj/nebudata-sg/cas/596ccf6d8de5d16e0ca5a91c0610d9bd'
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/utils/__pycache__/distributed_utils.cpython-310.pyc ADDED
Binary file (4.04 kB).
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/utils/__pycache__/distributed_utils.cpython-311.pyc ADDED
Binary file (6.4 kB).
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/utils/__pycache__/hdfs_utils.cpython-310.pyc ADDED
Binary file (6.93 kB).
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/utils/__pycache__/hdfs_utils.cpython-311.pyc ADDED
Binary file (12.5 kB).
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/utils/__pycache__/partition_utils.cpython-310.pyc ADDED
Binary file (1.73 kB).
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/utils/__pycache__/partition_utils.cpython-311.pyc ADDED
Binary file (2.45 kB).
dataset_code/sft_sftnews/offload/dataset_tool/parquet_dataset/utils/distributed_utils.py ADDED
@@ -0,0 +1,149 @@
+ """
+ Distributed basic functions.
+ """
+
+ import os
+ import torch
+ import torch.distributed as dist
+
+ from typing import Optional
+ from torch.nn.parallel import DistributedDataParallel
+
+ _DATA_PARALLEL_GROUP = None
+ _SEQUENCE_PARALLEL_GROUP = None
+ _SEQUENCE_PARALLEL_CPU_GROUP = None
+
+
+ def get_global_rank() -> int:
+     """
+     Get the global rank, the global index of the GPU.
+     """
+     return int(os.environ.get("RANK", "0"))
+
+
+ def get_local_rank() -> int:
+     """
+     Get the local rank, the local index of the GPU.
+     """
+     return int(os.environ.get("LOCAL_RANK", "0"))
+
+
+ def get_world_size() -> int:
+     """
+     Get the world size, the total number of GPUs.
+     """
+     return int(os.environ.get("WORLD_SIZE", "1"))
+
+
+ def get_device() -> torch.device:
+     """
+     Get the current rank's device.
+     """
+     return torch.device("cuda", get_local_rank())
+
+
+ def barrier_if_distributed(*args, **kwargs):
+     """
+     Synchronize all processes if under a distributed context.
+     """
+     if dist.is_initialized():
+         return dist.barrier(*args, **kwargs)
+
+
+ def init_torch(cudnn_benchmark=True):
+     """
+     Common PyTorch initialization configuration.
+     """
+     torch.backends.cuda.matmul.allow_tf32 = True
+     torch.backends.cudnn.allow_tf32 = True
+     torch.backends.cudnn.benchmark = cudnn_benchmark
+     torch.cuda.set_device(get_local_rank())
+     dist.init_process_group(
+         backend="nccl",
+         rank=get_global_rank(),
+         world_size=get_world_size(),
+     )
+
+
+ def convert_to_ddp(module: torch.nn.Module, **kwargs) -> DistributedDataParallel:
+     return DistributedDataParallel(
+         module=module,
+         device_ids=[get_local_rank()],
+         output_device=get_local_rank(),
+         **kwargs,
+     )
+
+
+ def get_data_parallel_group() -> Optional[dist.ProcessGroup]:
+     """
+     Get the data parallel process group.
+     """
+     return _DATA_PARALLEL_GROUP
+
+
+ def get_sequence_parallel_group() -> Optional[dist.ProcessGroup]:
+     """
+     Get the sequence parallel process group.
+     """
+     return _SEQUENCE_PARALLEL_GROUP
+
+
+ def get_sequence_parallel_cpu_group() -> Optional[dist.ProcessGroup]:
+     """
+     Get the sequence parallel CPU process group.
+     """
+     return _SEQUENCE_PARALLEL_CPU_GROUP
+
+
+ def get_data_parallel_rank() -> int:
+     """
+     Get the data parallel rank.
+     """
+     group = get_data_parallel_group()
+     return dist.get_rank(group) if group else get_global_rank()
+
+
+ def get_data_parallel_world_size() -> int:
+     """
+     Get the data parallel world size.
+     """
+     group = get_data_parallel_group()
+     return dist.get_world_size(group) if group else get_world_size()
+
+
+ def get_sequence_parallel_rank() -> int:
+     """
+     Get the sequence parallel rank.
+     """
+     group = get_sequence_parallel_group()
+     return dist.get_rank(group) if group else 0
+
+
+ def get_sequence_parallel_world_size() -> int:
+     """
+     Get the sequence parallel world size.
+     """
+     group = get_sequence_parallel_group()
+     return dist.get_world_size(group) if group else 1
+
+
+ def init_sequence_parallel(sequence_parallel_size: int):
+     """
+     Initialize sequence parallelism by partitioning ranks into contiguous groups.
+     """
+     global _DATA_PARALLEL_GROUP
+     global _SEQUENCE_PARALLEL_GROUP
+     global _SEQUENCE_PARALLEL_CPU_GROUP
+     assert dist.is_initialized()
+     world_size = dist.get_world_size()
+     rank = dist.get_rank()
+     data_parallel_size = world_size // sequence_parallel_size
+     for i in range(data_parallel_size):
+         start_rank = i * sequence_parallel_size
+         end_rank = (i + 1) * sequence_parallel_size
+         ranks = range(start_rank, end_rank)
+         group = dist.new_group(ranks)
+         cpu_group = dist.new_group(ranks, backend="gloo")
+         if rank in ranks:
+             _SEQUENCE_PARALLEL_GROUP = group
+             _SEQUENCE_PARALLEL_CPU_GROUP = cpu_group
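A layout sketch for init_sequence_parallel (sizes hypothetical; requires an initialized process group):

    init_torch()                                      # NCCL setup from RANK/WORLD_SIZE env vars
    init_sequence_parallel(sequence_parallel_size=2)
    # With world_size=8 this builds data_parallel_size=4 contiguous groups:
    #   [0, 1], [2, 3], [4, 5], [6, 7]
    # Each rank keeps its own group (plus a gloo group for CPU collectives),
    # so get_sequence_parallel_world_size() == 2 on every rank.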