---
task_categories:
- image-to-text
- text-to-image
pretty_name: Data Filtering Networks, 200m, datacomp large
size_categories:
- 100M<n<1B
---

# Data Filtering Networks, 200m

This dataset was released as part of the Data Filtering Networks paper. It is a subset of DataComp large.
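
If you want to fetch the parquet files programmatically, here is a minimal sketch using `huggingface_hub`; the repository id below is a placeholder (an assumption), so substitute this dataset's actual Hub id.

```python
from huggingface_hub import snapshot_download

# Placeholder repo id -- replace with this dataset's actual Hub id.
REPO_ID = "user/dfn-200m-datacomp-large"

# Download only the parquet shards into ./dfn_200m.
snapshot_download(
    repo_id=REPO_ID,
    repo_type="dataset",
    local_dir="./dfn_200m",
    allow_patterns="*.parquet",
)
```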

These parquet files are that subset. The following script was used to filter the original parquet files down to the subset, using the indices from apf1/datafilteringnetworks_2b:

```python
import os
from os import path
from glob import glob
from multiprocessing import Pool

import numpy as np
import pyarrow.parquet as pq

parquet_files = list(glob("../*.parquet"))
out_path = "../resampled/"
os.makedirs(out_path, exist_ok=True)
subset_file = "../indices/datacomp_large_dfn_200m_inds.npy"

# 16-byte (128-bit) uids, stored as a pair of uint64s: (high word, low word).
u16 = np.dtype("u8,u8")


def load_subset():
    # Memory-map the sorted index array so it is not copied into every worker.
    return np.load(subset_file, mmap_mode="r")


def process_parquet(parquet_file):
    print("filtering", parquet_file)
    subset = load_subset()
    table = pq.read_table(parquet_file)
    mask = []
    for uid in table["uid"]:
        # Pack the 32-hex-digit uid into (high, low) uint64 halves to match the index dtype.
        uid = str(uid)
        key_u16 = np.array([divmod(int(uid, 16), 2**64)], u16)[0]
        # Binary search in the sorted index: the uid is kept iff it occurs exactly once.
        a = np.searchsorted(subset, key_u16, "left")
        b = np.searchsorted(subset, key_u16, "right")
        count = b - a

        assert count == 1 or count == 0

        mask.append(count == 1)

    table = table.filter(mask)

    out_filename = path.join(out_path, path.basename(parquet_file))
    pq.write_table(table, out_filename)

    print("wrote", out_filename)


with Pool(4) as pool:
    pool.map(process_parquet, parquet_files)

print("done.")
```
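
For reference, here is a standalone sketch of the uid lookup the script relies on. Each DataComp uid is a 128-bit value written as a 32-character hex string, and the index file holds a sorted array of (high, low) uint64 pairs, so membership reduces to a binary search. The uid and the tiny in-memory "subset" below are made-up illustrations, not real index data.

```python
import numpy as np

# Structured dtype matching the index file: two uint64 fields per 128-bit uid.
u16 = np.dtype("u8,u8")

# Hypothetical 32-hex-digit uid, for illustration only.
uid = "0123456789abcdef0123456789abcdef"

# Split the 128-bit integer into its high and low 64-bit words.
high, low = divmod(int(uid, 16), 2**64)
key = np.array([(high, low)], dtype=u16)[0]

# Toy sorted index containing the key; the real script memory-maps
# datacomp_large_dfn_200m_inds.npy instead.
subset = np.sort(np.array([(0, 1), (high, low), (2**63, 5)], dtype=u16))

# The uid is present iff searchsorted's left and right bounds differ.
left = np.searchsorted(subset, key, "left")
right = np.searchsorted(subset, key, "right")
print(bool(right - left))  # True
```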