import gzip
import hashlib
import multiprocessing
import os
import re
import shutil
import time
from argparse import Namespace
from collections import Counter
from multiprocessing import Pool

import numpy as np
from datasets import load_dataset, utils
from huggingface_hub import Repository
from tqdm import tqdm

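# Settings: path of the raw dataset, number of workers, filtering threshold, and output/Hub repository options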
config = {
    "dataset_name": "./data/github",
    "num_workers": 96,
    "line_max": 1000,
    "out_path": "./data/github-code",
    "repo_name": "github-code",
    "org": "lvwerra",
    "shard_size": 1000 << 20,
}

args = Namespace(**config)

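# All whitespace is stripped before hashing so that files differing only in formatting hash to the same value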
PATTERN = re.compile(r'\s+')


def hash_func(text):
    return hashlib.md5(re.sub(PATTERN, '', text).encode("utf-8")).hexdigest()


def get_hash(example):
    """Get hash of content field."""
    return {"hash": hash_func(example["content"])}

def line_stats(example):
    """Calculates mean and max line length of file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}

def alpha_stats(example):
    """Calculates fraction of alphanumeric characters in file."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}

def check_uniques(example, uniques):
    """Check if current hash is still in set of unique hashes and remove if true."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False

def is_autogenerated(example, scan_width=5):
    """Check if file is autogenerated by looking for keywords in the first few lines of the file."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    return {"autogenerated": False}

def preprocess(example):
    """Chain all preprocessing steps into one function to not fill cache."""
    results = dict()
    results.update(get_hash(example))
    results.update(line_stats(example))
    return results

def filter(example, uniques, args):
    """Filter dataset with heuristics."""
    if not check_uniques(example, uniques):
        return False
    elif example["line_max"] > args.line_max:
        return False
    else:
        return True

def save_shard(shard_tuple):
    """Save shard"""
    filename, shard = shard_tuple
    shard.to_parquet(filename)

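# Load dataset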
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train", chunksize=40 << 20)
print(f"Time to load dataset: {time.time()-t_start:.2f}")

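# Run preprocessing (hashing and line statistics) with multiple workers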
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")
print(ds)

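# Deduplicate hashes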
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

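# Deduplicate data and apply the line-length heuristic, then drop the helper columns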
t_start = time.time()
ds = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
ds = ds.remove_columns(["line_mean", "line_max", "hash"])
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds)}")

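# Save dataset in repo: clone the (private) dataset repo from the Hub into the output path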
repo = Repository(
    local_dir=args.out_path,
    clone_from=args.org + "/" + args.repo_name,
    repo_type="dataset",
    private=True,
    use_auth_token=True,
    git_user="lvwerra",
    git_email="leandro.vonwerra@gmail.com",
)

os.mkdir(args.out_path + "/data")

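# Estimate the dataset size on disk; if an indices mapping exists (e.g. after filtering),
# scale the Arrow table size by the fraction of selected rows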
if ds._indices is not None:
    dataset_nbytes = ds.data.nbytes * len(ds._indices) / len(ds.data)
else:
    dataset_nbytes = ds.data.nbytes

num_shards = int(dataset_nbytes / args.shard_size) + 1
print(f"Number of shards: {num_shards}")

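# Shard the dataset contiguously and write the shards to parquet files in parallel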
t_start = time.time()
shards = (ds.shard(num_shards=num_shards, index=i, contiguous=True) for i in range(num_shards))
filenames = (f"{args.out_path}/data/train-{index:05d}-of-{num_shards:05d}.parquet" for index in range(num_shards))

with Pool(16) as p:
    list(tqdm(p.imap_unordered(save_shard, zip(filenames, shards), chunksize=4), total=num_shards))
print(f"Time to save dataset: {time.time()-t_start:.2f}")