# github-jupyter-text-code-pairs / preprocessing.py
import hashlib
import re

from tqdm import tqdm
from datasets import Dataset, load_dataset

# used to strip all whitespace before hashing so reformatted duplicates still collide
PATTERN = re.compile(r"\s+")


def parse_data(ds):
    """Parse notebooks into consecutive (markdown, code) cell pairs."""
    markdowns = []
    code_snippets = []
    paths = []
    repo_names = []
    licenses = []
    for i in tqdm(range(len(ds))):
        inner_markdowns = []
        inner_code_snippets = []
        types = ds[i]["types"]
        path = ds[i]["path"]
        repo = ds[i]["repo_name"]
        license = ds[i]["license"]
        if types[0] == "code":
            # drop the first code cell so the notebook starts with markdown
            cells = ds[i]["cells"][1:]
            types = types[1:]
        else:
            # drop the first two cells (markdown followed by code): the first
            # markdown cell of a notebook is often a long description of the whole notebook
            cells = ds[i]["cells"][2:]
            types = ds[i]["types"][2:]
        if len(cells) % 2 == 0:
            inner_markdowns = [cells[j] for j in range(len(cells)) if j % 2 == 0]
            inner_code_snippets = [cells[j + 1] for j in range(len(cells) - 1) if j % 2 == 0]
        else:
            # drop the last markdown cell, which has no code cell after it
            inner_markdowns = [cells[j] for j in range(len(cells) - 1) if j % 2 == 0]
            inner_code_snippets = [cells[j + 1] for j in range(len(cells) - 2) if j % 2 == 0]
        markdowns.extend(inner_markdowns)
        code_snippets.extend(inner_code_snippets)
        paths.extend([path] * len(inner_markdowns))
        repo_names.extend([repo] * len(inner_markdowns))
        licenses.extend([license] * len(inner_markdowns))
    return markdowns, code_snippets, paths, repo_names, licenses
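
# A minimal sketch (toy cells, not from the dataset) of the pairing logic above:
# once the leading cells are dropped, cells alternate markdown/code, so each even
# index is a markdown cell and the following odd index is its code cell:
#   cells = ["## Load data", "df = pd.read_csv('x.csv')", "## Plot", "df.plot()"]
#   -> ("## Load data", "df = pd.read_csv('x.csv')"), ("## Plot", "df.plot()")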


def get_hash(example):
    """Get the hash of the concatenated markdown + code, ignoring whitespace."""
    text = example["markdown"] + example["code"]
    return {"hash": hashlib.md5(re.sub(PATTERN, "", text).encode("utf-8")).hexdigest()}


def preprocess(example):
    """Add a hash column to the dataset."""
    results = dict()
    results.update(get_hash(example))
    return results


def check_uniques(example, uniques):
    """Check if the current hash is still in the set of unique hashes, and remove it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def filter(example, uniques):
    """Keep only the first occurrence of each hash."""
    return check_uniques(example, uniques)
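
# Note: check_uniques mutates the shared `uniques` set, so only the first example
# with a given hash passes the filter and later duplicates are dropped. This
# assumes the filter runs in a single process (the datasets default); with
# num_proc > 1 each worker would receive its own copy of the set and duplicates
# split across workers could survive.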


if __name__ == "__main__":
    ds = load_dataset("codeparrot/github-jupyter-parsed", split="train")
    print("Parsing data...")
    markdowns, code_snippets, paths, repo_names, licenses = parse_data(ds)
    data = {
        "markdown": markdowns,
        "code": code_snippets,
        "path": paths,
        "repo_name": repo_names,
        "license": licenses,
    }
    parsed_data = Dataset.from_dict(data)
    print("Deduplication...")
    parsed_data = parsed_data.map(preprocess)
    # deduplicate: keep one example per unique hash
    uniques = set(parsed_data.unique("hash"))
    frac = len(uniques) / len(parsed_data)
    print(f"Fraction of duplicates: {1 - frac:.2%}")
    ds_filter = parsed_data.filter(filter, fn_kwargs={"uniques": uniques})
    ds_filter.push_to_hub("codeparrot/github-jupyter-text-code-pairs")
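
# Hypothetical local run (assumes you are logged in to the Hub, e.g. via
# `huggingface-cli login`, and have write access to the target dataset repo):
#   python preprocessing.py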