"""Deduplicate the train and validation CSV splits by hashing the text field of each example."""
import datasets


def get_hash(example):
    """Get hash of text field."""
    return {"hash": hash(example["text"])}


def check_uniques(example, uniques):
    """Check if the current hash is still in the set of unique hashes and remove it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def filter(example, uniques):
    """Keep an example only if its hash is still in the set of unique hashes."""
    return check_uniques(example, uniques)


# Load the raw train and validation splits from CSV files.
dataset = datasets.load_dataset("csv", data_files={"train": "train.csv", "validation": "valid.csv"})
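
# Deduplication keeps only the first occurrence of each hash: check_uniques removes a hash
# from the set the first time it is seen, so later examples with the same hash are dropped.
# Python's built-in hash() of strings is salted per interpreter run, so the hash values are
# only meaningful within a single execution of this script.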

len_train = len(dataset["train"])
print(f"Size of original dataset train: {len_train}")

# Add a hash column to every training example, computed in parallel worker processes.
dataset["train"] = dataset["train"].map(get_hash, num_proc=64, writer_batch_size=100000)

# Collect the set of unique hashes; the shortfall versus the total row count is the duplicate fraction.
uniques = set(dataset["train"].unique("hash"))
frac = len(uniques) / len(dataset["train"])
print(f"Fraction of duplicates: {1-frac:.2%}")

# Keep only the first occurrence of each hash in the train split.
dataset_train_deduplicated = dataset["train"].filter(filter, fn_kwargs={"uniques": uniques})
print(f"Size of filtered dataset train: {len(dataset_train_deduplicated)}")

# Repeat the same procedure for the validation split.
len_val = len(dataset["validation"])
print(f"Size of original dataset valid: {len_val}")

dataset["validation"] = dataset["validation"].map(get_hash, num_proc=64, writer_batch_size=100000)

uniques = set(dataset["validation"].unique("hash"))
frac = len(uniques) / len(dataset["validation"])
print(f"Fraction of duplicates: {1-frac:.2%}")

dataset_valid_deduplicated = dataset["validation"].filter(filter, fn_kwargs={"uniques": uniques})
print(f"Size of filtered dataset valid: {len(dataset_valid_deduplicated)}")

# Drop the helper hash column before saving.
dataset_train_deduplicated = dataset_train_deduplicated.remove_columns(["hash"])
dataset_valid_deduplicated = dataset_valid_deduplicated.remove_columns(["hash"])

# Overwrite the original CSV files with the deduplicated splits.
dataset_train_deduplicated.to_csv("train.csv", num_proc=64, index=False)
dataset_valid_deduplicated.to_csv("valid.csv", num_proc=64, index=False)
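# Dataset.to_csv forwards extra keyword arguments (here index=False) to pandas' to_csv.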