# adapted from: https://github.com/huggingface/transformers/blob/master/examples/research_projects/codeparrot/scripts/preprocessing.py
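# Removes exact duplicates from the train and validation splits of the cleaned
# Finnish mC4 dataset by hashing each document's text and keeping only the
# first occurrence of every hash.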

import datasets


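# Note: Python's built-in hash() of strings is salted per interpreter session,
# so the hash values computed below are not stable across runs; they are only
# meant for duplicate detection within this run.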
def get_hash(example):
    """Get hash of text field."""
    return {"hash": hash(example["text"])}


def check_uniques(example, uniques):
    """Return True and remove the hash from `uniques` the first time it is seen; False otherwise."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def filter(example, uniques):
    """Keep an example only if its hash has not been seen yet."""
    return check_uniques(example, uniques)


dataset = datasets.load_from_disk("/researchdisk/mc4_3.1.0_fi_cleaned")

# TRAIN SPLIT DEDUPLICATION
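# Steps: add a hash column to every row, collect the set of distinct hashes,
# then keep only the first row seen for each hash.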

print(f"Size of original dataset train: {len(dataset['train'])}")

dataset["train"] = dataset["train"].map(get_hash, num_proc=96)

# Collect the set of distinct hashes and report the duplicate fraction
uniques = set(dataset["train"].unique("hash"))
frac = len(uniques) / len(dataset["train"])
print(f"Fraction of duplicates: {1-frac:.2%}")

# Keep only the first occurrence of each hash
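# Note: fn_kwargs is pickled into every worker process, so with num_proc > 1
# each worker mutates its own copy of `uniques`; duplicates that fall into
# different worker shards may therefore survive. num_proc=1 would make the
# deduplication exact, at the cost of speed.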
dataset_train_deduplicated = dataset["train"].filter(
    filter, fn_kwargs={"uniques": uniques}, num_proc=64
)
print(f"Size of filtered dataset train: {len(dataset_train_deduplicated)}")

# VALIDATION SPLIT DEDUPLICATION

print(f"Size of original dataset valid: {len(dataset['validation'])}")

dataset["validation"] = dataset["validation"].map(get_hash, num_proc=96)

# Collect the set of distinct hashes and report the duplicate fraction
uniques = set(dataset["validation"].unique("hash"))
frac = len(uniques) / len(dataset["validation"])
print(f"Fraction of duplicates: {1-frac:.2%}")

# Keep only the first occurrence of each hash (same num_proc caveat as above)
dataset_valid_deduplicated = dataset["validation"].filter(
    filter, fn_kwargs={"uniques": uniques}, num_proc=32
)
print(f"Size of filtered dataset valid: {len(dataset_valid_deduplicated)}")

# SAVE DEDUPLICATED DATASET
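# Note: save_to_disk can fail with "a dataset can't overwrite itself" when the
# target directory is the one the dataset (and its map/filter cache files) was
# loaded from, so the deduplicated copy is written to a separate directory
# below (the directory name is just one reasonable choice).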
dataset_train_deduplicated = dataset_train_deduplicated.remove_columns(["hash"])
dataset_valid_deduplicated = dataset_valid_deduplicated.remove_columns(["hash"])

fi_mc4_cleaned = datasets.DatasetDict()
fi_mc4_cleaned["train"] = dataset_train_deduplicated
fi_mc4_cleaned["validation"] = dataset_valid_deduplicated

fi_mc4_cleaned.save_to_disk("/researchdisk/mc4_3.1.0_fi_cleaned_deduplicated", num_proc=32)