File size: 2,849 Bytes
248c176
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
import datasets
from clean_funcs import clean_text
from filter_stats_funcs import filter_stats

# Quality thresholds a document must satisfy to survive cleaning.
MIN_ALPHABET_RATIO = 0.75        # at least 75% alphabetic characters
MAX_UPPER_RATIO = 0.10           # at most 10% uppercase characters
MAX_NUMBER_RATIO = 0.05          # at most 5% digit characters
MIN_PRED_LANG_PERCENTAGE = 0.95  # language-ID confidence for Finnish
MIN_WORDS = 20                   # drop very short documents

# Helper columns added by filter_stats; dropped again before saving.
STATS_COLUMNS = [
    "alphabet_len",
    "number_len",
    "upper_len",
    "total_len",
    "predicted_lang",
    "predicted_lang_percentage",
    "alphabet_ratio",
    "number_ratio",
    "upper_ratio",
]


def _passes_quality_filters(example):
    """Return True when the stats produced by filter_stats meet every threshold."""
    return (
        example["alphabet_ratio"] > MIN_ALPHABET_RATIO
        and example["upper_ratio"] < MAX_UPPER_RATIO
        and example["number_ratio"] < MAX_NUMBER_RATIO
        and example["predicted_lang"] == "__label__fi"
        and example["predicted_lang_percentage"] > MIN_PRED_LANG_PERCENTAGE
    )


def _clean_split(dataset, split_name, num_proc):
    """Run the full cleaning pipeline on one split.

    Steps: text cleanup -> drop short docs -> compute per-doc stats ->
    threshold filter -> drop the helper stat columns.

    Args:
        dataset: a datasets.Dataset holding the raw split.
        split_name: label used in the progress print-outs ("train"/"val").
        num_proc: worker count for the parallel map/filter calls.

    Returns:
        The cleaned datasets.Dataset with only the original columns.
    """
    print(f"Original dataset {split_name} rows {dataset.num_rows}")

    dataset = dataset.map(clean_text, num_proc=num_proc, batched=False)

    only_longer = dataset.filter(
        lambda example: len(example["text"].split()) >= MIN_WORDS,
        num_proc=num_proc,
    )
    print(f"Only longer texts dataset {split_name} rows {only_longer.num_rows}")

    only_longer = only_longer.map(filter_stats, num_proc=num_proc, batched=False)

    cleaned = only_longer.filter(_passes_quality_filters, num_proc=num_proc)
    print(f"Final cleaned dataset {split_name} rows {cleaned.num_rows}")

    return cleaned.remove_columns(STATS_COLUMNS)


fi_mc4 = datasets.load_from_disk("/researchdisk/mc4_3.1.0_fi")
print(fi_mc4)

fi_mc4_cleaned = datasets.DatasetDict()
# TRAIN SPLIT (64 workers) / VAL SPLIT (32 workers) — same pipeline, shared code.
fi_mc4_cleaned["train"] = _clean_split(fi_mc4["train"], "train", 64)
fi_mc4_cleaned["validation"] = _clean_split(fi_mc4["validation"], "val", 32)

# SAVE TO DISK
fi_mc4_cleaned.save_to_disk("/researchdisk/mc4_3.1.0_fi_cleaned", num_proc=32)