# mc4_3.1.0_fi_cleaned / clean_data.py
# Author: aapot
# Commit: 248c176 — "Add dataset creation scripts"
import datasets
from clean_funcs import clean_text
from filter_stats_funcs import filter_stats
# Load the raw Finnish mC4 3.1.0 dataset previously materialized on disk.
fi_mc4 = datasets.load_from_disk("/researchdisk/mc4_3.1.0_fi")
print(fi_mc4)
# Per-document filtering thresholds, compared against the statistics columns
# added by filter_stats (presumably character-class fractions of the text
# and a fastText-style language-ID prediction — confirm in filter_stats_funcs).
min_alphabet_ratio = 0.75  # keep docs that are mostly alphabetic characters
max_upper_ratio = 0.10  # drop shouty / heavily upper-cased docs
max_number_ratio = 0.05  # drop number-heavy docs (tables, listings)
min_pred_lang_percentage = 0.95  # require high-confidence Finnish prediction
def clean_split(dataset, label, num_proc):
    """Run the full cleaning pipeline on one dataset split.

    Steps:
      1. Normalize every document's text with ``clean_text``.
      2. Drop documents shorter than 20 whitespace-separated tokens.
      3. Annotate each document with ``filter_stats`` and keep only those
         passing the character-ratio and language-ID thresholds defined at
         module level.

    Args:
        dataset: ``datasets.Dataset`` split to clean.
        label: short name used only in progress messages ("train" / "val").
        num_proc: number of worker processes for map/filter calls.

    Returns:
        Tuple ``(text_cleaned, fully_filtered)`` — the first has only
        ``clean_text`` applied (kept so the caller can store it back into
        the split dict, matching the original script's side effect), the
        second additionally passes all length/ratio/language filters.
    """
    print(f"Original dataset {label} rows {dataset.num_rows}")
    dataset = dataset.map(clean_text, num_proc=num_proc, batched=False)
    only_longer = dataset.filter(
        lambda example: len(example["text"].split()) >= 20, num_proc=num_proc
    )
    print(f"Only longer texts dataset {label} rows {only_longer.num_rows}")
    only_longer = only_longer.map(filter_stats, num_proc=num_proc, batched=False)
    cleaned = only_longer.filter(
        lambda example: example["alphabet_ratio"] > min_alphabet_ratio
        and example["upper_ratio"] < max_upper_ratio
        and example["number_ratio"] < max_number_ratio
        and example["predicted_lang"] == "__label__fi"
        and example["predicted_lang_percentage"] > min_pred_lang_percentage,
        num_proc=num_proc,
    )
    print(f"Final cleaned dataset {label} rows {cleaned.num_rows}")
    return dataset, cleaned


# TRAIN SPLIT
fi_mc4["train"], fi_train_cleaned = clean_split(fi_mc4["train"], "train", 64)
# VAL SPLIT
fi_mc4["validation"], fi_val_cleaned = clean_split(fi_mc4["validation"], "val", 32)
# SAVE TO DISK
# Statistics columns added by filter_stats; drop them from both splits so the
# saved dataset keeps only the original mC4 columns. Single source of truth
# instead of the previous duplicated literal lists.
STATS_COLUMNS = [
    "alphabet_len",
    "number_len",
    "upper_len",
    "total_len",
    "predicted_lang",
    "predicted_lang_percentage",
    "alphabet_ratio",
    "number_ratio",
    "upper_ratio",
]
fi_train_cleaned = fi_train_cleaned.remove_columns(STATS_COLUMNS)
fi_val_cleaned = fi_val_cleaned.remove_columns(STATS_COLUMNS)
# Reassemble the cleaned splits into a DatasetDict and persist it next to
# the raw dataset on the research disk.
fi_mc4_cleaned = datasets.DatasetDict(
    {"train": fi_train_cleaned, "validation": fi_val_cleaned}
)
fi_mc4_cleaned.save_to_disk("/researchdisk/mc4_3.1.0_fi_cleaned", num_proc=32)