aapot committed
Commit 248c176
Parent(s): b57841c
Add dataset creation scripts
- calculate_kenlm_perplexity.py +26 -0
- clean_data.py +93 -0
- clean_funcs.py +116 -0
- create_dataset.py +28 -0
- deduplicate.py +72 -0
- filter_stats_funcs.py +62 -0
- jq_process_loaded_mc4_files.sh +30 -0
- kenlm_5gram_fi.bin +3 -0
- load_mc4_3.1.0_fi.sh +6 -0
calculate_kenlm_perplexity.py
ADDED
@@ -0,0 +1,26 @@
import datasets
import kenlm

kenlm_model = kenlm.Model("kenlm_5gram_fi.bin")


def get_perplexity(example):
    """Get perplexity of text field."""
    example["perplexity_kenlm"] = int(kenlm_model.perplexity(example["text"]))
    return example


dataset = datasets.load_from_disk("/researchdisk/mc4_3.1.0_fi_cleaned")

# TRAIN SPLIT PERPLEXITY
dataset["train"] = dataset["train"].map(
    get_perplexity, num_proc=64
)

# VALIDATION SPLIT PERPLEXITY
dataset["validation"] = dataset["validation"].map(
    get_perplexity, num_proc=64
)

# SAVE DATASET
dataset.save_to_disk("/researchdisk/mc4_3.1.0_fi_cleaned", num_proc=32)
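The perplexity_kenlm column written by this script could later be used to drop the noisiest documents. A minimal sketch of such a step, assuming a hypothetical cutoff value (the actual threshold, if any, is not part of this commit):

import datasets

# MAX_PERPLEXITY is a made-up illustrative threshold, not taken from this commit.
MAX_PERPLEXITY = 10_000

dataset = datasets.load_from_disk("/researchdisk/mc4_3.1.0_fi_cleaned")

# Keep only documents whose KenLM perplexity is below the chosen cutoff.
dataset = dataset.filter(lambda example: example["perplexity_kenlm"] < MAX_PERPLEXITY)
print(dataset)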
clean_data.py
ADDED
@@ -0,0 +1,93 @@
import datasets
from clean_funcs import clean_text
from filter_stats_funcs import filter_stats

fi_mc4 = datasets.load_from_disk("/researchdisk/mc4_3.1.0_fi")
print(fi_mc4)

min_alphabet_ratio = 0.75
max_upper_ratio = 0.10
max_number_ratio = 0.05
min_pred_lang_percentage = 0.95

# TRAIN SPLIT
print(f"Original dataset train rows {fi_mc4['train'].num_rows}")
fi_mc4["train"] = fi_mc4["train"].map(
    clean_text, num_proc=64, batched=False
)

fi_train_only_longer = fi_mc4["train"].filter(
    lambda example: len(example["text"].split()) >= 20, num_proc=64
)
print(f"Only longer texts dataset train rows {fi_train_only_longer.num_rows}")

fi_train_only_longer = fi_train_only_longer.map(
    filter_stats, num_proc=64, batched=False
)

fi_train_cleaned = fi_train_only_longer.filter(
    lambda example: example["alphabet_ratio"] > min_alphabet_ratio
    and example["upper_ratio"] < max_upper_ratio
    and example["number_ratio"] < max_number_ratio
    and example["predicted_lang"] == "__label__fi"
    and example["predicted_lang_percentage"] > min_pred_lang_percentage,
    num_proc=64,
)
print(f"Final cleaned dataset train rows {fi_train_cleaned.num_rows}")

# VAL SPLIT
print(f"Original dataset val rows {fi_mc4['validation'].num_rows}")
fi_mc4["validation"] = fi_mc4["validation"].map(
    clean_text, num_proc=32, batched=False
)

fi_val_only_longer = fi_mc4["validation"].filter(
    lambda example: len(example["text"].split()) >= 20, num_proc=32
)
print(f"Only longer texts dataset val rows {fi_val_only_longer.num_rows}")

fi_val_only_longer = fi_val_only_longer.map(filter_stats, num_proc=32, batched=False)

fi_val_cleaned = fi_val_only_longer.filter(
    lambda example: example["alphabet_ratio"] > min_alphabet_ratio
    and example["upper_ratio"] < max_upper_ratio
    and example["number_ratio"] < max_number_ratio
    and example["predicted_lang"] == "__label__fi"
    and example["predicted_lang_percentage"] > min_pred_lang_percentage,
    num_proc=32,
)
print(f"Final cleaned dataset val rows {fi_val_cleaned.num_rows}")

# SAVE TO DISK
fi_train_cleaned = fi_train_cleaned.remove_columns(
    [
        "alphabet_len",
        "number_len",
        "upper_len",
        "total_len",
        "predicted_lang",
        "predicted_lang_percentage",
        "alphabet_ratio",
        "number_ratio",
        "upper_ratio",
    ]
)
fi_val_cleaned = fi_val_cleaned.remove_columns(
    [
        "alphabet_len",
        "number_len",
        "upper_len",
        "total_len",
        "predicted_lang",
        "predicted_lang_percentage",
        "alphabet_ratio",
        "number_ratio",
        "upper_ratio",
    ]
)

fi_mc4_cleaned = datasets.DatasetDict()
fi_mc4_cleaned["train"] = fi_train_cleaned
fi_mc4_cleaned["validation"] = fi_val_cleaned

fi_mc4_cleaned.save_to_disk("/researchdisk/mc4_3.1.0_fi_cleaned", num_proc=32)
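To make the heuristic thresholds above concrete, here is a small sketch, with made-up numbers rather than values from the commit, of the per-document statistics that filter_stats produces and how the filtering predicate in clean_data.py treats them:

# Hypothetical record after filter_stats (all values are illustrative only).
example = {
    "text": "Tämä on lyhyt suomenkielinen esimerkkiteksti.",
    "alphabet_ratio": 0.82,             # > 0.75  -> passes
    "upper_ratio": 0.03,                # < 0.10  -> passes
    "number_ratio": 0.01,               # < 0.05  -> passes
    "predicted_lang": "__label__fi",    # fastText label for Finnish -> passes
    "predicted_lang_percentage": 0.99,  # > 0.95  -> passes
}

keep = (
    example["alphabet_ratio"] > 0.75
    and example["upper_ratio"] < 0.10
    and example["number_ratio"] < 0.05
    and example["predicted_lang"] == "__label__fi"
    and example["predicted_lang_percentage"] > 0.95
)
print(keep)  # True for this example, so the document would be kept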
clean_funcs.py
ADDED
@@ -0,0 +1,116 @@
import re
import unicodedata
from functools import reduce
from itertools import groupby
from string import punctuation

url_regex = re.compile(r"((www\.[^\s]+)|(https?://[^\s]+)|(http?://[^\s]+))")
control_char_regex = re.compile(r"[\r\n\t]+")
phone_number_regex = re.compile(r"\(?\+?\d[\s\d()-]{5,}\d")
finnish_social_id_regex = re.compile(r"\d{6}[-+Aa]\d{3}[a-zA-Z0-9]")
email_regex = re.compile(r"[\w.+-]+@[\w-]+\.[\w.-]+")


def clean_text(example) -> dict:
    """Run all preprocessing functions on the text field of a single example."""
    data_preprocessing_funcs = [
        _replace_identity,
        _replace_phone_number,
        _replace_url,
        _replace_email,
        _fix_html,
        _standardise_punc,
        _remove_unicode_symbols,
        _remove_control_char,
        _remove_remaining_control_chars,
        _remove_multiple_punctuation,
        _remove_multi_space,
    ]
    # Each helper takes a string and returns {"text": cleaned_string},
    # so the output of one step feeds the next via its "text" key.
    return reduce(lambda x, y: y(x["text"]), data_preprocessing_funcs, example)


def _standardise_punc(text: str) -> dict:
    transl_table = dict([(ord(x), ord(y)) for x, y in zip("‘’´“”–-", "'''\"\"--")])
    text_mod = text.translate(transl_table)
    text_mod = re.sub(r"[^a-zA-Z0-9À-ÿ .,'%&€$=@+;<>/()!?%:-]", " ", text_mod)
    return {"text": text_mod}


def _remove_control_char(text: str) -> dict:
    text_mod = re.sub(control_char_regex, " ", text)
    return {"text": text_mod}


def _remove_remaining_control_chars(text: str) -> dict:
    text_mod = "".join(ch for ch in text if unicodedata.category(ch)[0] != "C")
    return {"text": text_mod}


def _remove_multi_space(text: str) -> dict:
    text_mod = " ".join([txt for txt in text.split(" ") if txt and txt != ""])
    return {"text": text_mod}


def _remove_unicode_symbols(text: str) -> dict:
    text_mod = "".join(ch for ch in text if unicodedata.category(ch)[0:2] != "So")
    return {"text": text_mod}


def _replace_url(text: str) -> dict:
    filler = ""
    occ = text.count("www.") + text.count("http:") + text.count("https:")
    text_mod = text
    for _ in range(occ):
        # replace other urls by filler
        text_mod = re.sub(url_regex, filler, text_mod)
        text_mod = " ".join(text_mod.split())
    return {"text": text_mod}


def _fix_html(text: str) -> dict:
    text_mod = (
        text.replace("#39;", "'")
        .replace("amp;", "&")
        .replace("#146;", "'")
        .replace("nbsp;", " ")
        .replace("\\n", "\n")
        .replace("quot;", "'")
        .replace("<br />", "\n")
        .replace('\\"', '"')
        .replace(" @.@ ", ".")
        .replace(" @-@ ", "-")
        .replace("...", " …")
    )
    return {"text": text_mod}


def _replace_phone_number(text: str) -> dict:
    text_mod = re.sub(phone_number_regex, " ", text)
    return {"text": text_mod}


def _replace_identity(text: str) -> dict:
    text_mod = re.sub(finnish_social_id_regex, " ", text)
    return {"text": text_mod}


def _replace_email(text: str) -> dict:
    text_mod = re.sub(email_regex, " ", text)
    return {"text": text_mod}


def _remove_news_tags(text: str) -> dict:
    text_mod = re.sub(r"(<[A-Z].+?>)|(</[A-Z].+?>)", " ", text)
    return {"text": text_mod}


def _remove_multiple_punctuation(text: str) -> dict:
    # Collapse repeated words, then collapse runs of identical punctuation characters.
    text_mod = re.sub(r"\b(\w+)( \1\b)+", r"\1", text)
    punc = set(punctuation)
    newtext = []
    for k, g in groupby(text_mod):
        if k in punc:
            newtext.append(k)
        else:
            newtext.extend(g)
    text_mod = "".join(newtext)
    return {"text": text_mod}
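As a quick illustration of how these helpers compose (not part of the commit), clean_text can be called directly on a dict with a text field; the sketch below assumes clean_funcs.py is importable from the working directory and uses a made-up noisy input:

from clean_funcs import clean_text

# Hypothetical noisy input: a URL, an e-mail address, tabs and repeated punctuation.
example = {
    "text": "Katso https://example.com !!! Ota yhteyttä: joku@example.fi\t\tKiitos   paljon"
}

cleaned = clean_text(example)
print(cleaned["text"])
# Expected shape of the output: a single-space-separated string with the URL
# and the e-mail address removed and the "!!!" run collapsed to a single "!".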
create_dataset.py
ADDED
@@ -0,0 +1,28 @@
import datasets

features = datasets.Features(
    {
        "source": datasets.Value(dtype="string", id=None),
        "id": datasets.Value(dtype="string", id=None),
        "text": datasets.Value(dtype="string", id=None),
        "added": datasets.Value(dtype="string", id=None),
        "timestamp": datasets.Value(dtype="timestamp[s]", id=None),
        "metadata": {"url": datasets.Value(dtype="string", id=None)},
    }
)
dataset = datasets.load_dataset(
    "json",
    data_files={
        "train": "fi_processed/c4-fi.*.json",
        "validation": "fi_processed/c4-fi-validation*.json",
    },
    features=features,
    cache_dir="/researchdisk/datasets_cache",
    num_proc=96,
)

dataset = dataset.flatten()
dataset = dataset.rename_column("metadata.url", "url")
dataset = dataset.remove_columns(["source", "id", "added"])
print(dataset)
dataset.save_to_disk("/researchdisk/mc4_3.1.0_fi", num_proc=96)
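The flatten and rename_column steps above turn the nested metadata.url field into a top-level url column. A tiny self-contained sketch of the same transformation, using an in-memory dataset with made-up values instead of the JSON files:

import datasets

# In-memory stand-in for one mC4 record (values are illustrative only).
ds = datasets.Dataset.from_list(
    [{"text": "esimerkki", "metadata": {"url": "https://example.fi/sivu"}}]
)

ds = ds.flatten()                             # "metadata" struct -> "metadata.url" column
ds = ds.rename_column("metadata.url", "url")  # expose it as a plain "url" column
print(ds.column_names)                        # ['text', 'url']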
deduplicate.py
ADDED
@@ -0,0 +1,72 @@
# adapted from: https://github.com/huggingface/transformers/blob/master/examples/research_projects/codeparrot/scripts/preprocessing.py

import datasets


def get_hash(example):
    """Get hash of text field."""
    return {"hash": hash(example["text"])}


def check_uniques(example, uniques):
    """Check if current hash is still in set of unique hashes and remove if true."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def filter(example, uniques):
    """Filter dataset with unique values."""
    if not check_uniques(example, uniques):
        return False
    else:
        return True


dataset = datasets.load_from_disk("/researchdisk/mc4_3.1.0_fi_cleaned")

# TRAIN SPLIT DEDUPLICATION

print(f"Size of original dataset train: {len(dataset['train'])}")

dataset["train"] = dataset["train"].map(get_hash, num_proc=96)

# Deduplicate hashes
uniques = set(dataset["train"].unique("hash"))
frac = len(uniques) / len(dataset["train"])
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data
# NOTE: with num_proc > 1 each worker process gets its own copy of `uniques`,
# so duplicates whose occurrences land in different shards can all pass the filter.
dataset_train_deduplicated = dataset["train"].filter(
    filter, fn_kwargs={"uniques": uniques}, num_proc=64
)
print(f"Size of filtered dataset train: {len(dataset_train_deduplicated)}")

# VALIDATION SPLIT DEDUPLICATION

print(f"Size of original dataset valid: {len(dataset['validation'])}")

dataset["validation"] = dataset["validation"].map(get_hash, num_proc=96)

# Deduplicate hashes
uniques = set(dataset["validation"].unique("hash"))
frac = len(uniques) / len(dataset["validation"])
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data
dataset_valid_deduplicated = dataset["validation"].filter(
    filter, fn_kwargs={"uniques": uniques}, num_proc=32
)
print(f"Size of filtered dataset valid: {len(dataset_valid_deduplicated)}")

# SAVE DEDUPLICATED DATASET
dataset_train_deduplicated = dataset_train_deduplicated.remove_columns(["hash"])
dataset_valid_deduplicated = dataset_valid_deduplicated.remove_columns(["hash"])

fi_mc4_cleaned = datasets.DatasetDict()
fi_mc4_cleaned["train"] = dataset_train_deduplicated
fi_mc4_cleaned["validation"] = dataset_valid_deduplicated

fi_mc4_cleaned.save_to_disk("/researchdisk/mc4_3.1.0_fi_cleaned", num_proc=32)
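The deduplication above keeps only the first occurrence of each text hash: the first time a hash is seen it is removed from the uniques set, so later rows carrying the same hash fail the check. A tiny single-process sketch of the same idea, on made-up in-memory data rather than the real dataset:

import datasets

ds = datasets.Dataset.from_list(
    [{"text": "sama rivi"}, {"text": "sama rivi"}, {"text": "eri rivi"}]
)
ds = ds.map(lambda ex: {"hash": hash(ex["text"])})

uniques = set(ds.unique("hash"))


def keep_first(example, uniques):
    # True only for the first row carrying a given hash.
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    return False


deduped = ds.filter(keep_first, fn_kwargs={"uniques": uniques})  # single process
print(len(deduped))  # 2: one copy of "sama rivi" plus "eri rivi"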
filter_stats_funcs.py
ADDED
@@ -0,0 +1,62 @@
import re
import fasttext
from functools import reduce

FASTTEXT_MODEL_PATH = "/researchdisk/lid.176.bin"
fasttext_model = fasttext.load_model(FASTTEXT_MODEL_PATH)


def filter_stats(batch):
    # Compute the statistics used for filtering; applied per example (batched=False in clean_data.py).
    filter_stats_funcs = [
        _count_alphabet,
        _count_numbers,
        _count_upper,
        _count_str_len,
        _predict_lang,
        _calculate_alphabet_ratio,
        _calculate_number_ratio,
        _calculate_upper_ratio,
    ]
    return reduce(lambda x, y: y(x), filter_stats_funcs, batch)


def _count_alphabet(batch):
    batch["alphabet_len"] = len(re.findall(r"[äÄöÖåÅa-zA-Z]", batch["text"]))
    return batch


def _count_numbers(batch):
    batch["number_len"] = len(re.findall(r"[0-9]", batch["text"]))
    return batch


def _count_upper(batch):
    batch["upper_len"] = len(re.findall(r"[ÄÖÅA-Z]", batch["text"]))
    return batch


def _count_str_len(batch):
    batch["total_len"] = len(batch["text"])
    return batch


def _predict_lang(batch):
    pred = fasttext_model.predict(batch["text"])
    batch["predicted_lang"] = pred[0][0]
    batch["predicted_lang_percentage"] = float(pred[1][0])
    return batch


def _calculate_alphabet_ratio(batch):
    batch["alphabet_ratio"] = int(batch["alphabet_len"]) / int(batch["total_len"])
    return batch


def _calculate_number_ratio(batch):
    batch["number_ratio"] = int(batch["number_len"]) / int(batch["total_len"])
    return batch


def _calculate_upper_ratio(batch):
    batch["upper_ratio"] = int(batch["upper_len"]) / int(batch["total_len"])
    return batch
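For reference, the fastText language-identification model returns label/probability tuples, which is why _predict_lang indexes pred[0][0] and pred[1][0]. A short sketch of that call, assuming the lid.176.bin model has been downloaded to the working directory:

import fasttext

# Assumes the fastText LID model is available locally, e.g. downloaded from
# https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.bin
model = fasttext.load_model("lid.176.bin")

labels, probs = model.predict("Tämä on suomenkielinen lause.")
print(labels[0])        # e.g. "__label__fi"
print(float(probs[0]))  # confidence for the top label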
jq_process_loaded_mc4_files.sh
ADDED
@@ -0,0 +1,30 @@
#!/bin/bash

# Set the source directory containing the JSON files
source_dir="multilingual"

# Set the destination directory to save the modified JSON files
destination_dir="fi_processed"

# Create the destination directory if it doesn't exist
mkdir -p "$destination_dir"

# Process each train split JSON file in the source directory
for file in "$source_dir"/c4-fi.*.json; do
    # Extract the filename without the directory path and extension
    filename=$(basename -- "$file")
    filename="${filename%.*}"

    # Remove the "lang" key with jq because it caused errors when loading into Hugging Face datasets
    jq -c 'del(.lang)' "$file" > "$destination_dir/$filename.json"
done

# Process each validation split JSON file in the source directory
for file in "$source_dir"/c4-fi-validation*.json; do
    # Extract the filename without the directory path and extension
    filename=$(basename -- "$file")
    filename="${filename%.*}"

    # Remove the "lang" key with jq because it caused errors when loading into Hugging Face datasets
    jq -c 'del(.lang)' "$file" > "$destination_dir/$filename.json"
done
kenlm_5gram_fi.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:24509a40ab1450810907aad8d803c43073ab1b7773991f2cf4973648fe380eac
size 6123664435
load_mc4_3.1.0_fi.sh
ADDED
@@ -0,0 +1,6 @@
GIT_LFS_SKIP_SMUDGE=1 git clone --depth 1 --branch mC4_3.1.0 https://huggingface.co/datasets/allenai/c4
cd c4
git lfs pull --include "multilingual/c4-fi.*.json.gz"
git lfs pull --include "multilingual/c4-fi-validation*.json.gz"
gunzip multilingual/c4-fi.*.json.gz
gunzip multilingual/c4-fi-validation*.json.gz