holylovenia committed
Commit: 8ad213d
Parent(s): 0592a4b
Upload mc4_indo.py with huggingface_hub

mc4_indo.py (ADDED, +146 -0)
import gzip
import json
from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_DATASETNAME = "mc4_indo"
_DESCRIPTION = """\
A thoroughly cleaned version of the Indonesian split of mC4, the multilingual colossal, cleaned version of Common Crawl's web crawl corpus. This portion contains the Indonesian-language content extracted and processed from the larger mC4 dataset. The extraction and cleaning were carried out by AllenAI and resulted in a curated collection of Indonesian-language data. For more information about the original mC4 dataset and its preparation, see https://huggingface.co/datasets/allenai/c4.
"""

_HOMEPAGE = "https://huggingface.co/datasets/indonesian-nlp/mc4-id"
_LICENSE = Licenses.ODC_BY.value

_LANGUAGES = ["ind"]

_CITATION = """
@inproceedings{xue-etal-2021-mt5,
    title = "m{T}5: A Massively Multilingual Pre-trained Text-to-Text Transformer",
    author = "Xue, Linting and
      Constant, Noah and
      Roberts, Adam and
      Kale, Mihir and
      Al-Rfou, Rami and
      Siddhant, Aditya and
      Barua, Aditya and
      Raffel, Colin",
    booktitle = "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
    month = jun,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.naacl-main.41",
    doi = "10.18653/v1/2021.naacl-main.41",
    pages = "483--498",
}
"""

# URL template for the gzipped JSON-lines shards; e.g. train shard 0 resolves to
# https://huggingface.co/datasets/munggok/mc4-id/resolve/main/mc4-id-filter/c4-id.tfrecord-00000-of-01024.json.gz
_URLS = {"raw": "https://huggingface.co/datasets/munggok/mc4-id/resolve/main/mc4-id-filter/c4-id{split_suffix}.tfrecord-{index:05d}-of-{n_shards:05d}.json.gz"}

_CONFIGS = {"full": {"train": 1016, "validation": 8}}
# The entire dataset is about 150 GB. Adjust the number of gzipped JSON shards
# to download per split here, e.g. for a small sample:
# _CONFIGS = {"full": {"train": 1, "validation": 1}}

_LOCAL = False

_SUPPORTED_TASKS = [Tasks.SELF_SUPERVISED_PRETRAINING]
_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"


class MC4Indo(datasets.GeneratorBasedBuilder):
    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description="mc4_indo source schema",
            schema="source",
            subset_id="mc4_indo",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_ssp",
            version=SEACROWD_VERSION,
            description="mc4_indo SEACrowd schema",
            schema="seacrowd_ssp",
            subset_id="mc4_indo",
        ),
    ]

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features({"text": datasets.Value("string"), "timestamp": datasets.Value("string"), "url": datasets.Value("string")})
        elif self.config.schema == "seacrowd_ssp":
            features = schemas.self_supervised_pretraining.features
        else:
            raise ValueError(f"Unexpected schema: {self.config.schema}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        # Build the shard URLs for each split from the template in _URLS.
        data_urls = {}
        for split in ["train", "validation"]:
            data_urls[split] = [
                _URLS["raw"].format(
                    split_suffix="-validation" if split == "validation" else "",
                    index=index,
                    n_shards=8 if split == "validation" else 1024,
                )
                for index in range(_CONFIGS["full"][split])
            ]
        train_downloaded_files = dl_manager.download(data_urls["train"])
        validation_downloaded_files = dl_manager.download(data_urls["validation"])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepaths": train_downloaded_files,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepaths": validation_downloaded_files,
                    "split": "dev",
                },
            ),
        ]

    def _generate_examples(self, filepaths: List[Path], split: str) -> Tuple[int, Dict]:
        # Each shard is a gzipped JSON-lines file with one example per line.
        id_ = 0
        for filepath in filepaths:
            with gzip.open(filepath, "rt", encoding="utf-8") as f:
                for line in f:
                    if line:
                        example = json.loads(line)

                        if self.config.schema == "source":
                            yield id_, example
                        elif self.config.schema == "seacrowd_ssp":
                            seacrowd_json = {
                                "id": str(id_),
                                "text": str(example["text"]),
                            }
                            yield id_, seacrowd_json

                        id_ += 1
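
For reference, a minimal usage sketch (an illustration, not part of the uploaded file): it loads this script locally with the Hugging Face datasets library, using the mc4_indo_seacrowd_ssp config defined in BUILDER_CONFIGS above. Recent datasets releases require trust_remote_code=True for script-based loaders.

import datasets

# Loads the SEACrowd ssp-schema config defined above. Downloading all shards
# is ~150 GB; shrink _CONFIGS in the script first if you only want a sample.
dset = datasets.load_dataset("mc4_indo.py", name="mc4_indo_seacrowd_ssp", trust_remote_code=True)
print(dset["train"][0])  # e.g. {"id": "0", "text": "..."}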