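"""Hugging Face `datasets` loading script for psyche/kowiki (Korean Wikipedia).

Each builder config either parses a raw MediaWiki XML dump for a given dump
date, or streams pre-processed parquet shards (configs ending in ".process").
"""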
from datasets import load_dataset
import datasets
import xml.etree.ElementTree as ET
import re
import requests
import os
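
# Raw kowiki XML dumps, one config per dump date. Each entry is a list so a
# single config can span multiple files.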
XML_FILES = [
    ['kowiki-20230420-pages-articles.xml'],
    ['kowiki-20230501-pages-articles.xml'],
    ['kowiki-20230520-pages-articles.xml'],
    ['kowiki-20230601-pages-articles.xml'],
    ['kowiki-20230620-pages-articles.xml'],
    ['kowiki-20230701-pages-articles.xml'],
    ['kowiki-20230720-pages-articles.xml'],
    ['kowiki-20230801-pages-articles.xml'],
    ['kowiki-20230820-pages-articles.xml'],
    ['kowiki-20230901-pages-articles.xml'],
    ['kowiki-20231101-pages-articles-multistream.xml'],
]
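
# Pre-processed shards: ten parquet files per dump date.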
PROCESSED_FILES = [
    [f"20230420/train-{i:05d}-of-00010.parquet" for i in range(10)],
    [f"20230501/train-{i:05d}-of-00010.parquet" for i in range(10)],
    [f"20230520/train-{i:05d}-of-00010.parquet" for i in range(10)],
    [f"20230601/train-{i:05d}-of-00010.parquet" for i in range(10)],
    [f"20230620/train-{i:05d}-of-00010.parquet" for i in range(10)],
    [f"20230701/train-{i:05d}-of-00010.parquet" for i in range(10)],
    [f"20230720/train-{i:05d}-of-00010.parquet" for i in range(10)],
    [f"20230801/train-{i:05d}-of-00010.parquet" for i in range(10)],
]
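
# --- Wiki-markup cleaning helpers -------------------------------------------
# These helpers are defined here but not called by the loader itself;
# presumably they were used to build the "*.process" parquet shards.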
def take_behind_linked_text(text):
    """Replace piped wiki links `[[target|label]]` with just the label."""
    _PATTERN = r"\[\[[^\[\]\|]+\|([^\[\]]+)\]\]"
    # Repeat until a fixed point is reached so nested links are fully unwrapped.
    while True:
        result = re.sub(_PATTERN, r"\1", text)
        if result == text:
            break
        text = result
    return text
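
# Example: take_behind_linked_text("[[대한민국|한국]]") -> "한국"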
def remove_linked_text(text):
    """Unwrap plain wiki links `[[target]]`, keeping the target text."""
    while True:
        result = re.sub(r"\[\[([^\[\]\|]+)\]\]", r"\1", text)
        if result == text:
            break
        text = result
    return text
def remove_attribute_in_table(text):
    """Strip table-attribute markup (colors, column spans, widths)."""
    text = re.sub(r"<bgcolor=#[^>]+>", "", text)
    text = re.sub(r"<-[0-9]>", "", text)
    text = re.sub(r"\|\|<table[^\n]+\n", "", text)
    text = re.sub(r"<tablewidth=[^>]+>", "", text)
    text = re.sub(r"<width=[^>]+>", "", text)
    text = re.sub(r"(?<=코멘트-)\|\|(?=\n)", "", text)
    return text
def replace_link(text):
    """Replace embedded youtube macros with a plain-text placeholder."""
    text = re.sub(r"\[youtube\([^\]]+\)\]", "[YOUTUBE LINK]", text)
    return text
def remove_double_bracket_text(text):
    """Flatten `{{...}}` template markup into readable text."""
    text = re.sub(r"\{\{([^|\{\}]+)\}\}", r"\1", text)
    text = re.sub(r"\|style[^\}]+(\}\})", "}}", text)
    # {{lang|xx|...}} / {{llang|xx|...}}: keep only the quoted text.
    text = re.sub(r"\{\{ll?ang\|[a-z]+\|([^\{\}]+)\}\}", r"\1", text)
    text = re.sub(r"\{\{[^=]+\|([^=\{\}]+)\}\}", r"\1", text)
    # Collapse an infobox-like block `{{Name\n|k=v\n|k=v}}` into "[Name]v, v",
    # dropping empty values and "값찾기" (value-lookup) placeholders.
    matched = re.search(r"\{\{([^\|\{\}]+)\n\|([^\}]+)\}\}", text)
    if matched is not None:
        s, e = matched.span()
        values = matched.group(2).split("\n|")
        text = text[:s] + f"[{matched.group(1)}]" + ", ".join(
            [v for v in values if v.split("=")[-1].strip() not in ("", "값찾기")]
        ) + "\n" + text[e:]
text = re.sub("\{\{๋ณดํธ ๋ฌธ์\|ํฌ๊ธฐ\=์๊ฒ\}\}", "", text)
text = re.sub("{{๋งํ์ \|1\=NUMBER+[^}]+\}\}", "<NUMBER>", text)
return text
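
# Example: remove_double_bracket_text("{{llang|en|Hello}}") -> "Hello"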
def process_text(text: str):
    """Clean raw wiki markup down to plain text."""
    if not isinstance(text, str):
        return ""
    text = text.strip()
    text = re.sub(r"<[^>]+>", "", text)  # drop HTML-style tags
    # Temporarily unbalance file links so the link-unwrapping passes skip them.
    text = re.sub(r"\[\[(파일:[^\]]+)", r"[\1", text)
    text = remove_linked_text(text)
    text = take_behind_linked_text(text)
    text = text.replace("[파일:", "[[파일:")  # restore file links
    text = re.sub("'''", "", text)  # bold markup
    text = replace_link(text)
    text = remove_attribute_in_table(text)
    text = remove_double_bracket_text(text)
    text = re.sub(r"[0-9]+px\|sub\|link[^\n]+", "", text)  # image attributes
    text = text.replace("\n]]\n", "\n")
    text = re.sub(r"섬네일\|[^\n]+\|([^\n]+)", r"\1", text)  # keep thumbnail captions
    text = re.sub("\n{2,}", "\n\n", text)  # collapse runs of blank lines
    text = re.sub("\t{2,}", "\t\t", text)
    text = re.sub("[“”]", '"', text)  # normalize curly quotes
    text = re.sub("[‘’]", "'", text)
    text = text.replace("《", "<<").replace("》", ">>")
    return text.strip()
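
# Example (traced through the passes above): piped links keep their label and
# bold markup is dropped:
#   process_text("[[대한민국|한국]]은 '''나라'''이다.")  ->  "한국은 나라이다."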
class WikiConfig(datasets.BuilderConfig):
    """BuilderConfig for KoWiki."""

    def __init__(self, name, data_url, data_dir, citation, **kwargs):
        """BuilderConfig for KoWiki.

        Args:
            name: `string`, name of the dataset
            data_url: `string`, url to the dataset
            data_dir: `string`, directory to which the data is downloaded and extracted
            citation: `string`, citation for the dataset
            **kwargs: keyword arguments forwarded to super.
        """
        super(WikiConfig, self).__init__(
            name=name,
            version=datasets.Version("1.0.0", ""),
            description="Wikipedia dataset",
            **kwargs,
        )
        self.data_url = data_url
        self.data_dir = data_dir
        self.citation = citation
class KoWiki(datasets.GeneratorBasedBuilder):
    """KoWiki: Korean Wikipedia"""

    BUILDER_CONFIGS = [
        # Raw XML dump configs, named after the dump date (e.g. "20230420").
        WikiConfig(
            name=name[0].split("-")[1],
            data_url=[f"https://huggingface.co/datasets/psyche/kowiki/resolve/main/data/{n}" for n in name],
            data_dir="data",
            citation="",
        )
        for name in XML_FILES
    ] + [
        # Pre-processed configs (e.g. "20230420.process") backed by parquet shards.
        WikiConfig(
            name=os.path.dirname(name[0]) + ".process",
            data_url=[f"https://huggingface.co/datasets/psyche/kowiki/resolve/main/data/{n}" for n in name],
            data_dir=name,
            citation="",
        )
        for name in PROCESSED_FILES
    ]
    def _info(self):
        return datasets.DatasetInfo(
            description="Korean Wikipedia",
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://dumps.wikimedia.org/kowiki/",
            citation=self.config.citation,
        )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        data_files = dl_manager.download_and_extract(self.config.data_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_files": data_files},
            )
        ]
    def _generate_examples(self, data_files):
        n = 0
        if self.config.name.endswith(".process"):
            # Pre-processed configs: stream the parquet shards as-is.
            _dataset = load_dataset(
                "parquet",
                data_files={"train": data_files},
                token=self.use_auth_token,
                split="train",
                streaming=True,
            )
            for d in _dataset:
                yield n, d
                n += 1
        else:
            # Raw configs: walk the XML dump and emit one example per <page>.
            output = {}
            for data_file in data_files:
                if data_file.startswith(("https://", "http://")):
                    data_file = requests.get(data_file, stream=True).raw
                for event, node in ET.iterparse(data_file):
                    tag_name = node.tag
                    if tag_name.endswith("title"):
                        output["title"] = node.text
                    elif tag_name.endswith("text"):
                        output["text"] = node.text
                    elif tag_name.endswith("page"):
                        output["id"] = f"{n:08d}"
                        yield n, output
                        output = {}
                        n += 1
                        node.clear()  # free the parsed <page> subtree to keep memory bounded
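
if __name__ == "__main__":
    # Minimal smoke test, assuming a `datasets` version that still supports
    # script-based loading and network access to the Hub: stream a few
    # examples from one pre-processed config.
    ds = load_dataset(__file__, "20230801.process", split="train", streaming=True)
    for i, example in enumerate(ds):
        print(example["id"], example["title"])
        if i >= 2:
            break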