# kowiki.py
import os
import re
import xml.etree.ElementTree as ET

import requests

import datasets
from datasets import load_dataset

# Raw Korean Wikipedia XML dumps, one dump date per config.
XML_FILES = [
    ['kowiki-20230420-pages-articles.xml'],
    ['kowiki-20230501-pages-articles.xml'],
    ['kowiki-20230520-pages-articles.xml'],
    ['kowiki-20230601-pages-articles.xml'],
    ['kowiki-20230620-pages-articles.xml'],
    ['kowiki-20230701-pages-articles.xml'],
    ['kowiki-20230720-pages-articles.xml'],
    ['kowiki-20230801-pages-articles.xml'],
    ['kowiki-20230820-pages-articles.xml'],
    ['kowiki-20230901-pages-articles.xml'],
    ['kowiki-20231101-pages-articles-multistream.xml'],
]
# Pre-processed parquet shards (10 shards per dump date).
PROCESSED_FILES = [
    [f"20230420/train-{i:05d}-of-00010.parquet" for i in range(10)],
    [f"20230501/train-{i:05d}-of-00010.parquet" for i in range(10)],
    [f"20230520/train-{i:05d}-of-00010.parquet" for i in range(10)],
    [f"20230601/train-{i:05d}-of-00010.parquet" for i in range(10)],
    [f"20230620/train-{i:05d}-of-00010.parquet" for i in range(10)],
    [f"20230701/train-{i:05d}-of-00010.parquet" for i in range(10)],
    [f"20230720/train-{i:05d}-of-00010.parquet" for i in range(10)],
    [f"20230801/train-{i:05d}-of-00010.parquet" for i in range(10)],
]


def take_behind_linked_text(text):
    """Replace piped wiki links ``[[target|label]]`` with their label."""
    _PATTERN = r"\[\[[^\[\]\|]+\|([^\[\]]+)\]\]"
    result = ""
    while True:
        result = re.sub(_PATTERN, "\\1", text)
        if result == text:
            break
        text = result
    return text


def remove_linked_text(text):
    """Unwrap plain wiki links ``[[target]]`` to their target text."""
    result = ""
    while True:
        result = re.sub(r"\[\[([^\[\]\|]+)\]\]", "\\1", text)
        if result == text:
            break
        text = result
    return text
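
# Illustrative sketch (not part of the loading script): expected behavior of the
# two link helpers above; the sample strings are assumptions for demonstration only.
#
#     take_behind_linked_text("[[๋Œ€ํ•œ๋ฏผ๊ตญ|ํ•œ๊ตญ]]์˜ ์ˆ˜๋„")  ->  "ํ•œ๊ตญ์˜ ์ˆ˜๋„"
#     remove_linked_text("[[์„œ์šธ]] ํŠน๋ณ„์‹œ")              ->  "์„œ์šธ ํŠน๋ณ„์‹œ"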


def remove_attribute_in_table(text):
    """Strip table attributes (colors, cell spans, widths) from wiki tables."""
    text = re.sub(r"<bgcolor=#[^>]+>", "", text)
    text = re.sub(r"<-[0-9]>", "", text)
    text = re.sub(r"\|\|<table[^\n]+\n", "", text)
    text = re.sub(r"<tablewidth\=[^>]+>", "", text)
    text = re.sub(r"<width\=[^>]+>", "", text)
    text = re.sub(r"(?<=์ฝ”๋ฉ˜ํŠธ\-)\|\|(?=\n)", "", text)
    return text


def replace_link(text):
    """Replace embedded youtube macros with a plain placeholder."""
    text = re.sub(r"\[youtube\([^\]]+\)\]", "[YOUTUBE LINK]", text)
    return text


def remove_double_bracket_text(text):
    """Unwrap or drop ``{{...}}`` template markup, keeping the readable part."""
    text = re.sub(r"\{\{([^|\{\}]+)\}\}", "\\1", text)
    text = re.sub(r"\|style[^\}]+(\}\})", "}}", text)
    text = re.sub(r"\{\{ll?ang\|[a-z]+\|([^\{\}]+)\}\}", "\\1", text)
    text = re.sub(r"\{\{[^\=]+\|([^\=\{\}]+)\}\}", "\\1", text)
    # Flatten the first infobox-like template ({{name\n|key=value ...}}) into a
    # bracketed name followed by its non-empty values.
    matched = re.search(r"{{([^\|\{\}]+)\n\|([^\}]+)\}\}", text)
    if matched is not None:
        s, e = matched.span()
        values = matched.group(2).split("\n|")
        text = text[:s] + f"[{matched.group(1)}]" + ", ".join(
            [v for v in values if v.split("=")[-1].strip() not in ("", "๊ฐ’์ฐพ๊ธฐ")]) + "\n" + text[e:]
    text = re.sub(r"\{\{๋ณดํ˜ธ ๋ฌธ์„œ\|ํฌ๊ธฐ\=์ž‘๊ฒŒ\}\}", "", text)
    text = re.sub(r"{{๋งํ’์„ \|1\=NUMBER+[^}]+\}\}", "<NUMBER>", text)
    return text
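
# Illustrative sketch (not part of the loading script): template handling by
# remove_double_bracket_text; the inputs are assumptions for demonstration only.
#
#     remove_double_bracket_text("{{lang|en|Hello}}")  ->  "Hello"
#     remove_double_bracket_text("{{๊ฐ๊ด€}}")            ->  "๊ฐ๊ด€"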


def process_text(text: str):
    """Clean a raw wiki markup string into (mostly) plain text."""
    if not isinstance(text, str):
        return ""
    text = text.strip()
    # Drop HTML-style tags.
    text = re.sub(r"<[^>]+>", "", text)
    # Temporarily unbalance file links ([[ํŒŒ์ผ:...]]) so the link unwrapping below
    # leaves them alone, then restore them afterwards.
    text = re.sub(r"\[\[(ํŒŒ์ผ:[^\]]+)", "[\\1", text)
    text = remove_linked_text(text)
    text = take_behind_linked_text(text)
    text = text.replace("[ํŒŒ์ผ:", "[[ํŒŒ์ผ:")
    # Remove bold markup, link macros, table attributes, and templates.
    text = re.sub("'''", "", text)
    text = replace_link(text)
    text = remove_attribute_in_table(text)
    text = remove_double_bracket_text(text)
    text = re.sub(r"[0-9]+px\|sub\|link[^\n]+", "", text)
    text = text.replace("\n]]\n", "\n")
    text = re.sub(r"์„ฌ๋„ค์ผ\|[^\n]+\|([^\n]+)", "\\1", text)
    # Collapse repeated whitespace and normalize quotes and brackets.
    text = re.sub(r"\n{2,}", "\n\n", text)
    text = re.sub(r"\t{2,}", "\t\t", text)
    text = re.sub("[โ€œโ€]", '"', text)
    text = re.sub("[โ€˜โ€™]", "'", text)
    text = text.replace("ใ€Š", "<<").replace("ใ€‹", ">>")
    return text.strip()
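
# Illustrative sketch (not part of the loading script): an end-to-end example of
# process_text on a made-up snippet; the input is an assumption for demonstration.
#
#     process_text("'''์„œ์šธ'''์€ [[๋Œ€ํ•œ๋ฏผ๊ตญ|ํ•œ๊ตญ]]์˜ ์ˆ˜๋„์ด๋‹ค.")
#     # -> "์„œ์šธ์€ ํ•œ๊ตญ์˜ ์ˆ˜๋„์ด๋‹ค."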


class WikiConfig(datasets.BuilderConfig):
    """BuilderConfig for Wikibuilder."""

    def __init__(self, name, data_url, data_dir, citation, **kwargs):
        """BuilderConfig for Wikibuilder.

        Args:
            name: `string`, name of the dataset
            data_url: `string`, url to the dataset
            data_dir: `string`, directory to which the data is downloaded and extracted
            citation: `string`, citation for the dataset
            **kwargs: keyword arguments forwarded to super.
        """
        super(WikiConfig, self).__init__(
            name=name,
            version=datasets.Version("1.0.0", ""),
            description="Wikipedia dataset",
            **kwargs,
        )
        self.data_url = data_url
        self.data_dir = data_dir
        self.citation = citation


class KoWiki(datasets.GeneratorBasedBuilder):
    """KoWiki: Korean Wikipedia"""

    BUILDER_CONFIGS = [
        # Raw-dump configs, named by dump date (e.g. "20230801").
        WikiConfig(
            name=name[0].split("-")[1],
            data_url=[f"https://huggingface.co/datasets/psyche/kowiki/resolve/main/data/{n}" for n in name],
            data_dir="data",
            citation="",
        )
        for name in XML_FILES
    ] + [
        # Pre-processed configs, named "<dump date>.process" (e.g. "20230801.process").
        WikiConfig(
            name=os.path.dirname(name[0]) + ".process",
            data_url=[f"https://huggingface.co/datasets/psyche/kowiki/resolve/main/data/{n}" for n in name],
            data_dir=name,
            citation="",
        )
        for name in PROCESSED_FILES
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description="Korean Wikipedia",
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://dumps.wikimedia.org/kowiki/",
            citation=self.config.citation,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        data_files = dl_manager.download_and_extract(self.config.data_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_files": data_files},
            )
        ]

    def _generate_examples(self, data_files):
        n = 0
        if self.config.name.endswith(".process"):
            # Pre-processed configs: the parquet shards already contain
            # id/title/text, so just stream them through.
            _dataset = load_dataset(
                "parquet",
                data_files={"train": data_files},
                token=self.use_auth_token,
                split="train",
                streaming=True,
            )
            for d in _dataset:
                yield n, d
                n += 1
        else:
            # Raw-dump configs: parse the XML dump incrementally and emit one
            # example per <page> element.
            output = {}
            for data_file in data_files:
                if data_file.startswith(("https://", "http://")):
                    data_file = requests.get(data_file, stream=True).raw
                for event, node in ET.iterparse(data_file):
                    tag_name = node.tag
                    if tag_name.endswith("title"):
                        output["title"] = node.text
                    elif tag_name.endswith("text"):
                        output["text"] = node.text
                    elif tag_name.endswith("page"):
                        output["id"] = f"{n:08d}"
                        yield n, output
                        output = {}
                        n += 1
                        # Free the parsed page element to keep memory bounded
                        # on large dumps.
                        node.clear()
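

# Minimal usage sketch (an assumption, not part of the loading script): loading a
# raw-dump config and a pre-processed config built by this script. Recent versions
# of `datasets` may additionally require trust_remote_code=True for script-based
# datasets.
#
#     from datasets import load_dataset
#
#     raw = load_dataset("psyche/kowiki", "20230801", split="train")
#     processed = load_dataset("psyche/kowiki", "20230801.process", split="train")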