# GR_custom_dataset/GR_custom_dataset.py
import glob
import json
import os
import re
from pathlib import Path

import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{GR_custom_dataset,
  author = {Test},
  title = {Test},
  year = {2024},
  url = {test.com},
}
"""

_DESCRIPTION = """\
Description
"""

_HOMEPAGE = ""

_LICENSE = ""


class GRCustomDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for GRCustomDataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for GRCustomDataset.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(GRCustomDatasetConfig, self).__init__(**kwargs)


class GRCustomDataset(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("2.0.0")

    BUILDER_CONFIGS = [
        GRCustomDatasetConfig(
            name="stratified_articles_below_500",
            version=VERSION,
            description="Articles below 500 words",
        ),
        GRCustomDatasetConfig(
            name="stratified_articles_below_1000",
            version=VERSION,
            description="Articles below 1000 words",
        ),
        GRCustomDatasetConfig(
            name="stratified_articles_below_1000_v2",
            version=VERSION,
            description="Articles below 1000 words v2",
        ),
    ]
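
    # Note: each config name above is also the name of the subdirectory expected
    # under `data_dir` (see `_split_generators` below), e.g.
    # `<data_dir>/stratified_articles_below_500/train`.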

    @property
    def manual_download_instructions(self):
        return """\
        You need to prepare the data manually: place the per-config JSON files under
        a local directory and pass that directory via `data_dir` to
        `datasets.load_dataset`.
        """

    def _info(self):
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "url": datasets.Value("string"),
                "clean_article": datasets.Value("string"),
                "clean_summary": datasets.Value("string"),
                "extractive_summary": datasets.Value("string"),
                "word_count": datasets.Value("int64"),
                "category": datasets.Value("int64"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
        if not os.path.exists(data_dir):
            raise FileNotFoundError(
                f"{data_dir} does not exist. Make sure you insert a manual dir via "
                f"`datasets.load_dataset('GR_custom_dataset', '{self.config.name}', data_dir=...)`."
            )
        split_generators = [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "article_dir": os.path.join(data_dir, f"{self.config.name}/validation"),
                    "split": "dev",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "article_dir": os.path.join(data_dir, f"{self.config.name}/test"),
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "article_dir": os.path.join(data_dir, f"{self.config.name}/train"),
                    "split": "train",
                },
            ),
        ]
        return split_generators
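
    # Expected manual-data layout, inferred from the paths built above and the glob
    # in `_generate_examples` (the file names below are illustrative; any numeric
    # JSON file names work, since files are sorted by their integer stem):
    #
    #   <data_dir>/<config_name>/train/1.json
    #   <data_dir>/<config_name>/validation/2.json
    #   <data_dir>/<config_name>/test/3.json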

    def _generate_examples(self, article_dir, split):
        detokenizers = [
            [re.compile(r"([Ll])iputan6 . com "), r"\1iputan6.com"],
            [re.compile(r" ([.,:])"), r"\1"],
            [re.compile(r"\( ([^)]+) \)"), r"(\1)"],
            [re.compile(r"\" ([^\"]+) \""), r'"\1"'],
            [re.compile(r"\[ ([^]]+) ]"), r"[\1]"],
        ]
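        # Illustrative (hypothetical) input/output of the detokenizer patterns above:
        #   "Liputan6 . com , Jakarta : teks ( contoh ) ."
        #   -> "Liputan6.com, Jakarta: teks (contoh)."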
        logger.info("⏳ Generating %s examples from = %s", split, article_dir)
        guid = 0
        for path in sorted(
            glob.glob(os.path.join(article_dir, "**/*.json"), recursive=True), key=lambda p: int(Path(p).stem)
        ):
            with open(path, encoding="utf-8") as f:
                data = json.load(f)
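            # Assumed per-file JSON schema, inferred from the field accesses below:
            #   "id", "url", "word_count", "category": scalar values copied through as-is
            #   "clean_article", "clean_summary": lists of sentences, each a list of tokens
            #   "extractive_summary": list of sentence indices into "clean_article"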
            clean_article = " ".join([" ".join(i) for i in data["clean_article"]])
            for d in detokenizers:
                clean_article = d[0].sub(d[1], clean_article)
            clean_summary = " ".join([" ".join(i) for i in data["clean_summary"]])
            for d in detokenizers:
                clean_summary = d[0].sub(d[1], clean_summary)
            extractive_summary = " ".join([" ".join(data["clean_article"][i]) for i in data["extractive_summary"]])
            for d in detokenizers:
                extractive_summary = d[0].sub(d[1], extractive_summary)
            yield guid, {
                "id": str(data["id"]),
                "url": data["url"],
                "clean_article": clean_article,
                "clean_summary": clean_summary,
                "extractive_summary": extractive_summary,
                "word_count": data["word_count"],
                "category": data["category"],
            }
            guid += 1
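

if __name__ == "__main__":
    # Minimal usage sketch (not required by the loader itself): assumes the JSON
    # files have been prepared under /path/to/manual_data/<config_name>/... as
    # described above. The path and the chosen config name are illustrative.
    dataset = datasets.load_dataset(
        __file__,
        "stratified_articles_below_500",
        data_dir="/path/to/manual_data",
    )
    print(dataset)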