# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""The CC-News dataset is based on Common Crawl News Dataset by Sebastian Nagel"""
import json
import os
import tarfile
from fnmatch import fnmatch

import datasets

def custom_iter_archive(path_or_buf, _filter=lambda x: True):
    """Iterate over a tar archive (path or file object), yielding
    (file_path, file_obj) pairs for regular members whose index passes `_filter`."""

    def _iter_archive(f):
        stream = tarfile.open(fileobj=f, mode="r|*")
        for i, tarinfo in enumerate(stream):
            # The index-based filter lets callers select a deterministic subset
            # of archive members (used below to carve out train/val/test splits).
            if not _filter(i):
                continue
            file_path = tarinfo.name
            if not tarinfo.isreg():
                continue
            if file_path is None:
                continue
            if os.path.basename(file_path).startswith(".") or os.path.basename(file_path).startswith("__"):
                # Skip hidden and special files.
                continue
            file_obj = stream.extractfile(tarinfo)
            yield file_path, file_obj
            # Clear the member cache so memory use stays flat while streaming.
            stream.members = []
        del stream

    if hasattr(path_or_buf, "read"):
        yield from _iter_archive(path_or_buf)
    else:
        with open(path_or_buf, "rb") as f:
            yield from _iter_archive(f)
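
# Illustrative usage of the iterator above (a sketch only; "cc_news.tar.gz" is a
# hypothetical local path, not something this script downloads for you):
#
#   for path, fobj in custom_iter_archive("cc_news.tar.gz", lambda i: i % 100 == 0):
#       print(path)  # every 100th member of the archive
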
logger = datasets.logging.get_logger(__name__)
_DESCRIPTION = """\
CC-News contains news articles from news sites all over the world. \
The data is available on AWS S3 in the Common Crawl bucket at /crawl-data/CC-NEWS/. \
This version of the dataset has 708,241 articles. It is a small portion of the \
English-language subset of CC-News, collected and extracted with news-please \
(Hamborg et al., 2017).
"""
_CITATION = """\
@InProceedings{Hamborg2017,
  author    = {Hamborg, Felix and Meuschke, Norman and Breitinger, Corinna and Gipp, Bela},
  title     = {news-please: A Generic News Crawler and Extractor},
  year      = {2017},
  booktitle = {Proceedings of the 15th International Symposium of Information Science},
  location  = {Berlin},
  doi       = {10.5281/zenodo.4120316},
  pages     = {218--223},
  month     = {March}
}
"""
_PROJECT_URL = "https://commoncrawl.org/2016/10/news-dataset-available/"
_DOWNLOAD_URL = "https://storage.googleapis.com/huggingface-nlp/datasets/cc_news/cc_news.tar.gz"

class CCNewsConfig(datasets.BuilderConfig):
    """BuilderConfig for CCNews."""

    def __init__(self, **kwargs):
        """BuilderConfig for CCNews.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(CCNewsConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)

class CCNews(datasets.GeneratorBasedBuilder):
    """CC-News dataset."""

    BUILDER_CONFIGS = [
        CCNewsConfig(
            name="plain_text",
            description="Plain text",
        ),
        CCNewsConfig(
            name="plain_text_sentences",
            description="Plain text (sentence level)",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_PROJECT_URL,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        archive = dl_manager.download(_DOWNLOAD_URL)
        # Deterministic 80/10/10 split by archive member index:
        # indexes ending in 0-7 go to train, 8 to validation, 9 to test.
        train_filter = lambda x: (x % 10) < 8
        val_filter = lambda x: (x % 10) == 8
        test_filter = lambda x: (x % 10) == 9
        level = "doc" if self.config.name == "plain_text" else "sentence"
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": custom_iter_archive(archive, train_filter), "level": level}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"files": custom_iter_archive(archive, val_filter), "level": level}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"files": custom_iter_archive(archive, test_filter), "level": level}),
        ]
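
    # A quick sanity check of the modulo split (illustrative, not part of the
    # loading logic): member indexes 0..9 map to splits as below.
    #
    #   >>> [("train" if i % 10 < 8 else "val" if i % 10 == 8 else "test") for i in range(10)]
    #   ['train', 'train', 'train', 'train', 'train', 'train', 'train', 'train', 'val', 'test']
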
    def _generate_examples(self, files, level):
        id_ = 0
        for article_file_path, f in files:
            if fnmatch(os.path.basename(article_file_path), "*.json"):
                article = json.load(f)
                maintext = article["maintext"].strip() if article["maintext"] is not None else ""
                if level == "sentence":
                    # One example per line of the article body; yield a fresh dict
                    # each time rather than mutating and re-yielding a shared one.
                    for sent in maintext.split("\n"):
                        yield id_, {"text": sent}
                        id_ += 1
                else:
                    # One example per article.
                    yield id_, {"text": maintext}
                    id_ += 1
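
# Usage sketch (assumes this file is saved locally as "ccnews_split.py" and that
# the download URL above is still reachable; the config names come from
# BUILDER_CONFIGS):
#
#   from datasets import load_dataset
#
#   docs = load_dataset("ccnews_split.py", "plain_text")
#   sents = load_dataset("ccnews_split.py", "plain_text_sentences")
#   print(docs["train"][0]["text"])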