Datasets:

Languages:
English
Multilinguality:
monolingual
Size Categories:
100K<n<1M
Language Creators:
found
Annotations Creators:
no-annotation
Source Datasets:
original
Tags:
License:
albertvillanova HF staff committed on
Commit
b38f4eb
1 Parent(s): c90b8a6

Delete loading script

Browse files
Files changed (1) hide show
  1. cc_news.py +0 -114
cc_news.py DELETED
@@ -1,114 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # Lint as: python3
17
- """The CC-News dataset is based on Common Crawl News Dataset by Sebastian Nagel"""
18
-
19
- import json
20
- import os
21
- from fnmatch import fnmatch
22
-
23
- import datasets
24
-
25
-
26
# Module-level logger following the `datasets` library convention.
logger = datasets.logging.get_logger(__name__)


# Human-readable summary shown on the dataset page.
_DESCRIPTION = """\
CC-News containing news articles from news sites all over the world \
The data is available on AWS S3 in the Common Crawl bucket at /crawl-data/CC-NEWS/. \
This version of the dataset has 708241 articles. It represents a small portion of English \
language subset of the CC-News dataset created using news-please(Hamborg et al.,2017) to \
collect and extract English language portion of CC-News.
"""

# BibTeX entry for news-please, the crawler/extractor used to build this corpus.
_CITATION = """\
@InProceedings{Hamborg2017,
author = {Hamborg, Felix and Meuschke, Norman and Breitinger, Corinna and Gipp, Bela},
title = {news-please: A Generic News Crawler and Extractor},
year = {2017},
booktitle = {Proceedings of the 15th International Symposium of Information Science},
location = {Berlin},
doi = {10.5281/zenodo.4120316},
pages = {218--223},
month = {March}
}
"""
# Homepage announcing the underlying Common Crawl news dataset.
_PROJECT_URL = "https://commoncrawl.org/2016/10/news-dataset-available/"
# Repository-relative path to the prepared archive of news-please JSON articles.
_DOWNLOAD_URL = "data/cc_news.tar.gz"
51
-
52
-
53
class CCNewsConfig(datasets.BuilderConfig):
    """Configuration for the CC-News dataset builder."""

    def __init__(self, **kwargs):
        """Create a CC-News builder configuration.

        Args:
            **kwargs: keyword arguments passed through to ``datasets.BuilderConfig``.
        """
        # Pin the dataset version here; everything else is forwarded unchanged.
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
62
-
63
-
64
class CCNews(datasets.GeneratorBasedBuilder):
    """CC-News dataset builder.

    Streams news-please JSON article files out of a tar archive and exposes
    them as a single ``train`` split of string features.
    """

    BUILDER_CONFIGS = [
        CCNewsConfig(
            name="plain_text",
            description="Plain text",
        )
    ]

    # Output feature name -> key in the news-please article JSON.
    # Single source of truth for both the schema (_info) and extraction
    # (_generate_examples); order matches the original feature order.
    _FIELD_MAP = {
        "title": "title",
        "text": "maintext",
        "domain": "source_domain",
        "date": "date_publish",
        "description": "description",
        "url": "url",
        "image_url": "image_url",
    }

    def _info(self):
        """Return dataset metadata: features, homepage and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {feature: datasets.Value("string") for feature in self._FIELD_MAP}
            ),
            supervised_keys=None,
            homepage=_PROJECT_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the archive and expose a single TRAIN split streamed from it."""
        archive = dl_manager.download(_DOWNLOAD_URL)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"files": dl_manager.iter_archive(archive)},
            ),
        ]

    @staticmethod
    def _clean(value):
        """Strip surrounding whitespace; map a missing (None) value to ''."""
        return value.strip() if value is not None else ""

    def _generate_examples(self, files):
        """Yield ``(id, example)`` pairs for every ``*.json`` member of the archive.

        Args:
            files: iterable of ``(path, file_object)`` pairs as produced by
                ``dl_manager.iter_archive``.

        Yields:
            Tuple of a sequential integer id and a dict of the seven string
            features. A missing JSON key raises ``KeyError`` (unchanged from
            the original behavior); a key present with value ``None`` becomes
            the empty string.
        """
        id_ = 0
        for article_file_path, f in files:
            # Skip any non-article members of the tarball.
            if not fnmatch(os.path.basename(article_file_path), "*.json"):
                continue
            article = json.load(f)
            yield id_, {
                feature: self._clean(article[key]) for feature, key in self._FIELD_MAP.items()
            }
            # Ids are only advanced for yielded articles, matching the original.
            id_ += 1