JulesBelveze commited on
Commit
98601fe
•
1 Parent(s): ca10947

Attempt to format according to the `datasets` library conventions

Browse files
dataset_infos.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"all": {"description": "The `tldr_news` dataset was constructed by collecting a daily tech newsletter (available at \nhttps://tldr.tech/newsletter). Then for every piece of news, the \"headline\" and its corresponding \"content\" were \ncollected. Such a dataset can be used to train a model to generate a headline from a input piece of text.\n", "citation": "", "homepage": "", "license": "", "features": {"headline": {"dtype": "string", "id": null, "_type": "Value"}, "content": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "tldr_news", "config_name": "all", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3880430, "num_examples": 7055, "dataset_name": "tldr_news"}, "test": {"name": "test", "num_bytes": 429049, "num_examples": 784, "dataset_name": "tldr_news"}}, "download_checksums": {"https://github.com/JulesBelveze/tldr_news/blob/main/1.1.0.tar.gz?raw=true": {"num_bytes": 1663243, "checksum": "2cdb7b21a2b06af5e1318c0155a20ae652aa418e0d599ff03f73e633b5ada052"}}, "download_size": 1663243, "post_processing_size": null, "dataset_size": 4309479, "size_in_bytes": 5972722}}
dummy/all/1.1.0/dummy_data.zip ADDED
Binary file (4.02 kB). View file
 
dummy/all/1.1.0/dummy_data/.DS_Store ADDED
Binary file (6.15 kB). View file
 
dummy/all/1.1.0/dummy_data/1.1.0.tar.gz%3Fraw%3Dtrue/.DS_Store ADDED
Binary file (6.15 kB). View file
 
dummy/all/{test.jsonl → 1.1.0/dummy_data/1.1.0.tar.gz%3Fraw%3Dtrue/1.1.0/test.json} RENAMED
File without changes
dummy/all/{train.jsonl → 1.1.0/dummy_data/1.1.0.tar.gz%3Fraw%3Dtrue/1.1.0/train.json} RENAMED
File without changes
test.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
tldr_news.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
import json
import os

import datasets


# Human-readable summary shown in the dataset card / `DatasetInfo.description`.
_DESCRIPTION = """\
The `tldr_news` dataset was constructed by collecting a daily tech newsletter (available at
https://tldr.tech/newsletter). Then for every piece of news, the "headline" and its corresponding "content" were
collected. Such a dataset can be used to train a model to generate a headline from a input piece of text.
"""

# TODO: Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
# Maps config name -> downloadable archive; "?raw=true" makes GitHub serve the
# tarball bytes instead of the HTML blob page.
_URLS = {"all": "https://github.com/JulesBelveze/tldr_news/blob/main/1.1.0.tar.gz?raw=true"}
31
+
32
+
33
class TLDRNewsConfig(datasets.BuilderConfig):
    """BuilderConfig for TLDRNews.

    Thin wrapper around ``datasets.BuilderConfig``; it exists so that
    config-specific options can be added later without changing call sites.
    """

    def __init__(self, **kwargs):
        """BuilderConfig for TLDRNews.

        Args:
            **kwargs: keyword arguments forwarded to super
                (e.g. ``name``, ``version``, ``description``).
        """
        # Zero-argument super() — the explicit (class, self) pair of the
        # original call is a Python 2 relic; this file is Python 3 only.
        super().__init__(**kwargs)
42
+
43
+
44
class TLDRNewsDataset(datasets.GeneratorBasedBuilder):
    """Dataset containing headline & content of pieces of news from the tldr tech newsletter."""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        TLDRNewsConfig(name="all", version=VERSION, description="This contains all the existing newsletter"),
    ]

    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        """Build the dataset metadata: description plus the two string features."""
        # Every example is a plain (headline, content) pair of strings.
        schema = datasets.Features(
            {
                "headline": datasets.Value("string"),
                "content": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(description=_DESCRIPTION, features=schema)

    def _split_generators(self, dl_manager):
        """Download and extract the archive, then point each split at its JSON file."""
        archive_dir = dl_manager.download_and_extract(_URLS[self.config.name])
        # The extracted archive nests its files under a version-named folder (e.g. "1.1.0").
        root = os.path.join(archive_dir, str(self.config.version))
        split_specs = [
            (datasets.Split.TRAIN, "train.json", "train"),
            (datasets.Split.TEST, "test.json", "test"),
        ]
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={"filepath": os.path.join(root, filename), "split": tag},
            )
            for split_name, filename, tag in split_specs
        ]

    def _generate_examples(self, filepath, split):
        """Yield ``(index, example)`` pairs read from one JSON split file.

        ``split`` is forwarded by ``gen_kwargs`` but not needed here: both
        split files share the same schema.
        """
        with open(filepath, encoding="utf-8") as handle:
            records = json.load(handle)
        for idx, record in enumerate(records):
            yield idx, {"headline": record["headline"], "content": record["content"]}
train.jsonl DELETED
The diff for this file is too large to render. See raw diff