system HF staff committed on
Commit
7d7bf66
0 Parent(s):

Update files from the datasets library (from 1.0.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

Files changed (4)
  1. .gitattributes +27 -0
  2. dataset_infos.json +1 -0
  3. dummy/1.0.0/dummy_data.zip +3 -0
  4. reddit.py +101 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"default": {"description": "\nThis corpus contains preprocessed posts from the Reddit dataset.\nThe dataset consists of 3,848,330 posts with an average length of 270 words for content,\nand 28 words for the summary.\n\nFeatures includes strings: author, body, normalizedBody, content, summary, subreddit, subreddit_id.\nContent is used as document and summary is used as summary.\n", "citation": "\n@inproceedings{volske-etal-2017-tl,\n title = \"{TL};{DR}: Mining {R}eddit to Learn Automatic Summarization\",\n author = {V{\"o}lske, Michael and\n Potthast, Martin and\n Syed, Shahbaz and\n Stein, Benno},\n booktitle = \"Proceedings of the Workshop on New Frontiers in Summarization\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W17-4508\",\n doi = \"10.18653/v1/W17-4508\",\n pages = \"59--63\",\n abstract = \"Recent advances in automatic text summarization have used deep neural networks to generate high-quality abstractive summaries, but the performance of these models strongly depends on large amounts of suitable training data. We propose a new method for mining social media for author-provided summaries, taking advantage of the common practice of appending a {``}TL;DR{''} to long posts. A case study using a large Reddit crawl yields the Webis-TLDR-17 dataset, complementing existing corpora primarily from the news genre. Our technique is likely applicable to other social media sites and general web crawls.\",\n}\n", "homepage": "https://github.com/webis-de/webis-tldr-17-corpus", "license": "", "features": {"author": {"dtype": "string", "id": null, "_type": "Value"}, "body": {"dtype": "string", "id": null, "_type": "Value"}, "normalizedBody": {"dtype": "string", "id": null, "_type": "Value"}, "subreddit": {"dtype": "string", "id": null, "_type": "Value"}, "subreddit_id": {"dtype": "string", "id": null, "_type": "Value"}, "id": {"dtype": "string", "id": null, "_type": "Value"}, "content": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "reddit", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 18940542951, "num_examples": 3848330, "dataset_name": "reddit"}}, "download_checksums": {"https://zenodo.org/record/1043504/files/corpus-webis-tldr-17.zip?download=1": {"num_bytes": 3141854161, "checksum": "c1a0f8c4374c7314d3c9ec50dd505303c536062d87037d4dca7035b89b36938a"}}, "download_size": 3141854161, "dataset_size": 18940542951, "size_in_bytes": 22082397112}}
dummy/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:faf476f84f45e64caf7bf338d437c6248cdecfffca8fc5bfc62f65dfeb847f68
+ size 520
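
The dummy archive is stored as a Git LFS pointer rather than the zip itself: the three lines above give the pointer spec version, the SHA-256 object id, and the size in bytes of the real file. A small sketch of reading such a pointer (parse_lfs_pointer is a hypothetical helper, stdlib only):

# Sketch: parse a Git LFS pointer file into a dict with keys version, oid, size.
def parse_lfs_pointer(path):
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

pointer = parse_lfs_pointer("dummy/1.0.0/dummy_data.zip")
print(pointer["oid"], pointer["size"])  # sha256:faf4... 520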
reddit.py ADDED
@@ -0,0 +1,101 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """Reddit dataset using tldr as summaries."""
+
+ import json
+ import os
+
+ import datasets
+
+
+ _CITATION = """
+ @inproceedings{volske-etal-2017-tl,
+     title = {TL;DR: Mining {R}eddit to Learn Automatic Summarization},
+     author = {V{\"o}lske, Michael and Potthast, Martin and Syed, Shahbaz and Stein, Benno},
+     booktitle = {Proceedings of the Workshop on New Frontiers in Summarization},
+     month = {sep},
+     year = {2017},
+     address = {Copenhagen, Denmark},
+     publisher = {Association for Computational Linguistics},
+     url = {https://www.aclweb.org/anthology/W17-4508},
+     doi = {10.18653/v1/W17-4508},
+     pages = {59--63},
+     abstract = {Recent advances in automatic text summarization have used deep neural networks to generate high-quality abstractive summaries, but the performance of these models strongly depends on large amounts of suitable training data. We propose a new method for mining social media for author-provided summaries, taking advantage of the common practice of appending a {``}TL;DR{''} to long posts. A case study using a large Reddit crawl yields the Webis-TLDR-17 dataset, complementing existing corpora primarily from the news genre. Our technique is likely applicable to other social media sites and general web crawls.},
+ }
+ """
+
+ _DESCRIPTION = """
+ This corpus contains preprocessed posts from the Reddit dataset.
+ The dataset consists of 3,848,330 posts with an average length of 270 words for content,
+ and 28 words for the summary.
+
+ Features includes strings: author, body, normalizedBody, content, summary, subreddit, subreddit_id.
+ Content is used as document and summary is used as summary.
+ """
+
+ _URL = "https://zenodo.org/record/1043504/files/corpus-webis-tldr-17.zip?download=1"
+
+ _DOCUMENT = "content"
+ _SUMMARY = "summary"
+ _ADDITIONAL_FEATURES = ["author", "body", "normalizedBody", "subreddit", "subreddit_id", "id"]
+
+
+ class Reddit(datasets.GeneratorBasedBuilder):
+     """Reddit Dataset."""
+
+     VERSION = datasets.Version("1.0.0")
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {k: datasets.Value("string") for k in _ADDITIONAL_FEATURES + [_DOCUMENT, _SUMMARY]}
+             ),
+             supervised_keys=None,
+             homepage="https://github.com/webis-de/webis-tldr-17-corpus",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         dl_path = dl_manager.download_and_extract(_URL)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"path": os.path.join(dl_path, "corpus-webis-tldr-17.json")},
+             )
+         ]
+
+     def _generate_examples(self, path=None):
+         """Yields examples."""
+         with open(path, "rb") as f:
+             for i, line in enumerate(f):
+                 # possible keys are:
+                 # author: string (nullable = true)
+                 # body: string (nullable = true)
+                 # normalizedBody: string (nullable = true)
+                 # content: string (nullable = true)
+                 # content_len: long (nullable = true)
+                 # summary: string (nullable = true)
+                 # summary_len: long (nullable = true)
+                 # id: string (nullable = true)
+                 # subreddit: string (nullable = true)
+                 # subreddit_id: string (nullable = true)
+                 # title: string (nullable = true)
+                 d = json.loads(line)
+                 if _SUMMARY in d and _DOCUMENT in d:
+                     yield i, {k: d.get(k, "") for k in _ADDITIONAL_FEATURES + [_DOCUMENT, _SUMMARY]}
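
With reddit.py in place, the corpus can be loaded through the standard datasets API. A minimal usage sketch, assuming datasets >= 1.0.0 is installed; note that the first call downloads the ~3.1 GB Zenodo archive and materializes the ~19 GB train split recorded in dataset_infos.json:

# Sketch: load the Reddit (Webis-TLDR-17) dataset and look at one example.
from datasets import load_dataset

dataset = load_dataset("reddit", split="train")  # triggers download + preprocessing on first run
print(dataset)                                   # 3,848,330 rows with string features

example = dataset[0]
print(example["subreddit"])
print(example["content"][:200])                  # used as the document
print(example["summary"][:200])                  # the author-provided TL;DR, used as the summary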