Languages: English
Multilinguality: monolingual
Size Categories: 1M<n<10M
Language Creators: crowdsourced
Annotations Creators: no-annotation
Source Datasets: original
License:
Committed by albertvillanova
Commit: 346e637
Parent(s): 121d543

Host data file (#7)

- Add data file (3d013b77f2e94fd1ffb86d299d35d5f29edadad9)
- Update loading script (527607cfae5370e30bae203e392da937568b3591)
- Update metadata in legacy JSON file (93704c91fc1edb38d85b3e080d21ffe9c890d63a)
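With the archive now hosted in the repository itself, downstream usage is unchanged: the loader still produces the single `train` split recorded in `dataset_infos.json`. A minimal sketch, assuming the loader is published under the `reddit` dataset ID (inferred from the `builder_name` below) and that the `datasets` library is installed:

```python
from datasets import load_dataset

# Assumes the loader is published as the "reddit" dataset (inferred from the
# builder_name in dataset_infos.json); adjust the ID if it differs.
ds = load_dataset("reddit", split="train")

# dataset_infos.json records 3,848,330 examples with string fields such as
# "content" (the document) and "summary" (the TL;DR).
print(ds.num_rows)
print(ds[0]["summary"])
```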

data/corpus-webis-tldr-17.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c1a0f8c4374c7314d3c9ec50dd505303c536062d87037d4dca7035b89b36938a
+ size 3141854161
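The added file is a Git LFS pointer: the 3,141,854,161-byte zip itself lives in LFS storage and is identified by the SHA-256 oid above. A hedged sketch for checking a locally downloaded copy against that oid and size (standard library only; the local path is a placeholder):

```python
import hashlib
from pathlib import Path

# Placeholder path to a local copy of the hosted archive.
archive = Path("corpus-webis-tldr-17.zip")

# Expected values taken from the LFS pointer above.
EXPECTED_SHA256 = "c1a0f8c4374c7314d3c9ec50dd505303c536062d87037d4dca7035b89b36938a"
EXPECTED_SIZE = 3_141_854_161

digest = hashlib.sha256()
with archive.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
        digest.update(chunk)

assert archive.stat().st_size == EXPECTED_SIZE, "size mismatch"
assert digest.hexdigest() == EXPECTED_SHA256, "checksum mismatch"
print("archive matches the LFS pointer")
```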
dataset_infos.json CHANGED
@@ -1 +1 @@
- {"default": {"description": "\nThis corpus contains preprocessed posts from the Reddit dataset.\nThe dataset consists of 3,848,330 posts with an average length of 270 words for content,\nand 28 words for the summary.\n\nFeatures includes strings: author, body, normalizedBody, content, summary, subreddit, subreddit_id.\nContent is used as document and summary is used as summary.\n", "citation": "\n@inproceedings{volske-etal-2017-tl,\n title = \"{TL};{DR}: Mining {R}eddit to Learn Automatic Summarization\",\n author = {V{\"o}lske, Michael and\n Potthast, Martin and\n Syed, Shahbaz and\n Stein, Benno},\n booktitle = \"Proceedings of the Workshop on New Frontiers in Summarization\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W17-4508\",\n doi = \"10.18653/v1/W17-4508\",\n pages = \"59--63\",\n abstract = \"Recent advances in automatic text summarization have used deep neural networks to generate high-quality abstractive summaries, but the performance of these models strongly depends on large amounts of suitable training data. We propose a new method for mining social media for author-provided summaries, taking advantage of the common practice of appending a {``}TL;DR{''} to long posts. A case study using a large Reddit crawl yields the Webis-TLDR-17 dataset, complementing existing corpora primarily from the news genre. Our technique is likely applicable to other social media sites and general web crawls.\",\n}\n", "homepage": "https://github.com/webis-de/webis-tldr-17-corpus", "license": "", "features": {"author": {"dtype": "string", "id": null, "_type": "Value"}, "body": {"dtype": "string", "id": null, "_type": "Value"}, "normalizedBody": {"dtype": "string", "id": null, "_type": "Value"}, "subreddit": {"dtype": "string", "id": null, "_type": "Value"}, "subreddit_id": {"dtype": "string", "id": null, "_type": "Value"}, "id": {"dtype": "string", "id": null, "_type": "Value"}, "content": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "reddit", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 18940542951, "num_examples": 3848330, "dataset_name": "reddit"}}, "download_checksums": {"https://zenodo.org/record/1043504/files/corpus-webis-tldr-17.zip?download=1": {"num_bytes": 3141854161, "checksum": "c1a0f8c4374c7314d3c9ec50dd505303c536062d87037d4dca7035b89b36938a"}}, "download_size": 3141854161, "dataset_size": 18940542951, "size_in_bytes": 22082397112}}
+ {"default": {"description": "\nThis corpus contains preprocessed posts from the Reddit dataset.\nThe dataset consists of 3,848,330 posts with an average length of 270 words for content,\nand 28 words for the summary.\n\nFeatures includes strings: author, body, normalizedBody, content, summary, subreddit, subreddit_id.\nContent is used as document and summary is used as summary.\n", "citation": "\n@inproceedings{volske-etal-2017-tl,\n title = \"{TL};{DR}: Mining {R}eddit to Learn Automatic Summarization\",\n author = {V{\"o}lske, Michael and\n Potthast, Martin and\n Syed, Shahbaz and\n Stein, Benno},\n booktitle = \"Proceedings of the Workshop on New Frontiers in Summarization\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W17-4508\",\n doi = \"10.18653/v1/W17-4508\",\n pages = \"59--63\",\n abstract = \"Recent advances in automatic text summarization have used deep neural networks to generate high-quality abstractive summaries, but the performance of these models strongly depends on large amounts of suitable training data. We propose a new method for mining social media for author-provided summaries, taking advantage of the common practice of appending a {``}TL;DR{''} to long posts. A case study using a large Reddit crawl yields the Webis-TLDR-17 dataset, complementing existing corpora primarily from the news genre. Our technique is likely applicable to other social media sites and general web crawls.\",\n}\n", "homepage": "https://github.com/webis-de/webis-tldr-17-corpus", "license": "", "features": {"author": {"dtype": "string", "id": null, "_type": "Value"}, "body": {"dtype": "string", "id": null, "_type": "Value"}, "normalizedBody": {"dtype": "string", "id": null, "_type": "Value"}, "subreddit": {"dtype": "string", "id": null, "_type": "Value"}, "subreddit_id": {"dtype": "string", "id": null, "_type": "Value"}, "id": {"dtype": "string", "id": null, "_type": "Value"}, "content": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "reddit", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 18940542951, "num_examples": 3848330, "dataset_name": "reddit"}}, "download_checksums": {"data/corpus-webis-tldr-17.zip": {"num_bytes": 3141854161, "checksum": "c1a0f8c4374c7314d3c9ec50dd505303c536062d87037d4dca7035b89b36938a"}}, "download_size": 3141854161, "dataset_size": 18940542951, "size_in_bytes": 22082397112}}
reddit.py CHANGED
@@ -47,7 +47,7 @@ Features includes strings: author, body, normalizedBody, content, summary, subre
  Content is used as document and summary is used as summary.
  """
 
- _URL = "https://zenodo.org/record/1043504/files/corpus-webis-tldr-17.zip?download=1"
+ _URL = "data/corpus-webis-tldr-17.zip"
 
  _DOCUMENT = "content"
  _SUMMARY = "summary"
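The rest of the loading script is not shown in this diff; it passes `_URL` to the download manager, and a repo-relative path is resolved against the dataset repository instead of Zenodo. A rough sketch of the usual `GeneratorBasedBuilder` pattern, assuming the standard `datasets` API, with hypothetical surrounding code rather than the repository's actual implementation:

```python
import json
import os

import datasets

_URL = "data/corpus-webis-tldr-17.zip"  # repo-relative path introduced by this commit

_FEATURES = ["author", "body", "normalizedBody", "subreddit",
             "subreddit_id", "id", "content", "summary"]


class Reddit(datasets.GeneratorBasedBuilder):
    """Hypothetical skeleton; field names follow dataset_infos.json."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {name: datasets.Value("string") for name in _FEATURES}
            ),
        )

    def _split_generators(self, dl_manager):
        # A relative path is resolved against the dataset repository, so the
        # hosted zip is fetched instead of the old Zenodo URL.
        extracted = dl_manager.download_and_extract(_URL)
        # The filename inside the archive is assumed here for illustration.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"path": os.path.join(extracted, "corpus-webis-tldr-17.json")},
            )
        ]

    def _generate_examples(self, path):
        # Assumes one JSON object per line; keys mirror dataset_infos.json.
        with open(path, encoding="utf-8") as f:
            for i, line in enumerate(f):
                record = json.loads(line)
                yield i, {name: record.get(name, "") for name in _FEATURES}
```

The only line this commit actually touches is the `_URL` assignment; everything else in the sketch is illustrative scaffolding.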