{"emilylearning--cond_ft_none_on_reddit__prcnt_100__test_run_False__bert-base-uncased": { "description": "\nThis corpus contains preprocessed posts from the Reddit dataset.\nThe dataset consists of 3,848,330 posts with an average length of 270 words for content,\nand 28 words for the summary.\n\nFeatures includes strings: author, body, normalizedBody, content, summary, subreddit, subreddit_id.\nContent is used as document and summary is used as summary.\n", "citation": "\n@inproceedings{volske-etal-2017-tl,\n title = {TL;DR: Mining {R}eddit to Learn Automatic Summarization},\n author = {V{\"o}lske, Michael and Potthast, Martin and Syed, Shahbaz and Stein, Benno},\n booktitle = {Proceedings of the Workshop on New Frontiers in Summarization},\n month = {sep},\n year = {2017},\n address = {Copenhagen, Denmark},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W17-4508},\n doi = {10.18653/v1/W17-4508},\n pages = {59--63},\n abstract = {Recent advances in automatic text summarization have used deep neural networks to generate high-quality abstractive summaries, but the performance of these models strongly depends on large amounts of suitable training data. We propose a new method for mining social media for author-provided summaries, taking advantage of the common practice of appending a {``}TL;DR{''} to long posts. A case study using a large Reddit crawl yields the Webis-TLDR-17 dataset, complementing existing corpora primarily from the news genre. Our technique is likely applicable to other social media sites and general web crawls.},\n}\n", "homepage": "https://github.com/webis-de/webis-tldr-17-corpus", "license": "", "features": { "input_ids": { "feature": { "dtype": "int32", "id": null, "_type": "Value" }, "length": -1, "id": null, "_type": "Sequence" }, "attention_mask": { "feature": { "dtype": "int8", "id": null, "_type": "Value" }, "length": -1, "id": null, "_type": "Sequence" }, "labels": { "feature": { "dtype": "int64", "id": null, "_type": "Value" }, "length": -1, "id": null, "_type": "Sequence" } }, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "reddit", "config_name": "default", "version": { "version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0 }, "splits": { "train": { "name": "train", "num_bytes": 2960434444, "num_examples": 1766369, "dataset_name": "cond_ft_none_on_reddit__prcnt_100__test_run_False__bert-base-uncased" } }, "download_checksums": null, "download_size": 436186257, "post_processing_size": null, "dataset_size": 2960434444, "size_in_bytes": 3396620701 }}