{"emilylearning--cond_ft_none_on_reddit__prcnt_100__test_run_False__xlm-roberta-base": {
    "description": "\nThis corpus contains preprocessed posts from the Reddit dataset.\nThe dataset consists of 3,848,330 posts with an average length of 270 words for content,\nand 28 words for the summary.\n\nFeatures includes strings: author, body, normalizedBody, content, summary, subreddit, subreddit_id.\nContent is used as document and summary is used as summary.\n",
    "citation": "\n@inproceedings{volske-etal-2017-tl,\n    title = {TL;DR: Mining {R}eddit to Learn Automatic Summarization},\n    author = {V{\"o}lske, Michael  and Potthast, Martin  and Syed, Shahbaz  and Stein, Benno},\n    booktitle = {Proceedings of the Workshop on New Frontiers in Summarization},\n    month = {sep},\n    year = {2017},\n    address = {Copenhagen, Denmark},\n    publisher = {Association for Computational Linguistics},\n    url = {https://www.aclweb.org/anthology/W17-4508},\n    doi = {10.18653/v1/W17-4508},\n    pages = {59--63},\n    abstract = {Recent advances in automatic text summarization have used deep neural networks to generate high-quality abstractive summaries, but the performance of these models strongly depends on large amounts of suitable training data. We propose a new method for mining social media for author-provided summaries, taking advantage of the common practice of appending a {``}TL;DR{''} to long posts. A case study using a large Reddit crawl yields the Webis-TLDR-17 dataset, complementing existing corpora primarily from the news genre. Our technique is likely applicable to other social media sites and general web crawls.},\n}\n",
    "homepage": "https://github.com/webis-de/webis-tldr-17-corpus",
    "license": "",
    "features": {
        "input_ids": {
            "feature": {
                "dtype": "int32",
                "id": null,
                "_type": "Value"
            },
            "length": -1,
            "id": null,
            "_type": "Sequence"
        },
        "attention_mask": {
            "feature": {
                "dtype": "int8",
                "id": null,
                "_type": "Value"
            },
            "length": -1,
            "id": null,
            "_type": "Sequence"
        },
        "labels": {
            "feature": {
                "dtype": "int64",
                "id": null,
                "_type": "Value"
            },
            "length": -1,
            "id": null,
            "_type": "Sequence"
        }
    },
    "post_processed": null,
    "supervised_keys": null,
    "task_templates": null,
    "builder_name": "reddit",
    "config_name": "default",
    "version": {
        "version_str": "1.0.0",
        "description": null,
        "major": 1,
        "minor": 0,
        "patch": 0
    },
    "splits": {
        "train": {
            "name": "train",
            "num_bytes": 2861733128,
            "num_examples": 1707478,
            "dataset_name": "cond_ft_none_on_reddit__prcnt_100__test_run_False__xlm-roberta-base"
        }
    },
    "download_checksums": null,
    "download_size": 397766944,
    "post_processing_size": null,
    "dataset_size": 2861733128,
    "size_in_bytes": 3259500072
}}
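
A minimal sketch of loading the dataset this metadata describes, assuming the file above is the dataset_infos.json of a Hugging Face dataset repo named after its top-level key (the repo id and the "train" split name below are taken from that key and from the "splits" block; adjust as needed):

from datasets import load_dataset

# Repo id assumed from the top-level key of the JSON above.
ds = load_dataset(
    "emilylearning/cond_ft_none_on_reddit__prcnt_100__test_run_False__xlm-roberta-base",
    split="train",
)
print(ds.features)  # input_ids, attention_mask, labels: variable-length sequences
print(ds.num_rows)  # 1,707,478 examples, per the "splits" block above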