emilylearning committed on
Commit
8a8ccb5
1 Parent(s): 30b6a63

Upload dataset_infos.json

Files changed (1)
  1. dataset_infos.json +63 -0
dataset_infos.json ADDED
@@ -0,0 +1,63 @@
+ {"emilylearning--cond_ft_none_on_reddit__prcnt_100__test_run_False__roberta-base": {
+   "description": "\nThis corpus contains preprocessed posts from the Reddit dataset.\nThe dataset consists of 3,848,330 posts with an average length of 270 words for content,\nand 28 words for the summary.\n\nFeatures includes strings: author, body, normalizedBody, content, summary, subreddit, subreddit_id.\nContent is used as document and summary is used as summary.\n",
+   "citation": "\n@inproceedings{volske-etal-2017-tl,\n title = {TL;DR: Mining {R}eddit to Learn Automatic Summarization},\n author = {V{\"o}lske, Michael and Potthast, Martin and Syed, Shahbaz and Stein, Benno},\n booktitle = {Proceedings of the Workshop on New Frontiers in Summarization},\n month = {sep},\n year = {2017},\n address = {Copenhagen, Denmark},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W17-4508},\n doi = {10.18653/v1/W17-4508},\n pages = {59--63},\n abstract = {Recent advances in automatic text summarization have used deep neural networks to generate high-quality abstractive summaries, but the performance of these models strongly depends on large amounts of suitable training data. We propose a new method for mining social media for author-provided summaries, taking advantage of the common practice of appending a {``}TL;DR{''} to long posts. A case study using a large Reddit crawl yields the Webis-TLDR-17 dataset, complementing existing corpora primarily from the news genre. Our technique is likely applicable to other social media sites and general web crawls.},\n}\n",
+   "homepage": "https://github.com/webis-de/webis-tldr-17-corpus",
+   "license": "",
+   "features": {
+     "input_ids": {
+       "feature": {
+         "dtype": "int32",
+         "id": null,
+         "_type": "Value"
+       },
+       "length": -1,
+       "id": null,
+       "_type": "Sequence"
+     },
+     "attention_mask": {
+       "feature": {
+         "dtype": "int8",
+         "id": null,
+         "_type": "Value"
+       },
+       "length": -1,
+       "id": null,
+       "_type": "Sequence"
+     },
+     "labels": {
+       "feature": {
+         "dtype": "int64",
+         "id": null,
+         "_type": "Value"
+       },
+       "length": -1,
+       "id": null,
+       "_type": "Sequence"
+     }
+   },
+   "post_processed": null,
+   "supervised_keys": null,
+   "task_templates": null,
+   "builder_name": "reddit",
+   "config_name": "default",
+   "version": {
+     "version_str": "1.0.0",
+     "description": null,
+     "major": 1,
+     "minor": 0,
+     "patch": 0
+   },
+   "splits": {
+     "train": {
+       "name": "train",
+       "num_bytes": 2933641908,
+       "num_examples": 1750383,
+       "dataset_name": "cond_ft_none_on_reddit__prcnt_100__test_run_False__roberta-base"
+     }
+   },
+   "download_checksums": null,
+   "download_size": 436172602,
+   "post_processing_size": null,
+   "dataset_size": 2933641908,
+   "size_in_bytes": 3369814510
+ }}
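
For reference, a minimal sketch of how a dataset described by this metadata could be loaded and inspected with the datasets library. The repo id below is assumed from the config key in the JSON above and is not part of the commit itself.

# Minimal sketch (assumption: the Hub repo id matches the config key in the
# dataset_infos.json above; this snippet is illustrative, not from the commit).
from datasets import load_dataset

repo_id = "emilylearning/cond_ft_none_on_reddit__prcnt_100__test_run_False__roberta-base"

# Load the single "train" split declared under "splits".
ds = load_dataset(repo_id, split="train")

# The "features" block declares three sequence columns:
#   input_ids (int32), attention_mask (int8), labels (int64)
print(ds.features)

# "num_examples" in the metadata gives 1,750,383 rows for the train split.
print(ds.num_rows)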