Languages: English
Multilinguality: monolingual
Size Categories: 100K<n<1M
Language Creators: found
Annotations Creators: no-annotation
Source Datasets: original
ArXiv:
License:
system (HF staff) committed on
Commit e7dd1ad
0 Parent(s):

Update files from the datasets library (from 1.0.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

Files changed (3):
  1. .gitattributes +27 -0
  2. dataset_infos.json +173 -0
  3. eli5.py +398 -0
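
Once these files are on the Hub, the dataset can be loaded through the datasets library. The sketch below is illustrative rather than part of the commit: it assumes the script is published under the dataset name "eli5", and the first call downloads and filters the monthly pushshift.io Reddit dumps, which takes a long time and a lot of disk space.

import datasets

# Load the explainlikeimfive training split defined by this script
# (single config "LFQA_reddit"). The first run triggers a very heavy download.
eli5_train = datasets.load_dataset("eli5", split="train_eli5")

print(eli5_train)
print(eli5_train[0]["title"])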
.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
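
The rules above route large binary artifacts (Arrow files, model weights, archives) through Git LFS instead of regular git storage. As a rough, illustrative check that is not part of the commit, the sketch below runs Python's fnmatch against a handful of the extension patterns listed above; git's own pathspec matching, for instance for saved_model/**/*, is more involved than this approximation.

from fnmatch import fnmatch

# A subset of the patterns from the .gitattributes file above.
lfs_patterns = ["*.7z", "*.arrow", "*.bin", "*.h5", "*.parquet", "*.zip"]
# Hypothetical file names, used only for the demonstration.
candidates = ["data/train.arrow", "model.h5", "README.md", "eli5.py"]

for path in candidates:
    routed = any(fnmatch(path, pattern) for pattern in lfs_patterns)
    print(path, "->", "Git LFS" if routed else "regular git")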
dataset_infos.json ADDED
@@ -0,0 +1,173 @@
+ {
+     "LFQA_reddit": {
+         "description": "Explain Like I'm 5 long form QA dataset\n",
+         "citation": "@inproceedings{DBLP:conf/acl/FanJPGWA19,\n author = {Angela Fan and\n Yacine Jernite and\n Ethan Perez and\n David Grangier and\n Jason Weston and\n Michael Auli},\n editor = {Anna Korhonen and\n David R. Traum and\n Lluis Marquez},\n title = {{ELI5:} Long Form Question Answering},\n booktitle = {Proceedings of the 57th Conference of the Association for Computational\n Linguistics, {ACL} 2019, Florence, Italy, July 28- August 2, 2019,\n Volume 1: Long Papers},\n pages = {3558--3567},\n publisher = {Association for Computational Linguistics},\n year = {2019},\n url = {https://doi.org/10.18653/v1/p19-1346},\n doi = {10.18653/v1/p19-1346},\n}\n",
+         "homepage": "https://facebookresearch.github.io/ELI5/explore.html",
+         "license": "",
+         "features": {
+             "q_id": {
+                 "dtype": "string",
+                 "id": null,
+                 "_type": "Value"
+             },
+             "title": {
+                 "dtype": "string",
+                 "id": null,
+                 "_type": "Value"
+             },
+             "selftext": {
+                 "dtype": "string",
+                 "id": null,
+                 "_type": "Value"
+             },
+             "document": {
+                 "dtype": "string",
+                 "id": null,
+                 "_type": "Value"
+             },
+             "subreddit": {
+                 "dtype": "string",
+                 "id": null,
+                 "_type": "Value"
+             },
+             "answers": {
+                 "feature": {
+                     "a_id": {
+                         "dtype": "string",
+                         "id": null,
+                         "_type": "Value"
+                     },
+                     "text": {
+                         "dtype": "string",
+                         "id": null,
+                         "_type": "Value"
+                     },
+                     "score": {
+                         "dtype": "int32",
+                         "id": null,
+                         "_type": "Value"
+                     }
+                 },
+                 "length": -1,
+                 "id": null,
+                 "_type": "Sequence"
+             },
+             "title_urls": {
+                 "feature": {
+                     "url": {
+                         "dtype": "string",
+                         "id": null,
+                         "_type": "Value"
+                     }
+                 },
+                 "length": -1,
+                 "id": null,
+                 "_type": "Sequence"
+             },
+             "selftext_urls": {
+                 "feature": {
+                     "url": {
+                         "dtype": "string",
+                         "id": null,
+                         "_type": "Value"
+                     }
+                 },
+                 "length": -1,
+                 "id": null,
+                 "_type": "Sequence"
+             },
+             "answers_urls": {
+                 "feature": {
+                     "url": {
+                         "dtype": "string",
+                         "id": null,
+                         "_type": "Value"
+                     }
+                 },
+                 "length": -1,
+                 "id": null,
+                 "_type": "Sequence"
+             }
+         },
+         "supervised_keys": null,
+         "builder_name": "eli5",
+         "config_name": "LFQA_reddit",
+         "version": {
+             "version_str": "1.0.0",
+             "description": null,
+             "datasets_version_to_prepare": null,
+             "major": 1,
+             "minor": 0,
+             "patch": 0
+         },
+         "splits": {
+             "train_eli5": {
+                 "name": "train_eli5",
+                 "num_bytes": 577188173,
+                 "num_examples": 272634,
+                 "dataset_name": "eli5"
+             },
+             "validation_eli5": {
+                 "name": "validation_eli5",
+                 "num_bytes": 21117891,
+                 "num_examples": 9812,
+                 "dataset_name": "eli5"
+             },
+             "test_eli5": {
+                 "name": "test_eli5",
+                 "num_bytes": 53099796,
+                 "num_examples": 24512,
+                 "dataset_name": "eli5"
+             },
+             "train_asks": {
+                 "name": "train_asks",
+                 "num_bytes": 286464210,
+                 "num_examples": 131778,
+                 "dataset_name": "eli5"
+             },
+             "validation_asks": {
+                 "name": "validation_asks",
+                 "num_bytes": 9662481,
+                 "num_examples": 2281,
+                 "dataset_name": "eli5"
+             },
+             "test_asks": {
+                 "name": "test_asks",
+                 "num_bytes": 17713920,
+                 "num_examples": 4462,
+                 "dataset_name": "eli5"
+             },
+             "train_askh": {
+                 "name": "train_askh",
+                 "num_bytes": 330483260,
+                 "num_examples": 98525,
+                 "dataset_name": "eli5"
+             },
+             "validation_askh": {
+                 "name": "validation_askh",
+                 "num_bytes": 18690845,
+                 "num_examples": 4901,
+                 "dataset_name": "eli5"
+             },
+             "test_askh": {
+                 "name": "test_askh",
+                 "num_bytes": 36246784,
+                 "num_examples": 9764,
+                 "dataset_name": "eli5"
+             }
+         },
+         "download_checksums": {
+             "https://s3.amazonaws.com/datasets.huggingface.co/nlp/datasets_experimental/explainlikeimfive/reddit_data_split.json": {
+                 "num_bytes": 6326543,
+                 "checksum": "e605c4854787f3db85415b90ab80bb9ca2e2bd5208391e02aff081be2690923f"
+             },
+             "https://s3.amazonaws.com/datasets.huggingface.co/nlp/datasets/eli5/reddit_data_split.json": {
+                 "checksum": "e605c4854787f3db85415b90ab80bb9ca2e2bd5208391e02aff081be2690923f",
+                 "num_bytes": 6326543
+             }
+         },
+         "download_size": 6326543,
+         "dataset_size": 1350667360,
+         "size_in_bytes": 1356993903
+     }
+ }
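
The splits block above records how many examples and bytes each generated split contains. As a quick sanity check that is not part of the commit, a short sketch that reads a locally saved copy of this file (assumed here to be named dataset_infos.json) and prints those numbers could look like this:

import json

# Summarize the split sizes recorded in the metadata file added above.
with open("dataset_infos.json", encoding="utf-8") as f:
    infos = json.load(f)

for config_name, info in infos.items():
    print(config_name)
    for split_name, split in info["splits"].items():
        print("  {}: {} examples, {} bytes".format(split_name, split["num_examples"], split["num_bytes"]))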
eli5.py ADDED
@@ -0,0 +1,398 @@
+ # coding=utf-8
+ # Copyright 2020 Facebook, Inc. and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """ELI5: Long Form Question Answering dataset"""
+ from __future__ import absolute_import, division, print_function
+
+ import bz2
+ import io
+ import json
+ import logging
+ import lzma
+ import os
+ import re
+ from os.path import isfile
+ from os.path import join as pjoin
+ from time import time
+
+ import datasets
+
+
+ _SUB_REDDITS = ["explainlikeimfive", "askscience", "AskHistorians"]
+ _REDDIT_URL = "https://files.pushshift.io/reddit/"
+
+ # pylint: disable=line-too-long
+ _URL_REGEX = r"""(?i)\b((?:https?:(?:/{1,3}|[a-z0-9%])|[a-z0-9.\-]+[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)/)(?:[^\s()<>{}\[\]]+|\([^\s()]*?\([^\s()]+\)[^\s()]*?\)|\([^\s]+?\))+(?:\([^\s()]*?\([^\s()]+\)[^\s()]*?\)|\([^\s]+?\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’])|(?:(?<!@)[a-z0-9]+(?:[.\-][a-z0-9]+)*[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)\b/?(?!@)))"""
+ # pylint: enable=line-too-long
+
+ _HTML_PAIRS = [
+     ("&amp;", " & "),
+     ("&quot", ' " '),
+     ("&apos", " ' "),
+     ("&gt;", " > "),
+     ("&lt;", " < "),
+ ]
+
+
+ # removes URLs (kept in separate list)
+ def _extract_urls_from_text(stp):
+     url_list = list(set(re.findall(_URL_REGEX, stp)))
+     for i, url in enumerate(url_list):
+         stp = stp.replace(url, "_URL_%d_" % (i,))
+     for a, b in _HTML_PAIRS:
+         stp = stp.replace(a, b)
+     return (stp, url_list)
+
+
+ # collects URLs for monthly dumps, has to be robust to file type changes
+ def _gather_dump_urls(base_url, mode, dl_manager):
+     from bs4 import BeautifulSoup
+
+     page_path = dl_manager.download(_REDDIT_URL + mode)
+     page_f = open(page_path, encoding="utf-8")
+     page_content = page_f.read()
+     page_f.close()
+     soup = BeautifulSoup(page_content, "lxml")
+     files = [it for it in soup.find_all(attrs={"class": "file"})]
+     f_urls = [
+         tg.find_all(lambda x: x.has_attr("href"))[0]["href"]
+         for tg in files
+         if len(tg.find_all(lambda x: x.has_attr("href"))) > 0
+     ]
+     date_to_url = {}
+     for url_st in f_urls:
+         ls = re.findall(r"20[0-9]{2}-[0-9]{2}", url_st)
+         if len(ls) > 0:
+             yr, mt = ls[0].split("-")
+             date_to_url[(int(yr), int(mt))] = base_url + mode + url_st[1:]
+     return date_to_url
+
+
+ # select valid top-level comments
+ def _valid_line(dct, mode):
+     top_level = (mode == "submissions") or (
+         len(dct["body"].split()) > 2
+         and not dct["body"].startswith("Your submission has been removed")
+         and dct["author"] != "AutoModerator"
+         and dct["parent_id"] == dct["link_id"]
+     )
+     res = dct.get("num_comments", 1) > 0 and dct.get("score", 0) and dct.get("score", 0) >= 2 and top_level
+     return res
+
+
+ def _open_compressed_file(f_name, f_type):
+     import zstandard as zstd
+
+     fh = None
+     if f_type == "xz":
+         f = lzma.open(f_name, "rt")
+     elif f_type == "bz2":
+         f = bz2.open(f_name, "rt")
+     elif f_type == "zst":
+         fh = open(f_name, "rb")
+         dctx = zstd.ZstdDecompressor()
+         stream_reader = dctx.stream_reader(fh)
+         f = io.TextIOWrapper(stream_reader, encoding="utf-8")
+     else:
+         raise NotImplementedError
+     return f, fh
+
+
+ # download a file, extract posts from desired subreddit, then remove from disk
+ def _download_and_select_lines(dl_manager, f_url, mode, st_time):
+     # download and pre-process original posts
+     print("downloading {} {:.2f}".format(f_url, time() - st_time))
+     f_downloaded_path = dl_manager.download(f_url)
+     print("decompressing and filtering {} {:.2f}".format(f_url, time() - st_time))
+     f, fh = _open_compressed_file(f_downloaded_path, f_url.split(".")[-1])
+     lines = dict([(name, []) for name in _SUB_REDDITS])
+     for line in f:
+         line_dct = json.loads(line)
+         if any([line_dct.get("subreddit", "") == name for name in _SUB_REDDITS]):
+             lines[line_dct["subreddit"]] += [line_dct]
+     f.close()
+     if f_url.split(".")[-1] == "zst":
+         fh.close()
+     os.remove(f_downloaded_path)
+     os.remove(f_downloaded_path + ".json")
+     os.remove(f_downloaded_path + ".lock")
+     print("tokenizing and selecting {} {:.2f}".format(f_url, time() - st_time))
+     processed_items = dict([(name, []) for name in _SUB_REDDITS])
+     if mode == "submissions":
+         key_list = ["id", "score", "url", "title", "selftext", "subreddit"]
+     else:
+         key_list = ["id", "link_id", "parent_id", "score", "body"]
+     for name in _SUB_REDDITS:
+         for line in lines[name]:
+             if _valid_line(line, mode):
+                 reddit_res = {}
+                 for k in key_list:
+                     if k in ["title", "selftext", "body"]:
+                         reddit_res[k] = _extract_urls_from_text(line[k])
+                     else:
+                         reddit_res[k] = line[k]
+                 processed_items[name] += [reddit_res]
+     print("Total found {} {} {:.2f}".format(sum([len(ls) for ls in processed_items.values()]), mode, time() - st_time))
+     return processed_items
+
+
+ # post-process ELI5 questions and de-duplicate answers
+ def _post_process(reddit_dct, name=""):
+     # remove the ELI5 at the start of explainlikeimfive questions
+     start_re = re.compile(r"""\A[\[|\(]?[ ]?eli[5f][ ]?[\]|\)]?[]?[:,]?""", re.IGNORECASE)
+     if name == "explainlikeimfive":
+         title, uls = reddit_dct["title"]
+         title = start_re.sub("", title.strip()).strip()
+         reddit_dct["title"] = [title, uls]
+     # dedupe and filter comments
+     comments = [
+         c
+         for i, c in enumerate(reddit_dct["comments"])
+         if len(c["body"][0].split()) >= 8 and c["id"] not in [x["id"] for x in reddit_dct["comments"][:i]]
+     ]
+     comments = sorted(comments, key=lambda c: (c["score"], len(c["body"][0].split()), c["id"]), reverse=True)
+     reddit_dct["comments"] = comments
+     return reddit_dct
+
+
+ def _download_and_filter_reddit(dl_manager, start_year=2011, start_month=7, end_year=2019, end_month=7):
+     # collect submissions and comments monthly URLs
+     date_to_url_submissions = _gather_dump_urls(_REDDIT_URL, "submissions", dl_manager)
+     date_to_url_comments = _gather_dump_urls(_REDDIT_URL, "comments", dl_manager)
+     # download, filter, process, remove
+     st_time = time()
+     qa_dict = dict([(name, {}) for name in _SUB_REDDITS])
+     # first download all questions
+     for year in range(start_year, end_year + 1):
+         start_mth = start_month if year == start_year else 1
+         end_mth = end_month if year == end_year else 12
+         months = range(start_mth, end_mth + 1)
+         for month in months:
+             if (year, month) in date_to_url_submissions:
+                 f_url = date_to_url_submissions[(year, month)]
+                 processed_submissions = _download_and_select_lines(dl_manager, f_url, "submissions", st_time)
+                 for name in _SUB_REDDITS:
+                     for dct in processed_submissions[name]:
+                         qa_dict[name][dct["id"]] = dct
+             else:
+                 print("Could not find submissions dump file for year {:4d} month {:2d}".format(year, month))
+     # then all answers
+     for year in range(start_year, end_year + 1):
+         start_mth = start_month if year == start_year else 1
+         end_mth = end_month if year == end_year else 12
+         months = range(start_mth, end_mth + 1)
+         for month in months:
+             if (year, month) in date_to_url_comments:
+                 f_url = date_to_url_comments[(year, month)]
+                 processed_comments = _download_and_select_lines(dl_manager, f_url, "comments", st_time)
+                 # merge submissions and comments
+                 for name in _SUB_REDDITS:
+                     merged_comments = 0
+                     for dct in processed_comments[name]:
+                         did = dct["parent_id"].split("_")[-1]
+                         if did in qa_dict[name]:
+                             merged_comments += 1
+                             qa_dict[name][did]["comments"] = qa_dict[name][did].get("comments", []) + [dct]
+             else:
+                 print("Could not find comments dump file for year {:4d} month {:2d}".format(year, month))
+     # then post-process
+     res = {}
+     for name in _SUB_REDDITS:
+         qa_dct_list = [(k, _post_process(rdct, name)) for k, rdct in qa_dict[name].items() if "comments" in rdct]
+         qa_dct_list = [x for x in qa_dct_list if len(x[1]["comments"]) > 0 and name in x[1]["url"]]
+         res[name] = dict(qa_dct_list[:])
+     return res
+
+
+ _DESCRIPTION = """\
+ Explain Like I'm 5 long form QA dataset
+ """
+
+ _CITATION = """\
+ @inproceedings{DBLP:conf/acl/FanJPGWA19,
+   author    = {Angela Fan and
+                Yacine Jernite and
+                Ethan Perez and
+                David Grangier and
+                Jason Weston and
+                Michael Auli},
+   editor    = {Anna Korhonen and
+                David R. Traum and
+                Lluis Marquez},
+   title     = {{ELI5:} Long Form Question Answering},
+   booktitle = {Proceedings of the 57th Conference of the Association for Computational
+                Linguistics, {ACL} 2019, Florence, Italy, July 28- August 2, 2019,
+                Volume 1: Long Papers},
+   pages     = {3558--3567},
+   publisher = {Association for Computational Linguistics},
+   year      = {2019},
+   url       = {https://doi.org/10.18653/v1/p19-1346},
+   doi       = {10.18653/v1/p19-1346},
+ }
+ """
+
+
+ class Eli5Config(datasets.BuilderConfig):
+     """BuilderConfig for ExplainLikeImFive."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for ExplainLikeImFive.
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(Eli5Config, self).__init__(**kwargs)
+
+
+ class Eli5(datasets.GeneratorBasedBuilder):
+     """ELI5: Explain Like I'm Five long form question answering dataset."""
+
+     BUILDER_CONFIG_CLASS = Eli5Config
+     _DATA_SPLIT_URL = "https://s3.amazonaws.com/datasets.huggingface.co/nlp/datasets/eli5/reddit_data_split.json"
+
+     BUILDER_CONFIGS = [
+         Eli5Config(name="LFQA_reddit", version=datasets.Version("1.0.0"), description="long form QA subreddits"),
+     ]
+
+     test_dummy_data = False
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "q_id": datasets.Value("string"),
+                     "title": datasets.Value("string"),
+                     "selftext": datasets.Value("string"),
+                     "document": datasets.Value("string"),
+                     "subreddit": datasets.Value("string"),
+                     "answers": datasets.features.Sequence(
+                         {
+                             "a_id": datasets.Value("string"),
+                             "text": datasets.Value("string"),
+                             "score": datasets.Value("int32"),
+                         }
+                     ),
+                     "title_urls": datasets.features.Sequence(datasets.Value("string")),
+                     "selftext_urls": datasets.features.Sequence(datasets.Value("string")),
+                     "answers_urls": datasets.features.Sequence(datasets.Value("string")),
+                 }
+             ),
+             supervised_keys=None,
+             homepage="https://facebookresearch.github.io/ELI5/explore.html",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         qa_data_file = pjoin(
+             self._cache_dir_root, self._relative_data_dir(with_version=False), "reddit_downloaded_qa_lists.json"
+         )
+         if isfile(qa_data_file):
+             logging.info("loading pre-computed QA list")
+             self.filtered_reddit = json.load(open(qa_data_file))
+         else:
+             self.filtered_reddit = _download_and_filter_reddit(
+                 dl_manager, start_year=2011, start_month=7, end_year=2019, end_month=7
+             )
+             logging.info("saving pre-computed QA list")
+             json.dump(self.filtered_reddit, open(qa_data_file, "w"))
+         # download data splits from AWS
+         fpath_splits = dl_manager.download(self._DATA_SPLIT_URL)
+         self.data_split = json.load(open(fpath_splits))
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split("train_eli5"),
+                 gen_kwargs={"split": "train", "subreddit_name": "explainlikeimfive"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split("validation_eli5"),
+                 gen_kwargs={"split": "validation", "subreddit_name": "explainlikeimfive"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split("test_eli5"),
+                 gen_kwargs={"split": "test", "subreddit_name": "explainlikeimfive"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split("train_asks"),
+                 gen_kwargs={"split": "train", "subreddit_name": "askscience"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split("validation_asks"),
+                 gen_kwargs={"split": "validation", "subreddit_name": "askscience"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split("test_asks"),
+                 gen_kwargs={"split": "test", "subreddit_name": "askscience"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split("train_askh"),
+                 gen_kwargs={"split": "train", "subreddit_name": "AskHistorians"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split("validation_askh"),
+                 gen_kwargs={"split": "validation", "subreddit_name": "AskHistorians"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split("test_askh"),
+                 gen_kwargs={"split": "test", "subreddit_name": "AskHistorians"},
+             ),
+         ]
+
+     def _generate_examples(self, split, subreddit_name):
+         logging.info("generating examples from = {}, {} set".format(subreddit_name, split))
+         if split in self.data_split.get(subreddit_name, []):
+             id_list = self.data_split[subreddit_name][split]
+             data = [
+                 self.filtered_reddit[subreddit_name][q_id]
+                 for q_id in id_list
+                 if q_id in self.filtered_reddit[subreddit_name]
+             ]
+         elif split == "train":
+             data = [
+                 self.filtered_reddit[subreddit_name][q_id]
+                 for subreddit_name in self.filtered_reddit
+                 for q_id in self.filtered_reddit[subreddit_name]
+             ]
+         else:
+             data = []
+         for example in data:
+             id_ = example["id"]
+             title = example["title"][0]
+             title_urls = example["title"][1]
+             selftext = example["selftext"][0]
+             selftext_urls = example["selftext"][1]
+             answer_scores = [ans["score"] for ans in example["comments"]]
+             answer_ids = [ans["id"] for ans in example["comments"]]
+             # flatten list of URL mappings
+             url_maps = [(ul, i, j) for i, ans in enumerate(example["comments"]) for j, ul in enumerate(ans["body"][1])]
+             answers_urls = [ul for ul, _, _ in url_maps]
+             map_url_indices = dict([((i, j), k) for k, (_, i, j) in enumerate(url_maps)])
+             answer_texts = []
+             for i, ans in enumerate(example["comments"]):
+                 txt = ans["body"][0]
+                 for j, _ in enumerate(ans["body"][1]):
+                     txt = txt.replace("_URL_{}_".format(j), "_URL_{}_".format(map_url_indices[(i, j)]))
+                 answer_texts += [txt.strip()]
+             yield id_, {
+                 "q_id": id_,
+                 "title": title,
+                 "selftext": selftext,
+                 "document": "",
+                 "subreddit": example.get("subreddit", subreddit_name),
+                 "answers": {"a_id": answer_ids, "text": answer_texts, "score": answer_scores},
+                 "title_urls": title_urls,
+                 "selftext_urls": selftext_urls,
+                 "answers_urls": answers_urls,
+             }
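
As _generate_examples above shows, URLs are stripped from question and answer text and replaced with numbered _URL_k_ placeholders, while the URLs themselves are returned in the flat answers_urls (and title_urls / selftext_urls) lists. The sketch below, not part of the commit, restores the links in an answer; the record it uses is a hypothetical stand-in shaped like the dictionaries the generator yields.

# Hypothetical record mirroring the fields yielded by _generate_examples.
example = {
    "answers": {
        "a_id": ["abc123"],
        "text": ["See _URL_0_ for the full explanation."],
        "score": [42],
    },
    "answers_urls": ["https://example.com/explanation"],
}

# Substitute each numbered placeholder with the matching entry of the
# flattened answers_urls list.
restored = []
for text in example["answers"]["text"]:
    for k, url in enumerate(example["answers_urls"]):
        text = text.replace("_URL_{}_".format(k), url)
    restored.append(text)

print(restored[0])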