ghomasHudson committed on
Commit
eb92b66
1 Parent(s): dcbff82

Add repo links to replace google drive

Files changed (1)
  1. muld.py +17 -15
muld.py CHANGED
@@ -21,6 +21,8 @@ MuLD: The Multitask Long Document Benchmark
 A set of NLP tasks where each example is over 10,000 tokens long.
 """
 
+_REPO = "https://huggingface.co/datasets/ghomasHudson/muld/resolve/main/data"
+
 _TASK_CONFIGS = {
     "NarrativeQA": {
         "description": """\
@@ -37,9 +39,9 @@ The NarrativeQA Reading Comprehension Challenge Dataset consists of user-submitt
             publisher={MIT Press}
         }""",
         "urls": {
-            datasets.Split.TRAIN: "https://drive.google.com/uc?export=download&id=1sUXIC6lmk9Khp2mnr9VZwQ-StDlHqTw1",
-            datasets.Split.VALIDATION: "https://drive.google.com/uc?&confirm=yTib&export=download&id=1xdXEhLHtcqOZh0FbPhY_dnvNMg2bALtm",
-            datasets.Split.TEST: "https://drive.google.com/uc?confirm=yTib&export=download&id=1BPBXyfYWVGtOXVQv_hlqtvbT25rTQzGu",
+            datasets.Split.TRAIN: f"{_REPO}/narrativeqa_train.json.bz2",
+            datasets.Split.VALIDATION: f"{_REPO}/narrativeqa_validation.json.bz2",
+            datasets.Split.TEST: f"{_REPO}/narrativeqa_test.json.bz2",
         }
     },
 
@@ -48,8 +50,8 @@ The NarrativeQA Reading Comprehension Challenge Dataset consists of user-submitt
 The HotpotQA dataset consists of questions from crowd workers which require information from multiple Wikipedia articles in order to answer, thus testing the ability for models to perform multi-hop question answering. The data is commonly presented as a list of paragraphs containing relevant information plus a setting where the addition of 'distractor paragraphs' fully test the ability of the model to comprehend which information is relevant to the question asked. To transform this into a long document, we expand each paragraph with its full Wikipedia page as well as adding additional distractor articles
 from similar topics (randomly chosen from links on the existing pages) in order to meet the 10,000 token minimum length requirement for this benchmark. These articles are shuffled and concatenated to form the model input.""",
         "urls": {
-            datasets.Split.TRAIN: "https://drive.google.com/uc?export=download&id=1OlGRyCEL9JhwIQIKViaWIXCOB_pwj8xU",
-            datasets.Split.VALIDATION: "https://drive.google.com/uc?export=download&id=1_Svtg6PycBpezDYJ78zcJqLa8Ohnk6Gq"
+            datasets.Split.TRAIN: f"{_REPO}/hotpotqa_train.json.bz2",
+            datasets.Split.VALIDATION: f"{_REPO}/hotpotqa_validation.json.bz2"
         }
     },
 
@@ -57,9 +59,9 @@ from similar topics (randomly chosen from links on the existing pages) in order
         "description": """\
 The Character Archetype Classification dataset is based on the methodology of Skowron et al. (2016). For this dataset, each example consists of a movie script along with a named character and the task is to classify whether the character is a Hero/Protagonist or Villain/Antagonist based on understanding their role in the narrative.""",
         "urls": {
-            datasets.Split.TRAIN: "https://drive.google.com/uc?export=download&id=1Ckabmzbrunj2np2piAN5_ooZgTiK9K5i",
-            datasets.Split.VALIDATION: "https://drive.google.com/uc?export=download&id=1I0N8gKD39s0wKLrcAJ0P-4uYdPqzTONS",
-            datasets.Split.TEST: "https://drive.google.com/uc?export=download&id=1_AI6whuHfD1p3BF7TvOnr8Fs_lOVdt8j",
+            datasets.Split.TRAIN: f"{_REPO}/character_id_train.json.bz2",
+            datasets.Split.VALIDATION: f"{_REPO}/character_id_validation.json.bz2",
+            datasets.Split.TEST: f"{_REPO}/character_id_test.json.bz2",
         }
     },
 
@@ -74,8 +76,8 @@ The Open Subtitles corpus (Lison et al., 2018) consists of aligned subtitles
             year={2018}
         }""",
         "urls": {
-            datasets.Split.TRAIN: "https://drive.google.com/uc?export=download&id=10QF5kL6nvWC4kHDieKx79K36RLdW1M1r&confirm=yTib",
-            datasets.Split.TEST: "https://drive.google.com/uc?export=download&id=1KWPLYv2_7z_XIBWrWC3khXTNdPKhDF_X"
+            datasets.Split.TRAIN: f"{_REPO}/opensubtitles_train.json.bz2",
+            datasets.Split.TEST: f"{_REPO}/opensubtitles_test.json.bz2"
         }
     },
 
@@ -84,9 +86,9 @@ The Open Subtitles corpus (Lison et al., 2018) consists of aligned subtitles
 Style change detection is the task of identifying the points where the author changes in a document constructed from the work of multiple authors. We use stories contributed to the fanfiction website Archive of Our Own, which contains a large number of works submitted by fans of popular films, tv, game, and book characters.
 """,
         "urls": {
-            datasets.Split.TRAIN: "https://drive.google.com/uc?export=download&id=1R29IQ_bFLw3_6DYLtP7YWFTGe7FQAevT",
-            datasets.Split.VALIDATION: "https://drive.google.com/uc?export=download&id=1B_RkTaMMOQXfJ7nDFCpq8GAth7yiW7vF",
-            datasets.Split.TEST: "https://drive.google.com/uc?export=download&id=1-1eULJlV9nGrAwpdaEr5Ykchwfxn06kj"
+            datasets.Split.TRAIN: f"{_REPO}/style_change_train.json.bz2",
+            datasets.Split.VALIDATION: f"{_REPO}/style_change_validation.json.bz2",
+            datasets.Split.TEST: f"{_REPO}/style_change_test.json.bz2"
         }
     },
 
@@ -95,7 +97,7 @@ Style change detection is the task of identifying the points where the author ch
 We follow the process of the Scientific papers (Cohan et al.,2018) summarization dataset, extracting papers from the open-access preprint server Arxiv.org using both the arxiv short abstract and the one included in the document (where available) as the reference summaries. In contrast to Cohan et al.
 (2018), rather than removing very long documents, we explicitly include them - removing any document with less than 10,000 tokens.""",
         "urls": {
-            datasets.Split.TEST: "https://drive.google.com/uc?export=download&id=1ljTZZV5MpD07my2Vn1SVT3eQPKMVlHU5"
+            datasets.Split.TEST: f"{_REPO}/vlsp_test.json.bz2"
        }
     }
 }
@@ -156,4 +158,4 @@ class Muld(datasets.GeneratorBasedBuilder):
                     row["metadata"] = ""
                 if not isinstance(row["output"], list):
                     row["output"] = [row["output"]]
-                yield idx, row
+                yield idx, row
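
The diff only touches the URL table and the tail of _generate_examples; the _split_generators plumbing is not shown. As rough orientation, here is a minimal sketch, under that assumption, of how a per-split "urls" dict like the one above is typically wired into a `datasets` loading script. `make_split_generators` is a hypothetical helper, not part of muld.py, and the one-task _TASK_CONFIGS simply mirrors the NarrativeQA entry added in this commit so the snippet is self-contained.

# Sketch only: _split_generators is not part of this diff.
import datasets

_REPO = "https://huggingface.co/datasets/ghomasHudson/muld/resolve/main/data"

_TASK_CONFIGS = {
    "NarrativeQA": {
        "urls": {
            datasets.Split.TRAIN: f"{_REPO}/narrativeqa_train.json.bz2",
            datasets.Split.VALIDATION: f"{_REPO}/narrativeqa_validation.json.bz2",
            datasets.Split.TEST: f"{_REPO}/narrativeqa_test.json.bz2",
        }
    }
}


def make_split_generators(dl_manager, task_name):
    """Build datasets.SplitGenerator objects for one MuLD task (hypothetical helper)."""
    urls = _TASK_CONFIGS[task_name]["urls"]
    # download_and_extract resolves each Hub URL to a local file path
    # (decompressing the .bz2 archives along the way).
    paths = dl_manager.download_and_extract(urls)
    return [
        datasets.SplitGenerator(name=split, gen_kwargs={"filepath": path})
        for split, path in paths.items()
    ]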
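
Judging by the *.json.bz2 filenames, each split file is bz2-compressed JSON lines, and the last hunk shows the row normalization _generate_examples applies before yielding. A standalone sketch of reading one downloaded file under those assumptions; `read_muld_rows` is a hypothetical helper, not part of muld.py.

import bz2
import json


def read_muld_rows(filepath):
    """Yield (idx, row) pairs from one MuLD split file (assumed bz2 JSON-lines)."""
    with bz2.open(filepath, "rt", encoding="utf-8") as f:
        for idx, line in enumerate(f):
            row = json.loads(line)
            row.setdefault("metadata", "")           # default "" as in the diff
            if not isinstance(row["output"], list):
                row["output"] = [row["output"]]      # outputs are always a list
            yield idx, row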
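
For end users, the practical effect is that the loader now pulls everything from the Hub repo instead of Google Drive. A hedged usage sketch: the config name is taken from the _TASK_CONFIGS keys, but whether the builder exposes exactly those names is not visible in this diff, and the "output" field is the one seen in _generate_examples.

from datasets import load_dataset

# Assumes one builder config per _TASK_CONFIGS key, e.g. "NarrativeQA".
narrativeqa = load_dataset("ghomasHudson/muld", "NarrativeQA")
print(narrativeqa["validation"][0]["output"])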