ronaldahmed committed
Commit 49ac8b1
Parent: bd0e72a

challenge sets
Files changed (4):
  1. animal.zip (+2 -2)
  2. company.zip (+2 -2)
  3. film.zip (+2 -2)
  4. wiki_cat_sum.py (+18 -6)
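In short: the commit refreshes the three Git LFS archives (animal, company, film) and extends the loading script to register nine challenge splits per domain config, train/valid/test crossed with low/mid/high (the nov/nv2 naming suggests these grade input novelty).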
animal.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:89987c9d9dc4babbcdac10bea176cc5fa83dad5e37115a97c2864d6bf534c3e8
-size 490633812
+oid sha256:dd99a2a62e77cb291972bd5b51097c4619ce37ca86dab28a2cd6cb63cfc8198e
+size 493575209
company.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:db1dfb103d15dc3da33c47aedcd9b4f73f95a2f1186654fbbc3ac8d36cec6504
-size 868626354
+oid sha256:9856c544319077b62bbb6c940daa7a9e53b179cee7bb84f5d731f65735e22e33
+size 872354086
film.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c50bcf4978f20ffa898e4e99820d8ad2a5399f63ef94870eaada71161fe93d3e
-size 855752717
+oid sha256:62ca469c0a6bb5e01dbbff8a1f7e34a45b7b827ffc5dd358b10e42ebf6980a37
+size 859086412
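All three archives are tracked with Git LFS, so the diffs above only show the pointer files (oid and size), not the archive contents. As a minimal sketch for checking that a locally downloaded archive matches its updated pointer (local paths in the working directory are assumed), using Python's standard hashlib:

    import hashlib

    def sha256_of(path, chunk_size=1 << 20):
        """Stream a file through SHA-256 without loading it all into memory."""
        h = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                h.update(chunk)
        return h.hexdigest()

    # Expected digests come straight from the updated LFS pointers above.
    expected = {
        "animal.zip": "dd99a2a62e77cb291972bd5b51097c4619ce37ca86dab28a2cd6cb63cfc8198e",
        "company.zip": "9856c544319077b62bbb6c940daa7a9e53b179cee7bb84f5d731f65735e22e33",
        "film.zip": "62ca469c0a6bb5e01dbbff8a1f7e34a45b7b827ffc5dd358b10e42ebf6980a37",
    }
    for name, digest in expected.items():
        assert sha256_of(name) == digest, f"{name} does not match its LFS pointer"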
wiki_cat_sum.py CHANGED
@@ -37,14 +37,14 @@ _CITATION = """\
 # TODO: Add description of the dataset here
 # You can copy an official description
 _DESCRIPTION = """\
-This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
+Summarise the most important facts of a given entity in the Film, Company, and Animal domains from a cluster of related documents.
 """
 
 # TODO: Add a link to an official homepage for the dataset here
 _HOMEPAGE = "https://datashare.ed.ac.uk/handle/10283/3368"
 
 # TODO: Add the licence for the dataset here if you can find it
-_LICENSE = ""
+_LICENSE = "CC BY-SA 3.0"
 
 # TODO: Add link to the official dataset URLs here
 # The HuggingFace dataset library don't host the datasets but only point to the original files
@@ -63,9 +63,6 @@ class WikiCatSum(datasets.GeneratorBasedBuilder):
 
     VERSION = datasets.Version("0.1.0")
 
-    # This is an example of a dataset with multiple configurations.
-    # If you don't want/need to define several sub-sets in your dataset,
-    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
 
     # If you need to make complex sub-parts in the datasets with configurable options
     # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
@@ -125,6 +122,12 @@ class WikiCatSum(datasets.GeneratorBasedBuilder):
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         my_urls = _URLs[self.config.name]
         data_dir = dl_manager.download_and_extract(my_urls)
+        challenge_sets = [
+            ("challenge_%s_nov_%s" % (split, lvl), "%s-%s_nv2_%s.jsonl" % (split, self.config.name, lvl))
+            for split in ["train", "valid", "test"] for lvl in ["low", "mid", "high"]
+        ]
+        # + ...
+
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
@@ -150,7 +153,16 @@ class WikiCatSum(datasets.GeneratorBasedBuilder):
                     "split": "dev",
                 },
             ),
-        ]
+        ] + [
+            datasets.SplitGenerator(
+                name=challenge_split,
+                gen_kwargs={
+                    "filepath": os.path.join(data_dir, filename),
+                    "split": challenge_split,
+                },
+            )
+            for challenge_split, filename in challenge_sets
+        ]
 
     def _generate_examples(
         self, filepath, split  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
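For reference, the new challenge_sets comprehension expands to nine (split name, file name) pairs per config. A standalone sketch of what it produces, assuming the animal config:

    # Reproduces the comprehension from _split_generators with config name "animal".
    splits = ["train", "valid", "test"]
    levels = ["low", "mid", "high"]
    challenge_sets = [
        ("challenge_%s_nov_%s" % (split, lvl), "%s-%s_nv2_%s.jsonl" % (split, "animal", lvl))
        for split in splits for lvl in levels
    ]
    assert len(challenge_sets) == 9
    assert challenge_sets[0] == ("challenge_train_nov_low", "train-animal_nv2_low.jsonl")
    assert challenge_sets[-1] == ("challenge_test_nov_high", "test-animal_nv2_high.jsonl")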
 
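With these generators registered, each challenge set should load like any built-in split. A hedged usage sketch (the Hub dataset id GEM/wiki_cat_sum is assumed from this repository; config names follow the archives above):

    from datasets import load_dataset

    # "animal" is one of the configs keyed in _URLs; the split name comes from challenge_sets.
    ds = load_dataset("GEM/wiki_cat_sum", "animal", split="challenge_test_nov_high")
    print(ds[0])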