Datasets: ronaldahmed
Commit 49ac8b1 · committed by ronaldahmed · 1 parent: bd0e72a

challenge sets

Files changed:
- animal.zip (+2 -2)
- company.zip (+2 -2)
- film.zip (+2 -2)
- wiki_cat_sum.py (+18 -6)
animal.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:dd99a2a62e77cb291972bd5b51097c4619ce37ca86dab28a2cd6cb63cfc8198e
+size 493575209
company.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:9856c544319077b62bbb6c940daa7a9e53b179cee7bb84f5d731f65735e22e33
+size 872354086
film.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:62ca469c0a6bb5e01dbbff8a1f7e34a45b7b827ffc5dd358b10e42ebf6980a37
+size 859086412
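Each of the three .zip files above is tracked with Git LFS, so the commit only rewrites the three-line pointer file: the spec version, the SHA-256 object id of the real archive, and its size in bytes. As a minimal sketch, assuming one of the archives has already been downloaded to the working directory (the local path below is hypothetical), a pointer can be checked like this:

import hashlib
import os

def verify_lfs_pointer(archive_path, expected_oid, expected_size):
    # Compare the local file against the oid/size recorded in its LFS pointer.
    if os.path.getsize(archive_path) != expected_size:
        return False
    sha = hashlib.sha256()
    with open(archive_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            sha.update(chunk)
    return sha.hexdigest() == expected_oid

# oid and size taken from the animal.zip pointer above.
print(verify_lfs_pointer(
    "animal.zip",
    "dd99a2a62e77cb291972bd5b51097c4619ce37ca86dab28a2cd6cb63cfc8198e",
    493575209,
))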
wiki_cat_sum.py CHANGED
@@ -37,14 +37,14 @@ _CITATION = """\
 # TODO: Add description of the dataset here
 # You can copy an official description
 _DESCRIPTION = """\
-
+Summarise the most important facts of a given entity in the Film, Company, and Animal domains from a cluster of related documents.
 """
 
 # TODO: Add a link to an official homepage for the dataset here
 _HOMEPAGE = "https://datashare.ed.ac.uk/handle/10283/3368"
 
 # TODO: Add the licence for the dataset here if you can find it
-_LICENSE = ""
+_LICENSE = "CC BY-SA 3.0"
 
 # TODO: Add link to the official dataset URLs here
 # The HuggingFace dataset library doesn't host the datasets but only points to the original files
@@ -63,9 +63,6 @@ class WikiCatSum(datasets.GeneratorBasedBuilder):
 
     VERSION = datasets.Version("0.1.0")
 
-    # This is an example of a dataset with multiple configurations.
-    # If you don't want/need to define several sub-sets in your dataset,
-    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
 
     # If you need to make complex sub-parts in the datasets with configurable options
     # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
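The retained comment points to custom builder configurations. As a hedged sketch of that pattern, not code from this repository, a config class inheriting from datasets.BuilderConfig could carry a per-domain attribute:

import datasets

class WikiCatSumConfig(datasets.BuilderConfig):
    # Hypothetical config class; the name and the `domain` field are assumptions.
    def __init__(self, domain=None, **kwargs):
        super().__init__(**kwargs)
        self.domain = domain  # e.g. "animal", "company", or "film"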
@@ -125,6 +122,12 @@ class WikiCatSum(datasets.GeneratorBasedBuilder):
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         my_urls = _URLs[self.config.name]
         data_dir = dl_manager.download_and_extract(my_urls)
+        challenge_sets = [
+            ("challenge_%s_nov_%s" % (split, lvl), "%s-%s_nv2_%s.jsonl" % (split, self.config.name, lvl))
+            for split in ["train", "valid", "test"] for lvl in ["low", "mid", "high"]
+        ]
+        # + ...
+
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
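The comprehension above builds nine challenge splits per configuration: three base splits times three novelty levels. Expanded standalone for illustration, with the "animal" config assumed, it yields these split-name/file pairs:

# Standalone expansion of the comprehension above; the config name is assumed.
config_name = "animal"
challenge_sets = [
    ("challenge_%s_nov_%s" % (split, lvl), "%s-%s_nv2_%s.jsonl" % (split, config_name, lvl))
    for split in ["train", "valid", "test"] for lvl in ["low", "mid", "high"]
]
for name, filename in challenge_sets:
    print(name, "->", filename)
# challenge_train_nov_low -> train-animal_nv2_low.jsonl
# ...
# challenge_test_nov_high -> test-animal_nv2_high.jsonl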
@@ -150,7 +153,16 @@ class WikiCatSum(datasets.GeneratorBasedBuilder):
                     "split": "dev",
                 },
             ),
-        ]
+        ] + [
+            datasets.SplitGenerator(
+                name=challenge_split,
+                gen_kwargs={
+                    "filepath": os.path.join(data_dir, filename),
+                    "split": challenge_split,
+                },
+            )
+            for challenge_split, filename in challenge_sets
+        ]
 
     def _generate_examples(
         self, filepath, split  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
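With the extra generators appended, each challenge set becomes a loadable named split. As a usage sketch, the repository id and config name below are assumptions inferred from this page, not confirmed by the diff:

from datasets import load_dataset

# Hypothetical repo id and config; the split name follows the pattern built above.
ds = load_dataset("ronaldahmed/wiki_cat_sum", "animal", split="challenge_test_nov_low")
print(ds[0])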
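The diff ends at the `_generate_examples` signature, so its body is not shown. Given that the challenge files are .jsonl, a minimal sketch of such a generator, under the assumption of one JSON object per line and not the file's actual implementation, would be:

import json

def _generate_examples(self, filepath, split):
    # Sketch only: yield (key, example) pairs, one per JSON line.
    with open(filepath, encoding="utf-8") as f:
        for id_, line in enumerate(f):
            yield id_, json.loads(line)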