Datasets: GEM / wiki_cat_sum

Modalities: Text
Languages: English
ronaldahmed committed
Commit c609620
1 Parent(s): 48253a6

data loader and zip files

Files changed (5):
  1. .gitattributes +3 -0
  2. animal.zip +3 -0
  3. company.zip +3 -0
  4. film.zip +3 -0
  5. wiki_cat_sum.py +166 -0
.gitattributes CHANGED
@@ -25,3 +25,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zstandard filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ company.zip filter=lfs diff=lfs merge=lfs -text
+ film.zip filter=lfs diff=lfs merge=lfs -text
+ animal.zip filter=lfs diff=lfs merge=lfs -text
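The three added rules pin each archive to Git LFS explicitly (this is the kind of line that git lfs track typically appends), even though the existing *.zip wildcard already covers them. As a rough, hypothetical illustration of which filenames these rules apply to, one could approximate the pattern matching with fnmatch; real gitattributes matching has extra rules (path components, attribute stacking), so this is only a sketch:

import fnmatch

# Patterns from .gitattributes that route matching files to Git LFS (approximation only).
lfs_patterns = ["*.zip", "company.zip", "film.zip", "animal.zip"]

for name in ["animal.zip", "company.zip", "film.zip", "wiki_cat_sum.py"]:
    tracked = any(fnmatch.fnmatch(name, pat) for pat in lfs_patterns)
    print(name, "-> LFS" if tracked else "-> regular git object")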
animal.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea645b57037b28a37189c1c3c137da96c42e7ec0f155eabd1a0852b0bf734e11
+ size 490578232
company.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:114a90827867a579b6f7594a1c9a380a03062bdeb2055edc45acce2118c711e1
+ size 868517018
film.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0473cf3ed18874809f7cd556c10e8c9ffba2b5f359a5d6c77ef1c53da2e92a9f
+ size 855640260
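Each archive is committed as a Git LFS pointer stub rather than the raw zip: a three-line text file recording the LFS spec version, the SHA-256 object id of the real archive, and its size in bytes. The helper below is a minimal, hypothetical sketch (not part of this commit) of how such a pointer stub could be read; it assumes the checkout contains the stub itself rather than the LFS-smudged archive.

from pathlib import Path

def parse_lfs_pointer(path):
    # Hypothetical helper: split each "key value" line of a Git LFS pointer file.
    fields = {}
    for line in Path(path).read_text(encoding="utf-8").splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return {
        "version": fields["version"],               # LFS spec URL
        "sha256": fields["oid"].split(":", 1)[1],   # object id of the actual archive
        "size_bytes": int(fields["size"]),          # archive size in bytes
    }

# For the animal.zip pointer above this would give
# {"version": "https://git-lfs.github.com/spec/v1",
#  "sha256": "ea645b57037b28a37189c1c3c137da96c42e7ec0f155eabd1a0852b0bf734e11",
#  "size_bytes": 490578232}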
wiki_cat_sum.py ADDED
@@ -0,0 +1,166 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """TODO: Add a description here."""
+
+
+ import csv
+ import json
+ import os
+
+ import datasets
+
+
+ # TODO: Add BibTeX citation
+ # Find for instance the citation on arxiv or on the dataset repo/website
+ _CITATION = """\
+ @inproceedings{perez2019generating,
+   title={Generating Summaries with Topic Templates and Structured Convolutional Decoders},
+   author={Perez-Beltrachini, Laura and Liu, Yang and Lapata, Mirella},
+   booktitle={Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics},
+   pages={5107--5116},
+   year={2019}
+ }
+ """
+
+ # TODO: Add description of the dataset here
+ # You can copy an official description
+ _DESCRIPTION = """\
+ This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
+ """
+
+ # TODO: Add a link to an official homepage for the dataset here
+ _HOMEPAGE = "https://datashare.ed.ac.uk/handle/10283/3368"
+
+ # TODO: Add the licence for the dataset here if you can find it
+ _LICENSE = ""
+
+ # TODO: Add link to the official dataset URLs here
+ # The HuggingFace dataset library doesn't host the datasets but only points to the original files
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+ _URLs = {
+     # 'animals': "https://datashare.ed.ac.uk/bitstream/handle/10283/3368/animal_tok_min5_L7.5k.zip",
+     "animal": "https://huggingface.co/datasets/GEM/wiki_cat_sum/animal.zip",
+     "company": "https://huggingface.co/datasets/GEM/wiki_cat_sum/company.zip",
+     "film": "https://huggingface.co/datasets/GEM/wiki_cat_sum/film.zip",
+ }
+
+
+ # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
+ class WikiCatSum(datasets.GeneratorBasedBuilder):
+     """TODO: Short description of my dataset."""
+
+     VERSION = datasets.Version("0.1.0")
+
+     # This is an example of a dataset with multiple configurations.
+     # If you don't want/need to define several sub-sets in your dataset,
+     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+
+     # If you need to make complex sub-parts in the datasets with configurable options
+     # You can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
+     # BUILDER_CONFIG_CLASS = MyBuilderConfig
+
+     # You will be able to load one or the other configuration in the following list with
+     # data = datasets.load_dataset('my_dataset', 'first_domain')
+     # data = datasets.load_dataset('my_dataset', 'second_domain')
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="animal", version=VERSION, description="Animal domain"),
+         datasets.BuilderConfig(name="company", version=VERSION, description="Company domain"),
+         datasets.BuilderConfig(name="film", version=VERSION, description="Film domain"),
+     ]
+
+     DEFAULT_CONFIG_NAME = "animal"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
+
+     def _info(self):
+         # TODO: This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
+         features = datasets.Features(
+             {
+                 "id": datasets.Value("string"),
+                 "title": datasets.Value("string"),
+                 "paragraphs": datasets.features.Sequence(
+                     datasets.Value("string")),
+                 "summary": datasets.features.Sequence(
+                     {
+                         "text": datasets.Value("string"),
+                         "topic": datasets.Value("int32"),
+                     }),
+                 "gem_id": datasets.Value("string"),  # set in _generate_examples below
+                 # These are the features of your dataset like images, labels ...
+             }
+         )
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=features,  # Here we define them above because they are different between the two configurations
+             # If there's a common (input, target) tuple from the features,
+             # specify them here. They'll be used if as_supervised=True in
+             # builder.as_dataset.
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
+         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+
+         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
+         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
+         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
+         my_urls = _URLs[self.config.name]
+         data_dir = dl_manager.download_and_extract(my_urls)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "train-%s.jsonl" % (self.config.name)),
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "test-%s.jsonl" % (self.config.name)),
+                     "split": "test",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "valid-%s.jsonl" % (self.config.name)),
+                     "split": "dev",
+                 },
+             ),
+         ]
+
+     def _generate_examples(
+         self, filepath, split  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     ):
+         """Yields examples as (key, example) tuples."""
+         # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+         # The `key` is here for legacy reason (tfds) and is not important in itself.
+
+         with open(filepath, encoding="utf-8") as f:
+             for row in f:
+                 data = json.loads(row)
+                 # Note: assumes the "id" field in the JSONL files is numeric.
+                 data["gem_id"] = "GEM-wiki_cat_sum-%s-%d" % (split, data["id"] + 1)
+                 yield data["id"], data
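With the loading script and the three archives in place, the dataset can be loaded by pointing datasets.load_dataset at the script and choosing one of the configurations. A minimal usage sketch, assuming a local checkout of this repository and a datasets version that supports loading from a local script; the values in the comments are illustrative only:

import datasets

# Load the "animal" configuration through the script added in this commit.
dataset = datasets.load_dataset("wiki_cat_sum.py", "animal")

example = dataset["train"][0]
print(example["gem_id"])                # e.g. GEM-wiki_cat_sum-train-...
print(example["title"])                 # article title
print(len(example["paragraphs"]))       # number of source paragraphs
print(example["summary"]["text"][:2])   # first two summary sentences
print(example["summary"]["topic"][:2])  # their topic ids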