Datasets: GEM / opusparcus

Tasks: Other
Multilinguality: unknown
Size Categories: unknown
Language Creators: unknown
Annotations Creators: expert-created
Source Datasets: original
License: CC-BY-NC
Commit bba20ca by mathiascreutz (1 parent: ee6c327)

Data loader complete and documented

Files changed (1):
  1. opusparcus.py +65 -24
opusparcus.py CHANGED
@@ -1,5 +1,6 @@
 # coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+# Copyright 2021 The HuggingFace Datasets Authors and
+# the current dataset script contributor (Mathias Creutz).
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""TODO: Add a description here."""
+"""Data loader for the Opusparcus paraphrase corpus."""
 
 import csv
 import json
@@ -24,22 +25,36 @@ import bz2
 # Add BibTeX citation
 
 _CITATION = """\
-
-@InProceedings{huggingface:dataset,
-title = {A great new dataset},
-author={huggingface, Inc.
-},
-year={2020}
+@InProceedings{creutz:lrec2018,
+title = {Open Subtitles Paraphrase Corpus for Six Languages},
+author={Mathias Creutz},
+booktitle={Proceedings of the 11th edition of the Language Resources
+and Evaluation Conference (LREC 2018)},
+year={2018},
+month = {May 7-12},
+address = {Miyazaki, Japan},
+editor = {Nicoletta Calzolari (Conference chair) and Khalid Choukri
+and Christopher Cieri and Thierry Declerck and Sara Goggi and Koiti
+Hasida and Hitoshi Isahara and Bente Maegaard and Joseph Mariani and
+Hélène Mazo and Asuncion Moreno and Jan Odijk and Stelios Piperidis
+and Takenobu Tokunaga},
+publisher = {European Language Resources Association (ELRA)},
+isbn = {979-10-95546-00-9},
+language = {english},
+url={http://www.lrec-conf.org/proceedings/lrec2018/pdf/131.pdf}
 }
 """
 
 _DESCRIPTION = """\
-Test adding a dataset with challenge set to GEM benchmark .
+Opusparcus is a paraphrase corpus for six European languages: German,
+English, Finnish, French, Russian, and Swedish. The paraphrases are
+extracted from the OpenSubtitles2016 corpus, which contains subtitles
+from movies and TV shows.
 """
 
-_HOMEPAGE = ""
+_HOMEPAGE = "http://urn.fi/urn:nbn:fi:lb-2018021221"
 
-_LICENSE = ""
+_LICENSE = "CC-BY-NC"
 
 # The HuggingFace dataset library doesn't host the datasets but only
 # points to the original files. This can be an arbitrary nested
@@ -61,9 +76,10 @@ class OpusparcusConfig(datasets.BuilderConfig):
     def __init__(self, lang=None, quality=100, **kwargs):
         """BuilderConfig for Wikipedia.
         Args:
-            language: string, the language code for the Wikipedia dump to use.
-            date: string, date of the Wikipedia dump in YYYYMMDD format. A list of
-                available dates can be found at https://dumps.wikimedia.org/enwiki/.
+            lang: string, two letter language code:
+                de, en, fi, fr, ru, sv
+            quality: int, filter training set according to quality:
+                [ 60, 65, 70, 75, 80, 85, 90, 95, 100]
             **kwargs: keyword arguments forwarded to super.
         """
         super(OpusparcusConfig, self).__init__(
@@ -94,19 +110,44 @@ QUALITIES = [ 100, 95, 90, 85, 80, 75, 70, 65, 60 ]
 
 class Opusparcus(datasets.GeneratorBasedBuilder):
 
-    """TODO: Short description of my dataset."""
+    """Opusparcus is a paraphrase corpus for six European languages:
+    German, English, Finnish, French, Russian, and Swedish. The
+    paraphrases are extracted from the OpenSubtitles2016 corpus, which
+    contains subtitles from movies and TV shows.
+
+    The data in Opusparcus has been extracted from OpenSubtitles2016
+    (http://opus.nlpl.eu/OpenSubtitles2016.php), which is in turn
+    based on data from http://www.opensubtitles.org/.
+
+    For each target language, the Opusparcus data have been
+    partitioned into three types of data sets: training, validation
+    and test sets. The training sets are large, consisting of millions
+    of sentence pairs, and have been compiled automatically, with the
+    help of probabilistic ranking functions. The development and test
+    sets consist of sentence pairs that have been annotated manually;
+    each set contains approximately 1000 sentence pairs that have been
+    verified to be acceptable paraphrases by two independent
+    annotators.
+    """
 
-    # This is an example of a dataset with multiple configurations.
-    # If you don't want/need to define several sub-sets in your dataset,
-    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-    # If you need to make complex sub-parts in the datasets with configurable options
-    # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
+    # This is a dataset with multiple configurations.
     BUILDER_CONFIG_CLASS = OpusparcusConfig
-    # You will be able to load one or the other configurations in the following list with
-    # data = datasets.load_dataset('my_dataset', 'first_domain')
-    # data = datasets.load_dataset('my_dataset', 'second_domain')
+
+    # You can load configurations as follows:
+    #   data = datasets.load_dataset('GEM/opusparcus', lang='de')
+    #   data = datasets.load_dataset('GEM/opusparcus', lang='fr', quality='75')
+    #   etc.
+    #
+    # The language parameter is compulsory, whereas the quality
+    # parameter is not (the default value being 100).
+    #
+    # The above commands can alternatively be expressed as:
+    #   data = datasets.load_dataset('GEM/opusparcus', 'de.100')
+    #   data = datasets.load_dataset('GEM/opusparcus', 'fr.75')
+
     BUILDER_CONFIGS = [
-        OpusparcusConfig(lang=lang, quality=quality, version=_VERSION) for lang in LANGS for quality in QUALITIES
+        OpusparcusConfig(lang=lang, quality=quality, version=_VERSION) \
+            for lang in LANGS for quality in QUALITIES
     ]
 
     # There is no default configuration. User always needs to specify one:
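
For quick reference, the configurations defined by this commit can be loaded as sketched below. This is a minimal usage sketch assembled from the comments in the new script: it assumes the script is published on the Hub as GEM/opusparcus, and it relies on datasets.load_dataset forwarding extra keyword arguments to OpusparcusConfig.

    import datasets

    # 'lang' is compulsory; 'quality' is optional and defaults to 100.
    data = datasets.load_dataset('GEM/opusparcus', lang='de')

    # Lower quality thresholds admit more of the automatically ranked
    # training pairs; valid values are 60, 65, ..., 95, 100.
    data = datasets.load_dataset('GEM/opusparcus', lang='fr', quality='75')

    # Alternatively, name the configuration directly as '<lang>.<quality>':
    data = datasets.load_dataset('GEM/opusparcus', 'de.100')
    data = datasets.load_dataset('GEM/opusparcus', 'fr.75')

Valid language codes are de, en, fi, fr, ru, and sv, as listed in the OpusparcusConfig docstring.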