Datasets:
GEM / BiSECT

Multilinguality:
unknown
Size Categories:
unknown
Language Creators:
unknown
Annotations Creators:
none
Source Datasets:
original
License:
j-chim committed on
Commit f644b6c
1 Parent(s): 599bec2

Init commit with basic data loader

Files changed (1)
  1. bisect.py +119 -0
bisect.py ADDED
@@ -0,0 +1,119 @@
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BiSECT is a Split and Rephrase corpus created via bilingual pivoting."""

import os

import datasets


_CITATION = """\
@inproceedings{kim-etal-2021-bisect,
    title = "{B}i{SECT}: Learning to Split and Rephrase Sentences with Bitexts",
    author = "Kim, Joongwon and
      Maddela, Mounica and
      Kriz, Reno and
      Xu, Wei and
      Callison-Burch, Chris",
    booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
    month = nov,
    year = "2021",
    address = "Online and Punta Cana, Dominican Republic",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.emnlp-main.500",
    pages = "6193--6209"
}
"""

_DESCRIPTION = """\
BiSECT is a Split and Rephrase corpus created via bilingual pivoting.
"""

_HOMEPAGE = "https://github.com/mounicam/BiSECT"

_URL = "https://github.com/mounicam/BiSECT/raw/main/bisect/"

_URLs = {
    "train": {"src": _URL + "train.src.gz", "dst": _URL + "train.dst.gz"},
    "validation": {"src": _URL + "valid.src.gz", "dst": _URL + "valid.dst.gz"},
    "test": {"src": _URL + "test.src.gz", "dst": _URL + "test.dst.gz"},
}


class BiSECT(datasets.GeneratorBasedBuilder):
    """The BiSECT Split and Rephrase corpus."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="bisect",
            version=VERSION,
            description="Data described in the BiSECT paper",
        )
    ]

    DEFAULT_CONFIG_NAME = "bisect"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "gem_id": datasets.Value("string"),
                    "source_sentence": datasets.Value("string"),
                    "target_sentence": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        data_dir = dl_manager.download_and_extract(_URLs)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_dir["train"], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": data_dir["test"], "split": "test"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": data_dir["validation"], "split": "validation"},
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yields examples as (key, example) tuples."""

        source_filepath = filepath["src"]
        target_filepath = filepath["dst"]

        with open(source_filepath, encoding="utf-8") as f:
            source_lines = [line.strip() for line in f]
        with open(target_filepath, encoding="utf-8") as f:
            target_lines = [line.strip() for line in f]

        for id_ in range(len(source_lines)):
            yield id_, {
                "gem_id": f"{self.config.name}-{split}-{id_}",
                "source_sentence": source_lines[id_],
                "target_sentence": target_lines[id_],
            }
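
With the loader in place, the corpus can be pulled through the datasets library like any other Hub dataset. A minimal usage sketch follows, assuming the script is hosted under the GEM organization as GEM/BiSECT; the repository id is an assumption, not something confirmed by this commit.

from datasets import load_dataset

# Download and build the train/validation/test splits defined in _split_generators.
# "GEM/BiSECT" is an assumed repository id; adjust it to wherever bisect.py is hosted.
bisect = load_dataset("GEM/BiSECT")

# Each example carries the three string features declared in _info().
example = bisect["train"][0]
print(example["gem_id"])            # e.g. "bisect-train-0", as built in _generate_examples
print(example["source_sentence"])   # original long sentence
print(example["target_sentence"])   # split-and-rephrased version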