system (HF staff) committed
Commit 12a477c
1 Parent(s): 97fe970

Update files from the datasets library (from 1.16.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0

Files changed (2)
  1. README.md +1 -0
  2. paws-x.py +21 -19
README.md CHANGED
@@ -1,4 +1,5 @@
 ---
+pretty_name: "PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification"
 annotations_creators:
 - expert-generated
 - machine-generated
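
The block between the --- markers is the dataset card's YAML front matter, which Hub tooling parses as metadata; pretty_name just gives the dataset a human-readable display title. Below is a small sketch of reading that field locally; it assumes PyYAML is installed and that a copy of this README.md sits in the working directory (both are assumptions, not part of this commit).

import re

import yaml  # assumption: PyYAML is available in the environment

# Read the dataset card's YAML front matter from a local copy of README.md
# (hypothetical local path, used only for illustration).
with open("README.md", encoding="utf-8") as f:
    text = f.read()

# The front matter is the text between the opening and closing "---" lines.
match = re.match(r"^---\n(.*?)\n---", text, flags=re.DOTALL)
metadata = yaml.safe_load(match.group(1))
print(metadata["pretty_name"])
# -> PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification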
paws-x.py CHANGED
@@ -16,7 +16,6 @@
 
 
 import csv
-import os
 
 import datasets
 
@@ -118,15 +117,15 @@ class PAWSX(datasets.GeneratorBasedBuilder):
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
 
-        data_dir = dl_manager.download_and_extract(_DATA_URL)
+        archive = dl_manager.download(_DATA_URL)
 
-        _TEST_FILE_NAME = os.path.join(data_dir, f"x-final/{self.config.name}/test_2k.tsv")
-        _VAL_FILE_NAME = os.path.join(data_dir, f"x-final/{self.config.name}/dev_2k.tsv")
+        _TEST_FILE_NAME = f"x-final/{self.config.name}/test_2k.tsv"
+        _VAL_FILE_NAME = f"x-final/{self.config.name}/dev_2k.tsv"
 
         if self.config.name == "en":
-            _TRAIN_FILE_NAME = os.path.join(data_dir, f"x-final/{self.config.name}/train.tsv")
+            _TRAIN_FILE_NAME = f"x-final/{self.config.name}/train.tsv"
         else:
-            _TRAIN_FILE_NAME = os.path.join(data_dir, f"x-final/{self.config.name}/translated_train.tsv")
+            _TRAIN_FILE_NAME = f"x-final/{self.config.name}/translated_train.tsv"
 
         return [
             datasets.SplitGenerator(
@@ -134,7 +133,7 @@ class PAWSX(datasets.GeneratorBasedBuilder):
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
                     "filepath": _TRAIN_FILE_NAME,
-                    "split": datasets.Split.TRAIN,
+                    "files": dl_manager.iter_archive(archive),
                 },
             ),
             datasets.SplitGenerator(
@@ -142,7 +141,7 @@ class PAWSX(datasets.GeneratorBasedBuilder):
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
                     "filepath": _TEST_FILE_NAME,
-                    "split": datasets.Split.TEST,
+                    "files": dl_manager.iter_archive(archive),
                 },
             ),
             datasets.SplitGenerator(
@@ -150,20 +149,23 @@ class PAWSX(datasets.GeneratorBasedBuilder):
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
                     "filepath": _VAL_FILE_NAME,
-                    "split": datasets.Split.VALIDATION,
+                    "files": dl_manager.iter_archive(archive),
                 },
             ),
         ]
 
-    def _generate_examples(self, filepath, split):
+    def _generate_examples(self, filepath, files):
         """Yields examples."""
 
-        with open(filepath, encoding="utf-8") as f:
-            data = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
-            for id_, row in enumerate(data):
-                yield id_, {
-                    "id": row["id"],
-                    "sentence1": row["sentence1"],
-                    "sentence2": row["sentence2"],
-                    "label": row["label"],
-                }
+        for path, f in files:
+            if path == filepath:
+                lines = (line.decode("utf-8") for line in f)
+                data = csv.DictReader(lines, delimiter="\t", quoting=csv.QUOTE_NONE)
+                for id_, row in enumerate(data):
+                    yield id_, {
+                        "id": row["id"],
+                        "sentence1": row["sentence1"],
+                        "sentence2": row["sentence2"],
+                        "label": row["label"],
+                    }
+                break
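
The loading script now passes relative paths together with dl_manager.iter_archive(archive), which yields (path, file-object) pairs straight from the downloaded archive instead of requiring it to be extracted to disk first; this is what makes the script usable in streaming mode. Below is a minimal usage sketch, assuming datasets>=1.16.0 is installed and using the "de" config purely as an example.

from itertools import islice

from datasets import load_dataset

# Stream PAWS-X examples directly from the archive, without a full download
# and extraction step up front. "de" is one of the available language configs,
# chosen here only for illustration.
pawsx_stream = load_dataset("paws-x", "de", split="train", streaming=True)

# Each example is a dict with "id", "sentence1", "sentence2" and "label".
for example in islice(pawsx_stream, 3):
    print(example["id"], example["label"], example["sentence1"])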