Languages: English
Multilinguality: monolingual
Size Categories: 1K<n<10K
Language Creators: found
Annotations Creators: crowdsourced
Source Datasets: original
jpwahle committed on
Commit 27753d4
1 Parent(s): 69e1d00

Update etpc.py

Files changed (1):
  1. etpc.py +35 -21
etpc.py CHANGED
@@ -19,6 +19,7 @@ import os
 from typing import Any, Dict, Generator, List, Optional, Tuple, Union
 
 import datasets
+import numpy as np
 from datasets.tasks import TextClassification
 from lxml import etree
 
@@ -86,8 +87,12 @@ class ETPC(datasets.GeneratorBasedBuilder):
                     "sentence2_segment_location": datasets.Sequence(
                         datasets.Value("int32")
                     ),
-                    "sentence1_segment_text": datasets.Value("string"),
-                    "sentence2_segment_text": datasets.Value("string"),
+                    "sentence1_segment_text": datasets.Sequence(
+                        datasets.Value("string")
+                    ),
+                    "sentence2_segment_text": datasets.Sequence(
+                        datasets.Value("string")
+                    ),
                 }
             )
 
@@ -148,33 +153,43 @@ class ETPC(datasets.GeneratorBasedBuilder):
             sentence2_segment_text = root_paraphrase_types.xpath(
                 f".//pair_id[text()='{current_pair_id}']/parent::relation/s2_text/text()"
             )
-            sentence1_tokenized = row.find(".//sent1_tokenized").text.split()
-            sentence2_tokenized = row.find(".//sent2_tokenized").text.split()
 
-            sentence1_segment_location_full = [0] * len(sentence1_tokenized)
-            sentence2_segment_location_full = [0] * len(sentence2_tokenized)
+            sentence1_tokenized = row.find(".//sent1_tokenized").text.split(
+                " "
+            )
+            sentence2_tokenized = row.find(".//sent2_tokenized").text.split(
+                " "
+            )
+
+            sentence1_segment_location_full = np.zeros(
+                len(sentence1_tokenized)
+            )
+            sentence2_segment_location_full = np.zeros(
+                len(sentence2_tokenized)
+            )
 
             for (
-                sentence1_segment_location_example,
-                sentence2_segment_location_example,
+                sentence1_segment_locations,
+                sentence2_segment_locations,
                 paraphrase_type_id,
             ) in zip(
                 sentence1_segment_location,
                 sentence2_segment_location,
                 paraphrase_type_ids,
             ):
-                for (
-                    segment_location
-                ) in sentence1_segment_location_example.split(","):
-                    sentence1_segment_location_full[
-                        int(segment_location)
-                    ] = int(paraphrase_type_id)
-                for (
-                    segment_location
-                ) in sentence2_segment_location_example.split(","):
-                    sentence2_segment_location_full[
-                        int(segment_location)
-                    ] = int(paraphrase_type_id)
+                segment_locations_1 = [
+                    int(i) for i in sentence1_segment_locations.split(",")
+                ]
+                sentence1_segment_location_full[segment_locations_1] = [
+                    paraphrase_type_id
+                ] * len(segment_locations_1)
+
+                segment_locations_2 = [
+                    int(i) for i in sentence2_segment_locations.split(",")
+                ]
+                sentence2_segment_location_full[segment_locations_2] = [
+                    paraphrase_type_id
+                ] * len(segment_locations_2)
 
             yield idx, {
                 "idx": row.find(".//pair_id").text + "_" + str(idx),
@@ -192,5 +207,4 @@ class ETPC(datasets.GeneratorBasedBuilder):
                 "sentence1_segment_text": sentence1_segment_text,
                 "sentence2_segment_text": sentence2_segment_text,
             }
-
             idx += 1
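
Note on the schema hunk above: the two segment-text columns change from a single string to a sequence of strings, so each example carries one snippet per annotated segment. The following is only an illustrative sketch, not part of etpc.py; the field names come from the diff, the example values are invented, and it uses the public datasets.Features API to check that such an example matches the declared schema.

# Illustrative sketch; example values are invented.
import datasets

features = datasets.Features(
    {
        "sentence1_segment_text": datasets.Sequence(datasets.Value("string")),
        "sentence2_segment_text": datasets.Sequence(datasets.Value("string")),
    }
)

example = {
    "sentence1_segment_text": ["segment a", "segment b"],   # one entry per segment
    "sentence2_segment_text": ["segment a'", "segment b'"],
}

# encode_example validates and encodes a single example against the declared features.
encoded = features.encode_example(example)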
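
Note on the generator hunk: the removed per-token inner loops are replaced by NumPy integer-list indexing on an array created with np.zeros. Below is a minimal sketch with invented inputs (the token list, segment locations, and type ids are made up; the variable names follow the diff), showing the vectorized assignment on its own.

# Minimal sketch with invented inputs; mirrors the vectorized assignment in the diff.
import numpy as np

sentence1_tokenized = "the quick brown fox jumps".split(" ")
sentence1_segment_location = ["1,2", "4"]   # comma-separated token indices per segment
paraphrase_type_ids = [7, 11]               # one paraphrase type id per segment

# One label slot per token, initialised to 0 (no paraphrase type).
sentence1_segment_location_full = np.zeros(len(sentence1_tokenized))

for sentence1_segment_locations, paraphrase_type_id in zip(
    sentence1_segment_location, paraphrase_type_ids
):
    segment_locations_1 = [int(i) for i in sentence1_segment_locations.split(",")]
    # Integer-list indexing writes the type id to every listed position at once,
    # replacing the removed element-by-element loop.
    sentence1_segment_location_full[segment_locations_1] = [
        paraphrase_type_id
    ] * len(segment_locations_1)

print(sentence1_segment_location_full.tolist())  # [0.0, 7.0, 7.0, 0.0, 11.0]

The old code wrote the same final values one position at a time; the new form simply batches the writes per segment.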