jpwahle committed
Commit: f1ee938
Parent: 72f0c4f

Update etpc.py
Files changed (1): etpc.py (+69, -44)
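After this change the dataset exposes tokenized sentences, per-relation paraphrase type lists, per-token segment locations, and segment texts. A minimal loading sketch is shown below; the repository id jpwahle/etpc and the single train split are assumptions here, not part of the commit, and newer versions of datasets may additionally require trust_remote_code=True for script-based datasets.

from datasets import load_dataset

# Assumed repository id for the loading script; adjust if hosted elsewhere.
etpc = load_dataset("jpwahle/etpc", split="train")

example = etpc[0]
print(example["sentence1_tokenized"])         # list of tokens
print(example["paraphrase_type_ids"])         # one type id (string) per annotated relation
print(example["sentence1_segment_location"])  # one type id (or 0) per token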
etpc.py CHANGED
@@ -65,19 +65,29 @@ class ETPC(datasets.GeneratorBasedBuilder):
                 "idx": datasets.Value("string"),
                 "sentence1": datasets.Value("string"),
                 "sentence2": datasets.Value("string"),
+                "sentence1_tokenized": datasets.Sequence(
+                    datasets.Value("string")
+                ),
+                "sentence2_tokenized": datasets.Sequence(
+                    datasets.Value("string")
+                ),
                 "etpc_label": datasets.Value("int8"),
                 "mrpc_label": datasets.Value("int8"),
                 "negation": datasets.Value("int8"),
-                "paraphrase_type": datasets.Value("string"),
-                "paraphrase_type_id": datasets.Value("int32"),
-                "sentence1_phrase_span": datasets.Sequence(
+                "paraphrase_types": datasets.Sequence(
+                    datasets.Value("string")
+                ),
+                "paraphrase_type_ids": datasets.Sequence(
+                    datasets.Value("string")
+                ),
+                "sentence1_segment_location": datasets.Sequence(
                     datasets.Value("int32")
                 ),
-                "sentence2_phrase_span": datasets.Sequence(
+                "sentence2_segment_location": datasets.Sequence(
                     datasets.Value("int32")
                 ),
-                "sentence1_phrase_text": datasets.Value("string"),
-                "sentence2_phrase_text": datasets.Value("string"),
+                "sentence1_segment_text": datasets.Value("string"),
+                "sentence2_segment_text": datasets.Value("string"),
             }
         )

@@ -116,7 +126,7 @@ class ETPC(datasets.GeneratorBasedBuilder):
         root_text_pairs = tree_text_pairs.getroot()
         root_paraphrase_types = tree_paraphrase_types.getroot()

-        idx = -1
+        idx = 0

         for row in root_text_pairs:
             current_pair_id = row.find(".//pair_id").text
@@ -126,49 +136,64 @@ class ETPC(datasets.GeneratorBasedBuilder):
             paraphrase_type_ids = root_paraphrase_types.xpath(
                 f".//pair_id[text()='{current_pair_id}']/parent::relation/type_id/text()"
             )
-            sentence1_phrase_text = root_paraphrase_types.xpath(
-                f".//pair_id[text()='{current_pair_id}']/parent::relation/s1_text/text()"
-            )
-            sentence2_phrase_text = root_paraphrase_types.xpath(
-                f".//pair_id[text()='{current_pair_id}']/parent::relation/s2_text/text()"
-            )
-            sentence1_phrase_spans = root_paraphrase_types.xpath(
+            sentence1_segment_location = root_paraphrase_types.xpath(
                 f".//pair_id[text()='{current_pair_id}']/parent::relation/s1_scope/text()"
             )
-            sentence2_phrase_spans = root_paraphrase_types.xpath(
+            sentence2_segment_location = root_paraphrase_types.xpath(
                 f".//pair_id[text()='{current_pair_id}']/parent::relation/s2_scope/text()"
             )
+            sentence1_segment_text = root_paraphrase_types.xpath(
+                f".//pair_id[text()='{current_pair_id}']/parent::relation/s1_text/text()"
+            )
+            sentence2_segment_text = root_paraphrase_types.xpath(
+                f".//pair_id[text()='{current_pair_id}']/parent::relation/s2_text/text()"
+            )
+            sentence1_tokenized = row.find(".//sent1_tokenized").text.split()
+            sentence2_tokenized = row.find(".//sent2_tokenized").text.split()
+
+            sentence1_segment_location_full = [0] * len(sentence1_tokenized)
+            sentence2_segment_location_full = [0] * len(sentence2_tokenized)
+
             for (
-                paraphrase_type,
+                sentence1_segment_location_example,
+                sentence2_segment_location_example,
                 paraphrase_type_id,
-                sentence1_phrase_span,
-                sentence2_phrase_span,
-                sentence1_phrase_text,
-                sentence2_phrase_text,
             ) in zip(
-                paraphrase_types,
+                sentence1_segment_location,
+                sentence2_segment_location,
                 paraphrase_type_ids,
-                sentence1_phrase_spans,
-                sentence2_phrase_spans,
-                sentence1_phrase_text,
-                sentence2_phrase_text,
             ):
-                idx += 1
-                yield idx, {
-                    "idx": row.find(".//pair_id").text
-                    + "_"
-                    + paraphrase_type_id
-                    + "_"
-                    + str(idx),
-                    "sentence1": row.find(".//sent1_raw").text,
-                    "sentence2": row.find(".//sent2_raw").text,
-                    "etpc_label": int(row.find(".//etpc_label").text),
-                    "mrpc_label": int(row.find(".//mrpc_label").text),
-                    "negation": int(row.find(".//negation").text),
-                    "paraphrase_type": paraphrase_type,
-                    "paraphrase_type_id": paraphrase_type_id,
-                    "sentence1_phrase_span": sentence1_phrase_span.split(","),
-                    "sentence2_phrase_span": sentence2_phrase_span.split(","),
-                    "sentence1_phrase_text": sentence1_phrase_text,
-                    "sentence2_phrase_text": sentence2_phrase_text,
-                }
+                print(paraphrase_type_id)
+                print(sentence1_segment_location_example.split(","))
+                print(sentence2_segment_location_example.split(","))
+
+                for (
+                    segment_location
+                ) in sentence1_segment_location_example.split(","):
+                    sentence1_segment_location_full[
+                        int(segment_location)
+                    ] = int(paraphrase_type_id)
+                for (
+                    segment_location
+                ) in sentence2_segment_location_example.split(","):
+                    sentence2_segment_location_full[
+                        int(segment_location)
+                    ] = int(paraphrase_type_id)
+            yield idx, {
+                "idx": row.find(".//pair_id").text + "_" + str(idx),
+                "sentence1": row.find(".//sent1_raw").text,
+                "sentence2": row.find(".//sent2_raw").text,
+                "sentence1_tokenized": sentence1_tokenized,
+                "sentence2_tokenized": sentence2_tokenized,
+                "etpc_label": int(row.find(".//etpc_label").text),
+                "mrpc_label": int(row.find(".//mrpc_label").text),
+                "negation": int(row.find(".//negation").text),
+                "paraphrase_types": paraphrase_types,
+                "paraphrase_type_ids": paraphrase_type_ids,
+                "sentence1_segment_location": sentence1_segment_location_full,
+                "sentence2_segment_location": sentence2_segment_location_full,
+                "sentence1_segment_text": sentence1_segment_text,
+                "sentence2_segment_text": sentence2_segment_text,
+            }
+
+            idx += 1
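With this change the generator yields one example per sentence pair instead of one per annotated relation, and it turns each relation's comma-separated scope into a per-token array: every token starts at 0 and is set to the paraphrase type id of any relation whose scope lists its index. A standalone sketch of that encoding follows; the tokens, indices, and type ids are invented for illustration, and it simply mirrors the loop above.

def build_segment_locations(tokens, relations):
    # Per-token labels, initialised to 0 ("no paraphrase relation").
    locations = [0] * len(tokens)
    for type_id, scope in relations:
        # Scopes are comma-separated token indices, as read from s1_scope/s2_scope.
        for token_index in scope.split(","):
            # A later relation overwrites an earlier one on shared indices,
            # matching the behaviour of the committed code.
            locations[int(token_index)] = int(type_id)
    return locations


tokens = "The quick brown fox jumps".split()
relations = [("7", "0,1,2"), ("25", "4")]  # (type_id, scope) pairs, made up
print(build_segment_locations(tokens, relations))  # [7, 7, 7, 0, 25]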