update

- JGLUE.py +210 -190
- tests/JGLUE_test.py +2 -1

JGLUE.py CHANGED
@@ -1,8 +1,7 @@
 import json
 import random
 import string
-from
-from typing import Dict, List, Optional, Union
+from typing import Dict, List, Optional, TypedDict, Union

 import datasets as ds
 import pandas as pd

@@ -54,8 +53,12 @@ _DESCRIPTION_CONFIGS = {
 _URLS = {
     "MARC-ja": {
         "data": "https://s3.amazonaws.com/amazon-reviews-pds/tsv/amazon_reviews_multilingual_JP_v1_00.tsv.gz",
-        "filter_review_id_list
-
+        "filter_review_id_list": {
+            "valid": "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/preprocess/marc-ja/data/filter_review_id_list/valid.txt"
+        },
+        "label_conv_review_id_list": {
+            "valid": "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/preprocess/marc-ja/data/label_conv_review_id_list/valid.txt"
+        },
     },
     "JSTS": {
         "train": "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/datasets/jsts-v1.1/train-v1.1.json",

@@ -141,7 +144,15 @@ def features_jcommonsenseqa() -> ds.Features:


 def features_marc_ja() -> ds.Features:
-    features = ds.Features(
+    features = ds.Features(
+        {
+            "sentence": ds.Value("string"),
+            "label": ds.ClassLabel(
+                num_classes=3, names=["positive", "negative", "neutral"]
+            ),
+            "review_id": ds.Value("string"),
+        }
+    )
     return features


@@ -151,16 +162,14 @@ class MarcJaConfig(ds.BuilderConfig):
         name: str = "MARC-ja",
         is_han_to_zen: bool = False,
         max_instance_num: Optional[int] = None,
-        max_char_length:
-        is_pos_neg: bool =
+        max_char_length: int = 500,
+        is_pos_neg: bool = True,
         train_ratio: float = 0.94,
         val_ratio: float = 0.03,
         test_ratio: float = 0.03,
         output_testset: bool = False,
-        filter_review_id_list_valid:
-
-        label_conv_review_id_list_valid: Optional[str] = None,
-        label_conv_review_id_list_test: Optional[str] = None,
+        filter_review_id_list_valid: bool = True,
+        label_conv_review_id_list_valid: bool = True,
         version: Optional[Union[ds.utils.Version, str]] = ds.utils.Version("0.0.0"),
         data_dir: Optional[str] = None,
         data_files: Optional[ds.data_files.DataFilesDict] = None,

@@ -184,20 +193,143 @@ class MarcJaConfig(ds.BuilderConfig):
         self.max_char_length = max_char_length
         self.is_pos_neg = is_pos_neg
         self.output_testset = output_testset
+
         self.filter_review_id_list_valid = filter_review_id_list_valid
-        self.filter_review_id_list_test = filter_review_id_list_test
         self.label_conv_review_id_list_valid = label_conv_review_id_list_valid
-
+
+
+def get_label(rating: int, is_pos_neg: bool = False) -> Optional[str]:
+    if rating >= 4:
+        return "positive"
+    elif rating <= 2:
+        return "negative"
+    else:
+        if is_pos_neg:
+            return None
+        else:
+            return "neutral"
+
+
+def is_filtered_by_ascii_rate(text: str, threshold: float = 0.9) -> bool:
+    ascii_letters = set(string.printable)
+    rate = sum(c in ascii_letters for c in text) / len(text)
+    return rate >= threshold
+
+
+def shuffle_dataframe(df: pd.DataFrame) -> pd.DataFrame:
+    instances = df.to_dict(orient="records")
+    random.seed(1)
+    random.shuffle(instances)
+    return pd.DataFrame(instances)
+
+
+def get_filter_review_id_list(
+    filter_review_id_list_paths: Dict[str, str],
+) -> Dict[str, List[str]]:
+    filter_review_id_list_valid = filter_review_id_list_paths.get("valid")
+    filter_review_id_list_test = filter_review_id_list_paths.get("test")
+
+    filter_review_id_list = {}
+
+    if filter_review_id_list_valid is not None:
+        with open(filter_review_id_list_valid, "r") as rf:
+            filter_review_id_list["valid"] = [line.rstrip() for line in rf]
+
+    if filter_review_id_list_test is not None:
+        with open(filter_review_id_list_test, "r") as rf:
+            filter_review_id_list["test"] = [line.rstrip() for line in rf]
+
+    return filter_review_id_list
+
+
+def get_label_conv_review_id_list(
+    label_conv_review_id_list_paths: Dict[str, str],
+) -> Dict[str, Dict[str, str]]:
+    import csv
+
+    label_conv_review_id_list_valid = label_conv_review_id_list_paths.get("valid")
+    label_conv_review_id_list_test = label_conv_review_id_list_paths.get("test")
+
+    label_conv_review_id_list: Dict[str, Dict[str, str]] = {}
+
+    if label_conv_review_id_list_valid is not None:
+        with open(label_conv_review_id_list_valid, "r") as rf:
+            label_conv_review_id_list["valid"] = {
+                row[0]: row[1] for row in csv.reader(rf)
+            }
+
+    if label_conv_review_id_list_test is not None:
+        with open(label_conv_review_id_list_test, "r") as rf:
+            label_conv_review_id_list["test"] = {
+                row[0]: row[1] for row in csv.reader(rf)
+            }
+
+    return label_conv_review_id_list
+
+
+def output_data(
+    df: pd.DataFrame,
+    train_ratio: float,
+    val_ratio: float,
+    test_ratio: float,
+    output_testset: bool,
+    filter_review_id_list_paths: Dict[str, str],
+    label_conv_review_id_list_paths: Dict[str, str],
+) -> Dict[str, pd.DataFrame]:
+    instance_num = len(df)
+    split_dfs: Dict[str, pd.DataFrame] = {}
+    length1 = int(instance_num * train_ratio)
+    split_dfs["train"] = df.iloc[:length1]
+
+    length2 = int(instance_num * (train_ratio + val_ratio))
+    split_dfs["valid"] = df.iloc[length1:length2]
+    split_dfs["test"] = df.iloc[length2:]
+
+    filter_review_id_list = get_filter_review_id_list(
+        filter_review_id_list_paths=filter_review_id_list_paths,
+    )
+    label_conv_review_id_list = get_label_conv_review_id_list(
+        label_conv_review_id_list_paths=label_conv_review_id_list_paths,
+    )
+
+    for eval_type in ("valid", "test"):
+        if filter_review_id_list.get(eval_type):
+            df = split_dfs[eval_type]
+            df = df[~df["review_id"].isin(filter_review_id_list[eval_type])]
+            split_dfs[eval_type] = df
+
+    for eval_type in ("valid", "test"):
+        if label_conv_review_id_list.get(eval_type):
+            df = split_dfs[eval_type]
+            df = df.assign(
+                converted_label=df["review_id"].map(label_conv_review_id_list["valid"])
+            )
+            df = df.assign(
+                label=df[["label", "converted_label"]].apply(
+                    lambda xs: xs["label"]
+                    if pd.isnull(xs["converted_label"])
+                    else xs["converted_label"],
+                    axis=1,
+                )
+            )
+            df = df.drop(columns=["converted_label"])
+            split_dfs[eval_type] = df
+
+    return {
+        "train": split_dfs["train"],
+        "valid": split_dfs["valid"],
+    }


 def preprocess_for_marc_ja(
     config: MarcJaConfig,
     data_file_path: str,
-
-
-) -> Dict[str,
+    filter_review_id_list_paths: Dict[str, str],
+    label_conv_review_id_list_paths: Dict[str, str],
+) -> Dict[str, pd.DataFrame]:
     import mojimoji
     from bs4 import BeautifulSoup
+    from tqdm import tqdm

     df = pd.read_csv(data_file_path, delimiter="\t")
     df = df[["review_body", "star_rating", "review_id"]]

@@ -205,39 +337,28 @@ def preprocess_for_marc_ja(
     # rename columns
     df = df.rename(columns={"review_body": "text", "star_rating": "rating"})

-    def get_label(rating: int, is_pos_neg: bool = False) -> Optional[str]:
-        if rating >= 4:
-            return "positive"
-        elif rating <= 2:
-            return "negative"
-        else:
-            if is_pos_neg:
-                return None
-            else:
-                return "neutral"
-
     # convert the rating to label
+    tqdm.pandas(dynamic_ncols=True, desc="Convert the rating to the label")
     df = df.assign(
-        label=df["rating"].
+        label=df["rating"].progress_apply(
+            lambda rating: get_label(rating, config.is_pos_neg)
+        )
     )

     # remove rows where the label is None
-    df = df[df["label"].isnull()]
+    df = df[~df["label"].isnull()]

     # remove html tags from the text
+    tqdm.pandas(dynamic_ncols=True, desc="Remove html tags from the text")
     df = df.assign(
-        text=df["text"].
+        text=df["text"].progress_apply(
             lambda text: BeautifulSoup(text, "html.parser").get_text()
         )
     )

-    def is_filtered_by_ascii_rate(text: str, threshold: float = 0.9) -> bool:
-        ascii_letters = set(string.printable)
-        rate = sum(c in ascii_letters for c in text) / len(text)
-        return rate >= threshold
-
     # filter by ascii rate
-
+    tqdm.pandas(dynamic_ncols=True, desc="Filter by ascii rate")
+    df = df[~df["text"].progress_apply(is_filtered_by_ascii_rate)]

     if config.max_char_length is not None:
         df = df[df["text"].str.len() <= config.max_char_length]

@@ -249,140 +370,18 @@ def preprocess_for_marc_ja(
     df = df.rename(columns={"text": "sentence"})

     # shuffle dataset
-
-    random.seed(1)
-    random.shuffle(instances)
+    df = shuffle_dataframe(df)

-
-
-        filter_review_id_list_test: Optional[str] = None,
-    ) -> Dict[str, List[str]]:
-        filter_review_id_list = defaultdict(list)
-
-        if filter_review_id_list_valid is not None:
-            with open(filter_review_id_list_valid, "r") as rf:
-                filter_review_id_list["valid"] = [line.rstrip() for line in rf]
-
-        if filter_review_id_list_test is not None:
-            with open(filter_review_id_list_test, "r") as rf:
-                filter_review_id_list["test"] = [line.rstrip() for line in rf]
-
-        return filter_review_id_list
-
-    def get_label_conv_review_id_list(
-        label_conv_review_id_list_valid: Optional[str] = None,
-        label_conv_review_id_list_test: Optional[str] = None,
-    ) -> Dict[str, str]:
-        label_conv_review_id_list = defaultdict(list)
-
-        if label_conv_review_id_list_valid is not None:
-            breakpoint()
-            with open(label_conv_review_id_list_valid, "r") as f:
-                label_conv_review_id_list["valid"] = {
-                    row[0]: row[1] for row in csv.reader(f)
-                }
-
-        if label_conv_review_id_list_test is not None:
-            breakpoint()
-            with open(label_conv_review_id_list_test, "r") as f:
-                label_conv_review_id_list["test"] = {
-                    row[0]: row[1] for row in csv.reader(f)
-                }
-
-        return label_conv_review_id_list
-
-    def output_data(
-        instances: List[Dict[str, str]],
-        train_ratio: float,
-        val_ratio: float,
-        test_ratio: float,
-        output_testset: bool = False,
-    ) -> Dict[str, str]:
-        instance_num = len(instances)
-
-        split_instances = {}
-        length1 = int(instance_num * train_ratio)
-        split_instances["train"] = instances[:length1]
-
-        length2 = int(instance_num * (train_ratio + val_ratio))
-        split_instances["valid"] = instances[length1:length2]
-        split_instances["test"] = instances[length2:]
-
-        filter_review_id_list = get_filter_review_id_list(
-            filter_review_id_list_valid=config.filter_review_id_list_valid,
-            filter_review_id_list_test=config.filter_review_id_list_test,
-        )
-        label_conv_review_id_list = get_label_conv_review_id_list(
-            label_conv_review_id_list_valid=config.label_conv_review_id_list_valid,
-            label_conv_review_id_list_test=config.label_conv_review_id_list_test,
-        )
-
-        for eval_type in ("train", "valid", "test"):
-            if not output_testset and eval_type == "test":
-                continue
-
-            for instance in split_instances[eval_type]:
-                # filter
-                if len(filter_review_id_list) != 0:
-                    filter_flag = False
-                    for filter_eval_type in ("valid", "test"):
-                        if (
-                            eval_type == filter_eval_type
-                            and instance["review_id"]
-                            in filter_review_id_list[filter_eval_type]
-                        ):
-                            filter_flag = True
-                        if eval_type != filter_eval_type:
-                            if filter_eval_type in filter_review_id_list:
-                                assert (
-                                    instance["review_id"]
-                                    not in filter_review_id_list[filter_eval_type]
-                                )
-
-                if filter_flag is True:
-                    continue
-
-                # convert labels
-                if len(label_conv_review_id_list) != 0:
-                    for conv_eval_type in ("valid", "test"):
-                        if (
-                            eval_type == conv_eval_type
-                            and instance["review_id"]
-                            in label_conv_review_id_list[conv_eval_type]
-                        ):
-                            assert (
-                                instance["label"]
-                                != label_conv_review_id_list[conv_eval_type][
-                                    instance["review_id"]
-                                ]
-                            )
-                            # update
-                            instance["label"] = label_conv_review_id_list[
-                                conv_eval_type
-                            ][instance["review_id"]]
-
-                            if eval_type != conv_eval_type:
-                                if conv_eval_type in label_conv_review_id_list:
-                                    assert (
-                                        instance["review_id"]
-                                        not in label_conv_review_id_list[conv_eval_type]
-                                    )
-
-            if eval_type == "test":
-                del instance["label"]
-
-        breakpoint()
-
-    breakpoint()
-
-    file_paths = output_data(
-        df,
+    split_dfs = output_data(
+        df=df,
         train_ratio=config.train_ratio,
         val_ratio=config.val_ratio,
         test_ratio=config.test_ratio,
         output_testset=config.output_testset,
+        filter_review_id_list_paths=filter_review_id_list_paths,
+        label_conv_review_id_list_paths=label_conv_review_id_list_paths,
     )
-    return
+    return split_dfs


 class JGLUE(ds.GeneratorBasedBuilder):

@@ -441,34 +440,55 @@ class JGLUE(ds.GeneratorBasedBuilder):
         file_paths = dl_manager.download_and_extract(_URLS[self.config.name])

         if self.config.name == "MARC-ja":
-
+            filter_review_id_list = file_paths["filter_review_id_list"]
+            label_conv_review_id_list = file_paths["label_conv_review_id_list"]
+
+            split_dfs = preprocess_for_marc_ja(
                 config=self.config,
                 data_file_path=file_paths["data"],
-
-
-                ],
-                label_conv_review_id_list_path=file_paths[
-                    "label_conv_review_id_list/valid.txt"
-                ],
+                filter_review_id_list_paths=filter_review_id_list,
+                label_conv_review_id_list_paths=label_conv_review_id_list,
             )
+            return [
+                ds.SplitGenerator(
+                    name=ds.Split.TRAIN,
+                    gen_kwargs={"split_df": split_dfs["train"]},
+                ),
+                ds.SplitGenerator(
+                    name=ds.Split.VALIDATION,
+                    gen_kwargs={"split_df": split_dfs["valid"]},
+                ),
+            ]
+        else:
+            return [
+                ds.SplitGenerator(
+                    name=ds.Split.TRAIN,
+                    gen_kwargs={"file_path": file_paths["train"]},
+                ),
+                ds.SplitGenerator(
+                    name=ds.Split.VALIDATION,
+                    gen_kwargs={"file_path": file_paths["valid"]},
+                ),
+            ]
+
+    def _generate_examples(
+        self,
+        file_path: Optional[str] = None,
+        split_df: Optional[pd.DataFrame] = None,
+    ):
+        if self.config.name == "MARC-ja":
+            if split_df is None:
+                raise ValueError(f"Invalid preprocessing for {self.config.name}")

-
-
-
-
-
-
-
-                ds.SplitGenerator(
-                    name=ds.Split.VALIDATION,
-                    gen_kwargs={
-                        "file_path": file_paths["valid"],
-                    },
-                ),
-            ]
+            instances = split_df.to_dict(orient="records")
+            for i, data_dict in enumerate(instances):
+                yield i, data_dict
+
+        else:
+            if file_path is None:
+                raise ValueError(f"Invalid argument for {self.config.name}")

-
-
-
-
-                yield i, json_dict
+            with open(file_path, "r") as rf:
+                for i, line in enumerate(rf):
+                    json_dict = json.loads(line)
+                    yield i, json_dict
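For reference, a minimal self-contained sketch of the record-based generation pattern the new _generate_examples relies on for MARC-ja; the toy dataframe below is illustrative only, not data from the actual corpus.

import pandas as pd

# Toy frame standing in for one of the split_dfs returned by output_data();
# column names mirror features_marc_ja(), the rows are made up.
split_df = pd.DataFrame(
    [
        {"sentence": "とても良い商品でした。", "label": "positive", "review_id": "R1"},
        {"sentence": "期待外れでした。", "label": "negative", "review_id": "R2"},
    ]
)

# Same pattern as the updated _generate_examples: one dict per row, keyed by index.
for i, data_dict in enumerate(split_df.to_dict(orient="records")):
    print(i, data_dict)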
tests/JGLUE_test.py CHANGED

@@ -61,7 +61,8 @@ def test_load_marc_ja(
         name=dataset_name,
         is_pos_neg=True,
         max_char_length=500,
-
+        filter_review_id_list_valid=True,
+        label_conv_review_id_list_valid=True,
     )

     assert dataset["train"].num_rows == expected_num_train
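Assuming the script is loaded locally, a sketch of how the reworked MARC-ja config would be exercised; the script path and keyword arguments below mirror tests/JGLUE_test.py and are an illustration, not part of this commit.

import datasets as ds

# Extra keyword arguments are forwarded to MarcJaConfig by load_dataset.
dataset = ds.load_dataset(
    "JGLUE.py",  # path to the dataset script in this repository
    name="MARC-ja",
    is_pos_neg=True,
    max_char_length=500,
    filter_review_id_list_valid=True,
    label_conv_review_id_list_valid=True,
)

print(dataset["train"].num_rows)
print(dataset["validation"].num_rows)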