|
"""Script used to filter malformed examples from the original SCAT corpus. |
|
|
|
To run, copy original SCAT files from https://github.com/neulab/contextual-mt/tree/master/data/scat under the same |
|
path of the script. Filtered files will be created in the filtered_scat folder. |
|
|
|
Uncomment lines to save dropped malformed sentences into separate files for inspection. |
|
""" |
|
|
|
import re
from pathlib import Path


def drop_malformed_tags(
    split: str,
    save_folder: str = "filtered_scat",
):
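    # Tag markup in the SCAT files: <p>...</p> encloses the highlighted pronoun, <hon> ... <hoff> encloses
    # supporting context words. The three patterns below match, in order: any single tag, a <hon>...<hoff>
    # span whose only content is one <p>...</p> highlight, and a <p>...</p> highlight glued to neighbouring
    # characters inside a single whitespace-delimited token.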
    find_tag_pattern = r"(<hon>|<\/?p>|<hoff>)"
    nested_uninformative_pattern = r"(<hon>\W*(<p>[^<]*</p>)\W*<hoff>)"
    embedded_tag = r"\s(\S+<p>[^<]*</p>\S*|\S*<p>[^<]*</p>\S+)\s"

with open(f"highlighted.{split}.context.en") as f: |
|
orig_ctx_en = f.readlines() |
|
with open(f"highlighted.{split}.context.fr") as f: |
|
orig_ctx_fr = f.readlines() |
|
with open(f"highlighted.{split}.en") as f: |
|
orig_tgt_en = f.readlines() |
|
with open(f"highlighted.{split}.fr") as f: |
|
orig_tgt_fr = f.readlines() |
|
|
|
print("# of context examples: EN -", len(orig_ctx_en), "FR -", len(orig_ctx_fr)) |
|
print("# of target examples: EN -", len(orig_tgt_en), "FR -", len(orig_tgt_fr)) |
|
|
|
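    # Sentences from well-formed examples are collected in ctx_*/tgt_*; dropped examples go to drop_ctx_*/drop_tgt_*.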
    ctx_en = []
    ctx_fr = []
    tgt_en = []
    tgt_fr = []

    drop_ctx_en = []
    drop_ctx_fr = []
    drop_tgt_en = []
    drop_tgt_fr = []

    for ex_idx in range(len(orig_ctx_en)):
        drop = False
        # txt_list order: [0] context EN, [1] target EN, [2] context FR, [3] target FR.
        txt_list = [orig_ctx_en[ex_idx], orig_tgt_en[ex_idx], orig_ctx_fr[ex_idx], orig_tgt_fr[ex_idx]]

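        # Detach <p> tags that are fused to neighbouring characters (e.g. "qu'<p>elle</p>") by removing the tags.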
        for i in range(len(txt_list)):
            for embedded_tag_match in re.findall(embedded_tag, txt_list[i]):
                removed_tag = embedded_tag_match.replace("<p>", "").replace("</p>", "")
                txt_list[i] = txt_list[i].replace(embedded_tag_match, removed_tag, 1)

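        # Keep the example only if both target sentences contain a <p>...</p> highlight and neither context does.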
        if not (
            "<p>" in txt_list[1] and "</p>" in txt_list[1] and "<p>" in txt_list[3] and "</p>" in txt_list[3] and
            "<p>" not in txt_list[0] and "</p>" not in txt_list[0] and "<p>" not in txt_list[2] and "</p>" not in txt_list[2]
        ):
            drop = True

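        # Reduce <hon>...<hoff> spans that contain nothing but a single <p>...</p> highlight to the inner tag only.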
        for i in range(len(txt_list)):
            for uninformative_match, nested_tag in re.findall(nested_uninformative_pattern, txt_list[i]):
                txt_list[i] = txt_list[i].replace(uninformative_match, nested_tag, 1)
        txt = " ".join(txt_list)

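        # Record (tag, start, end) for every tag occurrence in the space-joined example.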
        matches = [(m.group(0),) + m.span() for m in re.finditer(find_tag_pattern, txt)]

        if not drop:
            if len(matches) > 0 and len(matches) % 2 == 0:
                for match_idx in range(0, len(matches), 2):
                    # Drop unless the pair is a matching <hon>...<hoff> or <p>...</p> pair enclosing some content.
                    if not (
                        (
                            (matches[match_idx][0] == "<hon>" and matches[match_idx+1][0] == "<hoff>") or
                            (matches[match_idx][0] == "<p>" and matches[match_idx+1][0] == "</p>")
                        )
                        and matches[match_idx][2] < matches[match_idx+1][1]
                    ):
                        drop = True
                        break
            else:
                drop = True

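        # Strip zero-width spaces (U+200B) and left-to-right marks (U+200E) before storing the cleaned sentences.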
        if not drop:
            ctx_en.append(re.sub(r"\u200e", "", re.sub(r"\u200b", "", txt_list[0].strip())) + "\n")
            ctx_fr.append(re.sub(r"\u200e", "", re.sub(r"\u200b", "", txt_list[2].strip())) + "\n")
            tgt_en.append(re.sub(r"\u200e", "", re.sub(r"\u200b", "", txt_list[1].strip())) + "\n")
            tgt_fr.append(re.sub(r"\u200e", "", re.sub(r"\u200b", "", txt_list[3].strip())) + "\n")
        else:
            drop_ctx_en.append(re.sub(r"\u200e", "", re.sub(r"\u200b", "", txt_list[0].strip())) + "\n")
            drop_ctx_fr.append(re.sub(r"\u200e", "", re.sub(r"\u200b", "", txt_list[2].strip())) + "\n")
            drop_tgt_en.append(re.sub(r"\u200e", "", re.sub(r"\u200b", "", txt_list[1].strip())) + "\n")
            drop_tgt_fr.append(re.sub(r"\u200e", "", re.sub(r"\u200b", "", txt_list[3].strip())) + "\n")

print("# of dropped examples:", len(orig_ctx_en) - len(ctx_en)) |
|
print("# of filtered examples:", len(ctx_en)) |
|
|
|
    save_folder = Path(save_folder)
    save_folder.mkdir(parents=True, exist_ok=True)
    with open(save_folder / f"filtered.{split}.context.en", "w") as f:
        f.writelines(ctx_en)
    with open(save_folder / f"filtered.{split}.context.fr", "w") as f:
        f.writelines(ctx_fr)
    with open(save_folder / f"filtered.{split}.en", "w") as f:
        f.writelines(tgt_en)
    with open(save_folder / f"filtered.{split}.fr", "w") as f:
        f.writelines(tgt_fr)

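    # Dropped malformed examples are saved alongside the filtered files for manual inspection.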
    with open(save_folder / f"dropped.{split}.context.en", "w") as f:
        f.writelines(drop_ctx_en)
    with open(save_folder / f"dropped.{split}.context.fr", "w") as f:
        f.writelines(drop_ctx_fr)
    with open(save_folder / f"dropped.{split}.en", "w") as f:
        f.writelines(drop_tgt_en)
    with open(save_folder / f"dropped.{split}.fr", "w") as f:
        f.writelines(drop_tgt_fr)

    print(f"Files written to the {save_folder} folder")


if __name__ == "__main__":
    drop_malformed_tags("train")
    drop_malformed_tags("valid")
    drop_malformed_tags("test")