"""Script used to filter malformed examples from the original SCAT corpus.
To run, copy original SCAT files from https://github.com/neulab/contextual-mt/tree/master/data/scat under the same
path of the script. Filtered files will be created in the filtered_scat folder.
Uncomment lines to save dropped malformed sentences into separate files for inspection.
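
Usage (assuming this script is saved as filter_scat.py):

    python filter_scat.py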
"""
import re
from pathlib import Path


def drop_malformed_tags(
    split: str,
    save_folder: str = "filtered_scat",
):
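    # Any single tag token: <hon>, <hoff>, or an opening/closing <p> tag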
    find_tag_pattern = r"(<hon>|</?p>|<hoff>)"
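    # A <p>...</p> span wrapped in <hon>...<hoff> with only non-word characters in between,
    # capturing both the full span and the inner <p>...</p> tag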
    nested_uninformative_pattern = r"(<hon>\W*(<p>[^<]*</p>)\W*<hoff>)"
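    # A whitespace-delimited token in which <p>...</p> has extra characters glued to at least one side,
    # e.g. "wa<p>it</p>ing"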
    embedded_tag = r"\s(\S+<p>[^<]*</p>\S*|\S*<p>[^<]*</p>\S+)\s"
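    # Load the highlighted context and current-sentence files for both languages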
with open(f"highlighted.{split}.context.en") as f:
orig_ctx_en = f.readlines()
with open(f"highlighted.{split}.context.fr") as f:
orig_ctx_fr = f.readlines()
with open(f"highlighted.{split}.en") as f:
orig_tgt_en = f.readlines()
with open(f"highlighted.{split}.fr") as f:
orig_tgt_fr = f.readlines()
print("# of context examples: EN -", len(orig_ctx_en), "FR -", len(orig_ctx_fr))
print("# of target examples: EN -", len(orig_tgt_en), "FR -", len(orig_tgt_fr))
    ctx_en = []
    ctx_fr = []
    tgt_en = []
    tgt_fr = []
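    # Malformed examples dropped during filtering, saved separately for inspection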
    drop_ctx_en = []
    drop_ctx_fr = []
    drop_tgt_en = []
    drop_tgt_fr = []
    for ex_idx in range(len(orig_ctx_en)):
        drop = False
        txt_list = [orig_ctx_en[ex_idx], orig_tgt_en[ex_idx], orig_ctx_fr[ex_idx], orig_tgt_fr[ex_idx]]
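        # Indices in txt_list: 0 = EN context, 1 = EN target, 2 = FR context, 3 = FR target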
        # Drop malformed <p>...</p> tags in which random mid-word spans are tagged,
        # e.g. "I bought a picture frame for my desk, and <p>it</p>'s just s<p>it</p>ting there, wa<p>it</p>ing for his face."
        for i in range(len(txt_list)):
            for embedded_tag_match in re.findall(embedded_tag, txt_list[i]):
                removed_tag = embedded_tag_match.replace("<p>", "").replace("</p>", "")
                txt_list[i] = txt_list[i].replace(embedded_tag_match, removed_tag, 1)
        # <p>...</p> tags should only be present in the target texts, not in the contexts
        if not (
            "<p>" in txt_list[1] and "</p>" in txt_list[1]
            and "<p>" in txt_list[3] and "</p>" in txt_list[3]
            and "<p>" not in txt_list[0] and "</p>" not in txt_list[0]
            and "<p>" not in txt_list[2] and "</p>" not in txt_list[2]
        ):
            drop = True
        # Nested tags like <hon><p>it</p><hoff> are uninformative and simply mean the supporting context wasn't found
        # in the source. We replace them with the inner tag <p>it</p> so that the outer tags are dropped for the next step.
        for i in range(len(txt_list)):
            for uninformative_match, nested_tag in re.findall(nested_uninformative_pattern, txt_list[i]):
                txt_list[i] = txt_list[i].replace(uninformative_match, nested_tag, 1)
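        # Join all four texts and collect every remaining tag as a (tag, start, end) tuple, in order of appearance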
txt = " ".join(txt_list)
matches = [(m.group(0),) + m.span() for m in re.finditer(find_tag_pattern, txt)]
        if not drop:
            if len(matches) > 0 and len(matches) % 2 == 0:
                for match_idx in range(0, len(matches), 2):
                    # Each consecutive pair of tags must be a matching open/close pair enclosing a non-empty span
                    if not (
                        (
                            (matches[match_idx][0] == "<hon>" and matches[match_idx + 1][0] == "<hoff>")
                            or (matches[match_idx][0] == "<p>" and matches[match_idx + 1][0] == "</p>")
                        )
                        and matches[match_idx][2] < matches[match_idx + 1][1]
                    ):
                        drop = True
                        break
            else:
                # No tags at all, or an odd (unpaired) number of tags
                drop = True
        if not drop:
            ctx_en.append(txt_list[0].replace("\n", "").strip() + "\n")
            ctx_fr.append(txt_list[2].replace("\n", "").strip() + "\n")
            tgt_en.append(txt_list[1].replace("\n", "").strip() + "\n")
            tgt_fr.append(txt_list[3].replace("\n", "").strip() + "\n")
        else:
            drop_ctx_en.append(txt_list[0].replace("\n", "").strip() + "\n")
            drop_ctx_fr.append(txt_list[2].replace("\n", "").strip() + "\n")
            drop_tgt_en.append(txt_list[1].replace("\n", "").strip() + "\n")
            drop_tgt_fr.append(txt_list[3].replace("\n", "").strip() + "\n")
            # print("Dropped example:", txt)
print("# of dropped examples:", len(orig_ctx_en) - len(ctx_en))
print("# of filtered examples:", len(ctx_en))
save_folder = Path(save_folder)
save_folder.mkdir(parents=True, exist_ok=True)
with open(save_folder / f"filtered.{split}.context.en", "w") as f:
f.writelines(ctx_en)
with open(save_folder / f"filtered.{split}.context.fr", "w") as f:
f.writelines(ctx_fr)
with open(save_folder / f"filtered.{split}.en", "w") as f:
f.writelines(tgt_en)
with open(save_folder / f"filtered.{split}.fr", "w") as f:
f.writelines(tgt_fr)
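    # Dropped examples are written alongside the filtered files for manual inspection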
with open(save_folder / f"dropped.{split}.context.en", "w") as f:
f.writelines(drop_ctx_en)
with open(save_folder / f"dropped.{split}.context.fr", "w") as f:
f.writelines(drop_ctx_fr)
with open(save_folder / f"dropped.{split}.en", "w") as f:
f.writelines(drop_tgt_en)
with open(save_folder / f"dropped.{split}.fr", "w") as f:
f.writelines(drop_tgt_fr)
print("Files written to the filtered_scat folder")


if __name__ == "__main__":
    drop_malformed_tags("train")
    drop_malformed_tags("valid")
    drop_malformed_tags("test")