""" |
|
Adds clue/cluewsc2020 samples to xwinograd |
|
From: https://gist.github.com/jordiclive/26506ea7e897ad8270f9e793bdc285b5 |
|
""" |
|
|
|
import json |
|
|
|
import datasets |
|
import pandas as pd |
|
from datasets import load_dataset |
|
|
|
|
|
def find_pronoun(x):
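    """Return the span2 pronoun of a CLUEWSC example as [text, [start, end) indices, characters]."""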
    pronoun = x["target"]["span2_text"]
    indices = [x["target"]["span2_index"], x["target"]["span2_index"] + len(pronoun)]
    return [pronoun, indices, list(pronoun)]


def find_switch(x):
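    """Return the span1 candidate antecedent as [text, [start, end) indices, characters, label flag]."""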
    pronoun = x["target"]["span1_text"]
    indices = [x["target"]["span1_index"], x["target"]["span1_index"] + len(pronoun)]
    # Invert the dataset label: 1 -> False, anything else -> True.
    label = x["label"] != 1
    return [pronoun, indices, list(pronoun), label]


def convert_to_format(df):
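    """Convert CLUEWSC rows into the seven-column layout of the xwinograd data TSV."""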
df["pronoun"] = df.apply(find_pronoun, axis=1) |
|
|
|
df["toks"] = df["text"].apply(lambda x: list(x)) |
|
|
|
df["switch"] = df.apply(find_switch, axis=1) |
|
df.reset_index(inplace=True, drop=True) |
|
|
|
lang = [] |
|
original = [] |
|
o_text = [] |
|
sent = [] |
|
toks = [] |
|
pronoun = [] |
|
switch = [] |
|
df["pronoun_to_replace"] = df["target"].apply(lambda x: x["span2_text"]) |
|
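    # Group rows that share a sentence and pronoun: a Winograd-style item needs
    # two candidate antecedents with opposite labels.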
    for _, df_text in df.groupby(["text", "pronoun_to_replace"]):
        if len(df_text) == 1:
            continue
        df_text.reset_index(inplace=True, drop=True)
        try:
            # Keep a pair of rows whose labels disagree; skip groups without one.
            if df_text["label"][0] != df_text["label"][1]:
                df_text = df_text[:2]
            elif len(df_text) > 2 and df_text["label"][0] != df_text["label"][2]:
                df_text = df_text.iloc[[0, 2], :]
            else:
                continue
            df_text.reset_index(inplace=True, drop=True)
            # Collapse the pair into one row whose "switch" holds both candidates.
            df_new = df_text[:1].copy()
            df_new["switch"] = df_new["switch"].apply(
                lambda x: [df_text["switch"][0], df_text["switch"][1]]
            )

            lang.append("zh")
            original.append("original")
            o_text.append("?")
            sent.append(df_new.iloc[0]["text"])
            toks.append(df_new.iloc[0]["toks"])
            pronoun.append(df_new.iloc[0]["pronoun"])
            switch.append(df_new.iloc[0]["switch"])
        except (KeyError, IndexError):
            continue

    total_df = pd.DataFrame(
        {0: lang, 1: original, 2: o_text, 3: sent, 4: toks, 5: pronoun, 6: switch}
    )
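    # Length of each example's pronoun; used later to strip the extra character
    # left after the "_" placeholder for two-character pronouns.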
    count = total_df[5].apply(lambda x: len(x[0]))
    total_df[4] = total_df[4].apply(json.dumps)
    total_df[5] = total_df[5].apply(json.dumps)
    total_df[6] = total_df[6].apply(json.dumps)
    return total_df, count


def remove_at(i, s):
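    """Remove the character at index i from string s."""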
    return s[:i] + s[i + 1 :]


def remove_to(x):
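    """Drop the character that follows the "_" placeholder when the pronoun had two characters."""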
if x["count"] == 2: |
|
return remove_at(x["sentence"].index("_") + 1, x["sentence"]) |
|
else: |
|
return x["sentence"] |
|
|
|
|
|
def get_original_splits():
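    """Load the existing xwinograd test splits for every language into one DataFrame."""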
    frames = []
    for lang in ["en", "jp", "ru", "pt", "fr", "zh"]:
        df = datasets.load_dataset("xwinograd", lang)["test"].to_pandas()
        df["lang"] = lang
        frames.append(df)
    return pd.concat(frames)


def get_examples_from_clue():
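    """Build xwinograd-style zh examples from the CLUEWSC2020 train and validation splits."""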
dataset = load_dataset("clue", "cluewsc2020") |
|
df = dataset["train"].to_pandas() |
|
df_val = dataset["validation"].to_pandas() |
|
df = pd.concat([df, df_val]) |
|
new_examples, count = convert_to_format(df) |
|
new_examples.reset_index(inplace=True, drop=True) |
|
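    # Write the converted rows into the xwinograd data TSV (assumes a local copy of
    # the xwinograd dataset script) and re-read them through the dataset loader.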
    new_examples.to_csv(
        "xwinograd/data/xwinograd.tsv", sep="\t", header=None, index=False
    )
    df_post = datasets.load_dataset("xwinograd", "zh")
    df_post = df_post["test"].to_pandas()
    df_post["count"] = count
    df_post["sentence"] = df_post.apply(remove_to, axis=1)
    df_post = df_post[["sentence", "option1", "option2", "answer"]]
    return df_post


if __name__ == "__main__":
    dfx = get_original_splits()
    df_post = get_examples_from_clue()
    df_post.to_json("new_examples_updated.json", orient="split")
    df_post["lang"] = "zh"
    dfx = pd.concat([dfx, df_post])
    # drop_duplicates returns a new frame; assign it so duplicates are actually removed.
    dfx = dfx.drop_duplicates()
    dfx.reset_index(inplace=True, drop=True)
    dfx.to_json("all_examples_updated.json", orient="split")