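"""
Convert the DIAC2019 question pair files (train_set.xml, dev_set.csv and
test_set.csv) into a single JSONL file with one sentence pair per line.
"""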
import argparse
import json
import os
from pathlib import Path
import sys

pwd = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(pwd, '../../'))

from lxml import etree
import pandas as pd

from project_settings import project_path


def get_args():
    parser = argparse.ArgumentParser()

    parser.add_argument("--data_dir", default="./data/diac2019", type=str)
    parser.add_argument(
        "--output_file",
        default=(project_path / "data/diac2019.jsonl"),
        type=str
    )

    args = parser.parse_args()
    return args


def main():
    args = get_args()

    data_dir = Path(args.data_dir)

    with open(args.output_file, "w", encoding="utf-8") as f:
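        # Train split: each group in train_set.xml holds an "equal" question set
        # (first child) and a "not equal" question set (second child).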
filename = data_dir / "train_set.xml" |
|
tree = etree.parse(filename.as_posix()) |
|
root = tree.getroot() |
|
|
|
for group in root: |
|
equal = group[0] |
|
not_equal = group[1] |
|
|
|
equal_questions = [question.text for question in equal] |
|
not_equal_questions = [question.text for question in not_equal] |
|
|
|
pairs = set() |
|
|
|
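            # Pairs drawn from within the "equal" set are labeled 1 (equivalent).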
            for q1 in equal_questions:
                for q2 in equal_questions:
                    if q1 == q2:
                        continue
                    pair = (q1, q2, 1)
                    pairs.add(pair)
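            # Pairs that cross the "equal" and "not equal" sets are labeled 0.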
            for q1 in equal_questions:
                for q2 in not_equal_questions:
                    if q1 == q2:
                        continue
                    pair = (q1, q2, 0)
                    pairs.add(pair)
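            # Write one JSONL row per unique (sentence1, sentence2, label) tuple.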
            for pair in pairs:
                q1, q2, label = pair

                label = str(label)
                if label not in ("0", "1"):
                    raise AssertionError("unexpected label: {}".format(label))

                json_row = {
                    "sentence1": q1,
                    "sentence2": q2,
                    "label": label,
                    "data_source": "diac2019",
                    "split": "train"
                }

                json_row = json.dumps(json_row, ensure_ascii=False)
                f.write("{}\n".format(json_row))
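        # Validation split: dev_set.csv is tab separated. Note that the dev and
        # test rows carry a "score" field instead of the "label" field used for
        # the train rows.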
filename = data_dir / "dev_set.csv" |
|
df = pd.read_csv(filename.as_posix(), delimiter="\t") |
|
for i, row in df.iterrows(): |
|
|
|
question1 = row["question1"] |
|
question2 = row["question2"] |
|
|
|
json_row = { |
|
"sentence1": question1, |
|
"sentence2": question2, |
|
"score": "1", |
|
"data_source": "diac2019", |
|
"split": "validation" |
|
} |
|
|
|
json_row = json.dumps(json_row, ensure_ascii=False) |
|
f.write("{}\n".format(json_row)) |
|
|
|
|
|
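        # Test split: test_set.csv is tab separated; no label is available, so
        # "score" is written as None.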
filename = data_dir / "test_set.csv" |
|
df = pd.read_csv(filename.as_posix(), delimiter="\t") |
|
for i, row in df.iterrows(): |
|
|
|
question1 = row["question1"] |
|
question2 = row["question2"] |
|
|
|
json_row = { |
|
"sentence1": question1, |
|
"sentence2": question2, |
|
"score": None, |
|
"data_source": "diac2019", |
|
"split": "test" |
|
} |
|
|
|
json_row = json.dumps(json_row, ensure_ascii=False) |
|
f.write("{}\n".format(json_row)) |
|
|
|
return |
|
|
|
|
|
if __name__ == '__main__': |
|
main() |
|
|