File size: 3,934 Bytes
f6b30cb 226931f f6b30cb 226931f f6b30cb 226931f f6b30cb f0c0724 f6b30cb 226931f f6b30cb 46dad69 f0c0724 f6b30cb 226931f f6b30cb 46dad69 f0c0724 f6b30cb |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import json
import os
from pathlib import Path
import sys
pwd = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(pwd, '../../'))
from datasets import load_dataset
from lxml import etree
import pandas as pd
from tqdm import tqdm
from project_settings import project_path
def get_args():
    """Parse command-line arguments for the conversion script.

    Returns:
        argparse.Namespace with `data_dir` (input directory) and
        `output_file` (destination JSONL path).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir", default="./data/diac2019", type=str)
    parser.add_argument(
        "--output_file",
        default=(project_path / "data/diac2019.jsonl"),
        type=str,
    )
    return parser.parse_args()
def main():
    """Convert the DIAC2019 dataset to a single JSONL file.

    Reads train_set.xml (labeled question pairs), dev_set.csv (pairs assumed
    equivalent, label "1") and test_set.csv (unlabeled, label None) from
    --data_dir and writes one JSON object per line to --output_file.
    """
    args = get_args()
    data_dir = Path(args.data_dir)

    with open(args.output_file, "w", encoding="utf-8") as f:
        _convert_train_xml(data_dir / "train_set.xml", f)
        # Dev pairs carry no per-row label column; the original script
        # treats them all as paraphrases ("1").
        _convert_csv(data_dir / "dev_set.csv", split="validation", label="1", f=f)
        # Test pairs are unlabeled.
        _convert_csv(data_dir / "test_set.csv", split="test", label=None, f=f)
    return


def _write_row(f, sentence1, sentence2, label, split):
    """Serialize one sentence pair as a JSON line in the shared schema."""
    json_row = {
        "sentence1": sentence1,
        "sentence2": sentence2,
        "label": label,
        "category": None,
        "data_source": "diac2019",
        "split": split,
    }
    f.write("{}\n".format(json.dumps(json_row, ensure_ascii=False)))


def _convert_train_xml(filename, f):
    """Emit labeled train pairs from train_set.xml.

    Each <group> element holds an "equal" child (mutually equivalent
    questions) followed by a "not_equal" child. equal x equal ordered
    pairs get label "1"; equal x not_equal ordered pairs get label "0".
    Both orderings of each pair are kept deliberately (symmetry
    augmentation), matching the original behavior.
    """
    tree = etree.parse(filename.as_posix())
    root = tree.getroot()
    for group in root:
        equal = group[0]
        not_equal = group[1]
        # None texts are filtered here, so no further None checks are needed.
        equal_questions = [q.text for q in equal if q.text is not None]
        not_equal_questions = [q.text for q in not_equal if q.text is not None]

        pairs = set()
        for q1 in equal_questions:
            for q2 in equal_questions:
                if q1 != q2:
                    pairs.add((q1, q2, 1))
        for q1 in equal_questions:
            for q2 in not_equal_questions:
                if q1 != q2:
                    pairs.add((q1, q2, 0))

        for q1, q2, label in pairs:
            _write_row(f, q1, q2, str(label), "train")


def _convert_csv(filename, split, label, f):
    """Emit pairs from a tab-separated file with question1/question2 columns."""
    df = pd.read_csv(filename.as_posix(), delimiter="\t")
    for _, row in df.iterrows():
        question1 = row["question1"]
        question2 = row["question2"]
        # Fix: pandas represents missing cells as NaN, not None, so the
        # original `is None` checks never filtered incomplete rows.
        if pd.isna(question1) or pd.isna(question2):
            continue
        _write_row(f, question1, question2, label, split)
# Script entry point: run the conversion when executed directly.
if __name__ == '__main__':
    main()
|