import collections
import functools
import json
import multiprocessing
import os

import tqdm
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
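
# Pipeline: first record, for every domain, the languages it appears in;
# then split each file's questions into data/train/ and data/valid/ using
# the per-domain rules implemented in valid_train_split below.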


def extract_domains(filename):
    """Collect the set of domains in a JSON-lines file.

    The filename is returned alongside the domains so that results can be
    matched up when workers finish out of order.
    """
    domains = set()
    with open(filename) as f:
        for line in f:
            record = json.loads(line)
            domains.add(record["domain"])
    return filename, list(domains)
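
# Illustrative input line (field names as read above; the values are made up):
#   {"domain": "paris-trivia", "domain_index": 0,
#    "question": "Which river crosses Paris?", "answer": "The Seine"}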


def filter_valid(questions):
    """Drop questions whose answer has already been seen, keeping the first."""
    answers = set()
    new_questions = []
    for question in questions:
        if question["answer"] not in answers:
            new_questions.append(question)
            answers.add(question["answer"])
    return new_questions
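
# E.g. two questions that both have the answer "1945" collapse to the first
# one seen, so every answer in the resulting pool is unique.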


def format_to_valid(questions):
    """Attach TF-IDF-ranked answer candidates to every question.

    Each question gets a "candidates" list (its own answer first, then all
    other answers by decreasing similarity) and the most similar wrong
    answer as "negative_example".
    """
    answers_txt = [e["answer"] for e in questions]
    questions_txt = [e["question"] for e in questions]
    vectorizer = TfidfVectorizer()
    vectorizer.fit(answers_txt + questions_txt)
    answer_vectors = vectorizer.transform(answers_txt)
    for i, question in enumerate(questions):
        similarities = linear_kernel(answer_vectors[i], answer_vectors).flatten()
        # Exclude the self-match by index rather than by testing `sim != 1`:
        # floating-point cosine scores are not reliably equal to 1.0.
        answer_scores = [(j, sim) for j, sim in enumerate(similarities) if j != i]
        answer_scores.sort(key=lambda x: x[1], reverse=True)
        sorted_answers = [
            questions[j]["answer"]
            for j, _ in answer_scores
            if questions[j]["answer"] != question["answer"]
        ]
        assert question["answer"] not in sorted_answers
        question["candidates"] = [question["answer"]] + sorted_answers
        question["negative_example"] = sorted_answers[0]
    return questions
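
# After format_to_valid, for every question:
#   question["candidates"][0] == question["answer"]
#   question["negative_example"] == question["candidates"][1]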


def valid_train_split(filename, mapping=None):
    """Split one JSON-lines file into train and validation questions.

    `mapping` maps each domain to the languages it appears in, and lines
    are assumed to be grouped by domain. A domain is routed to validation
    only if it occurs in a single language, the validation set has not yet
    grown past 2,000 questions, all of its questions come from a single
    page, and at least 15 questions survive answer deduplication; every
    other domain goes to train.
    """
    previous_domain = ""
    train = []
    valid = []
    domain_data = {"questions": [], "pages": set()}
    with open(filename) as f:
        for line_txt in f:
            line = json.loads(line_txt)
            domain = line["domain"]
            if domain != previous_domain and previous_domain != "":
                if len(mapping[previous_domain]) > 1:
                    # Domain appears in several languages.
                    train.extend(domain_data["questions"])
                elif len(valid) > 2000:
                    # The validation set is already large enough.
                    train.extend(domain_data["questions"])
                elif len(domain_data["pages"]) > 1:
                    # Questions span more than one page.
                    train.extend(domain_data["questions"])
                elif len(domain_data["questions"]) < 15:
                    # Too few questions for a validation domain.
                    train.extend(domain_data["questions"])
                else:
                    questions = filter_valid(domain_data["questions"])
                    if len(questions) < 15:
                        train.extend(questions)
                    else:
                        valid.extend(format_to_valid(questions))
                domain_data = {"questions": [], "pages": set()}
            domain_data["questions"].append(line)
            domain_data["pages"].add(line["domain_index"])
            previous_domain = domain
    # Whatever remains after the last line (the final domain) goes to train.
    train.extend(domain_data["questions"])
    return train, valid, filename
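
# Usage sketch (the filename is illustrative):
#   train, valid, _ = valid_train_split("data/trivia.en.json", mapping=domain_count)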


if __name__ == "__main__":
    data = [f"data/{e}" for e in os.listdir("data") if e.endswith(".json")]

    # Map each domain to the languages it appears in; filenames are expected
    # to look like data/<name>.<language>.json.
    domain_count = collections.defaultdict(list)
    with multiprocessing.Pool(1) as p:
        results = p.imap_unordered(extract_domains, data)
        for filename, domains in tqdm.tqdm(results, total=len(data)):
            language = filename.split(".")[1]
            for domain in domains:
                domain_count[domain].append(language)

    # Make sure the output directories exist before writing.
    os.makedirs("data/train", exist_ok=True)
    os.makedirs("data/valid", exist_ok=True)

    with multiprocessing.Pool(os.cpu_count()) as p:
        fn = functools.partial(valid_train_split, mapping=domain_count)
        results = p.imap_unordered(fn, data)
        for train, valid, filename in tqdm.tqdm(results, total=len(data)):
            train_lines = [json.dumps(e, ensure_ascii=False) for e in train]
            valid_lines = [json.dumps(e, ensure_ascii=False) for e in valid]
            with open(filename.replace("data/", "data/train/"), "w") as f:
                f.write("\n".join(train_lines))
            with open(filename.replace("data/", "data/valid/"), "w") as f:
                f.write("\n".join(valid_lines))
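
# Output sketch (names illustrative): an input file data/trivia.en.json
# produces data/train/trivia.en.json and data/valid/trivia.en.json as
# JSON-lines, one question object per line.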