# analogy_questions/add_new_analogy.py
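"""Create multiple-choice analogy questions from RelBERT relational-similarity
datasets and print per-dataset statistics as a markdown table."""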
import json
import os
from itertools import combinations
from random import shuffle, seed

import pandas as pd
from datasets import load_dataset
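

# Summarise a generated JSONL file: number of questions, the distinct
# choice-list sizes, and the number of distinct relation groups ("prefix").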
def get_stats(filename):
    with open(filename) as f:
        _data = [json.loads(i) for i in f.read().splitlines()]
    return len(_data), list(set([len(i['choice']) for i in _data])), len(list(set([i['prefix'] for i in _data])))
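

# True if the two phrases share any whitespace-separated token (case-insensitive).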
def lexical_overlap(word_a, word_b):
    for a in word_a.split(" "):
        for b in word_b.split(" "):
            if a.lower() == b.lower():
                return True
    return False
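

# Build analogy questions from a relational-similarity split and write them to
# ``output_path`` as JSONL. For every pair of positive word pairs within a
# relation, one pair becomes the stem and the other the correct choice; the
# reversed correct pair plus ``negative_per_relation`` pairs sampled from every
# other relation serve as distractors.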
def create_analogy(_data, output_path, negative_per_relation, instance_per_relation=100):
    # if os.path.exists(output_path):
    #     return
    df = _data.to_pandas()
    analogy_data = []
    for _, i in df.iterrows():
        # candidate (stem, choice) pairs within the relation, skipping pairs that share a token
        target = [(q.tolist(), c.tolist()) for q, c in combinations(i['positives'], 2)
                  if not any(lexical_overlap(c[0], y) or lexical_overlap(c[1], y) for y in q)]
        if len(target) == 0:
            continue
        if len(target) > instance_per_relation:
            # cap the number of questions per relation, with a fixed seed for reproducibility
            seed(42)
            shuffle(target)
            target = target[:instance_per_relation]
        for m, (q, c) in enumerate(target):
            # sample distractor pairs from every other relation type
            negative = []
            for r in df['relation_type']:
                if r == i['relation_type']:
                    continue
                target_per_relation = [y.tolist() for y in df[df['relation_type'] == r]['positives'].values[0]]
                shuffle(target_per_relation)
                negative += target_per_relation[:negative_per_relation]
            analogy_data.append({
                "stem": q,
                "choice": [c, c[::-1]] + negative,  # the reversed correct pair is also a distractor
                "answer": 0,
                "prefix": i["relation_type"]
            })
    os.makedirs(os.path.dirname(output_path), exist_ok=True)
    with open(output_path, "w") as f:
        f.write("\n".join([json.dumps(i) for i in analogy_data]))
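

# Per-dataset statistics, printed at the end as a markdown table for the README.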
stat = []
###################################################################
# create analogy from `relbert/semeval2012_relational_similarity` #
###################################################################
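# The SemEval split already ships with explicit negatives, so questions are
# built directly rather than via create_analogy(); the gold pair is appended
# last, and "answer": 2 assumes exactly two negatives per instance.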
if not os.path.exists("dataset/semeval2012_relational_similarity/valid.jsonl"):
    data = load_dataset("relbert/semeval2012_relational_similarity", split="validation")
    analogy_data = [{
        "stem": i['positives'][0],
        "choice": i["negatives"] + [i['positives'][1]],
        "answer": 2,
        "prefix": i["relation_type"]
    } for i in data]
    os.makedirs("dataset/semeval2012_relational_similarity", exist_ok=True)
    with open("dataset/semeval2012_relational_similarity/valid.jsonl", "w") as f:
        f.write("\n".join([json.dumps(i) for i in analogy_data]))
v_size, v_num_choice, v_relation_type = get_stats("dataset/semeval2012_relational_similarity/valid.jsonl")
stat.append({
    "name": "`semeval2012_relational_similarity`",
    "Size (valid/test)": f"{v_size}/-",
    "Num of choice (valid/test)": f"{','.join([str(n) for n in v_num_choice])}/-",
    "Num of relation group (valid/test)": f"{v_relation_type}/-",
    "Original Reference": "[relbert/semeval2012_relational_similarity](https://huggingface.co/datasets/relbert/semeval2012_relational_similarity)"
})
#############################################################
# create analogy from `relbert/t_rex_relational_similarity` #
#############################################################
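# Test and validation come from different T-REX filtering configs; the test
# split takes two negative pairs per other relation, validation takes one.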
data = load_dataset("relbert/t_rex_relational_similarity", "filter_unified.min_entity_1_max_predicate_100", split="test")
create_analogy(data, "dataset/t_rex_relational_similarity/test.jsonl", negative_per_relation=2)
data = load_dataset("relbert/t_rex_relational_similarity", "filter_unified.min_entity_4_max_predicate_100", split="validation")
create_analogy(data, "dataset/t_rex_relational_similarity/valid.jsonl", negative_per_relation=1)
t_size, t_num_choice, t_relation_type = get_stats("dataset/t_rex_relational_similarity/test.jsonl")
v_size, v_num_choice, v_relation_type = get_stats("dataset/t_rex_relational_similarity/valid.jsonl")
stat.append({
    "name": "`t_rex_relational_similarity`",
    "Size (valid/test)": f"{v_size}/{t_size}",
    "Num of choice (valid/test)": f"{','.join([str(n) for n in v_num_choice])}/{','.join([str(n) for n in t_num_choice])}",
    "Num of relation group (valid/test)": f"{v_relation_type}/{t_relation_type}",
    "Original Reference": "[relbert/t_rex_relational_similarity](https://huggingface.co/datasets/relbert/t_rex_relational_similarity)"
})
##################################################################
# create analogy from `relbert/conceptnet_relational_similarity` #
##################################################################
data = load_dataset("relbert/conceptnet_relational_similarity", split="test")
create_analogy(data, "dataset/conceptnet_relational_similarity/test.jsonl", negative_per_relation=1)
data = load_dataset("relbert/conceptnet_relational_similarity", split="validation")
create_analogy(data, "dataset/conceptnet_relational_similarity/valid.jsonl", negative_per_relation=1)
t_size, t_num_choice, t_relation_type = get_stats("dataset/conceptnet_relational_similarity/test.jsonl")
v_size, v_num_choice, v_relation_type = get_stats("dataset/conceptnet_relational_similarity/valid.jsonl")
stat.append({
    "name": "`conceptnet_relational_similarity`",
    "Size (valid/test)": f"{v_size}/{t_size}",
    "Num of choice (valid/test)": f"{','.join([str(n) for n in v_num_choice])}/{','.join([str(n) for n in t_num_choice])}",
    "Num of relation group (valid/test)": f"{v_relation_type}/{t_relation_type}",
    "Original Reference": "[relbert/conceptnet_relational_similarity](https://huggingface.co/datasets/relbert/conceptnet_relational_similarity)"
})
############################################################
# create analogy from `relbert/nell_relational_similarity` #
############################################################
data = load_dataset("relbert/nell_relational_similarity", split="test")
create_analogy(data, "dataset/nell_relational_similarity/test.jsonl", negative_per_relation=1)
data = load_dataset("relbert/nell_relational_similarity", split="validation")
create_analogy(data, "dataset/nell_relational_similarity/valid.jsonl", negative_per_relation=1)
t_size, t_num_choice, t_relation_type = get_stats("dataset/nell_relational_similarity/test.jsonl")
v_size, v_num_choice, v_relation_type = get_stats("dataset/nell_relational_similarity/valid.jsonl")
stat.append({
    "name": "`nell_relational_similarity`",
    "Size (valid/test)": f"{v_size}/{t_size}",
    "Num of choice (valid/test)": f"{','.join([str(n) for n in v_num_choice])}/{','.join([str(n) for n in t_num_choice])}",
    "Num of relation group (valid/test)": f"{v_relation_type}/{t_relation_type}",
    "Original Reference": "[relbert/nell_relational_similarity](https://huggingface.co/datasets/relbert/nell_relational_similarity)"
})
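# Render the collected statistics as a markdown table
# (DataFrame.to_markdown requires the `tabulate` package).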
print(pd.DataFrame(stat).to_markdown(index=False))