super_tweeteval / process / tempo_wic.py
antypasd's picture
reintroduced date in wic
67978db
raw
history blame
1.59 kB
import json
import os
import pandas as pd
# Convert the TempoWiC splits under misc/TempoWiC/data into SuperTweetEval
# JSON-lines files under data/tempo_wic: one output record per example, with
# the two tweet sub-objects flattened into *_1 / *_2 fields and the binary
# gold label attached from the split's labels TSV.
os.makedirs("data/tempo_wic", exist_ok=True)

for split in ["train", "validation", "test"]:
    # The test split ships its examples and gold labels under different
    # file names than train/validation.
    if split == "test":
        data_path = "misc/TempoWiC/data/test-codalab-10k.data.jl"
        labels_path = "misc/TempoWiC/data/test.gold.tsv"
    else:
        data_path = f"misc/TempoWiC/data/{split}.data.jl"
        labels_path = f"misc/TempoWiC/data/{split}.labels.tsv"

    # Examples: JSON-lines file, one object per non-empty line.
    with open(data_path) as f:
        data = pd.DataFrame(
            [json.loads(line) for line in f.read().split("\n") if len(line) > 0]
        )

    # Gold labels: two-column TSV (id, binary label), re-indexed by id so
    # labels can be looked up per example id below.
    labels = pd.read_csv(labels_path, sep="\t")
    labels.columns = ["id", "label"]
    labels.index = labels.pop("id")

    # Keep only examples that have a gold label, then attach it.
    data = data[[i in labels.index for i in data["id"]]]
    data["label"] = [labels.loc[i].values[0] for i in data["id"] if i in labels.index]
    # Sanity check: every label row must have matched exactly one example.
    # (Explicit raise instead of `assert`, which is stripped under -O.)
    if len(labels) != len(data):
        raise ValueError(f"label/example count mismatch for split {split!r}")

    records = []
    for _, row in data.iterrows():
        row = row.to_dict()
        record = {"word": row["word"], "gold_label_binary": row["label"]}
        # Flatten the two tweet sub-dicts, suffixing their keys with _1 / _2,
        # and rename the token lists to the SuperTweetEval column names.
        record.update({f"{k}_1": v for k, v in row["tweet1"].items()})
        record["text_1_tokenized"] = record.pop("tokens_1")
        record.update({f"{k}_2": v for k, v in row["tweet2"].items()})
        record["text_2_tokenized"] = record.pop("tokens_2")
        # Drop identifiers not needed downstream.
        # NOTE(review): this pops a bare "id" key, yet all flattened tweet
        # keys carry a _1/_2 suffix — confirm against the tweet-dict schema
        # that the record actually contains "id".
        record.pop("id")
        record.pop("token_idx_1")
        record.pop("token_idx_2")
        records.append(record)

    with open(f"data/tempo_wic/{split}.jsonl", "w") as f:
        f.write("\n".join(json.dumps(r) for r in records))