File size: 1,590 Bytes
1f8eb0e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5399d79
1f8eb0e
 
5399d79
1f8eb0e
5399d79
4e3e08b
 
 
 
 
 
1f8eb0e
5399d79
1f8eb0e
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
import json
import os
import pandas as pd

# Convert the TempoWiC release (misc/TempoWiC/data) into flat JSONL files
# under data/tempo_wic: one record per tweet pair, nested tweet dicts
# flattened to suffixed top-level keys, with the gold binary label attached.
os.makedirs("data/tempo_wic", exist_ok=True)

for split in ['train', 'validation', 'test']:
    # The test split ships its data and labels under different file names
    # (CodaLab release) than train/validation.
    if split == 'test':
        data_path = "misc/TempoWiC/data/test-codalab-10k.data.jl"
        label_path = "misc/TempoWiC/data/test.gold.tsv"
    else:
        data_path = f"misc/TempoWiC/data/{split}.data.jl"
        label_path = f"misc/TempoWiC/data/{split}.labels.tsv"

    # Stream the JSON-lines file instead of reading it whole; skip blank lines.
    with open(data_path) as f:
        data = pd.DataFrame([json.loads(line) for line in f if line.strip()])

    df = pd.read_csv(label_path, sep="\t")
    df.columns = ["id", "label"]
    labels = df.set_index("id")["label"]  # id -> gold binary label

    # Keep only instances that have a gold label, then attach it by id.
    # Using an index-aligned map (rather than a positionally-assigned list)
    # guarantees each row gets ITS label even if row order ever changed.
    data = data[data['id'].isin(labels.index)]
    data['label'] = data['id'].map(labels)
    # Every labelled id must have been found in the data file.
    assert len(labels) == len(data)

    records = []
    for _, row in data.iterrows():
        row = row.to_dict()
        rec = {"word": row["word"], "gold_label_binary": row["label"]}
        # Flatten the two nested tweet dicts into *_1 / *_2 keys, renaming
        # the token lists to the downstream-expected field names.
        rec.update({f"{k}_1": v for k, v in row['tweet1'].items()})
        rec['text_1_tokenized'] = rec.pop('tokens_1')
        rec.update({f"{k}_2": v for k, v in row['tweet2'].items()})
        rec['text_2_tokenized'] = rec.pop('tokens_2')
        # Drop fields not needed downstream.
        rec.pop("id")
        rec.pop("token_idx_1")
        rec.pop("token_idx_2")
        records.append(rec)

    with open(f"data/tempo_wic/{split}.jsonl", "w") as f:
        f.write("\n".join(json.dumps(rec) for rec in records))