import json
import os
from random import shuffle, seed

import numpy as np
import pandas as pd
from datasets import load_dataset
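
# Load the three splits of the tweet_emoji task, flatten each into a list of
# row dicts, then pool everything into one chronologically sorted frame.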
test = load_dataset("cardiffnlp/super_tweeteval", "tweet_emoji", split="test").shuffle(seed=42)
test = list(test.to_pandas().T.to_dict().values())
train = load_dataset("cardiffnlp/super_tweeteval", "tweet_emoji", split="train").shuffle(seed=42)
train = list(train.to_pandas().T.to_dict().values())
validation = load_dataset("cardiffnlp/super_tweeteval", "tweet_emoji", split="validation").shuffle(seed=42)
validation = list(validation.to_pandas().T.to_dict().values())
full = train + test + validation
df = pd.DataFrame(full)
df["date_dt"] = pd.to_datetime(df.date)
df = df.sort_values(by="date_dt")
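
# Drop gold label 68 and shift the labels above it down by one so the
# remaining 99 emoji classes stay contiguous.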
df = df[df["gold_label"] != 68]
df["gold_label"] = [i if i < 68 else i - 1 for i in df["gold_label"]]
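
# Walk forward through the per-date tweet counts until the cumulative total
# passes half of the data; the date right after that point is the boundary.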
dist_date = df.groupby("date_dt").size()
total_n = len(df)
n = 0
while True:
    n += 1
    if dist_date.iloc[:n].sum() > total_n / 2:
        break
split_date = dist_date.index[n]
print(split_date)
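
# Temporal split: everything up to and including the boundary date is train,
# everything after it is test.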
# Copy the slices so popping the helper column below does not trigger
# pandas' SettingWithCopyWarning on a view.
train = df[df["date_dt"] <= split_date].copy()
test = df[df["date_dt"] > split_date].copy()
print(train.date_dt.min(), train.date_dt.max())
print(test.date_dt.min(), test.date_dt.max())

train.pop("date_dt")
test.pop("date_dt")
train = list(train.T.to_dict().values())
test = list(test.T.to_dict().values())
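
# Shuffle deterministically and carve the first 20% of the train list off as
# validation; every split must still cover all 99 classes.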
seed(42)
shuffle(train)
shuffle(test)
valid = train[:int(len(train) * 0.2)]
train = train[len(valid):]
assert np.unique([i["gold_label"] for i in train], return_counts=True)[1].shape[0] == 99
assert np.unique([i["gold_label"] for i in test], return_counts=True)[1].shape[0] == 99
assert np.unique([i["gold_label"] for i in valid], return_counts=True)[1].shape[0] == 99
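
# Cut the shuffled test split into four equal quarters and write all splits
# to disk as JSON Lines.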
n_test = int(len(test) / 4)
n_train = len(train)
n_validation = len(valid)
test_1 = test[:n_test]
test_2 = test[n_test:n_test * 2]
test_3 = test[n_test * 2:n_test * 3]
test_4 = test[n_test * 3:]

os.makedirs("data/tweet_emoji", exist_ok=True)
with open("data/tweet_emoji/test.jsonl", "w") as f:
    f.write("\n".join([json.dumps(i) for i in test]))
with open("data/tweet_emoji/test_1.jsonl", "w") as f:
    f.write("\n".join([json.dumps(i) for i in test_1]))
with open("data/tweet_emoji/test_2.jsonl", "w") as f:
    f.write("\n".join([json.dumps(i) for i in test_2]))
with open("data/tweet_emoji/test_3.jsonl", "w") as f:
    f.write("\n".join([json.dumps(i) for i in test_3]))
with open("data/tweet_emoji/test_4.jsonl", "w") as f:
    f.write("\n".join([json.dumps(i) for i in test_4]))
with open("data/tweet_emoji/train.jsonl", "w") as f:
    f.write("\n".join([json.dumps(i) for i in train]))
with open("data/tweet_emoji/validation.jsonl", "w") as f:
    f.write("\n".join([json.dumps(i) for i in valid]))
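

# Build a resampled train/validation pair: up to half of each comes from the
# given pool of test chunks, topped up from the original train/valid lists.
# Note that the shuffles mutate the module-level `train` and `valid` lists
# in place.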
def sampler(dataset_test, r_seed):
    seed(r_seed)
    shuffle(dataset_test)
    shuffle(train)
    shuffle(valid)
    test_tr = dataset_test[:int(n_train / 2)]
    test_vl = dataset_test[int(n_train / 2):int(n_train / 2) + int(n_validation / 2)]
    new_train = test_tr + train[:n_train - len(test_tr)]
    new_validation = test_vl + valid[:n_validation - len(test_vl)]
    print(np.unique([i["gold_label"] for i in new_train], return_counts=True)[1].shape[0])
    print(np.unique([i["gold_label"] for i in new_validation], return_counts=True)[1].shape[0])
    assert np.unique([i["gold_label"] for i in new_train], return_counts=True)[1].shape[0] == 99
    assert np.unique([i["gold_label"] for i in new_validation], return_counts=True)[1].shape[0] == 99
    return new_train, new_validation
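
# For each held-out quarter (test_1..test_4), pool the other three quarters,
# resample train/validation under three seeds, and write each configuration
# to its own directory.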
id2test = {n: t for n, t in enumerate([test_1, test_2, test_3, test_4])}
for n, _test in enumerate([
        test_4 + test_2 + test_3,
        test_1 + test_4 + test_3,
        test_1 + test_2 + test_4,
        test_1 + test_2 + test_3]):
    for s in range(3):
        os.makedirs(f"data/tweet_emoji_test{n}_seed{s}", exist_ok=True)
        _train, _valid = sampler(_test, s)
        with open(f"data/tweet_emoji_test{n}_seed{s}/train.jsonl", "w") as f:
            f.write("\n".join([json.dumps(i) for i in _train]))
        with open(f"data/tweet_emoji_test{n}_seed{s}/validation.jsonl", "w") as f:
            f.write("\n".join([json.dumps(i) for i in _valid]))
        with open(f"data/tweet_emoji_test{n}_seed{s}/test.jsonl", "w") as f:
            f.write("\n".join([json.dumps(i) for i in id2test[n]]))