"""Build temporal train/validation/test splits for the tweet_nerd task.

Downloads the cardiffnlp/super_tweeteval "tweet_nerd" splits, merges them,
re-splits the combined data at (roughly) the median tweet date so training
data precedes test data in time, writes the resulting jsonl files, and then
generates leave-one-quarter-out variants in which chunks of the test pool are
mixed back into train/validation under three random seeds.
"""
import json
import os
from random import shuffle, seed

import pandas as pd
from datasets import load_dataset


def _records(frame):
    """Return *frame* as a list of row dicts.

    The transpose round-trip mirrors the original implementation; it coerces
    rows into plain dicts whose values json.dumps can serialize.
    """
    return list(frame.T.to_dict().values())


def _write_jsonl(path, records):
    """Write *records* to *path*, one JSON object per line."""
    with open(path, "w") as f:
        f.write("\n".join(json.dumps(r) for r in records))


def _load_split(split):
    """Download one tweet_nerd split, shuffle it reproducibly, return row dicts."""
    ds = load_dataset("cardiffnlp/super_tweeteval", "tweet_nerd", split=split).shuffle(seed=42)
    return _records(ds.to_pandas())


test = _load_split("test")
train = _load_split("train")
validation = _load_split("validation")

# Merge everything and order by tweet date so the split can be made on time.
df = pd.DataFrame(train + test + validation)
df["date_dt"] = pd.to_datetime(df.date)
df = df.sort_values(by="date_dt")

# Walk the per-date counts until the cumulative total passes half the data.
dist_date = df.groupby("date_dt").size()
total_n = len(df)
n = 0
while True:
    n += 1
    # .iloc makes the positional slice explicit (plain [] slicing on a
    # datetime-indexed Series is ambiguous/deprecated).
    if dist_date.iloc[:n].sum() > total_n / 2:
        break
# NOTE(review): index[n] is the date *after* the one whose count crossed the
# halfway mark; index[n - 1] may have been intended — confirm before changing.
split_date = dist_date.index[n]
print(split_date)

train = df[df["date_dt"] <= split_date]
test = df[df["date_dt"] > split_date]
print(train.date_dt.min(), train.date_dt.max())
print(test.date_dt.min(), test.date_dt.max())

# drop(columns=...) instead of pop(): pop() mutates a boolean-mask slice of
# df in place and triggers SettingWithCopyWarning; the result is identical.
train = _records(train.drop(columns=["date_dt"]))
test = _records(test.drop(columns=["date_dt"]))

seed(42)
shuffle(train)
shuffle(test)

# Carve 20% of the chronologically earlier half off as validation.
valid = train[:int(len(train) * 0.2)]
train = train[len(valid):]

# Quarter the later half; the fourth quarter absorbs any remainder.
n_test = int(len(test) / 4)
n_train = len(train)
n_validation = len(valid)
test_1 = test[:n_test]
test_2 = test[n_test:n_test * 2]
test_3 = test[n_test * 2:n_test * 3]
test_4 = test[n_test * 3:]

os.makedirs("data/tweet_nerd", exist_ok=True)
_write_jsonl("data/tweet_nerd/test.jsonl", test)
_write_jsonl("data/tweet_nerd/test_1.jsonl", test_1)
_write_jsonl("data/tweet_nerd/test_2.jsonl", test_2)
_write_jsonl("data/tweet_nerd/test_3.jsonl", test_3)
_write_jsonl("data/tweet_nerd/test_4.jsonl", test_4)
_write_jsonl("data/tweet_nerd/train.jsonl", train)
_write_jsonl("data/tweet_nerd/validation.jsonl", valid)


def sampler(dataset_test, r_seed):
    """Mix roughly half of *dataset_test* into new train/validation pools.

    Re-seeds the RNG with *r_seed*, then shuffles *dataset_test* and the
    module-level ``train`` and ``validation`` lists **in place** — the in-place
    shuffles make successive calls stateful, so results depend on call order
    even at a fixed seed (deterministic overall when the loop below runs
    start to finish). Returns a (new_train, new_validation) pair of the
    original sizes ``n_train`` / ``n_validation``.

    NOTE(review): this pads with the original HF ``validation`` split, not the
    temporally-split ``valid`` list written to validation.jsonl — possibly a
    bug (``valid`` may have been intended); behavior preserved here, confirm
    with the authors.
    """
    seed(r_seed)
    shuffle(dataset_test)
    shuffle(train)
    shuffle(validation)
    test_tr = dataset_test[:int(n_train / 2)]
    test_vl = dataset_test[int(n_train / 2): int(n_train / 2) + int(n_validation / 2)]
    new_train = test_tr + train[:n_train - len(test_tr)]
    new_validation = test_vl + validation[:n_validation - len(test_vl)]
    return new_train, new_validation


id2test = {n: t for n, t in enumerate([test_1, test_2, test_3, test_4])}
# For each held-out quarter, the train/validation mixing pool is the
# concatenation of the other three quarters; each variant is generated under
# three seeds.
leave_one_out = [
    test_4 + test_2 + test_3,
    test_1 + test_4 + test_3,
    test_1 + test_2 + test_4,
    test_1 + test_2 + test_3,
]
for test_id, pool in enumerate(leave_one_out):
    for s in range(3):
        out_dir = f"data/tweet_nerd_new_test{test_id}_seed{s}"
        os.makedirs(out_dir, exist_ok=True)
        _train, _valid = sampler(pool, s)
        _write_jsonl(f"{out_dir}/train.jsonl", _train)
        _write_jsonl(f"{out_dir}/validation.jsonl", _valid)
        _write_jsonl(f"{out_dir}/test.jsonl", id2test[test_id])