import multiprocessing as mp
import os
import tarfile
from functools import partial
from pathlib import Path

import pandas as pd

# Load the speech metadata and resolve each clip to its absolute path on disk.
df = pd.read_parquet("df_train_v2.parquet")
df["filename_full"] = "/home/fatrek/data_network/faton/riksdagen_anforanden/data/rixvox_v2/" + df["filename"]
df = df.rename(columns={"sex": "gender"})

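# Splits are drawn at the (speaker, party) level so that all speeches by the
# same person land in exactly one of train/dev/test (no speaker leakage).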
df["speaker_total_hours"] = df.groupby(["speaker", "party"])["duration"].transform("sum") / 3600 |
|
df_hours = df.groupby(["speaker", "party"]).first().sort_values("speaker_total_hours", ascending=False).reset_index() |
|
df_hours = df_hours.sample(frac=1, random_state=1337) |
|
|
|
|
|
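# Walk the shuffled speakers and cut the cumulative share of total hours at
# ~98% for train; of the remainder, the first ~1% of total hours becomes the
# validation set and everything left over becomes the test set.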
df_hours["train"] = df_hours["speaker_total_hours"].cumsum() / df_hours["speaker_total_hours"].sum() < 0.98 |
|
|
|
df_hours["valid"] = False |
|
df_hours.loc[df_hours["train"] == False, "valid"] = ( |
|
df_hours[df_hours["train"] == 0]["speaker_total_hours"].cumsum() / df_hours["speaker_total_hours"].sum() < 0.01 |
|
) |
|
df_hours["test"] = (df_hours["train"] == False) & (df_hours["valid"] == False) |
|
|
|
|
|
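# Optional sanity check (not part of the pipeline): each speaker should carry
# exactly one flag, and the hour shares should land near 98/1/1.
# assert (df_hours[["train", "valid", "test"]].sum(axis=1) == 1).all()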
# Keep only the speech rows whose (speaker, party) key is flagged for each split.
df_train = pd.merge(df, df_hours.loc[df_hours["train"], ["speaker", "party"]], on=["speaker", "party"], how="inner")
df_valid = pd.merge(df, df_hours.loc[df_hours["valid"], ["speaker", "party"]], on=["speaker", "party"], how="inner")
df_test = pd.merge(df, df_hours.loc[df_hours["test"], ["speaker", "party"]], on=["speaker", "party"], how="inner")

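# Shards are fixed-size blocks of consecutive rows within a split; each shard
# later becomes one tar archive.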
def split_creator(df, observations_per_shard, shard_name):
    """Label each row with a shard id ("train_0", "train_1", ...)."""
    shard = pd.Series(range(len(df)), index=df.index) // observations_per_shard
    return shard_name + "_" + shard.astype(str)

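# Assign shard labels per split: roughly 6500 clips per archive.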
df_train["shard"] = split_creator(df_train, 6500, "train") |
|
df_valid["shard"] = split_creator(df_valid, 6500, "dev") |
|
df_test["shard"] = split_creator(df_test, 6500, "test") |
|
|
|
df_train["nr_words"] = df_train["text"].str.split().str.len() |
|
df_train = df_train[df_train["nr_words"] <= 160].reset_index(drop=True) |
|
df_train = df_train.drop(columns="nr_words") |
|
|
|
|
|
def create_tar(df, data_folder="/home/fatrek/data_network/faton/rixvox/data"):
    """Pack the audio files of one shard into <data_folder>/<split>/<shard>.tar.gz."""
    shard = df["shard"].iloc[0]
    shard_filename = shard + ".tar.gz"
    split = shard.rsplit("_", 1)[0]  # "train", "dev" or "test"
    os.makedirs(os.path.join(data_folder, split), exist_ok=True)

    print(f"Creating tarfile: {os.path.join(data_folder, split, shard_filename)}")
    with tarfile.open(os.path.join(data_folder, split, shard_filename), "w:gz") as tar:
        for filename in df["filename_full"].values:
            # Store each file as <parent_dir>/<name> inside the archive.
            tar.add(Path(filename), arcname=Path(filename).relative_to(Path(filename).parent.parent), recursive=False)

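# One DataFrame per shard, so that each call to create_tar packs exactly one
# archive and the shards can be processed independently by the worker pool.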
df_train_list = [group for _, group in df_train.groupby("shard")]
df_valid_list = [group for _, group in df_valid.groupby("shard")]
df_test_list = [group for _, group in df_test.groupby("shard")]

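# Pack the shards: train uses 16 worker processes, while dev and test are
# small enough to pack sequentially.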
data_folder = "/home/fatrek/data_network/faton/RixVox/data" |
|
|
|
|
|
|
|
|
|
with mp.Pool(16) as pool: |
|
pool.map(create_tar, df_train_list) |
|
|
|
with mp.Pool(1) as pool: |
|
pool.map(create_tar, df_valid_list) |
|
pool.map(create_tar, df_test_list) |
|
|
|
|
|
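# Drop local bookkeeping columns that should not ship with the metadata.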
df_train = df_train.drop(columns=["shard", "filename_full", "file_size"])
df_valid = df_valid.drop(columns=["shard", "filename_full", "file_size"])
df_test = df_test.drop(columns=["shard", "filename_full", "file_size"])

os.makedirs("data", exist_ok=True)  # ensure the output directory exists
df_train.to_parquet(os.path.join("data", "train_metadata.parquet"), index=False)
df_valid.to_parquet(os.path.join("data", "dev_metadata.parquet"), index=False)
df_test.to_parquet(os.path.join("data", "test_metadata.parquet"), index=False)