import os
import subprocess

import pandas as pd

# Number of batch files to split the metadata into.
chunk_size = 10

# Download the SeamlessAlign enA-jpn metadata (gzipped TSV).
url = "https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-jpn.withduration.tsv.gz"
filename = os.path.basename(url)
subprocess.run(["wget", url, "-O", filename], check=True)

# pandas decompresses the .gz transparently based on the file extension.
df = pd.read_csv(filename, sep="\t", header=None, dtype=str)
df.columns = [
    "cc_warc",
    "cc_sha",
    "cc_document_url",
    "cc_lineno",
    "paragraph_digest",
    "sentence_digest",
    "text_lid_score",
    "laser_score",
    "direction",
    "side",
    "line_no",
]

# Keep only the Japanese side of each aligned pair.
df = df[df.side == "jpn"]

# Sort so that rows from the same WARC file and document are adjacent,
# letting a downstream fetcher read each Common Crawl archive sequentially.
df["cc_lineno"] = df["cc_lineno"].astype(int)
df.sort_values(by=["cc_warc", "cc_sha", "cc_document_url", "cc_lineno"], inplace=True)

# Split the sorted metadata into chunk_size batch files of roughly equal size.
# The last batch absorbs the remainder rows, so exactly chunk_size files are
# written (the original open-ended while loop could emit a tiny extra batch).
batch_size = len(df) // chunk_size
for index in range(chunk_size):
    start = index * batch_size
    end = len(df) if index == chunk_size - 1 else start + batch_size
    df.iloc[start:end].to_csv(
        f"seamless.dataset.metadata.public.enA-jpn.withduration.reordered.batch_{index + 1}.tsv",
        sep="\t",
        index=False,
        header=False,
    )
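
# Optional sanity check, a minimal sketch: re-read the batch files and confirm
# their row counts sum to len(df). Each row is a single line because the TSV
# fields are URLs, hashes, and scores with no embedded newlines. The glob
# pattern below assumes the batches written above are the only matching files
# in the working directory.
import glob

total = 0
for path in sorted(glob.glob(
        "seamless.dataset.metadata.public.enA-jpn.withduration.reordered.batch_*.tsv")):
    with open(path, encoding="utf-8") as f:
        total += sum(1 for _ in f)
assert total == len(df), f"expected {len(df)} rows across batches, found {total}"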