import os
import subprocess

import pandas as pd

# Number of batch files to split the metadata into.
chunk_size = 10

# Download the Seamless enA-jpn metadata (gzipped TSV) with wget;
# check=True makes the script fail fast if the download does not succeed.
url = "https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-jpn.withduration.tsv.gz"
filename = os.path.basename(url)
subprocess.run(["wget", url, "-O", filename], check=True)

# pandas infers gzip compression from the .gz extension.
df = pd.read_csv(filename, sep="\t", header=None, dtype=str)
df.columns = [
    "cc_warc", "cc_sha", "cc_document_url", "cc_lineno", "paragraph_digest",
    "sentence_digest", "text_lid_score", "laser_score", "direction", "side",
    "line_no",
]

# Keep only the Japanese side of each alignment pair.
df = df[df.side == "jpn"]

# Sort so that rows from the same WARC archive and document sit next to
# each other; cc_lineno must be numeric to sort in document order.
df["cc_lineno"] = df["cc_lineno"].astype(int)
df.sort_values(by=["cc_warc", "cc_sha", "cc_document_url", "cc_lineno"], inplace=True)
# Split the sorted metadata into roughly chunk_size equally sized batch
# files (plus one extra file for any remainder rows). max(..., 1) guards
# against a zero batch size, which would silently write no files at all
# when the dataframe has fewer rows than chunk_size.
batch_size = max(len(df) // chunk_size, 1)
start = 0
end = min(len(df), batch_size)
index = 1
while start < end:
    df.iloc[start:end].to_csv(
        f"seamless.dataset.metadata.public.enA-jpn.withduration.reordered.batch_{index}.tsv",
        sep="\t", index=False, header=False,
    )
    index += 1
    start = end
    end = min(len(df), end + batch_size)
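
# A minimal sanity check (a sketch, not part of the original pipeline):
# re-read the batch files written above and confirm that the splitting
# loop neither dropped nor duplicated any rows.
total_rows = sum(
    len(pd.read_csv(
        f"seamless.dataset.metadata.public.enA-jpn.withduration.reordered.batch_{i}.tsv",
        sep="\t", header=None, dtype=str,
    ))
    for i in range(1, index)
)
assert total_rows == len(df), f"expected {len(df)} rows, found {total_rows}"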