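"""Download audio for Meta's seamless-align parallel speech corpus and push it to the Hugging Face Hub.

The metadata TSV for the chosen direction (enA-jaA or enA-jpn) is fetched first; each referenced audio file
is then downloaded, trimmed to the aligned segment, and uploaded as a `datasets` subset. All knobs
(direction, line range, pool size, Hugging Face organization, ...) come from the environment variables below.
"""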
import json
import os
import tarfile
import zipfile
import gzip
import subprocess
from os.path import join as p_join
from math import ceil, floor
from tqdm import tqdm
from multiprocessing import Pool
from typing import Dict
from glob import glob
# import librosa
import pandas as pd
import soundfile as sf
from datasets import Dataset, Audio, DatasetDict
# dataset config
url_metadata_dict = {
"enA-jaA": "https://dl.fbaipublicfiles.com/seamless/data/seamless_align_nov2023_extension/seamless.dataset.metadata.public.enA-jaA.tsv.gz",
"enA-jpn": "https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-jpn.withduration.tsv.gz"
}
direction = os.getenv("DIRECTION", "enA-jaA")
sides = set(direction.split("-"))
cache_dir_audio = p_join("download", "audio", direction)
cache_dir_feature = p_join("download", "feature", direction)
os.makedirs(cache_dir_feature, exist_ok=True)
for s in sides:
os.makedirs(p_join(cache_dir_audio, s), exist_ok=True)
# processor config
n_pool = int(os.getenv("N_POOL", 1))
wget_max_retry = os.getenv("MAX_RETRY", "2")
wget_timeout = os.getenv("TIMEOUT", "20")
line_no_start = int(os.getenv("LINE_NO_START", 0))
line_no_end = int(os.getenv("LINE_NO_END", 10000))
dataset_id = os.getenv("DATASET_ID", "0")
hf_org = os.getenv("HF_ORG", "asahi417")
hf_dataset = f"seamless-align-{direction}"
skip_download = bool(int(os.getenv("SKIP_DOWNLOAD", 0)))
sampling_rate = 16000  # seamless-align duration offsets are sample indices at 16 kHz
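# Example invocation (illustrative values; the script filename is a placeholder):
#   DIRECTION=enA-jaA LINE_NO_START=0 LINE_NO_END=10000 N_POOL=4 DATASET_ID=0 python download_audio.py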
def wget(url: str, output_file: str) -> bool:
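    """Download `url` to `output_file`, extracting tar/gz/zip archives in place; returns False if the download failed."""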
os.makedirs(os.path.dirname(output_file), exist_ok=True)
subprocess.run(["wget", url, "-O", output_file, "--tries", wget_max_retry, "--timeout", wget_timeout])
if not os.path.exists(output_file):
return False
if output_file.endswith('.tar.gz') or output_file.endswith('.tgz') or output_file.endswith('.tar'):
if output_file.endswith('.tar'):
tar = tarfile.open(output_file)
else:
tar = tarfile.open(output_file, "r:gz")
tar.extractall(os.path.dirname(output_file))
tar.close()
os.remove(output_file)
elif output_file.endswith('.gz'):
with gzip.open(output_file, 'rb') as f:
with open(output_file.replace('.gz', ''), 'wb') as f_write:
f_write.write(f.read())
os.remove(output_file)
elif output_file.endswith('.zip'):
with zipfile.ZipFile(output_file, 'r') as zip_ref:
            zip_ref.extractall(os.path.dirname(output_file))
os.remove(output_file)
return True
def get_metadata():
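    """Fetch and cache the seamless-align metadata TSV for the configured direction, returning it as a dataframe."""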
url_metadata = url_metadata_dict[direction]
meta_data_filename = os.path.basename(url_metadata)
meta_data_path = p_join("download", "meta", meta_data_filename)
if not os.path.exists(meta_data_path.replace(".gz", "")):
assert wget(url_metadata, output_file=meta_data_path)
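    # the separator is a regex (one whitespace character), so pandas parses with the python engine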
df = pd.read_csv(meta_data_path.replace(".gz", ""), sep=r'[\t\s]', header=None)
df = df[[0, 2, 3, 4, 9, 10, 11, 12]]
df.columns = ["id", "url", "duration_start", "duration_end", "laser_score", "direction", "side", "line_no"]
if direction == "enA-jpn":
df = df[df["side"] == "enA"]
assert len(df["direction"].unique()) == 1
df.pop("direction")
return df.sort_values(by=["line_no", "side"])
def to_json_serializable(val):
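    # numpy scalar types are matched by type name so numpy itself need not be imported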
if "float" in str(type(val)):
return float(val)
if "int" in str(type(val)):
return int(val)
return str(val)
def cleanup(features, feature_file):
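    """Delete partial feature/audio files for this line_no and leave a dummy feature file as a skip marker."""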
if os.path.exists(feature_file):
os.remove(feature_file)
for _side in sides:
for _unrelated_audio_file in glob(p_join(cache_dir_audio, _side, f"{features['line_no']}.*")):
os.remove(_unrelated_audio_file)
    # write a dummy feature file so the next run skips this line_no
with open(feature_file, "w") as f:
json.dump({"dummy": "dummy"}, f)
def get_audio(dataframe: pd.DataFrame):
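    """Download and trim the audio for one line_no group; returns the line_no on success, None on failure."""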
features = {"line_no": int(dataframe.pop('line_no').values[0])}
feature_file = p_join(cache_dir_feature, f'{features["line_no"]}.json')
for side, df in dataframe.groupby("side"):
df.pop("side")
features.update({f"{side}.{k}": to_json_serializable(v) for k, v in df.iloc[0].to_dict().items()})
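        # name the cached audio file <line_no>.<extension of the source URL>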
identifier = os.path.basename(features[f"{side}.url"]).split(".")[-1]
features[f"{side}.path"] = str(p_join(cache_dir_audio, side, f"{features['line_no']}.{identifier}"))
start, end = features[f"{side}.duration_start"], features[f"{side}.duration_end"]
if not os.path.exists(features[f"{side}.path"]):
print(f"WGET {features[f'{side}.url']}")
flag = wget(features[f"{side}.url"], output_file=features[f"{side}.path"])
if not flag:
print("\n#### ERROR: wget failure ####\n")
cleanup(features, feature_file)
return None
else:
try:
print(f"LOAD AUDIO FROM {features[f'{side}.path']}")
wav, sr = sf.read(features[f"{side}.path"])
print(f"wav shape:{wav.shape}")
if wav.ndim > 1:
wav = wav[:, 0]
                    # start/end are 16 kHz sample offsets; rescale them to the file's native rate before slicing
                    wav = wav[floor(start / sampling_rate * sr):ceil(end / sampling_rate * sr)]
                    print(f"wav shape (after truncate): {wav.shape}")
print(f"SAVING: {features[f'{side}.path']}")
sf.write(features[f"{side}.path"], wav, sr)
# if sr != sampling_rate:
# print(f"RESAMPLING: {wav.shape} length audio")
# wav = librosa.resample(wav, orig_sr=sr, target_sr=sampling_rate)
# sf.write(features[f"{side}.path"], wav[start:end], sampling_rate)
except Exception as e:
print(f"\n#### ERROR ####\n {e}")
cleanup(features, feature_file)
return None
print(f"\n### SUCCESS! ###\n:{features['line_no']}")
with open(feature_file, "w") as f:
json.dump(features, f)
return features["line_no"]
if __name__ == '__main__':
if not skip_download:
df_metadata = get_metadata()
print(f"metadata: {len(df_metadata)}, {line_no_start} --> {line_no_end}")
inputs = [
g for line_no, g in df_metadata.groupby("line_no")
if line_no_start <= line_no < line_no_end and not os.path.exists(
p_join(cache_dir_feature, f'{int(line_no)}.json')
)
]
print(f"filtered unique lines: {len(inputs)}")
        if direction == "enA-jaA":
            # keep only groups that contain both language sides
            inputs = [g for g in inputs if set(g["side"].unique()) == sides]
            print(f"after removing groups missing a side: {len(inputs)}")
if n_pool == 1:
for g in tqdm(inputs, total=len(inputs)):
line_no = get_audio(g)
else:
with Pool(n_pool) as pool:
for line_no in pool.imap_unordered(get_audio, inputs):
if line_no:
print(line_no)
def loader(feature: str) -> Dict:
with open(feature) as f_reader:
return json.load(f_reader)
print("UPLOADING TO HF!!!")
features = [p_join(cache_dir_feature, f'{i}.json') for i in range(line_no_start, line_no_end)]
print(f"- raw feature: {len(features)}")
features = [i for i in features if os.path.exists(i)]
print(f"- path exists: {len(features)}")
features = [loader(i) for i in features]
features = [i for i in features if "dummy" not in i]
print(f"- dummy removed: {len(features)}")
print(f"push {len(features)} records to hub")
data_dict = {}
for side in sides:
data_dict.update({f"{side}.audio": [i.pop(f"{side}.path") for i in features]})
data_dict.update({k: [i[k] for i in features] for k in features[0].keys()})
audio_dataset = Dataset.from_dict(data_dict)
for side in sides:
audio_dataset = audio_dataset.cast_column(f"{side}.audio", Audio())
DatasetDict({"train": audio_dataset}).push_to_hub(
f"{hf_org}/{hf_dataset}",
config_name=f"subset_{dataset_id}"
)
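    # Sanity check (optional): the pushed subset can be loaded back with `datasets`:
    # from datasets import load_dataset
    # ds = load_dataset(f"{hf_org}/{hf_dataset}", f"subset_{dataset_id}", split="train")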
    # Variants kept from earlier experiments (all optional):
    # - push only a slice:  DatasetDict({"train": audio_dataset.select(range(1000))}).push_to_hub(...)
    # - control sharding:   push_to_hub(..., max_shard_size="2GiB") or push_to_hub(..., num_shards={"train": 1})
    # Retry loop for transient hub failures:
    # import time
    # while True:
    #     try:
    #         DatasetDict({"train": audio_dataset}).push_to_hub(f"{hf_org}/{hf_dataset}", config_name=f"subset_{dataset_id}")
    #         break
    #     except Exception:
    #         print(f"FAILED: push_to_hub on {hf_org}/{hf_dataset}. wait 60 sec and retry...")
    #         time.sleep(60)