|
import argparse |
|
import csv |
|
from datetime import datetime |
|
import os |
|
import re |
|
import shutil |
|
import tarfile |
|
import tempfile |
|
from tqdm import tqdm |
|
import torchaudio |
|
from pydub import AudioSegment |
|
import requests |
|
from pocketbase import PocketBase |
|
from torchaudio.transforms import Resample |
|
from concurrent.futures import ThreadPoolExecutor |
|
|
|
# NOTE(review): this module-level parser is never used — the __main__ block at
# the bottom of the file builds its own ArgumentParser and shadows this name.
# Confirm it can be removed.
parser = argparse.ArgumentParser(description="Command description.")

# Shared PocketBase client used by process_example and build_dataset for all
# API calls (dialect list, validated examples, audio file URLs).
pb = PocketBase('https://pocketbase.nenadb.dev/')
|
|
|
def contains_interruption(transcription: str, translation: str) -> bool:
    """Heuristically detect whether an example contains an interruption.

    Three signals, checked in order:
      1. the whole transcription is wrapped in parentheses (an annotation,
         not speech);
      2. the transcription contains a single language-switch marker letter
         immediately followed by a token boundary (or end of string);
      3. the translation contains both '[' and ']' (editorial insertion).

    Returns True as soon as any signal fires, otherwise False.
    """
    # Signal 1: parenthesized-only transcription.
    if re.fullmatch(r'\(.*\)', transcription) is not None:
        return True

    # Signal 2: language marker followed by a boundary character or $.
    boundaries = r"[\s\-꞊ˈ…,\.?!]|$"
    languages = r"(A|Az|E|H|K|P|R)"
    marker_pattern = f'{languages}(?={boundaries})'
    if re.search(marker_pattern, transcription) is not None:
        return True

    # Signal 3: bracketed material anywhere in the translation.
    return '[' in translation and ']' in translation
|
|
|
def process_example(example, dialect: str, split: str, audio_dir_path: str, transcripts: list, stats: dict) -> None:
    """Download, resample and register a single validated example.

    Saves the example's speech audio as a 48 kHz mp3 under ``audio_dir_path``
    (skipping the download when the file already exists on disk), appends one
    transcript row to ``transcripts``, and updates the shared ``stats``
    accumulators for the example's dialect.

    Runs concurrently from a ThreadPoolExecutor (see build_dataset).
    NOTE(review): the ``+=`` updates on the shared ``stats`` dicts below are
    read-modify-write and not atomic across threads — confirm this is
    acceptable, or guard them with a lock.
    """
    audio_file_name = f"nena_speech_{example.id}.mp3"
    audio_file_path = os.path.join(audio_dir_path, audio_file_name)

    # Only fetch and resample audio that a previous run hasn't already produced.
    if not os.path.exists(audio_file_path):
        audio_url = pb.get_file_url(example, example.speech, {})
        # NOTE(review): no response.raise_for_status() — a failed request would
        # silently write the HTTP error body to the temp file. Confirm.
        response = requests.get(audio_url)
        with tempfile.NamedTemporaryFile() as f:
            f.write(response.content)
            f.flush()
            waveform, sample_rate = torchaudio.load(f.name)
            # Normalize every clip to a common 48 kHz sample rate.
            resampler = Resample(orig_freq=sample_rate, new_freq=48000)
            resampled_waveform = resampler(waveform)
            torchaudio.save(audio_file_path, resampled_waveform, 48000, format="mp3")

    # One row per example; key order here defines the TSV column order.
    transcripts.append({
        'transcription': example.transcription,
        'translation': example.translation,
        'locale': example.locale,
        'proficiency': example.proficiency,
        'age': example.age,
        'crowdsourced': example.crowdsourced,
        'interrupted': contains_interruption(example.transcription, example.translation),
        'client_id': example.speaker,
        'path': audio_file_name,
    })

    dialect_stats = stats["dialects"][dialect]

    stats["totalExamples"] += 1
    dialect_stats["totalExamples"] += 1
    if example.translation:
        stats["examplesTranslated"] += 1
        dialect_stats["examplesTranslated"] += 1
    if example.transcription:
        # NOTE(review): the duration accumulators only ever add 0 — the clip's
        # actual length is never measured, so these counters stay 0 in the
        # released stats. Looks like a placeholder; confirm intent.
        stats["durationLabeled"] += 0
        dialect_stats["durationLabeled"] += 0
    else:
        stats["durationUnlabeled"] += 0
        dialect_stats["durationUnlabeled"] += 0

    dialect_stats["buckets"][split] += 1
    dialect_stats["speakers"].add(example.speaker)
    # Per-dialect histograms over speaker metadata.
    dialect_stats["splits"]["proficiency"][example.proficiency] = dialect_stats["splits"]["proficiency"].get(example.proficiency, 0) + 1
    dialect_stats["splits"]["age"][example.age] = dialect_stats["splits"]["age"].get(example.age, 0) + 1
    dialect_stats["splits"]["locale"][example.locale] = dialect_stats["splits"]["locale"].get(example.locale, 0) + 1
    if example.crowdsourced:
        dialect_stats["splits"]["crowdsourced"] += 1
|
|
|
def build_dataset(version: str, test_split: float = 0.10, dev_split: float = 0.10) -> None:
    """Assemble a versioned release of the NENA Speech dataset.

    Fetches every validated example from PocketBase, partitions each dialect's
    examples into test/dev/train buckets, downloads and packages the audio into
    per-split tar archives with matching TSV transcripts, and finally writes the
    ``dialects.py`` and ``release_stats.py`` metadata modules.

    version: release version string embedded in the stats.
    test_split / dev_split: fractions of each dialect's examples assigned to
        the test and dev buckets; the remainder goes to train.
    """
    dialects = pb.collection("dialects").get_full_list(query_params={
        "sort": "name",
    })

    # Map lowercase key -> display name; the lowercase key doubles as the
    # on-disk directory name for each dialect.
    dialects = {
        dialect.name.lower(): dialect.name
        for dialect in dialects
    }

    # Only validated examples make it into a release.
    examples = pb.collection("examples").get_full_list(query_params={
        "expand": "dialect",
        "filter": "validated=true",
    })

    # Accumulators filled in by process_example; the per-dialect "speakers"
    # sets are collapsed to counts before serialization at the end.
    stats = {
        "dialects": {
            dialect : {
                "buckets": {
                    "dev": 0,
                    "test": 0,
                    "train": 0,
                },
                "splits": {
                    "proficiency": {},
                    "age": {},
                    "locale": {},
                    "crowdsourced": 0,
                },
                "speakers": set(),
                "totalExamples": 0,
                "examplesTranslated": 0,
                "durationLabeled": 0,
                "durationUnlabeled": 0,
            }
            for dialect in dialects.keys()
        },
        "totalExamples": 0,
        "examplesTranslated": 0,
        "durationLabeled": 0,
        "durationUnlabeled": 0,
        "version": version,
        "date": datetime.now().strftime("%Y-%m-%d"),
        "name": "NENA Speech Dataset",
        "multilingual": True,
    }

    def split_examples(examples):
        # Positional split: first `test_split` fraction is test, the next
        # `dev_split` fraction is dev, the remainder is train.
        # NOTE(review): examples are not shuffled first, so buckets inherit
        # the API's ordering — confirm that is intended.
        test_end = int(test_split * len(examples))
        dev_end = int((dev_split + test_split) * len(examples))

        return {
            'test': examples[:test_end],
            'dev': examples[test_end:dev_end],
            'train': examples[dev_end:],
        }

    # dialect -> {'test': [...], 'dev': [...], 'train': [...]}
    subsets = {
        dialect: split_examples([
            example for example in examples
            if example.expand['dialect'].name.lower() == dialect
        ])
        for dialect in dialects.keys()
    }

    with tqdm(total=len(examples)) as pbar:
        for dialect, subset in subsets.items():
            for split, examples in subset.items():
                audio_dir_path = os.path.join("audio", dialect, split)
                audio_tar_path = f"{audio_dir_path}.tar"
                # Reuse audio from a previous run by unpacking its tar;
                # otherwise start from an empty directory.
                if os.path.exists(audio_tar_path):
                    with tarfile.open(audio_tar_path, "r") as tar:
                        tar.extractall(path=os.path.join("audio", dialect))
                else:
                    os.makedirs(audio_dir_path, exist_ok=True)

                transcripts = []
                transcript_dir_path = os.path.join("transcript", dialect)
                os.makedirs(transcript_dir_path, exist_ok=True)

                # Download/resample concurrently; process_example mutates
                # `transcripts` and `stats` in place.
                with ThreadPoolExecutor() as executor:
                    futures = [
                        executor.submit(process_example, example, dialect, split, audio_dir_path, transcripts, stats)
                        for example in examples
                    ]
                    for future in futures:
                        pbar.update(1)
                        # Re-raise any exception from the worker thread.
                        future.result()

                pbar.set_description(f"Saving audios ({dialect}/{split})")
                audio_tar_path = f"{audio_dir_path}.tar"
                with tarfile.open(audio_tar_path, 'w') as tar:
                    tar.add(audio_dir_path, arcname=os.path.basename(audio_dir_path))

                pbar.set_description(f"Saving transcripts ({dialect}/{split})")

                with open(os.path.join(transcript_dir_path, f"{split}.tsv"), 'w', newline='') as f:
                    # Column order comes from the first transcript row's keys.
                    fieldnames = [] if len(transcripts) == 0 else transcripts[0].keys()
                    writer = csv.DictWriter(f, fieldnames=fieldnames, delimiter='\t')
                    writer.writeheader()
                    writer.writerows(transcripts)

                # The tar is the release artifact; drop the unpacked directory.
                shutil.rmtree(audio_dir_path)
            # Collapse the per-dialect speaker set to a distinct-speaker count
            # so the stats are cleanly repr()-serializable. Must happen after
            # all of this dialect's splits are processed, since
            # process_example still calls .add() on the set.
            stats["dialects"][dialect]["speakers"] = len(stats["dialects"][dialect]["speakers"])

    # Persist the dialect map and release statistics as importable modules.
    with open('dialects.py', 'w') as f:
        python_code = f'DIALECTS = {repr(dialects)}\n'
        f.write(python_code)

    with open('release_stats.py', 'w') as f:
        python_code = f'STATS = {repr(stats)}\n'
        f.write(python_code)
|
|
|
|
|
if __name__ == "__main__":
    # CLI entry point: `python <script> --build [--version X.Y.Z]`.
    # Fix: the description and both help strings were copy-paste leftovers
    # from an unrelated script ("Generate text from prompt" / "Download text
    # prompts from GCS bucket"); they now describe what the flags actually do.
    parser = argparse.ArgumentParser(description="Build a release of the NENA Speech dataset.")

    parser.add_argument(
        "-b",
        "--build",
        action="store_true",
        help="Fetch validated examples and build the dataset artifacts.",
    )

    parser.add_argument(
        "-v",
        "--version",
        type=str,
        default="1.0.0",
        help="Version string to embed in the release metadata (default: 1.0.0).",
    )

    args = parser.parse_args()

    # Nothing happens unless the build is explicitly requested.
    if args.build:
        build_dataset(version=args.version)
|
|
|
|
|
|