"""Build and export the NENA Speech Dataset from a PocketBase backend.

Downloads validated examples per dialect, splits them into train/dev/test,
re-encodes audio to 48 kHz MP3, tars the audio per split, writes per-split
TSV transcripts, and emits ``dialect.py`` / ``release_stats.py`` modules
containing the dialect map and release statistics.
"""

import argparse
import csv
import os
import re
import shutil
import tarfile
import tempfile

import requests
from pocketbase import PocketBase
from pydub import AudioSegment
from tqdm import tqdm

# Single shared client for all PocketBase queries.
pb = PocketBase('https://pocketbase.nenadb.dev/')


def contains_interruption(transcription: str, translation: str) -> bool:
    """Return True when the example's text indicates an interrupted utterance.

    An example counts as interrupted when any of the following holds:
      * the transcription is nothing but a parenthesised remark, e.g. "(...)";
      * the transcription contains a language abbreviation (A, Az, E, H, K,
        P, R) immediately followed by a word boundary — a marker of a
        code-switch into another language;
      * the translation contains editorial notes in square brackets.
    """
    boundaries = r"[\s\-꞊ˈ…,\.?!]|$"
    languages = r"(A|Az|E|H|K|P|R)"

    # Transcription that is only a parenthesised aside.
    if re.fullmatch(r'\(.*\)', transcription):
        return True

    # Language abbreviation followed by a boundary character (lookahead so
    # the boundary itself is not consumed).
    pattern = f'{languages}(?={boundaries})'
    if re.search(pattern, transcription):
        return True

    # Square-bracketed notes in the translation.
    if '[' in translation and ']' in translation:
        return True

    return False


def build_dataset(test_split=0.10, dev_split=0.10):
    """Download all validated examples and materialise the dataset on disk.

    Parameters
    ----------
    test_split : float
        Fraction of each dialect's examples assigned to the ``test`` bucket.
    dev_split : float
        Fraction of each dialect's examples assigned to the ``dev`` bucket.

    Side effects: writes ``audio/<dialect>/<split>.tar``,
    ``transcript/<dialect>/<split>.tsv``, ``dialect.py`` and
    ``release_stats.py`` relative to the current working directory.
    """
    dialects = pb.collection("dialects").get_full_list(query_params={
        "sort": "name",
    })
    # Map lowercased dialect name -> display name.
    dialects = {dialect.name.lower(): dialect.name for dialect in dialects}

    examples = pb.collection("examples").get_full_list(query_params={
        "expand": "dialect",
        "filter": "validated=true",
    })

    stats = {
        "dialects": {
            dialect: {
                "buckets": {
                    "dev": 0,
                    "test": 0,
                    "train": 0,
                },
                "splits": {
                    "proficiency": {},
                    "age": {},
                    "locale": {},
                    "crowdsourced": 0,
                },
                # Collected as a set of speaker ids; converted to a count
                # once the dialect has been fully processed.
                "speakers": set(),
                "size": 0,
                "totalExamples": 0,
                "examplesTranslated": 0,
                "durationLabelled": 0,
                "durationUnlabelled": 0,
            }
            for dialect in dialects.keys()
        },
        "totalExamples": 0,
        "examplesTranslated": 0,
        "durationLabelled": 0,
        "durationUnlabelled": 0,
        "version": "1.0.0",
        "date": "2023-10-7",
        "name": "NENA Speech Dataset",
        "multilingual": True,
    }

    def split_examples(examples):
        """Partition a list of examples into test/dev/train buckets."""
        test_end = int(test_split * len(examples))
        dev_end = int((dev_split + test_split) * len(examples))
        return {
            'test': examples[:test_end],
            'dev': examples[test_end:dev_end],
            'train': examples[dev_end:],
        }

    subsets = {
        dialect: split_examples([
            example for example in examples
            if example.expand['dialect'].name.lower() == dialect
        ])
        for dialect in dialects.keys()
    }

    with tqdm(total=len(examples)) as pbar:
        for dialect, subset in subsets.items():
            # NOTE: renamed from `examples` to avoid shadowing the full list
            # fetched above; the per-split fractions below intentionally use
            # the per-split count as denominator (original behaviour).
            for split, split_examples_list in subset.items():
                audio_dir_path = os.path.join("audio", dialect, split)
                os.makedirs(audio_dir_path, exist_ok=True)

                transcripts = []
                transcript_dir_path = os.path.join("transcript", dialect)
                os.makedirs(transcript_dir_path, exist_ok=True)

                for example in split_examples_list:
                    pbar.set_description(f"Downloading audios ({dialect}/{split})")
                    pbar.update(1)

                    audio_url = pb.get_file_url(example, example.speech, {})
                    response = requests.get(audio_url)

                    # Round-trip through a temp file so pydub can sniff the
                    # container format, then re-encode at 48 kHz.
                    with tempfile.NamedTemporaryFile() as f:
                        f.write(response.content)
                        f.flush()
                        audio = AudioSegment.from_file(f.name)

                    audio = audio.set_frame_rate(48000)
                    audio_file_name = f"nena_speech_{example.id}.mp3"
                    audio_file_path = os.path.join(audio_dir_path, audio_file_name)
                    audio.export(audio_file_path, format="mp3")

                    transcripts.append({
                        'client_id': example.speaker,
                        'transcription': example.transcription,
                        'translation': example.translation,
                        'path': audio_file_name,
                        'locale': example.locale,
                        'proficiency': example.proficiency,
                        'age': example.age,
                        'crowdsourced': example.crowdsourced,
                        'unlabelled': not example.transcription,
                        'interrupted': contains_interruption(
                            example.transcription, example.translation),
                    })

                    dialect_stats = stats["dialects"][dialect]

                    stats["totalExamples"] += 1
                    dialect_stats["totalExamples"] += 1

                    if example.translation:
                        stats["examplesTranslated"] += 1
                        dialect_stats["examplesTranslated"] += 1

                    # Durations are accumulated in seconds (pydub length
                    # is in milliseconds).
                    if example.transcription:
                        stats["durationLabelled"] += len(audio) / 1000
                        dialect_stats["durationLabelled"] += len(audio) / 1000
                    else:
                        stats["durationUnlabelled"] += len(audio) / 1000
                        dialect_stats["durationUnlabelled"] += len(audio) / 1000

                    dialect_stats["buckets"][split] += 1
                    dialect_stats["speakers"].add(example.speaker)

                    n = len(split_examples_list)
                    dialect_stats["splits"]["proficiency"][example.proficiency] = \
                        dialect_stats["splits"]["proficiency"].get(example.proficiency, 0) + 1 / n
                    dialect_stats["splits"]["age"][example.age] = \
                        dialect_stats["splits"]["age"].get(example.age, 0) + 1 / n
                    dialect_stats["splits"]["locale"][example.locale] = \
                        dialect_stats["splits"]["locale"].get(example.locale, 0) + 1 / n
                    if example.crowdsourced:
                        dialect_stats["splits"]["crowdsourced"] += 1 / n

                pbar.set_description(f"Saving audios ({dialect}/{split})")
                audio_tar_path = f"{audio_dir_path}.tar"
                with tarfile.open(audio_tar_path, 'w') as tar:
                    tar.add(audio_dir_path, arcname=os.path.basename(audio_dir_path))

                pbar.set_description(f"Saving transcripts ({dialect}/{split})")
                with open(os.path.join(transcript_dir_path, f"{split}.tsv"),
                          'w', newline='') as f:
                    fieldnames = [] if len(transcripts) == 0 else transcripts[0].keys()
                    writer = csv.DictWriter(f, fieldnames=fieldnames, delimiter='\t')
                    writer.writeheader()
                    writer.writerows(transcripts)

                # The loose MP3s are now inside the tar; drop the directory.
                shutil.rmtree(audio_dir_path)

            # Sets are not repr-able into release_stats.py usefully;
            # replace with the speaker count.
            stats["dialects"][dialect]["speakers"] = \
                len(stats["dialects"][dialect]["speakers"])

    with open('dialect.py', 'w') as f:
        f.write(f'DIALECT = {repr(dialects)}\n')

    with open('release_stats.py', 'w') as f:
        f.write(f'STATS = {repr(stats)}\n')


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Build the NENA Speech dataset")
    parser.add_argument(
        "-b",
        "--build",
        action="store_true",
        help="Download validated examples and build the dataset",
    )
    args = parser.parse_args()
    # Fix: the original parsed --build but ran build_dataset() regardless,
    # making the flag a no-op. Now the flag actually gates the build.
    if args.build:
        build_dataset()