---
license: cc-by-4.0
task_categories:
- automatic-speech-recognition
- text-to-speech
language:
- vi
pretty_name: VietMed unlabeled set
size_categories:
- 100K<n<1M
dataset_info:
  features:
  - name: audio
    dtype: audio
  - name: Metadata ID
    dtype: string
  splits:
  - name: train
    num_bytes: 57670081699.38
    num_examples: 230516
  download_size: 51899577807
  dataset_size: 57670081699.38
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
---
# unofficial mirror of VietMed (Vietnamese speech data in medical domain) unlabeled set
official announcement: https://arxiv.org/abs/2404.05659

official download: https://huggingface.co/datasets/leduckhai/VietMed

this repo contains the unlabeled set: 966 hours of audio, 230,516 samples

i also gathered the metadata: see [info.csv](info.csv)
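for a quick peek at its contents, a minimal sketch (assuming pandas is installed; the columns are the ones used in the usage snippet below):

```python
from pandas import read_csv

info = read_csv("info.csv", index_col=0)  # rows indexed by "Metadata ID"
print(info.head())  # expected columns: Domain, ICD-10 Code, Accent
```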
my extraction code: https://github.com/phineas-pta/fine-tune-whisper-vi/blob/main/misc/vietmed-unlabeled.py

to do: check for misspellings; restore foreign words that were phonetised into vietnamese

usage with HuggingFace:
```python
# pip install -q "datasets[audio]" pandas
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from pandas import read_csv

repo_id = "doof-ferb/VietMed_unlabeled"

# stream the audio instead of downloading ~52 GB up front
dataset = load_dataset(repo_id, split="train", streaming=True)

# fetch the gathered metadata & index it by "Metadata ID"
info_file = hf_hub_download(repo_id=repo_id, filename="info.csv", repo_type="dataset")
info_dict = read_csv(info_file, index_col=0).to_dict("index")

def merge_info(batch):
    """attach Domain, ICD-10 Code & Accent from info.csv to each sample"""
    meta = info_dict.get(batch["Metadata ID"])
    if meta is not None:
        batch["Domain"] = meta["Domain"]
        batch["ICD-10 Code"] = meta["ICD-10 Code"]
        batch["Accent"] = meta["Accent"]
    else:  # sample without metadata: fill with empty strings
        batch["Domain"] = ""
        batch["ICD-10 Code"] = ""
        batch["Accent"] = ""
    return batch

dataset = dataset.map(merge_info)
```
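to check the merge, pull a single sample (streaming, so only that sample is downloaded & decoded):

```python
sample = next(iter(dataset))
print(sample["Metadata ID"], sample["Domain"], sample["ICD-10 Code"], sample["Accent"])
print(sample["audio"]["sampling_rate"], sample["audio"]["array"].shape)
```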