Datasets:
Tasks:
Automatic Speech Recognition
Modalities:
Audio
Languages:
Polish
Size:
10K<n<100K
DOI:
License:
mj-new
committed on
Commit
•
2a586a9
1
Parent(s):
7d200c7
Updated README
Browse files- README.md +5 -2
- pl-asr-bigos-v2.py +1 -3
- test.py +1 -28
README.md
CHANGED
@@ -99,7 +99,7 @@ Polish
|
|
99 |
## Dataset Structure
|
100 |
The datasets consist of audio recordings in the WAV format with corresponding metadata.<br>
|
101 |
The audio and metadata can be used in a raw format (TSV) or via the Hugging Face datasets library.<br>
|
102 |
-
References for the test split will only become available after the completion of the
|
103 |
|
104 |
### Data Instances
|
105 |
The train set consists of 82 025 samples.
|
@@ -115,7 +115,10 @@ Available fields:
|
|
115 |
* `ref_orig` - original transcription of audio file
|
116 |
* `samplingrate_orig` - sampling rate of the original recording
|
117 |
* `sampling_rate` - sampling rate of recording in the release
|
118 |
-
* `audiopath_bigos` -
|
|
|
|
|
|
|
119 |
<br><br>
|
120 |
|
121 |
### Data Splits
|
|
|
99 |
## Dataset Structure
|
100 |
The datasets consist of audio recordings in the WAV format with corresponding metadata.<br>
|
101 |
The audio and metadata can be used in a raw format (TSV) or via the Hugging Face datasets library.<br>
|
102 |
+
References for the test split will only become available after the completion of the 2024 PolEval challenge.<br>
|
103 |
|
104 |
### Data Instances
|
105 |
The train set consists of 82 025 samples.
|
|
|
115 |
* `ref_orig` - original transcription of audio file
|
116 |
* `samplingrate_orig` - sampling rate of the original recording
|
117 |
* `sampling_rate` - sampling rate of recording in the release
|
118 |
+
* `audiopath_bigos` - relative filepath to audio file extracted from tar.gz archive
|
119 |
+
* `audiopath_local` - absolute filepath to audio file extracted with the build script
|
120 |
+
* `spk_sex_source` - sex of the speaker extracted from the source meta-data (N/A if not available)
|
121 |
+
* `spk_age_source` - age group of the speaker (in CommonVoice format) extracted from the source (N/A if not available)
|
122 |
<br><br>
|
123 |
|
124 |
### Data Splits
|
pl-asr-bigos-v2.py
CHANGED
@@ -110,9 +110,7 @@ class Bigos(datasets.GeneratorBasedBuilder):
|
|
110 |
"audiopath_bigos": datasets.Value("string"),
|
111 |
"audiopath_local": datasets.Value("string"),
|
112 |
"speaker_age": datasets.Value("string"),
|
113 |
-
"speaker_sex": datasets.Value("string")
|
114 |
-
#"ref_spoken": datasets.Value("string"),
|
115 |
-
#"ref_written": datasets.Value("string")
|
116 |
}
|
117 |
)
|
118 |
|
|
|
110 |
"audiopath_bigos": datasets.Value("string"),
|
111 |
"audiopath_local": datasets.Value("string"),
|
112 |
"speaker_age": datasets.Value("string"),
|
113 |
+
"speaker_sex": datasets.Value("string")
|
|
|
|
|
114 |
}
|
115 |
)
|
116 |
|
test.py
CHANGED
@@ -26,35 +26,8 @@ print(dataset_local["validation"][0])
|
|
26 |
print(dataset_local["train"][0])
|
27 |
|
28 |
print("Checking build script on huggingface.co")
|
29 |
-
dataset_hf = load_dataset(hf_db_name, "all")
|
30 |
print(dataset_hf)
|
31 |
print(dataset_hf["test"][0])
|
32 |
print(dataset_hf["validation"][0])
|
33 |
print(dataset_hf["train"][0])
|
34 |
-
|
35 |
-
"""
|
36 |
-
for split in splits:
|
37 |
-
print("Checking split: ", split)
|
38 |
-
print(dataset[split][0])
|
39 |
-
#TODO - rename to include date of test set creation in order to check if adding new split removes the previous one
|
40 |
-
if split == "test":
|
41 |
-
assert len(dataset["test"]) == 1900
|
42 |
-
_BIGOS_SUBSETS = ["clarin-pjatk-mobile-15", "clarin-pjatk-studio-15", "fair-mls-20", "mailabs-19", "mozilla-common-voice-19", "pwr-azon-read-20", "pwr-azon-spont-20", "pwr-maleset-unk", "pwr-shortwords-unk", "pwr-viu-unk"]
|
43 |
-
|
44 |
-
print("Testing specific subsets")
|
45 |
-
for subset in _BIGOS_SUBSETS:
|
46 |
-
dataset = load_dataset('michaljunczyk/pl-asr-bigos', subset)
|
47 |
-
print("subset: ", subset)
|
48 |
-
|
49 |
-
for split in splits:
|
50 |
-
print("Checking split: ", split)
|
51 |
-
print(dataset[split][0])
|
52 |
-
if split == "test":
|
53 |
-
if subset == "pwr-azon-spont-20":
|
54 |
-
assert len(dataset["test"]) == 100
|
55 |
-
else:
|
56 |
-
assert len(dataset["test"]) == 200
|
57 |
-
print(dataset)
|
58 |
-
|
59 |
-
# TODO - add more tests for other splits
|
60 |
-
"""
|
|
|
26 |
print(dataset_local["train"][0])
|
27 |
|
28 |
print("Checking build script on huggingface.co")
|
29 |
+
dataset_hf = load_dataset(hf_db_name, "all", download_mode="force_redownload")
|
30 |
print(dataset_hf)
|
31 |
print(dataset_hf["test"][0])
|
32 |
print(dataset_hf["validation"][0])
|
33 |
print(dataset_hf["train"][0])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|