Update datas.py
datas.py CHANGED
@@ -5,14 +5,15 @@ import ctcalign
 model_word_separator = '|'
 model_blank_token = '[PAD]'
 
-ds_i = load_dataset("language-and-voice-lab/samromur_asr",split='train+validation+test')
-#
+#ds_i = load_dataset("language-and-voice-lab/samromur_asr",split='train+validation+test')
+# loading the whole corpus can be difficult for browser to handle live filtering
+ds_i = load_dataset("language-and-voice-lab/samromur_asr",split='test')
 ds_i = ds_i.data.to_pandas()
 i_model_path="carlosdanielhernandezmena/wav2vec2-large-xlsr-53-icelandic-ep10-1000h"
 a_i = ctcalign.aligner(i_model_path,model_word_separator,model_blank_token)
 
-ds_f = load_dataset("carlosdanielhernandezmena/ravnursson_asr",split='train+validation+test')
-
+#ds_f = load_dataset("carlosdanielhernandezmena/ravnursson_asr",split='train+validation+test')
+ds_f = load_dataset("carlosdanielhernandezmena/ravnursson_asr",split='test')
 ds_f = ds_f.data.to_pandas()
 f_model_path="carlosdanielhernandezmena/wav2vec2-large-xlsr-53-faroese-100h"
 a_f = ctcalign.aligner(f_model_path,model_word_separator,model_blank_token)
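
For reference, a minimal sketch of one further option, assuming the test split itself ever becomes too heavy for live filtering in the browser: the datasets library also accepts split slicing (e.g. 'test[:500]'), so a fixed-size sample could be loaded instead. The slice size and the *_small variable names below are illustrative assumptions, not part of this commit.

from datasets import load_dataset

# Load only the first 500 test examples of each corpus (assumed sample size),
# then convert to pandas the same way datas.py does.
ds_i_small = load_dataset("language-and-voice-lab/samromur_asr", split="test[:500]")
ds_i_small = ds_i_small.data.to_pandas()

ds_f_small = load_dataset("carlosdanielhernandezmena/ravnursson_asr", split="test[:500]")
ds_f_small = ds_f_small.data.to_pandas()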