Update README.md
README.md CHANGED
@@ -59,4 +59,123 @@ This combined German speech dataset can be used for various purposes
- NLP (Natural Language Processing) research
- Text-to-Speech applications

When using this dataset, however, please observe the license terms of the individual sources as well as any restrictions or guidelines regarding data protection or copyright.
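For orientation, the sketch below shows one way to open the dataset with the `datasets` library. The split `test` and the columns `audio` and `transkription` are taken from the evaluation script further down; the local path `asr-german-mixed` is an assumption and stands in for wherever the dataset has been saved.

```python
# Minimal sketch, assuming the dataset was saved locally as "asr-german-mixed".
# Split name ("test") and column names ("audio", "transkription") follow the
# evaluation script below.
import datasets

ds = datasets.load_from_disk("asr-german-mixed")
ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16000, decode=True))

sample = ds["test"][0]
print(sample["transkription"])           # transcript text
print(len(sample["audio"]["array"]))     # number of samples at 16 kHz
```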
Evaluation results:

| Model | Word error rate |
| ----- | ----- |
| openai/whisper-large-v3-turbo | 14.14% |

Evaluation script (openai/whisper-large-v3-turbo):
```python
import datasets
from transformers import (
    AutoConfig,
    AutoModelForSpeechSeq2Seq,
    AutoProcessor,
)
from jiwer import wer, cer, wer_standardize_contiguous
from tqdm.auto import tqdm
import torch

BASE_MODEL = "Model Path"  # e.g. "openai/whisper-large-v3-turbo"
DATASET_NAME = "asr-german-mixed"
batch_size = 4

# Load the dataset from disk, decode audio at 16 kHz and return torch tensors.
cv_data = (
    datasets.load_from_disk(DATASET_NAME)
    .cast_column(
        "audio",
        datasets.Audio(sampling_rate=16000, decode=True),
    )
    .with_format("torch")
)


def get_model(model_name: str):
    kwargs = {}
    processor = AutoProcessor.from_pretrained(
        model_name,
        legacy=False,
        trust_remote_code=True,
    )

    kwargs["attn_implementation"] = "sdpa"

    model = AutoModelForSpeechSeq2Seq.from_pretrained(
        model_name,
        **kwargs,
    )

    model = model.to(torch.bfloat16).cuda()

    return model, processor


model, processor = get_model(model_name=BASE_MODEL)
model.generation_config.do_sample = False
model.generation_config.num_beams = 1

references = []
predictions = []
test_data = cv_data["test"]

for i in tqdm(range(0, len(test_data), batch_size)):
    batch = test_data[i:i + batch_size]

    input_features = []
    for x in batch["audio"]:
        # Extract the audio features from the audio
        extracted = processor(
            audio=x["array"],
            sampling_rate=16000,
            return_tensors="pt",
            return_attention_mask=True,
        )

        # Check whether the feature extractor returns input_features or input_values
        ft = (
            "input_values"
            if hasattr(extracted, "input_values")
            else "input_features"
        )

        # Collect the extracted features together with their attention mask
        input_features.append(
            {
                ft: getattr(extracted, ft)[0],
                "attention_mask": extracted.attention_mask,
            }
        )

    # Pad the batch to the longest example
    batch_pad = processor.feature_extractor.pad(
        input_features,
        padding="longest",
        return_tensors="pt",
    )

    with torch.no_grad():
        generated_ids = model.generate(
            input_features=batch_pad.input_features.to(torch.bfloat16).cuda(),
            attention_mask=batch_pad.attention_mask.to(torch.bfloat16).cuda(),
            max_new_tokens=384,
            language="de",
        )

    transcriptions = processor.batch_decode(generated_ids, skip_special_tokens=True)
    references.extend(batch["transkription"])
    predictions.extend(transcriptions)

    # Report the running WER periodically
    if i % 1000 == 0:
        print("WER: ", wer(references, predictions, reference_transform=wer_standardize_contiguous, hypothesis_transform=wer_standardize_contiguous) * 100)

# Final WER over the full test split
print("*" * 20)
print("WER: ", wer(references, predictions, reference_transform=wer_standardize_contiguous, hypothesis_transform=wer_standardize_contiguous) * 100)
print("*" * 20)
```
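The WER in the table above is what this script prints at the end: jiwer's `wer` with the `wer_standardize_contiguous` transform applied to both references and hypotheses. As a quick illustration of that metric call in isolation, here is a toy example; the transcripts are invented and not taken from the dataset.

```python
from jiwer import wer, wer_standardize_contiguous

# Invented reference/hypothesis pairs, only to illustrate the metric call.
references = ["heute scheint die Sonne", "das ist ein Test"]
predictions = ["heute scheint die sonne", "das ist ein text"]

score = wer(
    references,
    predictions,
    reference_transform=wer_standardize_contiguous,
    hypothesis_transform=wer_standardize_contiguous,
)
print(f"WER: {score * 100:.2f} %")
```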