  0%| | 0/23265 [00:00<?, ?it/s]
Traceback (most recent call last):
  File "/mnt/lv_ai_1_dante/javierr/wav2vec2-xls-r-1b-npsc/run_speech_recognition_ctc.py", line 792, in <module>
    main()
  File "/mnt/lv_ai_1_dante/javierr/wav2vec2-xls-r-1b-npsc/run_speech_recognition_ctc.py", line 743, in main
    train_result = trainer.train(resume_from_checkpoint=checkpoint)
  File "/mnt/lv_ai_1_dante/javierr/audio/lib/python3.9/site-packages/transformers/trainer.py", line 1384, in train
    tr_loss_step = self.training_step(model, inputs)
  File "/mnt/lv_ai_1_dante/javierr/audio/lib/python3.9/site-packages/transformers/trainer.py", line 1959, in training_step
    loss = self.compute_loss(model, inputs)
  File "/mnt/lv_ai_1_dante/javierr/audio/lib/python3.9/site-packages/transformers/trainer.py", line 1991, in compute_loss
    outputs = model(**inputs)
  File "/mnt/lv_ai_1_dante/javierr/audio/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
    return forward_call(*input, **kwargs)
  File "/mnt/lv_ai_1_dante/javierr/audio/lib/python3.9/site-packages/transformers/models/wav2vec2/modeling_wav2vec2.py", line 1756, in forward
    outputs = self.wav2vec2(
  File "/mnt/lv_ai_1_dante/javierr/audio/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
    return forward_call(*input, **kwargs)
  File "/mnt/lv_ai_1_dante/javierr/audio/lib/python3.9/site-packages/transformers/models/wav2vec2/modeling_wav2vec2.py", line 1346, in forward
    extract_features = self.feature_extractor(input_values)
  File "/mnt/lv_ai_1_dante/javierr/audio/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
    return forward_call(*input, **kwargs)
  File "/mnt/lv_ai_1_dante/javierr/audio/lib/python3.9/site-packages/transformers/models/wav2vec2/modeling_wav2vec2.py", line 514, in forward
    hidden_states = conv_layer(hidden_states)
  File "/mnt/lv_ai_1_dante/javierr/audio/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
    return forward_call(*input, **kwargs)
  File "/mnt/lv_ai_1_dante/javierr/audio/lib/python3.9/site-packages/transformers/models/wav2vec2/modeling_wav2vec2.py", line 386, in forward
    hidden_states = self.conv(hidden_states)
  File "/mnt/lv_ai_1_dante/javierr/audio/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
    return forward_call(*input, **kwargs)
  File "/mnt/lv_ai_1_dante/javierr/audio/lib/python3.9/site-packages/torch/nn/modules/conv.py", line 301, in forward
    return self._conv_forward(input, self.weight, self.bias)
  File "/mnt/lv_ai_1_dante/javierr/audio/lib/python3.9/site-packages/torch/nn/modules/conv.py", line 297, in _conv_forward
    return F.conv1d(input, weight, bias, self.stride,
RuntimeError: CUDA error: no kernel image is available for execution on the device
CUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.
For debugging consider passing CUDA_LAUNCH_BLOCKING=1.
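A "no kernel image is available for execution on the device" error usually means the installed PyTorch wheel was not built with kernels for this GPU's compute capability. As a quick check (a minimal diagnostic sketch, not part of the original run; it assumes PyTorch is importable in the same environment and that GPU 0 is the device in use), something like the following can show whether the build actually targets the local GPU architecture:

    # Hypothetical diagnostic snippet, not from the original log.
    # Compares the GPU's compute capability with the architectures
    # (sm_XX) compiled into the installed PyTorch build.
    import torch

    print("PyTorch:", torch.__version__, "| CUDA build:", torch.version.cuda)
    print("GPU:", torch.cuda.get_device_name(0))
    print("Compute capability:", torch.cuda.get_device_capability(0))
    print("Kernel architectures in this build:", torch.cuda.get_arch_list())

If the GPU's compute capability does not appear in the list reported by get_arch_list(), installing a PyTorch build compiled for that architecture is the usual fix; setting CUDA_LAUNCH_BLOCKING=1, as the message above suggests, only makes the failing call report synchronously and does not resolve the mismatch.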