File size: 2,159 Bytes
ef5fb31 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 |
45%|██████████████████████████████████████████████████████████████████████████████ | 29/64 [01:44<02:10, 3.72s/it]Traceback (most recent call last):
File "run_speech_recognition_ctc.py", line 749, in <module>
main()
File "run_speech_recognition_ctc.py", line 700, in main
train_result = trainer.train(resume_from_checkpoint=checkpoint)
File "/opt/conda/lib/python3.8/site-packages/transformers/trainer.py", line 1347, in train
for step, inputs in enumerate(epoch_iterator):
File "/opt/conda/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 521, in __next__
data = self._next_data()
File "/opt/conda/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 561, in _next_data
data = self._dataset_fetcher.fetch(index) # may raise StopIteration
File "/opt/conda/lib/python3.8/site-packages/torch/utils/data/_utils/fetch.py", line 52, in fetch
return self.collate_fn(data)
File "run_speech_recognition_ctc.py", line 282, in __call__
batch = self.processor.pad(
File "/opt/conda/lib/python3.8/site-packages/transformers/models/wav2vec2/processing_wav2vec2.py", line 147, in pad
return self.current_processor.pad(*args, **kwargs)
File "/opt/conda/lib/python3.8/site-packages/transformers/feature_extraction_sequence_utils.py", line 217, in pad
outputs = self._pad(
File "/opt/conda/lib/python3.8/site-packages/transformers/feature_extraction_sequence_utils.py", line 286, in _pad
processed_features[self.model_input_names[0]] = np.pad(
File "<__array_function__ internals>", line 5, in pad
File "/opt/conda/lib/python3.8/site-packages/numpy/lib/arraypad.py", line 796, in pad
padded, original_area_slice = _pad_simple(array, pad_width)
File "/opt/conda/lib/python3.8/site-packages/numpy/lib/arraypad.py", line 124, in _pad_simple
padded[original_area_slice] = array
KeyboardInterrupt |