asahi417 committed
Commit ada231b
1 Parent(s): a345421

Update benchmark.py

Files changed (1)
  1. benchmark.py +4 -3
benchmark.py CHANGED
@@ -16,14 +16,15 @@ pipe = pipeline(
 )
 
 test_audio = [
-    "kotoba-whisper-eval/audio/long_interview_1.wav",
     "kotoba-whisper-eval/audio/manzai1.wav",
     "kotoba-whisper-eval/audio/manzai2.wav",
-    "kotoba-whisper-eval/audio/manzai3.wav"
+    "kotoba-whisper-eval/audio/manzai3.wav",
+    "kotoba-whisper-eval/audio/long_interview_1.wav",
 ]
 elapsed = {}
 for x in test_audio:
     start = time()
     transcription = pipe(x, generate_kwargs=generate_kwargs)
     elapsed[x] = time() - start
-pprint(elapsed)
+pprint(transcription)
+pprint(elapsed)
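For context, the commit reorders the test files so the long interview runs last and prints the transcription alongside the per-file timings. Below is a minimal sketch of what the full benchmark.py plausibly looks like after this change; everything outside the hunk above (imports, the model checkpoint, chunk_length_s, and the generate_kwargs values) is an assumption for illustration, not taken from the repository.

# Hedged sketch of benchmark.py after this commit. Only the test_audio list,
# timing loop, and pprint calls come from the diff; the rest is assumed.
from time import time
from pprint import pprint

from transformers import pipeline

# Hypothetical checkpoint and decoding options, for illustration only.
pipe = pipeline(
    "automatic-speech-recognition",
    model="kotoba-tech/kotoba-whisper-v1.0",
    chunk_length_s=15,
)
generate_kwargs = {"language": "japanese", "task": "transcribe"}

test_audio = [
    "kotoba-whisper-eval/audio/manzai1.wav",
    "kotoba-whisper-eval/audio/manzai2.wav",
    "kotoba-whisper-eval/audio/manzai3.wav",
    "kotoba-whisper-eval/audio/long_interview_1.wav",
]

# Time each file; the transcription variable holds the result of the last file.
elapsed = {}
for x in test_audio:
    start = time()
    transcription = pipe(x, generate_kwargs=generate_kwargs)
    elapsed[x] = time() - start
pprint(transcription)
pprint(elapsed)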