Automatic Speech Recognition
Transformers
Safetensors
Japanese
whisper
audio
hf-asr-leaderboard
Eval Results
Inference Endpoints
kotoba-whisper-v1.0 / benchmark.py
asahi417's picture
Update benchmark.py
ada231b verified
raw
history blame contribute delete
766 Bytes
from time import time
from pprint import pprint
import torch
from transformers import pipeline
from datasets import load_dataset
# --- benchmark configuration ---------------------------------------------
# Decoding options passed to Whisper's generate(): transcribe (not translate)
# with Japanese as the target language.
generate_kwargs = dict(language="japanese", task="transcribe")
model_id = "kotoba-tech/kotoba-whisper-v1.0"

# --- model ----------------------------------------------------------------
# Build the ASR pipeline in full fp32 precision (no half-precision casting),
# so timings reflect the default-precision path.
pipe = pipeline(task="automatic-speech-recognition", model=model_id, torch_dtype=torch.float32)
# Audio clips to benchmark (paths relative to the kotoba-whisper-eval repo):
# three short manzai (comedy dialogue) clips plus one long interview.
audio_files = (
    "kotoba-whisper-eval/audio/manzai1.wav",
    "kotoba-whisper-eval/audio/manzai2.wav",
    "kotoba-whisper-eval/audio/manzai3.wav",
    "kotoba-whisper-eval/audio/long_interview_1.wav",
)
# Map each file path to its wall-clock transcription time in seconds.
elapsed = {}
for audio_path in audio_files:
    t0 = time()
    result = pipe(audio_path, generate_kwargs=generate_kwargs)
    elapsed[audio_path] = time() - t0
    pprint(result)
# Summary: per-file elapsed seconds.
pprint(elapsed)