"""Benchmark transcription latency of kotoba-whisper-v1.0.

Runs the Hugging Face automatic-speech-recognition pipeline over a fixed
list of local audio files, pretty-printing each transcription as it is
produced and, at the end, the per-file elapsed wall-clock times.
"""
from pprint import pprint
from time import perf_counter

import torch
from datasets import load_dataset  # NOTE(review): unused in this chunk — remove if nothing else needs it
from transformers import pipeline

# config
# Force Japanese transcription (as opposed to translation) during decoding.
generate_kwargs = {"language": "japanese", "task": "transcribe"}
model_id = "kotoba-tech/kotoba-whisper-v1.0"

# load model
pipe = pipeline(
    "automatic-speech-recognition",
    model=model_id,
    torch_dtype=torch.float32,
)

# Local evaluation audio files; paths are relative to the working directory.
test_audio = [
    "kotoba-whisper-eval/audio/manzai1.wav",
    "kotoba-whisper-eval/audio/manzai2.wav",
    "kotoba-whisper-eval/audio/manzai3.wav",
    "kotoba-whisper-eval/audio/long_interview_1.wav",
]

# Map of audio path -> transcription time in seconds.
elapsed = {}
for x in test_audio:
    # perf_counter() is a monotonic high-resolution timer, so the measured
    # interval cannot be skewed by system-clock adjustments (unlike time()).
    start = perf_counter()
    transcription = pipe(x, generate_kwargs=generate_kwargs)
    elapsed[x] = perf_counter() - start
    pprint(transcription)
pprint(elapsed)