# transcriber-prompt/testwhisper.py
import whisper
import timeit

# Load the three English-only Whisper model sizes to compare
model_tiny = whisper.load_model("tiny.en")
model_base = whisper.load_model("base.en")
model_small = whisper.load_model("small.en")
# One transcription statement to benchmark per model size
codes_to_time = [
    "print(model_tiny.transcribe('2.wav')['text'])",
    "print(model_base.transcribe('2.wav')['text'])",
    "print(model_small.transcribe('2.wav')['text'])",
]

avg_times = []
for code_to_time in codes_to_time:
    # Run each transcription 5 times and record the average wall-clock time
    execution_time = timeit.timeit(code_to_time, globals=globals(), number=5)
    avg_time = execution_time / 5.0
    avg_times.append(avg_time)
    print(f"Execution time: {avg_time} seconds")
print(avg_times)
# [1.2609960311994655, 1.8864748299994971, 6.38237024199916]
# From both a speed and accuracy perspective, base is the best choice
# result = model.transcribe("2.wav")
# print(result["text"])
# TODO: Figure out how to run Whisper on audio chunks in Python so it can be
# integrated into the runner (see the sketch below)
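
# A minimal sketch of the chunked approach mentioned in the TODO above, assuming
# the openai-whisper package: whisper.load_audio() returns mono float32 audio
# resampled to 16 kHz, and Model.transcribe() accepts a NumPy array directly.
# The 30-second window, the transcribe_in_chunks name, and the '2.wav' usage
# below are illustrative assumptions, not part of the original script.
def transcribe_in_chunks(model, path, chunk_seconds=30, sample_rate=16000):
    audio = whisper.load_audio(path)  # float32 waveform at 16 kHz
    chunk_size = chunk_seconds * sample_rate
    texts = []
    for start in range(0, len(audio), chunk_size):
        chunk = audio[start:start + chunk_size]
        texts.append(model.transcribe(chunk)["text"].strip())
    return " ".join(texts)

# Example usage (commented out so the benchmark above stays the only workload):
# print(transcribe_in_chunks(model_base, "2.wav"))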