import os

from faster_whisper import WhisperModel

# Lazily loaded module-level model so the weights are only loaded once per process.
model = None


def get_model():
    """Return the shared WhisperModel, creating it on first use."""
    global model
    if model is None:
        model = WhisperModel("base", device="cpu", compute_type="int8")
    return model


def transcribe_audio(audio_path: str) -> str:
    """Transcribe an audio file to text."""
    try:
        if not os.path.exists(audio_path):
            return f"Audio file not found: {audio_path}"

        model = get_model()
        segments, info = model.transcribe(audio_path, beam_size=5)

        # Segments are yielded lazily; joining them drives the actual decoding.
        transcription = " ".join(segment.text for segment in segments)
        return transcription.strip() or "No transcription found."

    except Exception as e:
        return f"Error transcribing audio: {str(e)}"
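
# Minimal usage sketch. The file name "sample.wav" is a hypothetical example,
# not something this module provides; point it at any audio file on disk.
if __name__ == "__main__":
    result = transcribe_audio("sample.wav")
    print(result)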