import torch
from transformers import pipeline
import librosa
import soundfile as sf
import numpy as np
class WhisperTranscriber:
    def __init__(self, model_size="medium"):
        self.model_size = model_size
        self.device = "cuda:0" if torch.cuda.is_available() else "cpu"
        # float16 is only safe on GPU; fall back to float32 on CPU
        torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
        self.model = pipeline(
            "automatic-speech-recognition",
            model=f"openai/whisper-{model_size}",
            chunk_length_s=30,
            device=self.device,
            batch_size=8,
            torch_dtype=torch_dtype,
            return_timestamps=True,
        )
    def preprocess_audio(self, audio_path, target_sr=16000):
        # Load audio at its native sample rate with librosa
        y, sr = librosa.load(audio_path, sr=None)
        # Resample to 16 kHz (the sample rate Whisper expects)
        y_resampled = librosa.resample(y=y, orig_sr=sr, target_sr=target_sr)
        # Apply a pre-emphasis filter to boost high frequencies
        # (note: this is spectral shaping, not true noise reduction)
        y_cleaned = librosa.effects.preemphasis(y_resampled)
        # Normalize amplitude to [-1, 1]
        y_normalized = librosa.util.normalize(y_cleaned)
        # Trim leading/trailing silence below the 30 dB threshold
        y_filtered = librosa.effects.trim(
            y_normalized,
            top_db=30,
            frame_length=2048,
            hop_length=512,
        )[0]
        return y_filtered, target_sr
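    # Illustrative addition (not in the original file): a minimal sketch for
    # auditing the preprocessing output by writing it to disk with soundfile.
    # "output_path" is a hypothetical parameter; adjust to your layout.
    def save_preprocessed(self, audio_path, output_path="preprocessed.wav"):
        audio_data, sample_rate = self.preprocess_audio(audio_path)
        sf.write(output_path, audio_data, sample_rate)
        return output_path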
    def transcribe(self, audio_path):
        try:
            # Preprocess audio
            audio_data, sample_rate = self.preprocess_audio(audio_path)
            print(f"Audio loaded and preprocessed - Shape: {audio_data.shape}, Sample rate: {sample_rate}")
            # Transcribe; pass the sample rate explicitly so the pipeline
            # does not have to guess it for the raw array
            result = self.model(
                {"raw": audio_data, "sampling_rate": sample_rate},
                generate_kwargs={
                    "task": "transcribe",
                    "language": "en",
                    "max_new_tokens": 256,
                    "temperature": 0.7,  # non-zero temperature, intended to curb repetition/hallucination loops
                },
            )
            # Extract the transcription, joining timestamped chunks if present
            if isinstance(result, dict):
                if "chunks" in result:
                    transcription = " ".join(chunk["text"] for chunk in result["chunks"])
                else:
                    transcription = result["text"]
            else:
                transcription = result
            return transcription
        except Exception as e:
            print(f"Error in transcribe: {str(e)}")
            raise
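    # Illustrative addition (not in the original file): expose the raw
    # timestamped chunks that return_timestamps=True already produces,
    # instead of flattening them to plain text as transcribe() does.
    def transcribe_with_timestamps(self, audio_path):
        audio_data, sample_rate = self.preprocess_audio(audio_path)
        result = self.model({"raw": audio_data, "sampling_rate": sample_rate})
        # Each chunk is a dict like {"text": ..., "timestamp": (start, end)}
        return result.get("chunks", []) if isinstance(result, dict) else []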
# Example usage
if __name__ == "__main__":
    transcriber = WhisperTranscriber(model_size="medium")
    transcription = transcriber.transcribe("path_to_your_audio_file.wav")
    print(f"Transcription: {transcription}")