import torch
import gradio as gr
from transformers import (
    AutomaticSpeechRecognitionPipeline,
    WhisperForConditionalGeneration,
    WhisperTokenizer,
    WhisperProcessor,
)
from peft import PeftModel, PeftConfig

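# PEFT LoRA adapter fine-tuned for Vietnamese transcription; the base Whisper
# checkpoint it was trained on is read from the adapter's config below.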
peft_model_id = "Namkoy/whisper_peft_vi_nam"
language = "vietnamese"
task = "transcribe"
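
# Load the base Whisper checkpoint in 8-bit (bitsandbytes) and attach the LoRA adapter on top.
# Note: newer transformers releases prefer quantization_config=BitsAndBytesConfig(load_in_8bit=True)
# over the bare load_in_8bit flag.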
peft_config = PeftConfig.from_pretrained(peft_model_id)
model = WhisperForConditionalGeneration.from_pretrained(
    peft_config.base_model_name_or_path, load_in_8bit=True, device_map="auto"
)
model = PeftModel.from_pretrained(model, peft_model_id)
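
# Tokenizer and processor come from the same base checkpoint; forced_decoder_ids pins
# decoding to Vietnamese transcription instead of letting the model auto-detect the language.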
tokenizer = WhisperTokenizer.from_pretrained(peft_config.base_model_name_or_path, language=language, task=task)
processor = WhisperProcessor.from_pretrained(peft_config.base_model_name_or_path, language=language, task=task)
feature_extractor = processor.feature_extractor
forced_decoder_ids = processor.get_decoder_prompt_ids(language=language, task=task)
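
# Wrap model, tokenizer and feature extractor in a transformers ASR pipeline,
# which handles audio decoding and feature extraction from a file path.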
pipe = AutomaticSpeechRecognitionPipeline(model=model, tokenizer=tokenizer, feature_extractor=feature_extractor)
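

# Transcribe a recorded or uploaded audio file. Generation runs under CUDA autocast,
# so a GPU is required (torch.amp.autocast("cuda") is the newer spelling of this context manager).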
def transcribe(audio):
    with torch.cuda.amp.autocast():
        text = pipe(audio, generate_kwargs={"forced_decoder_ids": forced_decoder_ids}, max_new_tokens=255)["text"]
    return text
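

# Gradio UI: accepts an audio file path as input and displays the transcribed text.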
iface = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(type="filepath"),
    outputs="text",
    title="PEFT LoRA + INT8 Whisper Large V2 Vietnamese",
    description="Realtime demo for Vietnamese speech recognition using `PEFT-LoRA+INT8` fine-tuned Whisper Large V2 model.",
)

iface.launch(share=True)