import torch
import gradio as gr
from transformers import pipeline

# Assumes a FastSpeech2 wrapper that exposes a `from_pretrained` interface;
# this is not a standard PyPI package, so adapt the import to your setup.
from fastspeech2 import FastSpeech2

MODEL_NAME = "openai/whisper-large-v2"

device = 0 if torch.cuda.is_available() else "cpu"

# Whisper ASR pipeline; chunking lets it handle audio longer than 30 s.
pipe = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=30,
    device=device,
)

# The <|transcribe|> and <|translate|> task tokens sit at fixed offsets
# from the end of Whisper's special-token list.
all_special_ids = pipe.tokenizer.all_special_ids
transcribe_token_id = all_special_ids[-5]
translate_token_id = all_special_ids[-6]

# Placeholder checkpoint path: point this at an actual pretrained model.
voice_conversion_model = FastSpeech2.from_pretrained("path/to/pretrained/voice_conversion_model")


def convert_voice(text):
    # Synthesize speech for the given text with the voice-conversion model.
    return voice_conversion_model(text)


def transcribe(microphone, state, task="transcribe"):
    file = microphone
    # Force the decoder to the chosen task token (transcribe vs. translate).
    pipe.model.config.forced_decoder_ids = [
        [2, transcribe_token_id if task == "transcribe" else translate_token_id]
    ]
    text = pipe(file)["text"]
    converted_voice = convert_voice(text)
    # Accumulate the transcript as text; return the synthesized audio separately
    # (the audio output cannot be concatenated into the text state).
    new_state = state + "\n" + text
    return new_state, new_state, converted_voice


mf_transcribe = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.Audio(source="microphone", type="filepath", optional=True),
        gr.State(value=""),
    ],
    outputs=[
        gr.Textbox(lines=15),
        gr.State(),
        # Playback of the converted voice; assumes the model returns a
        # (sample_rate, waveform) tuple that Gradio can render as audio.
        gr.Audio(type="numpy"),
    ],
    layout="horizontal",
    theme="huggingface",
    title="Whisper Large V2: Transcribe Audio and Voice Conversion",
    live=True,
    description=(
        "Transcribe long-form microphone or audio inputs and convert the voice with the click of a button! Demo uses the"
        f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files"
        " of arbitrary length and FastSpeech2 for voice conversion."
    ),
    allow_flagging="never",
)

mf_transcribe.launch(enable_queue=True)