# alexa-demo / app.py
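"""Alexa-style voice assistant demo.

Pipeline: Whisper transcribes the spoken prompt, Phi-3-mini generates a reply,
and a VITS model narrates the reply back through a Gradio audio interface.
"""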
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
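
# End-to-end handler: audio file in -> transcript -> LLM reply -> synthesized speech out.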
def alexa(audio):
    converted_text = speech_to_text(audio)
    generated_text = text_generation(converted_text)
    speech = text_to_speech(generated_text)
    return speech
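
# Transcribe the recording with Whisper (the tiny checkpoint keeps the demo lightweight).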
def speech_to_text(audio):
    audio_to_text = pipeline("automatic-speech-recognition", model="openai/whisper-tiny")
    text = audio_to_text(audio, generate_kwargs={"task": "transcribe", "language": "english"})["text"]
    return text
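
# Generate a reply with Phi-3-mini via the chat-style text-generation pipeline.
# Note: the model and tokenizer are reloaded on every request; for a faster demo
# they could be created once at module scope instead.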
def text_generation(text):
    model = AutoModelForCausalLM.from_pretrained(
        "microsoft/Phi-3-mini-128k-instruct", trust_remote_code=True
    )
    tokenizer = AutoTokenizer.from_pretrained(
        "microsoft/Phi-3-mini-128k-instruct", trust_remote_code=True
    )
    messages = [{"role": "user", "content": text}]
    generation_args = {
        "max_new_tokens": 500,
        "return_full_text": False,
        "temperature": 0.1,
        "do_sample": True,
    }
    text_gen = pipeline("text-generation", model=model, tokenizer=tokenizer, trust_remote_code=True)
    response = text_gen(messages, **generation_args)
    return response[0]["generated_text"]
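
# Narrate the reply with the VITS LJSpeech voice. The pipeline returns a dict with
# "audio" (a (1, n) waveform array) and "sampling_rate", which maps onto the
# (rate, samples) tuple Gradio's Audio component expects.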
def text_to_speech(text):
    text_to_audio = pipeline("text-to-speech", model="kakao-enterprise/vits-ljs")
    narrated_text = text_to_audio(text)
    return (narrated_text["sampling_rate"], narrated_text["audio"][0])
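
# Wire the handler into a simple microphone-to-speaker interface.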
gr.Interface(
    fn=alexa,
    inputs=gr.Audio(type="filepath"),
    outputs=[gr.Audio(label="Audio", type="numpy", autoplay=True)],
).launch()