import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
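
# Voice-assistant flow: recorded audio -> Whisper transcription -> Phi-3 reply
# -> VITS narration. All three models are loaded once at startup (downloaded
# from the Hugging Face Hub on the first run) rather than on every request.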

def alexa(audio):
    """Full round trip: spoken question in, spoken answer out."""
    converted_text = speech_to_text(audio)
    generated_text = text_generation(converted_text)
    return text_to_speech(generated_text)

# Whisper-tiny handles speech recognition. The pipeline is created once at
# startup so the model is not reloaded on every request.
audio_to_text = pipeline("automatic-speech-recognition", model="openai/whisper-tiny")

def speech_to_text(audio):
    # Force transcription (rather than translation) and pin the language to
    # English instead of letting Whisper auto-detect it.
    text = audio_to_text(audio, generate_kwargs={"task": "transcribe", "language": "english"})["text"]
    return text

# Phi-3-mini generates the reply. trust_remote_code=True is required because
# the model repository ships its own modeling code.
model = AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-3-mini-128k-instruct", trust_remote_code=True
)
tokenizer = AutoTokenizer.from_pretrained(
    "microsoft/Phi-3-mini-128k-instruct", trust_remote_code=True
)
text_gen = pipeline("text-generation", model=model, tokenizer=tokenizer, trust_remote_code=True)

def text_generation(text):
    # The pipeline expects chat-formatted input: a list of role/content dicts.
    messages = [
        {"role": "user", "content": text}
    ]

    generation_args = {
        "max_new_tokens": 500,
        "return_full_text": False,  # return only the reply, not the prompt
        "temperature": 0.1,         # low temperature keeps answers focused
        "do_sample": True,
    }

    response = text_gen(messages, **generation_args)
    return response[0]["generated_text"]
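
# The text path can be sanity-checked on its own, e.g.:
#   print(text_generation("What is the capital of France?"))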

# VITS (trained on the LJ Speech dataset) narrates the reply.
text_to_audio = pipeline("text-to-speech", model="kakao-enterprise/vits-ljs")

def text_to_speech(text):
    narrated_text = text_to_audio(text)
    # The pipeline returns {"audio": array of shape (1, n), "sampling_rate": int};
    # Gradio's numpy audio format expects a (sampling_rate, samples) tuple.
    return (narrated_text["sampling_rate"], narrated_text["audio"][0])

gr.Interface(
    fn=alexa,
    inputs=gr.Audio(type="filepath"),
    outputs=gr.Audio(label="Audio", type="numpy", autoplay=True),
).launch()
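# launch() serves the interface at a local URL; launch(share=True) would also
# create a temporary public link.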