import os
import gradio as gr
import whisper
import requests
import tempfile
from neon_tts_plugin_coqui import CoquiTTS
# Whisper: Speech-to-text
model = whisper.load_model("base")
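# "base" trades accuracy for speed; larger Whisper checkpoints ("small",
# "medium", "large") could be swapped in here at the cost of load time and memory.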
# LLM: BLOOM via the Hugging Face Inference API
API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"
HF_TOKEN = os.environ["HF_TOKEN"]
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
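# HF_TOKEN is read from the environment (e.g. a Space secret); a hypothetical
# local run would export it first, e.g. `export HF_TOKEN=<your token>`.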
# Languages covered by BLOOM: English, French, Spanish, Arabic, Hindi, Portuguese, Indonesian, Vietnamese, Chinese, Tamil, Telugu, Bengali
# Text-to-Speech
LANGUAGES = list(CoquiTTS.langs.keys())
print(f"Languages for Coqui are: {LANGUAGES}")
#Languages for Coqui are: ['en', 'es', 'fr', 'de', 'pl', 'uk', 'ro', 'hu', 'el', 'bg', 'nl', 'fi', 'sl', 'lv', 'ga']
coquiTTS = CoquiTTS()
# Driver function: speech-to-text -> LLM response -> text-to-speech
def driver_fun(audio):
    text1, lang = whisper_stt(audio)
    text2 = lang_model_response(text1, lang)
    speech = tts(text2, lang)
    return text1, text2, speech
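# Hypothetical end-to-end call (file path is illustrative only):
#   transcript, reply, wav_path = driver_fun("question.wav")
# transcript is the Whisper output, reply the BLOOM continuation, and
# wav_path a temporary WAV file synthesized by Coqui.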
# Whisper - speech-to-text
def whisper_stt(audio):
    print("Inside Whisper STT")
    # load audio and pad/trim it to fit 30 seconds
    audio = whisper.load_audio(audio)
    audio = whisper.pad_or_trim(audio)
    # make log-Mel spectrogram and move to the same device as the model
    mel = whisper.log_mel_spectrogram(audio).to(model.device)
    # detect the spoken language
    _, probs = model.detect_language(mel)
    lang = max(probs, key=probs.get)
    print(f"Detected language: {lang}")
    # decode the audio
    options = whisper.DecodingOptions(fp16=False, language=lang)
    result = whisper.decode(model, mel, options)
    # print the recognized text
    print(f"Transcript is: {result.text}")
    return result.text, lang
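# Sketch of the expected return value (audio file name is hypothetical):
#   whisper_stt("hello_fr.wav")  ->  ("Bonjour, comment allez-vous ?", "fr")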
# LLM - BLOOM response via the Inference API
def lang_model_response(prompt, language):
    print(f"*****Inside lang_model_response - Prompt is: {prompt}")
    p = """Question: How are you doing today?
Answer: I am doing good, thanks.
Question: """
    if len(prompt) == 0:
        prompt = """Question: Can you help me please?
Answer: Sure, I am here for you.
Question: """
    prompt = p + prompt + "\n" + "Answer: "
    json_ = {
        "inputs": prompt,
        "parameters": {
            "top_p": 0.90,
            "max_new_tokens": 64,
            "temperature": 1.1,
            "return_full_text": False,
            "do_sample": True,
        },
        "options": {
            "use_cache": True,
            "wait_for_model": True,
        },
    }
    response = requests.post(API_URL, headers=headers, json=json_)
    output = response.json()
    output_tmp = output[0]['generated_text']
    print(f"Bloom API Response is: {output_tmp}")
    # keep only the text after the final "Answer: " marker, up to the next newline
    solution = output_tmp.split("Answer: ")[2].split("\n")[0]
    print(f"Final Bloom Response after splits is: {solution}")
    return solution
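# Note: the split above assumes the API response is a list like
#   [{"generated_text": "Question: ...\nAnswer: ...\nQuestion: ...\nAnswer: <reply>"}]
# i.e. that the few-shot prompt is echoed back, so the segment after the
# final "Answer: " marker is the model's reply.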
# Coqui - text-to-speech
def tts(text, language):
    print(f"Inside tts - language is: {language}")
    coqui_langs = ['en', 'es', 'fr', 'de', 'pl', 'uk', 'ro', 'hu', 'bg', 'nl', 'fi', 'sl', 'lv', 'ga']
    # fall back to English if Coqui does not support the detected language
    if language not in coqui_langs:
        language = 'en'
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp:
        coquiTTS.get_tts(text, fp, speaker={"language": language})
    return fp.name
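# Hypothetical usage: tts("Bonjour tout le monde", "fr") writes synthesized
# speech to a temporary .wav file and returns its path; any language Coqui
# does not cover falls back to English.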
gr.Interface(
    title='Testing Whisper',
    fn=driver_fun,
    inputs=[
        gr.Audio(source="microphone", type="filepath"),
    ],
    outputs=[
        "textbox", "textbox", "audio",
    ],
    live=True).launch()