# Rai_AI / app.py
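# Voice/text chat demo: Whisper transcribes a microphone recording (or a typed
# prompt is used directly), gpt-3.5-turbo generates a reply, and gTTS speaks it
# back through a Gradio Blocks interface.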
import os
import warnings

import gradio as gr
import openai
from gtts import gTTS

warnings.filterwarnings("ignore")

# The OpenAI API key is read from the environment; set OPENAI_API_KEY before launching.
openai.api_key = os.getenv("OPENAI_API_KEY")
def chatgpt_api(input_text):
    """Send the user's text to the Chat Completions API and return the reply."""
    messages = [
        {"role": "system", "content": "You are a helpful assistant."}
    ]
    if input_text:
        messages.append({"role": "user", "content": input_text})

    chat_completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo", messages=messages
    )
    reply = chat_completion.choices[0].message.content
    return reply
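# Example (assumes OPENAI_API_KEY is set in the environment):
#   chatgpt_api("Say hello")  # -> the assistant's reply as a plain string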
# To pre-create a silent placeholder for the audio output, e.g.:
# ffmpeg -f lavfi -i anullsrc=r=44100:cl=mono -t 10 -q:a 9 -acodec libmp3lame Temp.mp3
def transcribe(audio, text):
    """Transcribe the recording (if any), otherwise use the typed text; then
    get a chat reply and synthesize it to speech with gTTS."""
    language = "en"
    if audio is not None:
        # Transcribe the recorded clip with Whisper
        with open(audio, "rb") as audio_file:
            prompt = openai.Audio.transcribe("whisper-1", audio_file)
        s = prompt["text"]
    else:
        s = text

    out_result = chatgpt_api(s)

    # Convert the reply to speech and save it where the output component expects it
    audioobj = gTTS(text=out_result, lang=language, slow=False)
    audioobj.save("Temp.mp3")
    return [s, out_result, "Temp.mp3"]
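# Example: transcribe(None, "What is gTTS?") skips Whisper, chats with the typed
# text, and returns [prompt_text, reply_text, "Temp.mp3"] with the reply saved as speech.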
with gr.Blocks() as demo:
    gr.Markdown("Rai AI")

    # Inputs: speak into the microphone or type a message
    input1 = gr.Audio(source="microphone", type="filepath", label="Use your voice to chat")
    input2 = gr.Textbox(lines=7, label="Chat with AI")

    # Outputs: the recognized/typed prompt, the model's reply, and the spoken reply
    output_1 = gr.Textbox(label="User Input")
    output_2 = gr.Textbox(label="Text Output")
    output_3 = gr.Audio("Temp.mp3", label="Speech Output")

    btn = gr.Button("Run")
    btn.click(fn=transcribe, inputs=[input1, input2], outputs=[output_1, output_2, output_3])

demo.launch()
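# Note: this script targets the pre-1.0 openai SDK (openai.ChatCompletion / openai.Audio)
# and the Gradio 3.x component API (gr.Audio(source=...)); newer releases of both
# libraries expose different interfaces.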