playground / app.py
Francesco's picture
comment out audio for faster build time
928e9ad
raw
history blame
1.64 kB
from pathlib import Path

from dotenv import load_dotenv
import torch

# Load environment variables (e.g. OPENAI_API_KEY) before any langchain
# objects are constructed.
load_dotenv()

from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatOpenAI
import gradio as gr
from elevenlabs import generate, play
import whisper

# Whisper transcription model. Fall back to CPU so the app still starts on
# machines without a GPU (the hard-coded "cuda" crashed on CPU-only hosts).
model = whisper.load_model(
    "base", device="cuda" if torch.cuda.is_available() else "cpu"
)

# Patient-persona prompt; the template file expects a single {user_input} slot.
prompt = PromptTemplate(
    input_variables=["user_input"],
    template=Path("prompts/patient.prompt").read_text(),
)

llm = ChatOpenAI(temperature=0.7)
chain = LLMChain(llm=llm, prompt=prompt)
def run_text_prompt(message, chat_history):
    """Run the user's text through the LLM chain and record the exchange.

    Returns an empty string (clears the textbox) and the updated history.
    """
    reply = chain.run(user_input=message)

    # Audio narration intentionally disabled to keep the build fast.
    # audio = generate(text=reply, voice="Bella")
    # play(audio, notebook=True)

    chat_history.append((message, reply))
    return "", chat_history
def run_audio_prompt(audio, chat_history):
    """Transcribe a recorded audio file and feed the text to the chat.

    Returns ``None`` (resets the audio widget) and the updated history.
    """
    # Guard clause: nothing recorded, leave the history untouched.
    if audio is None:
        return None, chat_history

    transcription = model.transcribe(audio)["text"]
    _, chat_history = run_text_prompt(transcription, chat_history)
    return None, chat_history
# Gradio UI: a plain text chat with the simulated patient. The microphone
# input row is commented out (per the commit) to keep the Space build fast.
with gr.Blocks() as demo:
    # Fixed "Present complain" -> "Presenting complaint" in the patient card.
    gr.Markdown("""
Name: Emma Thompson
Age: 28
Presenting complaint: Abdominal pain
Matched Condition: Gastritis
""")

    chatbot = gr.Chatbot()
    msg = gr.Textbox()

    # Pressing Enter sends the textbox content and clears it via the
    # ("", chat_history) return of run_text_prompt.
    msg.submit(run_text_prompt, [msg, chatbot], [msg, chatbot])

    # with gr.Row():
    #     audio = gr.Audio(source="microphone", type="filepath")
    #     send_audio_button = gr.Button("Send Audio", interactive=True)
    # send_audio_button.click(run_audio_prompt, [audio, chatbot], [audio, chatbot])

demo.launch(debug=True)