from pathlib import Path

import gradio as gr
import torch
import whisper
from dotenv import load_dotenv
from elevenlabs import generate, play  # used only by the commented-out TTS lines below
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate

# Load API keys (e.g. OPENAI_API_KEY) from a local .env file before any
# clients are constructed.
load_dotenv()
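# Assumed .env contents (the file itself is not part of this snapshot).
# ChatOpenAI reads OPENAI_API_KEY from the environment; the elevenlabs client
# is expected to pick up its key from the environment as well:
#
#   OPENAI_API_KEY=sk-...
#   ELEVEN_API_KEY=...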

# Whisper speech-to-text for the microphone input; fall back to CPU when no
# CUDA device is available.
model = whisper.load_model("base", device="cuda" if torch.cuda.is_available() else "cpu")


# System prompt that makes the LLM role-play a patient; the template file
# must contain the {user_input} placeholder declared here.
prompt = PromptTemplate(
    input_variables=["user_input"],
    template=Path("prompts/patient.prompt").read_text(),
)

# Moderate temperature keeps the patient's answers varied but coherent.
llm = ChatOpenAI(temperature=0.7)

chain = LLMChain(llm=llm, prompt=prompt)
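# A minimal sketch of what prompts/patient.prompt might contain; the real file
# is not included in this snapshot, so the wording below is an assumption. It
# only needs to expose the {user_input} placeholder declared above:
#
#   You are Emma Thompson, a 28-year-old patient presenting with abdominal
#   pain caused by gastritis. Stay in character and answer the doctor's
#   questions briefly and naturally.
#   Doctor: {user_input}
#   Patient: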


def run_text_prompt(message, chat_history):
    """Run the user's message through the LLM chain and append the exchange."""
    bot_message = chain.run(user_input=message)

    # Optional ElevenLabs text-to-speech for the bot reply; disabled by default:
    # audio = generate(text=bot_message, voice="Bella")
    # play(audio, notebook=True)

    chat_history.append((message, bot_message))
    return "", chat_history


def run_audio_prompt(audio, chat_history):
    """Transcribe a recorded audio file with Whisper and forward it as text."""
    if audio is None:
        return None, chat_history

    message_transcription = model.transcribe(audio)["text"]
    _, chat_history = run_text_prompt(message_transcription, chat_history)
    return None, chat_history


with gr.Blocks() as demo:
    gr.Markdown("""
Name: Emma Thompson
Age: 28
Presenting complaint: Abdominal pain
Matched condition: Gastritis
    """)
    chatbot = gr.Chatbot()

    msg = gr.Textbox()
    msg.submit(run_text_prompt, [msg, chatbot], [msg, chatbot])

    # Optional microphone input, currently disabled; uncomment to enable voice
    # prompts transcribed by Whisper:
    # with gr.Row():
    #     audio = gr.Audio(source="microphone", type="filepath")
    #     send_audio_button = gr.Button("Send Audio", interactive=True)
    #     send_audio_button.click(run_audio_prompt, [audio, chatbot], [audio, chatbot])

demo.launch(debug=True)
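# To try it out, run this script with Python (filename assumed, e.g.
# `python app.py`); Gradio serves the chat UI on a local URL, by default
# http://127.0.0.1:7860.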