import streamlit as st
from audiorecorder import audiorecorder
import openai
import os
openai.api_key = os.environ['OPENAI_API_KEY']
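# NOTE: This script assumes the legacy openai-python SDK (< 1.0), which exposes
# openai.ChatCompletion and openai.Audio, plus the streamlit-audiorecorder
# package. The OPENAI_API_KEY environment variable must be set before launch.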
def get_completion(messages, model="gpt-3.5-turbo"):
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=0)
    return response.choices[0].message["content"]
def transcribe(audio_path):
    # Pass raw bytes, as the *_raw variant of the legacy SDK expects,
    # and close the file deterministically.
    with open(audio_path, "rb") as audio_file:
        transcript = openai.Audio.translate_raw(
            "whisper-1", audio_file.read(), filename='1.mp3')
    return transcript["text"]
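# transcribe() is a file-path helper that mirrors the inline call further down;
# the main flow currently bypasses it and sends the recorder's bytes directly.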
def get_ddx(vignette):
    messages_ddx = [
        {'role': 'system', 'content': 'You are a Physician AI assistant tool. Write a differential diagnosis for a patient. Write just diagnoses and justification. Do not write any additional information. Do not write any introduction.'},
        {'role': 'user', 'content': vignette}]
    ddx = get_completion(messages_ddx)
    return ddx
def get_orders(vignette, ddx):
    messages_orders = [
        {'role': 'system', 'content': 'You are a Physician AI assistant tool. Write an order set for a patient to differentiate between conditions. Write just orders and justification. Do not write any additional information. Do not write any introduction.'},
        {'role': 'user', 'content': f'Information about patient: {vignette}. Differential diagnosis: {ddx}'}]
    orders = get_completion(messages_orders)
    return orders
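# Illustrative usage outside Streamlit (the vignette text here is made up):
#   vignette = "54-year-old male with acute substernal chest pain radiating to the left arm, diaphoresis, history of hypertension."
#   ddx = get_ddx(vignette)
#   orders = get_orders(vignette, ddx)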
# Initialize per-session state so values survive Streamlit reruns.
if 'vignette' not in st.session_state:
    st.session_state['vignette'] = ''
if 'ddx' not in st.session_state:
    st.session_state['ddx'] = ''
if 'orders' not in st.session_state:
    st.session_state['orders'] = ''
if 'length' not in st.session_state:
    st.session_state['length'] = 0
st.title("AI loop for healthcare providers")
st.markdown(
    "Record your patient presentation and get the differential diagnoses and orders.")
st.divider()
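# NOTE: len(audio) and audio.tobytes() assume the early streamlit-audiorecorder
# releases that return the recording as a raw sample array; newer releases
# return a pydub AudioSegment, which would need audio.export() instead.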
audio = audiorecorder("Record", "Stop")
if len(audio) != st.session_state['length']:
    # A changed length means a new recording arrived on this rerun.
    st.session_state['length'] = len(audio)
    # wav_file = open("audio.mp3", "wb")
    # wav_file.write(audio.tobytes())
    transcript = openai.Audio.translate_raw(
        "whisper-1", audio.tobytes(), filename='1.mp3')
    # Bare expression: Streamlit "magic" renders the transcript in the app.
    transcript["text"]
    st.session_state['vignette'] += transcript["text"]
st.session_state['vignette'] = st.text_area(
    "Vignette", value=st.session_state['vignette'])
if st.button("Get DDX and Orders"):
    vignette = st.session_state['vignette']
    ddx = get_ddx(vignette)
    st.session_state['ddx'] = ddx
    st.session_state['orders'] = get_orders(vignette, ddx)
col1, col2 = st.columns(2)
with col1:
    st.markdown(
        f"**DDX**\n\n{st.session_state['ddx']}", unsafe_allow_html=True)
with col2:
    st.markdown(
        f"**ORDERS**\n\n{st.session_state['orders']}", unsafe_allow_html=True)