Dimitrios64 committed
Commit 8b09cae
Parent(s): 2182e9c
Upload 3 files
- Walgreens_AI.png +0 -0
- app.py +153 -0
- requirements.txt +5 -0
Walgreens_AI.png
ADDED
app.py
ADDED
@@ -0,0 +1,153 @@
+# Simple app for specialty pharmacy
+
+# Import packages
+import numpy as np
+import os
+
+import gradio as gr
+from transformers import pipeline
+
+# Import LLMs
+from langchain.llms import OpenAI
+from langchain.chat_models import ChatOpenAI
+
+# Prompt template
+from langchain import PromptTemplate
+
+# Chains
+from langchain.chains import LLMChain
+
+
+# The OPENAI_API_KEY "secret" must be set as an environment variable (raises KeyError if it is missing)
+os.environ["OPENAI_API_KEY"]
+
+# GPT-4 chat model
+llm_gpt = ChatOpenAI(model='gpt-4-0613', temperature=0.)
+
+# ======================================================
+# Set up an ASR pipeline (defaults to facebook's wav2vec2)
+p = pipeline("automatic-speech-recognition", chunk_length_s=40)
+
+# =======================================================
+# LLM chains
+
+# Dialogue chain
+template_diag = """
+You are an AI assistant with medical language understanding.
+
+The input is a dialogue between a specialty pharmacist and a patient: {input}
+
+To give you context, the dialogue will be about symptoms, side effects, medications, etc.
+of a rare disease, most probably multiple sclerosis.
+
+You have a couple of tasks:
+
+- First: If there are some nonsensical words, convert them to the most probable real word,
+taking into account that this is a pharmacist, so most of them should describe medical conditions
+or symptoms, most probably about multiple sclerosis.
+If a medication is mentioned, do your best to find which one it is, if any. Correct any misspellings.
+Capitalize the names of the medications.
+
+- Second: Convert the text into a dialogue of the form:
+
+[Pat]:
+[PRx]:
+
+where [PRx]: Pharmacist, [Pat]: Patient.
+
+Use your judgement to distinguish between the two roles and who said what.
+Output only this dialogue.
+
+Output:
+"""
+
+
+prompt_diag = PromptTemplate(template=template_diag, input_variables=["input"])
+chain_diag = LLMChain(llm=llm_gpt, prompt=prompt_diag, verbose=False)
+
+
+# ==============================================
+template_struct = """
+You are an AI assistant with medical language understanding.
+
+The input is a dialogue between a specialty pharmacist and a patient: {input}
+
+To give you context, the dialogue will be about symptoms, side effects, medications, etc.
+of a rare disease, most probably multiple sclerosis.
+
+Some words may not be clearly spelled, because they come from an automatic
+audio-to-text transcript.
+
+You have a few tasks:
+
+- First task: If there are some nonsensical words, convert them to the most probable real word,
+taking into account that this is a dialogue about a medical condition, probably multiple sclerosis.
+
+- Second task: Extract information from this dialogue.
+
+Specifically the following:
+
+- A brief summary of the dialogue, highlighting the chief complaint
+- The main disease mentioned by the patient
+- Medications mentioned by the patient
+- Side effects mentioned by the patient
+
+The output should have the form of a JSON file with those four keys: (Summary, Disease, Medications, Side_Effects)
+
+Do not hallucinate and do not make up information that is not included in the original file.
+
+Output:
+"""
+
+# SOAP notes
+prompt_struct = PromptTemplate(template=template_struct, input_variables=["input"])
+chain_struct = LLMChain(llm=llm_gpt, prompt=prompt_struct, verbose=False)
+
+
+# Transcription function
+def transcribe(audio):
+    # text = fake_audio
+    text = p(audio)["text"]
+    output_1 = eval(chain_struct.run(text))  # parse the JSON-style reply into a dict
+    output_2 = chain_diag.run(text)
+    summa = output_1['Summary']
+    disease = output_1['Disease']
+    meds = output_1['Medications']
+    sides = output_1['Side_Effects']
+    return summa, disease, meds, sides, output_2
+
+
+# Gradio UI
+with gr.Blocks(title="AI specialty scriber", theme=gr.themes.Soft()) as demo:
+
+    with gr.Row():
+        image_wag = gr.Image(value="Walgreens_AI.png", height=100, width=100, show_label=False, show_download_button=False, scale=1)
+        gr.Markdown("## <center> Walgreens AI-powered specialty pharmacy tool </center>")
+    # gr.Markdown("**<center>"+scriber_description+"</center>**")
+    gr.Markdown("<center> ________________________________________________________________________ </center>")
+
+    # ====================================================
+    # Dictation tool
+    gr.Markdown("**Record Patient Interaction**")
+    audio = gr.Audio(label='Your recording here', source="microphone", type="filepath", container=True)
+    audio_submit_btn = gr.Button(value="Submit Recording", variant="primary")
+
+    # Clinical notes and transcript
+    with gr.Tab("Extracted Information"):
+        with gr.Row():
+            summary = gr.Textbox(label='Summary', lines=3, interactive=True)
+            disease = gr.Textbox(label='Disease mentioned', lines=3, interactive=True)
+        with gr.Row():
+            medications = gr.Textbox(label='Medications mentioned', lines=3, interactive=True)
+            sides = gr.Textbox(label='Side Effects mentioned', lines=3, interactive=True)
+
+    with gr.Tab("Original Transcript"):
+        dialogue = gr.Textbox(label='Full conversation transcript', lines=10)
+
+    # ===============================================
+    # Submit and clear buttons
+    audio_submit_btn.click(transcribe, inputs=audio, outputs=[summary, disease, medications, sides, dialogue])
+    audio_clear_btn = gr.ClearButton([audio, summary, disease, medications, sides, dialogue])
+
+
+demo.launch(auth=("alliancedemo", "wag2046"))
requirements.txt
ADDED
@@ -0,0 +1,5 @@
+gradio==3.40.0
+langchain==0.0.178
+openai==0.27.6
+transformers==4.28.1
+torch==2.0.1
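
Note on the parsing step in transcribe() (app.py): it relies on eval() to turn the model's JSON-style reply into a dict with the four keys requested in template_struct. Below is a minimal sketch of a safer way to do the same parse, assuming the reply is either valid JSON or a Python dict literal; the helper name parse_struct_output and the sample reply string are hypothetical, for illustration only.

import ast
import json

def parse_struct_output(raw: str) -> dict:
    # Try strict JSON first (double-quoted keys), then fall back to a
    # Python dict literal; both avoid executing arbitrary code via eval().
    try:
        return json.loads(raw)
    except ValueError:
        return ast.literal_eval(raw)

# Hypothetical model reply, for illustration only:
sample = '{"Summary": "Patient reports fatigue on their MS medication.", "Disease": "Multiple sclerosis", "Medications": "Ocrevus", "Side_Effects": "Fatigue"}'
print(parse_struct_output(sample)["Medications"])  # -> Ocrevus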