Commit 2b67ae3 (parent: 5880b6a): Create app.py
app.py
ADDED
@@ -0,0 +1,80 @@
import os
import streamlit as st
from llama_index.core import (
    VectorStoreIndex,
    SimpleDirectoryReader,
    StorageContext,
    load_index_from_storage,
)
from dotenv import load_dotenv
import openai
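# Editor's note (assumption, not part of the original commit): this script
# expects streamlit, llama-index, python-dotenv, and openai>=1.0 to be
# installed; the `openai.chat.completions.create` call below is the v1 client
# API and will not work with the legacy 0.x openai package.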

# Load environment variables
load_dotenv()

# Set OpenAI API key
openai.api_key = os.getenv("OPENAI_API_KEY")

# Define the storage directory
PERSIST_DIR = "./storage"
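# Editor's sketch (assumption, not in the original commit): neither the
# embedding step nor the chat call below works without a key, so failing fast
# here gives a clearer error than an exception deep inside the OpenAI client.
if not openai.api_key:
    st.error("OPENAI_API_KEY is not set. Add it to a .env file or the host's secrets.")
    st.stop()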

# Check if storage already exists and load or create the index
if not os.path.exists(PERSIST_DIR):
    # Load the documents and create the index
    documents = SimpleDirectoryReader(
        "data",
        exclude_hidden=False,
    ).load_data()
    index = VectorStoreIndex.from_documents(documents)
    # Store it for later
    index.storage_context.persist(persist_dir=PERSIST_DIR)
else:
    # Load the existing index
    storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
    index = load_index_from_storage(storage_context)
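# Editor's note: on the first run this embeds every file in ./data (the
# directory must exist and be non-empty) and writes the index to ./storage;
# later runs reload the saved index. The only staleness check is whether
# ./storage exists, so delete that directory to rebuild after the documents
# change.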

# Create a query engine for retrieval-augmented generation (RAG)
query_engine = index.as_query_engine()

# Streamlit app
st.title("Conversational Medical Chat Assistant")
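# Editor's note: `as_query_engine()` uses LlamaIndex defaults, i.e. it
# retrieves the top-k most similar chunks and has the default OpenAI LLM
# synthesize an answer; a call like `index.as_query_engine(similarity_top_k=5)`
# would widen retrieval if the answers feel thin.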

def get_medical_llm_response(query):
    # Generate a response from the chat model, steered by a medical system prompt
    response = openai.chat.completions.create(
        model="gpt-3.5-turbo",  # general-purpose model; the medical framing comes from the system prompt
        messages=[
            {"role": "system", "content": "You are an expert in Homeopathic treatment with advanced training on medicine and diagnosis."},
            {"role": "user", "content": query},
        ],
    )
    return response.choices[0].message.content.strip()
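# Editor's sketch (not in the original commit): the request above can fail on
# rate limits or network errors; a hedged variant would wrap it like this:
#
#     try:
#         response = openai.chat.completions.create(...)
#     except openai.OpenAIError as exc:
#         return f"Sorry, the model request failed: {exc}"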


# Initialize session state for chat history
if 'messages' not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
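# Editor's note: Streamlit re-executes this entire script on every
# interaction, so `st.session_state.messages` is what keeps the transcript
# alive between reruns; the loop above repaints it before new input is read.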

# Get user input
if user_input := st.chat_input("Describe your symptoms or ask a medical question:"):
    # Add the user message to the chat history
    st.session_state.messages.append({"role": "user", "content": user_input})
    with st.chat_message("user"):
        st.markdown(user_input)

    with st.spinner('Generating response...'):
        # Retrieve a RAG-based answer from the document index
        rag_response = query_engine.query(user_input).response
        # Fold the retrieved answer and the user's query into one prompt for the chat model
        combined_query = f"Based on the following information, provide a comprehensive response:\n\n{rag_response}\n\nUser's query: {user_input}"
        llm_response = get_medical_llm_response(combined_query)

    # Add the assistant message to the chat history
    st.session_state.messages.append({"role": "assistant", "content": llm_response})
    with st.chat_message("assistant"):
        st.markdown(llm_response)
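# Editor's note (assumption): run locally with `streamlit run app.py` after
# placing source documents in ./data and OPENAI_API_KEY in .env; on a
# Hugging Face Space the key would come from the Space's secrets instead.
# Also worth noting: `query_engine.query()` already produces an LLM answer,
# which is then re-asked through `get_medical_llm_response`, so each user
# turn costs two LLM calls by design.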