davidfearne
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -5,13 +5,13 @@ import json
|
|
5 |
import requests
|
6 |
import uuid
|
7 |
from datetime import date, datetime
|
8 |
-
import
|
9 |
-
from pydantic import BaseModel, Field
|
10 |
from typing import Optional
|
11 |
|
|
|
12 |
placeHolderPersona1 = """## Mission Statement
|
13 |
My mission is to utilize my expertise to aid in the medical triaging process by providing a clear, concise, and accurate assessment of potential arthritis related conditions.
|
14 |
-
|
15 |
# Triaging process
|
16 |
Ensure you stay on the topic of asking questions to triage the potential of Rheumatoid arthritis.
|
17 |
Ask only one question at a time.
|
@@ -22,20 +22,19 @@ Do not give a diagnosis """
|
|
22 |
|
23 |
placeHolderPersona2 = """## Mission
|
24 |
To analyse a clinical triaging discussion between a patient and AI doctor interactions with a focus on Immunology symptoms, medical history, and test results to deduce the most probable Immunology diagnosis.
|
25 |
-
|
26 |
## Diagnostic Process
|
27 |
Upon receipt of the clinical notes, I will follow a systematic approach to arrive at a diagnosis:
|
28 |
1. Review the patient's presenting symptoms and consider their relevance to immunopathology.
|
29 |
2. Cross-reference the gathered information with my knowledge base of immunology to identify patterns or indicators of specific immune disorders.
|
30 |
3. Formulate a diagnosis from the potential conditions.
|
31 |
4. Determine the most likely diagnosis and assign a confidence score from 1-100, with 100 being absolute certainty.
|
32 |
-
|
33 |
# Limitations
|
34 |
While I am specialized in immunology, I understand that not all cases will fall neatly within my domain. In instances where the clinical notes point to a condition outside of my expertise, I will provide the best possible diagnosis with the acknowledgment that my confidence score will reflect the limitations of my specialization in those cases"""
|
35 |
|
36 |
-
|
37 |
class ChatRequestClient(BaseModel):
|
38 |
-
|
39 |
user_id: str
|
40 |
user_input: str
|
41 |
numberOfQuestions: int
|
@@ -50,115 +49,286 @@ class ChatRequestClient(BaseModel):
|
|
50 |
tokens2: int
|
51 |
temperature2: float
|
52 |
|
|
|
53 |
def call_chat_api(data: ChatRequestClient):
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
if response.status_code == 200:
|
62 |
-
return response.json() # Return the JSON response if successful
|
63 |
-
else:
|
64 |
-
return "An error occured" # Return the raw response text if not successful
|
65 |
|
66 |
-
|
|
|
67 |
return uuid.uuid4()
|
68 |
|
69 |
def format_elapsed_time(time):
|
70 |
-
# Format the elapsed time to two decimal places
|
71 |
return "{:.2f}".format(time)
|
72 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
73 |
|
74 |
-
#
|
75 |
-
#
|
76 |
-
st.
|
77 |
-
|
78 |
-
# Sidebar for inputting personas
|
79 |
-
st.sidebar.image('cognizant_logo.jpg')
|
80 |
-
st.sidebar.header("Agent Personas Design")
|
81 |
-
# st.sidebar.subheader("Welcome Message")
|
82 |
-
# welcomeMessage = st.sidebar.text_area("Define Intake Persona", value=welcomeMessage, height=300)
|
83 |
-
st.sidebar.subheader("Intake AI")
|
84 |
-
numberOfQuestions = st.sidebar.slider("Number of Questions", min_value=0, max_value=10, step=1, value=5, key='persona1_questions')
|
85 |
-
persona1SystemMessage = st.sidebar.text_area("Define Intake Persona", value=placeHolderPersona1, height=300)
|
86 |
-
with st.sidebar.expander("See explanation"):
|
87 |
-
st.write("This AI persona will converse with the patient to gather their symptoms. With each round of chat, the object of the AI is to ask more specific follow up questions as it narrows down to the specific diagnosis. However this AI should never give a diagnosis")
|
88 |
-
st.image("agentPersona1.png")
|
89 |
-
llm1 = st.sidebar.selectbox("Model Selection", ['GPT-4', 'GPT3.5'], key='persona1_size')
|
90 |
-
temp1 = st.sidebar.slider("Temperature", min_value=0.0, max_value=1.0, step=0.1, value=0.6, key='persona1_temp')
|
91 |
-
tokens1 = st.sidebar.slider("Tokens", min_value=0, max_value=4000, step=100, value=500, key='persona1_tokens')
|
92 |
-
|
93 |
-
# Persona 2
|
94 |
-
st.sidebar.subheader("Recommendation and Next Best Action AI")
|
95 |
-
persona2SystemMessage = st.sidebar.text_area("Define Recommendation Persona", value=placeHolderPersona2, height=300)
|
96 |
-
with st.sidebar.expander("See explanation"):
|
97 |
-
st.write("This AI persona uses the output of the symptom intake AI as its input. This AI’s job is to augment a health professional by assisting with a diagnosis and possible next best action. The teams will need to determine if this should be a tool used directly by the patient, as an assistant to the health professional or a hybrid of the two. ")
|
98 |
-
st.image("agentPersona2.png")
|
99 |
-
llm2 = st.sidebar.selectbox("Model Selection", ['GPT-4', 'GPT3.5'], key='persona2_size')
|
100 |
-
temp2 = st.sidebar.slider("Temperature", min_value=0.0, max_value=1.0, step=0.1, value=0.5, key='persona2_temp')
|
101 |
-
tokens2 = st.sidebar.slider("Tokens", min_value=0, max_value=4000, step=100, value=500, key='persona2_tokens')
|
102 |
-
userMessage2 = st.sidebar.text_area("Define User Message", value="This is the conversation todate, ", height=150)
|
103 |
-
st.sidebar.caption(f"Session ID: {genuuid()}")
|
104 |
-
# Main chat interface
|
105 |
-
st.header("Chat with the Agents")
|
106 |
-
|
107 |
-
# User ID Input
|
108 |
-
user_id = st.text_input("User ID:", key="user_id")
|
109 |
-
|
110 |
-
# Ensure user_id is defined or fallback to a default value
|
111 |
-
if not user_id:
|
112 |
-
st.warning("Please provide a User ID to start the chat.")
|
113 |
-
else:
|
114 |
-
# Initialize chat history in session state
|
115 |
-
if "messages" not in st.session_state:
|
116 |
-
st.session_state.messages = []
|
117 |
-
|
118 |
-
# Display chat messages from history on app rerun
|
119 |
-
for message in st.session_state.messages:
|
120 |
-
with st.chat_message(message["role"]):
|
121 |
-
st.markdown(message["content"])
|
122 |
-
|
123 |
-
# Collect user input
|
124 |
-
if user_input := st.chat_input("Write your message here:"):
|
125 |
-
# Add user message to the chat history
|
126 |
-
st.session_state.messages.append({"role": "user", "content": user_input})
|
127 |
-
st.chat_message("user").markdown(user_input)
|
128 |
-
|
129 |
-
# Prepare data for API call
|
130 |
-
data = ChatRequestClient(
|
131 |
-
user_id=user_id, # Ensure user_id is passed correctly
|
132 |
-
user_input=user_input,
|
133 |
-
numberOfQuestions=numberOfQuestions,
|
134 |
-
welcomeMessage="",
|
135 |
-
llm1=llm1,
|
136 |
-
tokens1=tokens1,
|
137 |
-
temperature1=temp1,
|
138 |
-
persona1SystemMessage=persona1SystemMessage,
|
139 |
-
persona2SystemMessage=persona2SystemMessage,
|
140 |
-
userMessage2=userMessage2,
|
141 |
-
llm2=llm2,
|
142 |
-
tokens2=tokens2,
|
143 |
-
temperature2=temp2
|
144 |
-
)
|
145 |
-
|
146 |
-
# Call the API
|
147 |
-
response = call_chat_api(data)
|
148 |
-
|
149 |
-
# Process the API response
|
150 |
-
agent_message = response.get("content", "No response received from the agent.")
|
151 |
-
elapsed_time = response.get("elapsed_time", 0)
|
152 |
-
count = response.get("count", 0)
|
153 |
-
|
154 |
-
# Add agent response to the chat history
|
155 |
-
st.session_state.messages.append({"role": "assistant", "content": agent_message})
|
156 |
-
with st.chat_message("assistant"):
|
157 |
-
st.markdown(agent_message)
|
158 |
-
|
159 |
-
# Display additional metadata
|
160 |
-
st.markdown(f"##### Time taken: {format_elapsed_time(elapsed_time)} seconds")
|
161 |
-
st.markdown(f"##### Question Count: {count} of {numberOfQuestions}")
|
162 |
|
163 |
|
164 |
|
|
|
5 |
import requests
|
6 |
import uuid
|
7 |
from datetime import date, datetime
|
8 |
+
from pydantic import BaseModel
|
|
|
9 |
from typing import Optional
|
10 |
|
11 |
+
# Placeholder personas
|
12 |
placeHolderPersona1 = """## Mission Statement
|
13 |
My mission is to utilize my expertise to aid in the medical triaging process by providing a clear, concise, and accurate assessment of potential arthritis related conditions.
|
14 |
+
|
15 |
# Triaging process
|
16 |
Ensure you stay on the topic of asking questions to triage the potential of Rheumatoid arthritis.
|
17 |
Ask only one question at a time.
|
|
|
22 |
|
23 |
placeHolderPersona2 = """## Mission
|
24 |
To analyse a clinical triaging discussion between a patient and AI doctor interactions with a focus on Immunology symptoms, medical history, and test results to deduce the most probable Immunology diagnosis.
|
25 |
+
|
26 |
## Diagnostic Process
|
27 |
Upon receipt of the clinical notes, I will follow a systematic approach to arrive at a diagnosis:
|
28 |
1. Review the patient's presenting symptoms and consider their relevance to immunopathology.
|
29 |
2. Cross-reference the gathered information with my knowledge base of immunology to identify patterns or indicators of specific immune disorders.
|
30 |
3. Formulate a diagnosis from the potential conditions.
|
31 |
4. Determine the most likely diagnosis and assign a confidence score from 1-100, with 100 being absolute certainty.
|
32 |
+
|
33 |
# Limitations
|
34 |
While I am specialized in immunology, I understand that not all cases will fall neatly within my domain. In instances where the clinical notes point to a condition outside of my expertise, I will provide the best possible diagnosis with the acknowledgment that my confidence score will reflect the limitations of my specialization in those cases"""
|
35 |
|
36 |
+
# Data model for API request
|
37 |
class ChatRequestClient(BaseModel):
|
|
|
38 |
user_id: str
|
39 |
user_input: str
|
40 |
numberOfQuestions: int
|
|
|
49 |
tokens2: int
|
50 |
temperature2: float
|
51 |
|
52 |
+
# Mock API call function
|
53 |
def call_chat_api(data: ChatRequestClient):
    """Stand-in for the real chat backend.

    Echoes the user's input back together with fixed timing/count metadata
    so the UI can be exercised without a live service.  Swap this out for an
    actual HTTP call once the backend endpoint is available.
    """
    echoed = f"Response to: {data.user_input}"
    # Whitespace-split word count serves as a crude proxy for token usage.
    approx_tokens = len(data.user_input.split())
    payload = {
        "content": echoed,
        "elapsed_time": 0.5,
        "count": 1,
        "response_tokens": approx_tokens,
    }
    return payload
|
|
|
|
|
|
|
|
|
61 |
|
62 |
+
# Utility functions
|
63 |
+
def genuuid():
    """Generate a fresh random (RFC 4122 version-4) UUID.

    The sidebar uses this to label the current session.
    """
    session_id = uuid.uuid4()
    return session_id
|
65 |
|
66 |
def format_elapsed_time(time):
    """Render an elapsed duration as a string with exactly two decimals."""
    return f"{time:.2f}"
|
68 |
|
69 |
+
# Layout with three columns
|
70 |
+
col1, col2, col3 = st.columns([1, 2, 1]) # Adjust ratios for desired width
|
71 |
+
|
72 |
+
# Left Column: Variables and Settings
|
73 |
+
with col1:
|
74 |
+
st.sidebar.image('cognizant_logo.jpg')
|
75 |
+
st.sidebar.header("Agent Personas Design")
|
76 |
+
st.sidebar.subheader("Intake AI")
|
77 |
+
numberOfQuestions = st.sidebar.slider(
|
78 |
+
"Number of Questions", min_value=0, max_value=10, step=1, value=5, key='persona1_questions'
|
79 |
+
)
|
80 |
+
persona1SystemMessage = st.sidebar.text_area(
|
81 |
+
"Define Intake Persona", value=placeHolderPersona1, height=300
|
82 |
+
)
|
83 |
+
llm1 = st.sidebar.selectbox("Model Selection", ['GPT-4', 'GPT3.5'], key='persona1_size')
|
84 |
+
temp1 = st.sidebar.slider(
|
85 |
+
"Temperature", min_value=0.0, max_value=1.0, step=0.1, value=0.6, key='persona1_temp'
|
86 |
+
)
|
87 |
+
tokens1 = st.sidebar.slider(
|
88 |
+
"Tokens", min_value=0, max_value=4000, step=100, value=500, key='persona1_tokens'
|
89 |
+
)
|
90 |
+
st.sidebar.subheader("Recommendation and Next Best Action AI")
|
91 |
+
persona2SystemMessage = st.sidebar.text_area(
|
92 |
+
"Define Recommendation Persona", value=placeHolderPersona2, height=300
|
93 |
+
)
|
94 |
+
llm2 = st.sidebar.selectbox("Model Selection", ['GPT-4', 'GPT3.5'], key='persona2_size')
|
95 |
+
temp2 = st.sidebar.slider(
|
96 |
+
"Temperature", min_value=0.0, max_value=1.0, step=0.1, value=0.5, key='persona2_temp'
|
97 |
+
)
|
98 |
+
tokens2 = st.sidebar.slider(
|
99 |
+
"Tokens", min_value=0, max_value=4000, step=100, value=500, key='persona2_tokens'
|
100 |
+
)
|
101 |
+
userMessage2 = st.sidebar.text_area(
|
102 |
+
"Define User Message", value="This is the conversation to date, ", height=150
|
103 |
+
)
|
104 |
+
st.sidebar.caption(f"Session ID: {genuuid()}")
|
105 |
+
|
106 |
+
# Middle Column: Chat Interface
|
107 |
+
with col2:
|
108 |
+
st.header("Chat with the Agents")
|
109 |
+
user_id = st.text_input("User ID:", key="user_id")
|
110 |
+
|
111 |
+
if not user_id:
|
112 |
+
st.warning("Please provide a User ID to start the chat.")
|
113 |
+
else:
|
114 |
+
# Initialize chat history
|
115 |
+
if "messages" not in st.session_state:
|
116 |
+
st.session_state.messages = []
|
117 |
+
|
118 |
+
# Display chat history
|
119 |
+
for message in st.session_state.messages:
|
120 |
+
with st.chat_message(message["role"]):
|
121 |
+
st.markdown(message["content"])
|
122 |
+
|
123 |
+
# Collect user input
|
124 |
+
if user_input := st.chat_input("Write your message here:"):
|
125 |
+
# Add user message
|
126 |
+
st.session_state.messages.append({"role": "user", "content": user_input})
|
127 |
+
st.chat_message("user").markdown(user_input)
|
128 |
+
|
129 |
+
# Prepare API data
|
130 |
+
data = ChatRequestClient(
|
131 |
+
user_id=user_id,
|
132 |
+
user_input=user_input,
|
133 |
+
numberOfQuestions=numberOfQuestions,
|
134 |
+
welcomeMessage="",
|
135 |
+
llm1=llm1,
|
136 |
+
tokens1=tokens1,
|
137 |
+
temperature1=temp1,
|
138 |
+
persona1SystemMessage=persona1SystemMessage,
|
139 |
+
persona2SystemMessage=persona2SystemMessage,
|
140 |
+
userMessage2=userMessage2,
|
141 |
+
llm2=llm2,
|
142 |
+
tokens2=tokens2,
|
143 |
+
temperature2=temp2
|
144 |
+
)
|
145 |
+
|
146 |
+
# Call the API
|
147 |
+
response = call_chat_api(data)
|
148 |
+
|
149 |
+
# Process response
|
150 |
+
agent_message = response.get("content", "No response received.")
|
151 |
+
elapsed_time = response.get("elapsed_time", 0)
|
152 |
+
count = response.get("count", 0)
|
153 |
+
response_tokens = response.get("response_tokens", 0)
|
154 |
+
|
155 |
+
# Add agent response
|
156 |
+
st.session_state.messages.append({"role": "assistant", "content": agent_message})
|
157 |
+
with st.chat_message("assistant"):
|
158 |
+
st.markdown(agent_message)
|
159 |
+
|
160 |
+
# Right Column: Stats
|
161 |
+
with col3:
|
162 |
+
st.header("Stats")
|
163 |
+
if "elapsed_time" in locals() and "count" in locals():
|
164 |
+
st.markdown(f"**Time taken:** {format_elapsed_time(elapsed_time)} seconds")
|
165 |
+
st.markdown(f"**Question Count:** {count} of {numberOfQuestions}")
|
166 |
+
st.markdown(f"**Response Tokens:** {response_tokens}")
|
167 |
+
else:
|
168 |
+
st.markdown("No stats available yet.")
|
169 |
+
|
170 |
+
|
171 |
+
# NOTE(review): everything from here to the end of the file is dead,
# commented-out legacy code carried over from the previous revision —
# consider deleting it outright (version control preserves the history).
# import os
|
172 |
+
# import streamlit as st
|
173 |
+
# from datetime import datetime
|
174 |
+
# import json
|
175 |
+
# import requests
|
176 |
+
# import uuid
|
177 |
+
# from datetime import date, datetime
|
178 |
+
# import requests
|
179 |
+
# from pydantic import BaseModel, Field
|
180 |
+
# from typing import Optional
|
181 |
+
|
182 |
+
# placeHolderPersona1 = """## Mission Statement
|
183 |
+
# My mission is to utilize my expertise to aid in the medical triaging process by providing a clear, concise, and accurate assessment of potential arthritis related conditions.
|
184 |
+
|
185 |
+
# # Triaging process
|
186 |
+
# Ensure you stay on the topic of asking questions to triage the potential of Rheumatoid arthritis.
|
187 |
+
# Ask only one question at a time.
|
188 |
+
# Provide some context or clarification around the follow-up questions you ask.
|
189 |
+
# Do not converse with the customer.
|
190 |
+
# Be as concise as possible.
|
191 |
+
# Do not give a diagnosis """
|
192 |
+
|
193 |
+
# placeHolderPersona2 = """## Mission
|
194 |
+
# To analyse a clinical triaging discussion between a patient and AI doctor interactions with a focus on Immunology symptoms, medical history, and test results to deduce the most probable Immunology diagnosis.
|
195 |
+
|
196 |
+
# ## Diagnostic Process
|
197 |
+
# Upon receipt of the clinical notes, I will follow a systematic approach to arrive at a diagnosis:
|
198 |
+
# 1. Review the patient's presenting symptoms and consider their relevance to immunopathology.
|
199 |
+
# 2. Cross-reference the gathered information with my knowledge base of immunology to identify patterns or indicators of specific immune disorders.
|
200 |
+
# 3. Formulate a diagnosis from the potential conditions.
|
201 |
+
# 4. Determine the most likely diagnosis and assign a confidence score from 1-100, with 100 being absolute certainty.
|
202 |
+
|
203 |
+
# # Limitations
|
204 |
+
# While I am specialized in immunology, I understand that not all cases will fall neatly within my domain. In instances where the clinical notes point to a condition outside of my expertise, I will provide the best possible diagnosis with the acknowledgment that my confidence score will reflect the limitations of my specialization in those cases"""
|
205 |
+
|
206 |
+
|
207 |
+
# class ChatRequestClient(BaseModel):
|
208 |
+
|
209 |
+
# user_id: str
|
210 |
+
# user_input: str
|
211 |
+
# numberOfQuestions: int
|
212 |
+
# welcomeMessage: str
|
213 |
+
# llm1: str
|
214 |
+
# tokens1: int
|
215 |
+
# temperature1: float
|
216 |
+
# persona1SystemMessage: str
|
217 |
+
# persona2SystemMessage: str
|
218 |
+
# userMessage2: str
|
219 |
+
# llm2: str
|
220 |
+
# tokens2: int
|
221 |
+
# temperature2: float
|
222 |
+
|
223 |
+
# def call_chat_api(data: ChatRequestClient):
|
224 |
+
# url = "https://agent-builder-api.greensea-b20be511.northeurope.azurecontainerapps.io/chat/"
|
225 |
+
# # Validate and convert the data to a dictionary
|
226 |
+
# validated_data = data.dict()
|
227 |
+
|
228 |
+
# # Make the POST request to the FastAPI server
|
229 |
+
# response = requests.post(url, json=validated_data)
|
230 |
+
|
231 |
+
# if response.status_code == 200:
|
232 |
+
# return response.json() # Return the JSON response if successful
|
233 |
+
# else:
|
234 |
+
# return "An error occured" # Return the raw response text if not successful
|
235 |
+
|
236 |
+
# def genuuid ():
|
237 |
+
# return uuid.uuid4()
|
238 |
+
|
239 |
+
# def format_elapsed_time(time):
|
240 |
+
# # Format the elapsed time to two decimal places
|
241 |
+
# return "{:.2f}".format(time)
|
242 |
+
|
243 |
+
|
244 |
+
# # Title of the application
|
245 |
+
# # st.image('agentBuilderLogo.png')
|
246 |
+
# st.title('LLM-Powered Agent Interaction')
|
247 |
+
|
248 |
+
# # Sidebar for inputting personas
|
249 |
+
# st.sidebar.image('cognizant_logo.jpg')
|
250 |
+
# st.sidebar.header("Agent Personas Design")
|
251 |
+
# # st.sidebar.subheader("Welcome Message")
|
252 |
+
# # welcomeMessage = st.sidebar.text_area("Define Intake Persona", value=welcomeMessage, height=300)
|
253 |
+
# st.sidebar.subheader("Intake AI")
|
254 |
+
# numberOfQuestions = st.sidebar.slider("Number of Questions", min_value=0, max_value=10, step=1, value=5, key='persona1_questions')
|
255 |
+
# persona1SystemMessage = st.sidebar.text_area("Define Intake Persona", value=placeHolderPersona1, height=300)
|
256 |
+
# with st.sidebar.expander("See explanation"):
|
257 |
+
# st.write("This AI persona will converse with the patient to gather their symptoms. With each round of chat, the object of the AI is to ask more specific follow up questions as it narrows down to the specific diagnosis. However this AI should never give a diagnosis")
|
258 |
+
# st.image("agentPersona1.png")
|
259 |
+
# llm1 = st.sidebar.selectbox("Model Selection", ['GPT-4', 'GPT3.5'], key='persona1_size')
|
260 |
+
# temp1 = st.sidebar.slider("Temperature", min_value=0.0, max_value=1.0, step=0.1, value=0.6, key='persona1_temp')
|
261 |
+
# tokens1 = st.sidebar.slider("Tokens", min_value=0, max_value=4000, step=100, value=500, key='persona1_tokens')
|
262 |
+
|
263 |
+
# # Persona 2
|
264 |
+
# st.sidebar.subheader("Recommendation and Next Best Action AI")
|
265 |
+
# persona2SystemMessage = st.sidebar.text_area("Define Recommendation Persona", value=placeHolderPersona2, height=300)
|
266 |
+
# with st.sidebar.expander("See explanation"):
|
267 |
+
# st.write("This AI persona uses the output of the symptom intake AI as its input. This AI’s job is to augment a health professional by assisting with a diagnosis and possible next best action. The teams will need to determine if this should be a tool used directly by the patient, as an assistant to the health professional or a hybrid of the two. ")
|
268 |
+
# st.image("agentPersona2.png")
|
269 |
+
# llm2 = st.sidebar.selectbox("Model Selection", ['GPT-4', 'GPT3.5'], key='persona2_size')
|
270 |
+
# temp2 = st.sidebar.slider("Temperature", min_value=0.0, max_value=1.0, step=0.1, value=0.5, key='persona2_temp')
|
271 |
+
# tokens2 = st.sidebar.slider("Tokens", min_value=0, max_value=4000, step=100, value=500, key='persona2_tokens')
|
272 |
+
# userMessage2 = st.sidebar.text_area("Define User Message", value="This is the conversation todate, ", height=150)
|
273 |
+
# st.sidebar.caption(f"Session ID: {genuuid()}")
|
274 |
+
# # Main chat interface
|
275 |
+
# st.header("Chat with the Agents")
|
276 |
+
|
277 |
+
# # User ID Input
|
278 |
+
# user_id = st.text_input("User ID:", key="user_id")
|
279 |
+
|
280 |
+
# # Ensure user_id is defined or fallback to a default value
|
281 |
+
# if not user_id:
|
282 |
+
# st.warning("Please provide a User ID to start the chat.")
|
283 |
+
# else:
|
284 |
+
# # Initialize chat history in session state
|
285 |
+
# if "messages" not in st.session_state:
|
286 |
+
# st.session_state.messages = []
|
287 |
+
|
288 |
+
# # Display chat messages from history on app rerun
|
289 |
+
# for message in st.session_state.messages:
|
290 |
+
# with st.chat_message(message["role"]):
|
291 |
+
# st.markdown(message["content"])
|
292 |
+
|
293 |
+
# # Collect user input
|
294 |
+
# if user_input := st.chat_input("Write your message here:"):
|
295 |
+
# # Add user message to the chat history
|
296 |
+
# st.session_state.messages.append({"role": "user", "content": user_input})
|
297 |
+
# st.chat_message("user").markdown(user_input)
|
298 |
+
|
299 |
+
# # Prepare data for API call
|
300 |
+
# data = ChatRequestClient(
|
301 |
+
# user_id=user_id, # Ensure user_id is passed correctly
|
302 |
+
# user_input=user_input,
|
303 |
+
# numberOfQuestions=numberOfQuestions,
|
304 |
+
# welcomeMessage="",
|
305 |
+
# llm1=llm1,
|
306 |
+
# tokens1=tokens1,
|
307 |
+
# temperature1=temp1,
|
308 |
+
# persona1SystemMessage=persona1SystemMessage,
|
309 |
+
# persona2SystemMessage=persona2SystemMessage,
|
310 |
+
# userMessage2=userMessage2,
|
311 |
+
# llm2=llm2,
|
312 |
+
# tokens2=tokens2,
|
313 |
+
# temperature2=temp2
|
314 |
+
# )
|
315 |
+
|
316 |
+
# # Call the API
|
317 |
+
# response = call_chat_api(data)
|
318 |
+
|
319 |
+
# # Process the API response
|
320 |
+
# agent_message = response.get("content", "No response received from the agent.")
|
321 |
+
# elapsed_time = response.get("elapsed_time", 0)
|
322 |
+
# count = response.get("count", 0)
|
323 |
+
|
324 |
+
# # Add agent response to the chat history
|
325 |
+
# st.session_state.messages.append({"role": "assistant", "content": agent_message})
|
326 |
+
# with st.chat_message("assistant"):
|
327 |
+
# st.markdown(agent_message)
|
328 |
|
329 |
+
# # Display additional metadata
|
330 |
+
# st.markdown(f"##### Time taken: {format_elapsed_time(elapsed_time)} seconds")
|
331 |
+
# st.markdown(f"##### Question Count: {count} of {numberOfQuestions}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
332 |
|
333 |
|
334 |
|