File size: 5,235 Bytes
4c9038c
 
 
 
 
 
 
 
 
 
 
 
 
 
41c14df
 
 
 
 
 
 
 
 
 
08449d9
3db4edb
 
 
 
 
 
 
 
 
 
08449d9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41c14df
 
 
4c9038c
 
dc74040
4c9038c
 
 
 
 
 
 
 
f2f6fcf
4c9038c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
678b96b
 
9196b86
678b96b
 
f9a36d0
678b96b
4c9038c
 
 
 
 
 
 
 
 
 
7a849f3
 
4c9038c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
import streamlit as st
import requests
import openai
import os
# Initialize API keys and headers.
# os.getenv returns None when the variable is missing, and
# "Bearer " + None would raise a TypeError at import time and crash the
# whole app; fall back to "" so a missing token instead surfaces as a
# clean 401 from the inference API.
api_key = os.getenv("api_key") or ""
openai.api_key = os.getenv("OPENAI_API_KEY")
bearer = "Bearer " + api_key
# Hosted Hugging Face inference endpoint for the skin-lesion classifier.
API_URL = "https://api-inference.huggingface.co/models/Waqasjan123/Skin_Cancer_Detector_Live"
HEADERS = {"Authorization": bearer}

# Chat history accumulator fed back into later prompts.
# NOTE(review): Streamlit re-executes this script on every interaction,
# so this module-level string resets each rerun; durable history should
# live in st.session_state — confirm before relying on it.
stringi = ""

# Inject custom CSS to change the avatar size.
# NOTE(review): the class names below are auto-generated by a specific
# Streamlit build and may break on upgrade — verify against the deployed
# Streamlit version.
custom_css = """
<style>
/* Replace your selector and styles */
.css-h1sjnp.eeusbqq0 {
    width: 100px !important;
    height: 100px !important;
}
</style>
"""

# CSS tweaks for the collapsed-sidebar toggle: make it red and larger,
# then add a 2-second opacity pulse so users notice it.
_toggle_color_css = """
    <style>
        [data-testid="collapsedControl"] {
            color: red;
            font-size: 2em;
        }
    </style>
    """

_toggle_pulse_css = """
    <style>
        [data-testid="collapsedControl"] {
            animation: pulse 2s infinite;
        }
        @keyframes pulse {
            0% {
                opacity: 1;
            }
            50% {
                opacity: 0.5;
            }
            100% {
                opacity: 1;
            }
        }
    </style>
    """

# Inject each stylesheet in order: toggle colour, pulse animation, then
# the avatar-size override defined above.
for _css in (_toggle_color_css, _toggle_pulse_css, custom_css):
    st.markdown(_css, unsafe_allow_html=True)


# Seed the conversation exactly once per browser session: session_state
# survives Streamlit reruns, so the greeting is only added on first load.
if "messages" not in st.session_state:
    _greeting = "This AI model is designed to classify skin images into specific categories: Actinic Keratoses (akiec), Basal Cell Carcinoma (bcc), Benign Keratosis (bkl), Dermatofibroma (df), Melanoma (mel), Melanocytic Nevi (nv), and Vascular Lesions (vasc). It won't recognize general skin images. You can ask more."
    st.session_state["messages"] = [{"role": "assistant", "content": _greeting}]

# Define the model query function with error handling for HuggingFace
def query_model(image_data):
    """POST raw image bytes to the Hugging Face inference endpoint.

    Parameters
    ----------
    image_data : bytes
        Raw bytes of the uploaded image file.

    Returns
    -------
    list | dict | None
        The decoded JSON response on success (the sidebar code expects a
        list of {"label", "score"} dicts), or None when the request
        fails, after showing a user-facing error.
    """
    try:
        # Explicit timeout: requests has no default, so a cold or
        # unreachable model endpoint would otherwise hang the app forever.
        response = requests.post(API_URL, headers=HEADERS, data=image_data, timeout=30)
        response.raise_for_status()
        return response.json()
    except requests.RequestException:
        # The hosted model often needs a few seconds to warm up from a
        # cold start, hence the "try again" wording.
        st.error("Error querying the model: Setting Up The Server Please Try Again in 5 Seconds")
        return None

# Main
# Page header followed by a horizontal rule.
st.title("Skin Check Pro")
st.write("---")

# Upload Image
# Single file-input widget that drives the rest of the page; only
# jpg/png/webp files are accepted.
uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "png", "webp"])

if uploaded_file:
    # Classify the uploaded image on this rerun; on success `results`
    # is a list of {"label", "score"} dicts, otherwise None or an
    # error payload from the inference API.
    image_data = uploaded_file.read()
    results = query_model(image_data)

    # Display image and results on the sidebar
    with st.sidebar:
        st.image(image_data, caption='Uploaded Image.', width=250)
        # Guard with isinstance: while the hosted model is loading the
        # API can return a dict like {"error": ...}, which is truthy and
        # would crash the indexing below.
        if isinstance(results, list):
            for classification in results:
                st.write(f"**{classification['label']}**")
                st.write(f"Score: {classification['score']:.2f}")
                st.progress(classification['score'])
                st.write("---")

    st.write("## Ai Skin Check Assistant")  # typo fixed ("Assitant")
    st.write("Have questions? Type your message below.")

    # Replay the stored conversation; the assistant gets a custom avatar.
    for msg in st.session_state.messages:
        avatar_image = "LOGO.png" if msg["role"] == "assistant" else None
        st.chat_message(msg["role"], avatar=avatar_image).write(msg["content"])

    # User input
    user_input = st.chat_input("Type your message here...")

    if user_input:
        # Record and echo only the user's raw message in the chat UI.
        st.session_state.messages.append({"role": "user", "content": user_input})
        st.chat_message("user").write(user_input)

        # Keep the running assistant transcript in session_state: a
        # module-level string resets on every Streamlit rerun, so the
        # "discussed before" context would otherwise always be empty.
        history = st.session_state.get("history", "")

        # Prepare complete message to send to OpenAI (prompt typos
        # "a expert"/"DOCOTRS" fixed below).
        complete_message = (
            f"This is what we discussed before: {history}. "
            f"The result for the uploaded image is: {results}. "
            f"This is the new question: {user_input}. "
            "[IF SOMEONE IS SPECIFICALLY ASKING ABOUT THIS PROJECT OR CANCER DETECTION AI ONLY THEN MENTION SOMETHING LIKE THIS, Dr. Aisha Zargham led a team of experts, combining her rich medical knowledge with advances in artificial intelligence to create this game-changing skin cancer detection system.] "
            "[KEEP YOUR MESSAGE SHORT] "
            "[IN THE DISEASES FOCUS MAINLY THE DISEASE WITH THE HIGHEST SCORE IN RESULTS]"
        )
        conversation_messages = [
            {"role": "system", "content": "You are an expert dermatologist on a website where we analyze images through Ai for Skin Cancer. [KEEP YOUR ANSWER SHORT] [YOU ARE NOT TALKING WITH PATIENTS, YOU ARE TALKING WITH DOCTORS WHO ARE DIAGNOSING PATIENTS]"},
            {"role": "user", "content": complete_message},
        ]

        # Generate assistant's response (legacy openai<1.0 ChatCompletion
        # API, matching the rest of this file).
        completion = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=conversation_messages,
        )
        # Normalise to a plain str/dict so session_state holds uniform
        # message dicts instead of OpenAI response objects.
        assistant_text = completion.choices[0].message["content"]

        # Update persistent chat history
        st.session_state["history"] = history + assistant_text

        # Append assistant's message to conversation and display it
        st.session_state.messages.append({"role": "assistant", "content": assistant_text})
        st.chat_message("assistant", avatar="LOGO.png").write(assistant_text)

else:
    st.warning("Please upload an image to get started.")

# Disclaimer
# Rendered unconditionally at the bottom of the page, with or without an
# uploaded image.
st.write("---")
st.write("**Note on Usage:**")
st.write("This tool is based on research and is intended for informational purposes only. For medical advice, consult with a dermatologist.")