import streamlit as st
from openai import OpenAI
# ChatBot project. Run with: streamlit run chatbot-web.py --server.port 8880
# Sidebar (left panel) is configured below.
# Logo image asset: images/1737880170574.png (local copy: D:/1737880170574.png)


# Sidebar: logo, title banner, and the two user-tunable chat settings.
with st.sidebar:
    st.image("images/1737880170574.png", width=100)
    # Title banner rendered as raw HTML (hence unsafe_allow_html=True).
    st.markdown(f"""
    <center>
    <h1>MotBot<sup><h5>韩天阳</h5></sup></h1>
    </center>
    """, unsafe_allow_html=True)
    # System-prompt (role definition) input box; forwarded to the LLM as the
    # "system" message. NOTE(review): variable name has a typo ("systen") —
    # it is referenced as-is at the call site further down, so it is kept.
    systen_message = st.text_area("角色定义", "你是一个能帮助用户的AI助手。")
    # Sampling temperature slider (0.0–2.0); higher values are more creative.
    temperature = st.slider("创造力调节", min_value=0.0, max_value=2.0, value=1.0, step=0.1, help='值越大约具有创造力',
                            format="%.1f")

st.title("AI聊天机器人")

# Seed the rendered chat history with a single greeting from the assistant
# the first time this session runs; later reruns keep the existing list.
st.session_state.setdefault("messages", [{
    "role": "assistant",
    "content": "Hi，我是Mobot~很高兴遇见你！有问必答，专注于懂你的AI"
}])

# Re-render every stored message on each Streamlit rerun.
for entry in st.session_state.messages:
    with st.chat_message(entry["role"]):
        st.markdown(entry["content"])

# 交流机器人后端
# Chatbot backend: OpenAI-compatible client.
# SECURITY(review): the API key is hard-coded in source; move it to an
# environment variable or st.secrets before sharing/deploying this file.
client = OpenAI(
    api_key="sk-on1j51cvbafu31osq7ukpslf3ace04fea1bk7ld3e4g26fjc",
    base_url="https://api.aihao123.cn/luomacode-api/open-api/v1/"
)
# Historical name kept for backward compatibility with the call site below.
self = client

# Persist the raw LLM message history across Streamlit reruns.
# Bug fix: the original checked st.session_state but never stored the list
# there, so the guard was dead code and the history was reset on every rerun.
if "messageHistory" not in st.session_state:
    st.session_state.messageHistory = []
messageHistory = st.session_state.messageHistory


def chat_stream(self, query, system_message=None, temperature=1.0):
    """Append the query to the shared history and return a streaming LLM reply.

    Args:
        self: an OpenAI client instance (historical parameter name, kept
            so existing positional callers keep working).
        query: the user's message text.
        system_message: optional system prompt; added to the history at most
            once, so a persistent history does not accumulate duplicates.
        temperature: sampling temperature passed through to the API.

    Returns:
        The streaming response iterator from ``chat.completions.create``.
    """
    # Bug fix: the original appended the system message unconditionally on
    # every call, duplicating it whenever the history is reused.
    if system_message and not any(
        m["role"] == "system" and m["content"] == system_message
        for m in messageHistory
    ):
        messageHistory.append({"role": "system", "content": system_message})
    messageHistory.append({"role": "user", "content": query})
    response = self.chat.completions.create(
        model="gpt-4o",  # other models (e.g. "gpt-4") can be substituted
        messages=messageHistory,
        temperature=temperature,
        stream=True,
    )
    return response


# Bottom-of-page chat input box; returns None until the user submits text.
user_query = st.chat_input("说点什么。。.")
if user_query:
    # Echo the user's message into the chat window immediately.
    with st.chat_message("user"):
        st.write(user_query)
    # Record it in the rendered-history list so reruns re-display it.
    st.session_state.messages.append({"role": "user", "content": user_query})
    with st.chat_message("assistant"):
        # Spinner while waiting for the first streamed token.
        with st.spinner(""):
            response = chat_stream(self, user_query, systen_message, temperature)
            message_placeholder = st.empty()
            ai_response = ""
            for chunk in response:
                # Some stream chunks (e.g. the terminal one) carry no content.
                if chunk.choices and chunk.choices[0].delta.content:
                    ai_response += chunk.choices[0].delta.content
                    # Bug fix: the original appended "" (a no-op); show the
                    # conventional typing cursor while the answer streams in.
                    message_placeholder.markdown(ai_response + "▌")
            # Final render without the cursor.
            message_placeholder.markdown(ai_response)
        st.session_state.messages.append({"role": "assistant", "content": ai_response})

# The OpenAI-compatible service above handles the user's question and the
# LLM answer is streamed token-by-token into the chat window.