import streamlit as st
from openai import OpenAI

# Streamlit page configuration: browser-tab title, icon, centered layout.
# Must be the first Streamlit call in the script.
st.set_page_config(page_title="Qwen3-8B Chat", page_icon="🤖", layout="centered")

st.title("💬 Qwen3-8B (vLLM) 聊天测试")
st.markdown("与本地部署的 vLLM 模型进行实时对话")

# Seed the conversation once per browser session: the system prompt sets
# the assistant persona and is kept out of the rendered transcript.
if "messages" not in st.session_state:
    st.session_state.messages = [{"role": "system", "content": "你是一个中文智能助手。"}]

# Endpoint configuration — editable on every rerun so the user can point
# the app at a different vLLM server or model without restarting.
base_url = st.text_input("vLLM 接口地址", value="http://localhost:8000/v1")
model_name = st.text_input("模型名称", value="Qwen3-8B-Instruct")

# Chat input box at the bottom of the page; returns None until the user
# submits a message, so the turn-handling code below runs only on submit.
user_input = st.chat_input("请输入你的问题...")

# OpenAI-compatible client pointed at the local vLLM server.
client = OpenAI(base_url=base_url, api_key="EMPTY")  # vLLM does not validate the API key

# --- Transcript rendering and turn handling ---
# Render the stored history FIRST. The original script rendered it last,
# after the new user/assistant messages had already been both shown inline
# and appended to session_state — so the current exchange appeared twice
# in the same rerun. History-before-new-turn removes the duplication.
for msg in st.session_state.messages:
    if msg["role"] != "system":  # hide the system prompt from the transcript
        with st.chat_message(msg["role"]):
            st.markdown(msg["content"])

if user_input:
    # Record and display the user's turn.
    st.session_state.messages.append({"role": "user", "content": user_input})
    with st.chat_message("user"):
        st.markdown(user_input)

    # Query the model through the OpenAI-compatible chat completions API.
    with st.chat_message("assistant"):
        with st.spinner("思考中..."):
            try:
                response = client.chat.completions.create(
                    model=model_name,
                    messages=st.session_state.messages,
                    temperature=0.7,
                    max_tokens=512,
                )
                reply = response.choices[0].message.content
            except Exception as e:
                # Surface connection/API failures in the chat instead of
                # crashing the app with a traceback.
                reply = f"❌ 出错了: {e}"

            st.markdown(reply)

    # Persist the assistant's reply so it survives the next rerun.
    st.session_state.messages.append({"role": "assistant", "content": reply})
