import streamlit as st
from openai import OpenAI
import argparse
import time

# ---- Command-line configuration -----------------------------------------
parser = argparse.ArgumentParser(description='Chatbot Interface with Customizable Parameters')
parser.add_argument('--model-url', type=str, default='http://0.0.0.0:12345/v1', help='Model URL')
parser.add_argument('--temp', type=float, default=0.6, help='Temperature for text generation')
parser.add_argument('--stop-token-ids', type=str, default='', help='Comma-separated stop token IDs')
parser.add_argument("--host", type=str, default=None)
parser.add_argument("--port", type=int, default=12348)
args = parser.parse_args()

# ---- OpenAI-compatible client -------------------------------------------
# vLLM's server does not validate the key, so any placeholder works.
openai_api_key = "EMPTY"
openai_api_base = args.model_url

client = OpenAI(api_key=openai_api_key, base_url=openai_api_base)

# Ask the server which models it serves and use the first one.
models = client.models.list()
model_id = models.data[0].id


def predict(message, history=None):
    """Stream a model reply for *message*, given prior (user, assistant) turns.

    Args:
        message: The new user prompt.
        history: Optional list of ``(human, assistant)`` string pairs from
            earlier rounds. Defaults to no history. (The original used a
            mutable default ``[]``, which is shared across calls — fixed.)

    Yields:
        str: Incremental text chunks from the streaming completion.
        The literal chunks ``<think>`` / ``</think>`` are rewritten into
        human-readable section markers. NOTE(review): this assumes the
        server delivers each think tag as an exact, single chunk — confirm
        against the model's tokenizer/stream behavior.
    """
    if history is None:
        history = []

    # Build the OpenAI-format message list. BUGFIX: the original built this
    # system prompt and then immediately overwrote the list with `[]`,
    # silently discarding it; the dead reassignment is removed so the
    # system prompt is actually sent.
    messages = [{
        "role": "system",
        "content": "你是一个专业的助手，请用简洁清晰的语言回答问题，避免重复内容。"
    }]
    for human, assistant in history:
        messages.append({"role": "user", "content": human})
        messages.append({"role": "assistant", "content": assistant})
    messages.append({"role": "user", "content": message})

    # Create a streaming chat completion request against the vLLM server.
    stream = client.chat.completions.create(
        model=model_id,  # Model name to use
        messages=messages,  # Chat history
        temperature=args.temp,  # Temperature for text generation
        stream=True,  # Stream response
        extra_body={
            'repetition_penalty': 1,
            # Parse "--stop-token-ids 1,2,3" into [1, 2, 3]; empty -> [].
            'stop_token_ids': [
                int(tok.strip()) for tok in args.stop_token_ids.split(',')
                if tok.strip()
            ] if args.stop_token_ids else []
        })

    # Relay each delta to the caller. (The original also accumulated the
    # full response in an unused variable; that dead accumulation is gone.)
    for chunk in stream:
        piece = chunk.choices[0].delta.content or ""
        if piece == "<think>":
            piece = "⏱️ 思考过程：\n"
        elif piece == "</think>":
            piece = "\n📅 回答结果:\n"
        yield piece

# ---- Page layout --------------------------------------------------------
st.title("🤖 DeepSeek-R1对话助手")

# Display-only chat log kept in session state; it is never fed back to the
# model, only rendered on each rerun.
if "dialogs" not in st.session_state:
    st.session_state["dialogs"] = []

# Show the five most recent question/answer rounds.
for entry in st.session_state.dialogs[-5:]:
    with st.chat_message("user"):
        st.write(entry["question"])
    with st.chat_message("assistant"):
        st.write(entry["answer"])

# ---- Sidebar control panel ----------------------------------------------
with st.sidebar:
    st.header("控制面板")

    # Wipe the display-only history and redraw the page immediately.
    if st.button("清空对话记录"):
        st.session_state.dialogs = []
        st.rerun()


# ---- User input handling ------------------------------------------------
if prompt := st.chat_input("请输入您的问题"):
    # Generate the reply.
    with st.spinner("正在生成回复..."):
        try:
            # BUGFIX: predict() is a generator function, so the original
            # stored the *generator object* in the dialog log — st.write
            # cannot render that as text, and it would be exhausted after a
            # single pass. Join the stream into a plain string instead.
            # Consuming inside the `try` also means errors raised *during*
            # streaming are caught, not only errors creating the generator.
            response = "".join(predict(prompt))
        except Exception as e:
            response = f"生成错误：{str(e)}"

    # Record the finished round (display only — see the dialog log above).
    st.session_state.dialogs.append({"question": prompt, "answer": response})

    # Refresh so the new round is rendered right away.
    st.rerun()

# ---- Sidebar status footer ----------------------------------------------
# Fixed-position HTML snippet showing the model name and the time this
# script was last (re)run.
_footer_html = f"""
<div style="position: fixed; bottom: 10px; color: #666;">
    <small>模型版本：DeepSeek-R1<br>
    最后加载：{time.strftime('%Y-%m-%d %H:%M')}</small>
</div>
"""
st.sidebar.markdown(_footer_html, unsafe_allow_html=True)
