import streamlit as st
import requests
import json
from typing import List, Dict

from openai import OpenAI

# 配置DeepSeek API
client = OpenAI(
    base_url='https://api-inference.modelscope.cn/v1/',
    api_key='b011f8fd-99a5-4633-8bf5-fef46cf7bd90',  # ModelScope Token
)

# 自定义CSS样式
STYLES = """
<style>
.think-block {
    background-color: #f0f0f0;
    border-left: 4px solid #4CAF50;
    padding: 1rem;
    margin: 1rem 0;
    font-style: italic;
    color: #555;
    border-radius: 0 5px 5px 0;
}
.container-border {
    border: 1px solid #e0e0e0;
    border-radius: 8px;
    padding: 1rem;
    margin-bottom: 1rem;
}
</style>
"""


class DeepSeekChatApp:
    def __init__(self):
        # 初始化会话状态
        if 'messages' not in st.session_state:
            st.session_state.messages = []

    def get_deepseek_response(self, messages: List[Dict], temperature: float = 0.7) -> str:
        """调用DeepSeek API获取响应"""
        try:
            response = client.chat.completions.create(
                model='deepseek-ai/DeepSeek-R1-Distill-Qwen-7B',  # ModelScope Model-Id
                messages=messages,
                stream=True
            )
            done_reasoning = False
            for chunk in response:
                reasoning_chunk = chunk.choices[0].delta.reasoning_content
                answer_chunk = chunk.choices[0].delta.content
                if reasoning_chunk != '':
                    print(reasoning_chunk, end='', flush=True)
                    yield 'think', reasoning_chunk
                elif answer_chunk != '':
                    if not done_reasoning:
                        print('\n\n === Final Answer ===\n')
                        done_reasoning = True
                    print(answer_chunk, end='', flush=True)
                    yield 'content', answer_chunk

        except requests.RequestException as e:
            st.error(f"API调用错误: {str(e)}")
            yield f"API调用错误: {str(e)}"

    def render_chat_interface(self):
        """渲染聊天界面"""
        st.markdown(STYLES, unsafe_allow_html=True)
        st.title("🤖 DeepSeek AI 对话助手")

        # 侧边栏配置
        with st.sidebar:
            st.header("对话设置")
            temperature = st.slider("创造力", 0.0, 1.0, 0.7, step=0.1)
            system_prompt = st.text_area("系统角色设定", "你是一个友好、专业的AI助手")

        # 显示历史消息
        for message in st.session_state.messages:
            with st.chat_message(message["role"]):
                st.markdown(message["content"])

        # 用户输入
        if prompt := st.chat_input("输入您的问题"):
            # 添加用户消息
            st.session_state.messages.append({"role": "user", "content": prompt})

            # 显示用户消息
            with st.chat_message("user"):
                st.markdown(prompt)

            # 准备API消息列表
            messages = [
                {"role": "system", "content": system_prompt},
                *[{"role": m["role"], "content": m["content"]} for m in st.session_state.messages]
            ]

            # 显示AI响应
            with st.chat_message("assistant"):
                response_placeholder = st.empty()
                full_response = ""
                full_think_response = ""
                think_count = 0
                # 流式显示响应
                for tag, chunk in self.get_deepseek_response(messages, temperature):
                    if 'think' == tag:
                        if think_count == 0:
                            full_think_response += f'<div class="think-block">'
                        think_count = 1
                        full_think_response += chunk
                        response_placeholder.markdown(full_think_response + "▌", unsafe_allow_html=True)
                    else:
                        if think_count == 1:
                            full_think_response += '</div>'
                            response_placeholder.markdown(full_think_response, unsafe_allow_html=True)
                            think_count = 0
                        full_response += chunk
                        response_placeholder.markdown(full_think_response + full_response + "▌", unsafe_allow_html=True)

                # response_placeholder.markdown(full_think_response + full_response)

            # 保存AI消息
            st.session_state.messages.append({"role": "assistant", "content": full_response})


def main():
    chat_app = DeepSeekChatApp()
    chat_app.render_chat_interface()


if __name__ == "__main__":
    main()
