
# Load environment variables from a local .env file (API credentials).
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv())

from zhipuai import ZhipuAI
client = ZhipuAI()  # module-level shared client; presumably reads its API key from the env — TODO confirm

def get_stream_completion(messages, model="glm-zero-preview"):
    """Request a streaming chat completion from the ZhipuAI API.

    Args:
        messages: chat history as a list of {"role", "content"} dicts.
        model: model identifier to query.

    Returns:
        The streaming response iterator yielded by the SDK.
    """
    # stream=True makes the SDK return chunks incrementally instead of
    # one final message.
    return client.chat.completions.create(
        model=model,
        messages=messages,
        stream=True,
    )

import streamlit as st

# Avatar icons shown next to each chat message
ICON_AI = '💻'
ICON_USER = '🧑'

# Render a single message (avatar + text) inside the given Streamlit container.
def dspMessage(role, content, container):
    avatar = ICON_AI if role == 'assistant' else ICON_USER
    with container:
        st.markdown(f"{avatar} {content}")

# Append a message to the session history, then render it.
def append_and_show(role, content, container):
    # Guard clause: silently ignore incomplete calls.
    if any(arg is None for arg in (role, content, container)):
        return
    st.session_state.messages.append({"role": role, "content": content})
    dspMessage(role, content, container)

def split_content(content):
    """Split a (possibly partial) model reply into thinking and response parts.

    The glm-zero-preview model emits text shaped like
    ``###Thinking <chain of thought> ###Response <final answer>``.

    Args:
        content: accumulated (possibly still-streaming) model output.

    Returns:
        A ``(thinking, response)`` tuple where either element may be None:
        - both markers present -> (text between the markers, text after "###Response")
        - only "###Thinking"   -> (text after it, None)   # response not streamed yet
        - neither marker       -> (None, the stripped content)
    """
    THINKING = "###Thinking"
    RESPONSE = "###Response"
    content = content.strip()
    if THINKING in content and RESPONSE in content:
        # partition() splits only on the FIRST occurrence, so a marker that
        # appears more than once (or "###Response" arriving before
        # "###Thinking") no longer raises ValueError on tuple unpacking,
        # as the unbounded split() did.
        after_thinking = content.partition(THINKING)[2]
        thinking, _, response = after_thinking.partition(RESPONSE)
        return thinking, response
    elif THINKING in content:
        return content.partition(THINKING)[2], None
    else:
        return None, content

# Run one full streamed model round-trip and render it incrementally.
def complete_interaction_Zhipu(short_term_memory, model="glm-zero-preview"):
    """Stream one model reply, rendering thinking/response as they arrive.

    Args:
        short_term_memory: list of {"role", "content"} messages sent as context.
        model: ZhipuAI model name (new keyword, default preserves old behavior).

    Returns:
        The final response text, or None if no "###Response" section arrived.

    Side effects:
        Renders into a fresh st.empty() container; on success appends the
        assistant reply to st.session_state.messages and stores it in
        st.session_state.assistant_message.
    """
    assistant_container = st.empty()  # dynamic area for the assistant message

    # Placeholder while waiting for the first chunks
    assistant_container.markdown(f"{ICON_AI} 思考中...")

    # Accumulate streamed text and re-split it on every chunk
    content = ""
    thinking = None
    response = None

    # Generator yielding streamed completion chunks
    stream_response = get_stream_completion(messages=short_term_memory,
                                            model=model)
    for chunk in stream_response:
        delta = chunk.choices[0].delta.content  # hoisted: read once per chunk
        if delta:
            content += delta
            thinking, response = split_content(content)
            if thinking:
                # Dim the chain-of-thought with inline CSS.
                # NOTE(review): `thinking` is model output injected as raw HTML
                # via unsafe_allow_html — confirm this is acceptable.
                grey_style = "color:#A9A9A9; margin-bottom: 10px; border-left: 2px solid #000; padding-left: 10px;"
                grey_text = f"<div style='{grey_style}'>{thinking}</div>"
                assistant_container.markdown(f"{ICON_AI} 思考中...{grey_text}", 
                                            unsafe_allow_html=True)
            if response:
                assistant_container.markdown(f"{ICON_AI} {response}")
        else:
            # Chunks without content (e.g. final usage chunk) are just logged
            print(f"\nchunk={chunk}\n")

    # Persist and render the completed reply
    if response:
        st.session_state.assistant_message = response
        append_and_show("assistant", response, assistant_container)
        return response
    else:
        return None



# Seed the conversation with a greeting the first time this script runs
# (Streamlit re-executes the whole file on every interaction, so guard it).
if 'messages' not in st.session_state:
    st.session_state.messages = [{"role": "assistant", 
            "content": "我是你的推理思维链助手，请告诉我要完成的任务是什么？"}]

# Re-render the full message history on every rerun
for msg in st.session_state.messages:
    container = st.empty()  # fresh dynamic area per message
    dspMessage(msg["role"], msg["content"], container)

# Read a user prompt from the chat box, echo it, and stream the model's reply.
if prompt := st.chat_input():
    user_container = st.empty()  # dynamic area for the user's message

    append_and_show("user", prompt, user_container)

    # Send the whole visible history as the model's short-term memory.
    # (Removed: an unused st.empty() assistant container that left a stray
    # blank element — complete_interaction_Zhipu creates its own; also
    # removed dead commented-out code.)
    messages = st.session_state.messages
    print("messages=\n", messages)
    complete_interaction_Zhipu(messages)

    print("\n【本轮对话完成】\n")

