import streamlit as st
import torch
from transformers import BertTokenizer, BertForSequenceClassification

from 词嵌入 import embedding, tokenizer as chat_tokenizer
from 商品咨询 import kayak_pro
from 联网搜索 import web_search
from 聊天模型 import LanguageModel

# --- Page configuration (must be the first Streamlit call) ---
st.set_page_config(
    page_title="张三电商客服",
    page_icon="🛒",
    layout="wide"
)

# --- Device selection: prefer GPU when available ---
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"运行设备: {'GPU' if device.type == 'cuda' else 'CPU'}")
# Initialize the session-state chat history: a list of
# {"role": ..., "content": ..., ["category": ...]} dicts.
if "history" not in st.session_state:
    st.session_state.history = []

# --- Sidebar controls ---
with st.sidebar:
    st.header("操作面板")
    # Clear-history button lives in the sidebar
    if st.button("清除对话历史", use_container_width=True, type="primary"):
        st.session_state.history = []
        st.rerun()  # current Streamlit API for forcing a rerun

    st.divider()
    st.info("客服系统支持：\n- 商品售前咨询\n- 技术问题解答\n- 投诉处理\n- 日常聊天")



# Load the intent-classification model and its tokenizer (cached by Streamlit)
@st.cache_resource
def load_classification_model():
    """Load the fine-tuned BERT intent classifier from disk.

    Returns:
        tuple: (tokenizer, model) — the model is moved to ``device``
        and switched to eval mode for inference.
    """
    weights_dir = 'weights/sort_model'
    tok = BertTokenizer.from_pretrained(weights_dir)
    clf = BertForSequenceClassification.from_pretrained(weights_dir)
    clf.to(device)
    clf.eval()
    return tok, clf


# Load the custom seq2seq chat model (cached by Streamlit)
@st.cache_resource
def load_chat_model():
    """Load the chat model's weights and prepare it for inference.

    Returns:
        LanguageModel: the model on ``device``, in eval mode.
    """
    model = LanguageModel()
    # weights_only=True restricts unpickling to tensors/containers, which is
    # all a state_dict needs — it blocks arbitrary-code execution from a
    # tampered checkpoint file (see torch.load security notes).
    state = torch.load("weights/best_model.pt", map_location=device,
                       weights_only=True)
    model.load_state_dict(state)
    model.to(device)
    model.eval()
    return model


# Intent-classification helper
def classify_text(text, cls_tokenizer, cls_model):
    """Classify a user message into one of four service categories.

    Args:
        text: raw user input string.
        cls_tokenizer: tokenizer paired with ``cls_model``.
        cls_model: BertForSequenceClassification already on ``device``.

    Returns:
        str: one of '技术问题', '售前咨询', '投诉', '聊天'.
    """
    label_map = {0: '技术问题', 1: '售前咨询', 2: '投诉', 3: '聊天'}
    enc = cls_tokenizer.encode_plus(
        text,
        add_special_tokens=True,
        max_length=128,
        truncation=True,
        padding='max_length',
        return_attention_mask=True,
        return_tensors='pt',
    )
    with torch.no_grad():
        logits = cls_model(
            enc['input_ids'].to(device),
            attention_mask=enc['attention_mask'].to(device),
        ).logits
    # argmax over the class dimension == torch.max(..., dim=1) indices
    predicted_class = logits.argmax(dim=1).item()
    return label_map[predicted_class]


# Greedy autoregressive decoding for the chat model
def chat(src, chat_model, max_len=50):
    """Generate a chat reply for the user message ``src`` via greedy decoding.

    Args:
        src: user input string.
        chat_model: seq2seq LanguageModel in eval mode on ``device``.
        max_len: maximum number of decoding steps (default 50).

    Returns:
        str: the generated reply, WITHOUT the end-of-sequence marker.
    """
    src, src_key_padding_mask = chat_tokenizer(src)
    src = src.to(device)
    src_key_padding_mask = src_key_padding_mask.to(device)

    tgt = '[CLS]'  # decoding starts from the BERT start token
    tokens = []
    for _ in range(max_len):
        _tgt, tgt_key_padding_mask = chat_tokenizer(tgt)
        _tgt = _tgt.to(device)
        tgt_key_padding_mask = tgt_key_padding_mask.to(device)

        with torch.no_grad():
            y = chat_model(src, _tgt, src_key_padding_mask, tgt_key_padding_mask)

        # Keep predictions only at non-padded target positions; the last one
        # is the next-token distribution.
        valid_y = y[tgt_key_padding_mask == 0]
        last_ids = valid_y.softmax(-1).argmax(-1)[-1].item()

        # 102 is BERT's [SEP] id. BUG FIX: stop BEFORE appending it — the
        # original appended the token first, so every naturally-terminated
        # reply ended with a literal "[SEP]" string.
        if last_ids == 102:
            break

        last_token = embedding.tokenizer.convert_ids_to_tokens([last_ids])[0]
        tokens.append(last_token)
        tgt += last_token
    return "".join(tokens)


# Render an assistant message: persist it to history, then display it.
# Extracted because the original duplicated this 9-line block in all four
# classification branches.
def _reply(response, category):
    """Append an assistant message to session history and render it."""
    st.session_state.history.append({
        "role": "assistant",
        "content": response,
        "category": category,
    })
    with st.chat_message("assistant"):
        st.write(response)
        st.caption(f"分类: {category}")


# Main application
def main():
    """Run the customer-service chat page: classify input, route to a handler."""
    st.title("🛒 张三电商客服")
    st.caption("请输入您的问题，我会尽快为您解答")
    st.divider()

    # Load (cached) models
    with st.spinner("加载模型中..."):
        cls_tokenizer, cls_model = load_classification_model()
        chat_model = load_chat_model()

    # Replay conversation history
    for msg in st.session_state.history:
        with st.chat_message(msg["role"]):
            st.write(msg["content"])
            if "category" in msg:
                st.caption(f"分类: {msg['category']}")
        st.markdown("---")

    # User input
    user_input = st.chat_input("请输入您的问题或消息:")

    if user_input and user_input.strip():
        # Record and display the user's message
        st.session_state.history.append({
            "role": "user",
            "content": user_input
        })
        with st.chat_message("user"):
            st.write(user_input)
        st.markdown("---")

        # 1. Classify the message
        with st.spinner("正在分析内容..."):
            category = classify_text(user_input, cls_tokenizer, cls_model)

        # 2. Route to the handler for that category; each branch only
        #    computes `response`, and _reply() does the shared bookkeeping.
        if category == "聊天":
            with st.spinner("正在生成回复..."):
                response = chat(user_input, chat_model)
        elif category == "售前咨询":
            with st.spinner("正在查询商品信息..."):
                response = kayak_pro(user_input)
        elif category == "技术问题":
            with st.spinner("正在联网搜索解决方案..."):
                search_results = web_search(user_input)
            response = "\n\n".join(
                f"参考信息 {i + 1}: {r.get('abstract', '无相关描述')}"
                for i, r in enumerate(search_results[:2])
            )
        else:  # 投诉 (complaint)
            response = f"我们会根据您的{category}，尽快处理您的反馈"

        _reply(response, category)
        st.markdown("---")


# Script entry point — launch the Streamlit app.
if __name__ == "__main__":
    main()