import streamlit as st
import torch
import torch.nn.functional as F
from train import LanguageModel, tokenizer, embedding, device
from sys3_位置编码 import PositionEncoding

# --- Page configuration ---
# Must run before any other Streamlit call; sets the browser tab
# title/icon and a wide layout for the chat transcript.
st.set_page_config(
    page_title="心理咨询问答系统",
    page_icon="🧠",
    layout="wide"
)

# Page header: app title followed by a horizontal rule.
st.title("🧠 心理咨询问答系统")
st.markdown("---")

# --- Model initialisation ---
@st.cache_resource
def load_model():
    """Build the LanguageModel, load its trained weights, and return it.

    Cached with ``st.cache_resource`` so the model is constructed once per
    server process and shared across sessions/reruns.

    Returns:
        The model in eval mode, or ``None`` when the checkpoint is missing
        or unreadable (an error banner is shown instead) — callers must
        check for ``None`` before use.
    """
    model = LanguageModel().to(device)
    try:
        model.load_state_dict(torch.load("weights/best_model.pth", map_location=device))
    except FileNotFoundError:
        st.error("❌ 模型文件未找到，请先训练模型")
        return None
    except (RuntimeError, KeyError) as exc:
        # A corrupted checkpoint or an architecture mismatch raises from
        # torch.load / load_state_dict; surface the reason instead of
        # crashing the whole Streamlit script run.
        st.error(f"❌ 模型权重加载失败：{exc}")
        return None
    model.eval()  # inference mode: disables dropout etc.
    st.success("✅ 模型加载成功")
    return model

model = load_model()

# Conversation history lives in the session state so it survives reruns.
st.session_state.setdefault("messages", [])

# Replay every stored turn so the transcript persists across reruns.
for past in st.session_state.messages:
    with st.chat_message(past["role"]):
        st.markdown(past["content"])

# --- Chat input & response generation ---

def _build_context(messages):
    """Serialise the chat history into the model's source-text format.

    Each turn becomes "用户：..." or "心理师：..." on its own line, and a
    trailing "心理师：" cue prompts the model to continue as the counsellor.
    """
    parts = []
    for msg in messages:
        prefix = "用户：" if msg["role"] == "user" else "心理师："
        parts.append(prefix + msg["content"])
    parts.append("心理师：")
    return "\n".join(parts)


if prompt := st.chat_input("请输入您的问题..."):
    # Echo the user's message immediately.
    with st.chat_message("user"):
        st.markdown(prompt)

    # Record the question in the history BEFORE building the model context,
    # so the context below already contains the current turn.
    st.session_state.messages.append({"role": "user", "content": prompt})

    if model is not None:
        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            full_response = ""

            # BUG FIX: the history already holds the current prompt
            # (appended above), so it must NOT be appended a second time —
            # the original code fed the user's question to the model twice.
            src_text = _build_context(st.session_state.messages)

            # Model inference — no gradients needed.
            with torch.no_grad():
                src_ids, src_mask = tokenizer([src_text])
                src_ids, src_mask = src_ids.to(device), src_mask.to(device)

                # Seed the decoder with [CLS] (falls back to BERT's id 101
                # when the tokenizer reports no cls_token_id).
                start_token = torch.tensor([[embedding.tokenizer.cls_token_id or 101]], device=device)
                tgt = start_token
                # All-False padding mask: every generated position is attended.
                tgt_mask = torch.zeros((1, 1), dtype=torch.bool, device=device)

                # Autoregressive sampling, capped at 100 tokens.
                for _ in range(100):
                    out = model(src_ids, tgt, src_mask, tgt_mask)
                    # Temperature 0.8 slightly sharpens the distribution.
                    next_token_logits = out[:, -1, :] / 0.8

                    # Sample the next token from the softmax distribution.
                    probs = F.softmax(next_token_logits, dim=-1)
                    next_token = torch.multinomial(probs, 1)

                    tgt = torch.cat([tgt, next_token], dim=1)
                    tgt_mask = torch.zeros((1, tgt.size(1)), dtype=torch.bool, device=device)

                    # [SEP] marks the end of the reply.
                    if next_token.item() == embedding.tokenizer.sep_token_id:
                        break

                reply = embedding.tokenizer.decode(tgt[0].tolist(), skip_special_tokens=True)
                # Collapse whitespace runs left by the tokenizer's decoding.
                # NOTE(review): for pure-Chinese output, "".join(reply.split())
                # may render better than " ".join — confirm against the
                # tokenizer's actual decode output before changing.
                full_response = " ".join(reply.split())

            message_placeholder.markdown(full_response)

        # Persist the assistant's reply in the history.
        st.session_state.messages.append({"role": "assistant", "content": full_response})
    else:
        st.error("模型未加载，无法生成回复")