import streamlit as st
from langchain_core.output_parsers import StrOutputParser

from create_llm import create_llm, chain_log
from embedding import get_embedding, query_book_with_embedding, query_author_with_embedding
from langchain.prompts import PromptTemplate

def format_book(result):
    """Format a book-query result dict into a bullet-list detail string.

    Args:
        result: mapping with optional keys 书名/出版时间/简介/作者/出版社/类别
            (strings) and 关键字 (list of strings). Missing, ``None`` or
            empty values are skipped.

    Returns:
        Newline-joined ``- key:value`` lines; empty string if nothing is set.
    """
    # `or []` guards against the key existing with an explicit None value,
    # which dict.get(key, []) does NOT protect against (join(None) raises).
    fields = [
        ("书名", result.get("书名")),
        ("出版时间", result.get("出版时间")),
        ("简介", result.get("简介")),
        ("作者", result.get("作者")),
        ("出版社", result.get("出版社")),
        ("类别", result.get("类别")),
        ("关键字", ",".join(result.get("关键字") or [])),
    ]
    # Falsy values (None, "", empty join result) are filtered out.
    return "\n".join(f"- {k}:{v}" for k, v in fields if v)

def format_author(result):
    """Format an author-query result dict into a bullet-list detail string.

    Args:
        result: mapping with optional keys 姓名 (string) and 所著图书
            (list of book titles). Missing, ``None`` or empty values are
            skipped.

    Returns:
        Newline-joined ``- key:value`` lines; empty string if nothing is set.
    """
    # `or []` guards against 所著图书 existing with an explicit None value;
    # dict.get(key, []) alone would let join(None) raise TypeError.
    fields = [
        ("姓名", result.get("姓名")),
        ("所著图书", ",".join(result.get("所著图书") or [])),
    ]
    return "\n".join(f"- {k}:{v}" for k, v in fields if v)

def format_context(type, results):
    """Render similarity-search results as numbered, blank-line-separated text.

    Args:
        type: query category; "图书" formats each result as a book,
            anything else as an author.
        results: iterable of dicts, each carrying at least 'name' and
            'similarity' plus the type-specific detail fields.

    Returns:
        One section per result ("N. name (相似度:...)" header plus detail
        lines), sections separated by a blank line.
    """
    sections = []
    for rank, item in enumerate(results, 1):
        section = f"{rank}. {item['name']} (相似度:{item['similarity']:.4f})\n"
        body = format_book(item) if type == "图书" else format_author(item)
        if body:
            section += f"{body}\n"
        sections.append(section)
    return "\n\n".join(sections)

# Module-level RAG prompt: instructs the LLM to act as a book-knowledge
# assistant and answer strictly from the retrieved context, replying
# "根据现有信息无法确定" when the context is insufficient.
# Filled at query time with {question} (user input) and {context}
# (output of format_context()).
prompt_template = PromptTemplate(
    input_variables=["question", "context"],
    template="""
        你是一名图书知识助手，你需要根据提供的图书信息回答用户的提问。
        请直接回答，如果信息不足，请回答\"根据现有信息无法确定\"。
        问题：{question}
        图书信息：\n{context}
    """
)

def main():
    """Streamlit entry point for the book knowledge-graph QA chat app.

    Renders a sidebar (query type, top-k, temperature, model choice, query
    history), replays the chat transcript from session state, and for each
    new question runs embedding retrieval + an LLM chain to produce the
    answer. All state lives in ``st.session_state`` across reruns.
    """
    st.set_page_config(page_title="图书知识图谱问答系统", layout="wide")
    if "history" not in st.session_state:
        st.session_state.history = []
    with st.sidebar:
        st.markdown("### 参数设置", help="配置参数")
        # Query type: book search vs. author search.
        query_type = st.radio("选择查询类型", ["图书", "作者"])
        top_k = st.slider("返回结果数量(Top K)", min_value=1, max_value=10, value=3, step=1)
        # Chat-completion APIs (DeepSeek / Qwen / Doubao) accept temperature
        # in [0, 2]; the previous upper bound of 10.0 would be rejected by
        # the provider at request time.
        temperature = st.slider("温度 Temperature", min_value=0.0, max_value=2.0, value=0.3, step=0.1)
        # Display name -> provider/model identifier consumed by create_llm().
        model_options_dict = {
            "豆包": "huoshan-doubao",
            "deepseek": "deepseek-v3",
            "通义千问": "bailian-qwen-turbo"
        }
        model_type = st.radio("选择大模型类型", options=list(model_options_dict))
        st.markdown("### 历史查询")
        if st.session_state.history:
            for i, item in enumerate(st.session_state.history, 1):
                with st.expander(f"查询{i}:{item['question']}"):
                    st.json(item)
    st.markdown("<h3 style='text-align: center;color: green'>图书知识图谱问答系统</h3>", unsafe_allow_html=True)

    if "messages" not in st.session_state:
        st.session_state.messages = []
    # Replay the transcript: the script reruns on every interaction, so the
    # persisted messages must be re-rendered each time.
    for message in st.session_state.messages:
        st.chat_message(message["role"]).write(message["content"])
    if query := st.chat_input("输入相关的问题进行提问"):
        st.session_state.messages.append({"role": "user", "content": query})
        st.chat_message("user").write(query)
        with st.spinner("正在查询中..."):
            try:
                query_embedding = get_embedding(query)
                if query_type == "图书":
                    results = query_book_with_embedding(query_embedding, top_k=top_k)
                else:
                    results = query_author_with_embedding(query_embedding, top_k=top_k)
                if not results:  # covers both None and an empty result list
                    answer = "抱歉，没有找到相关信息"
                else:
                    context = format_context(query_type, results)
                    # prompt -> LLM -> logging tap -> plain-string answer.
                    _chain = prompt_template | create_llm(model_options_dict[model_type], temperature=temperature) | chain_log() | StrOutputParser()
                    answer = _chain.invoke({
                        "question": query,
                        "context": context,
                    })
                    st.session_state.history.append({
                        "question": query,
                        "query_type": query_type,
                        "context": context,
                        "answer": answer,
                        "temperature": temperature,
                    })
                    with st.expander("查看详细结果"):
                        st.json({"type": query_type, "results": results})
                st.session_state.messages.append({"role": "assistant", "content": answer})
                st.chat_message("assistant").write(answer)
                # NOTE(review): st.rerun() immediately restarts the script,
                # which discards the "查看详细结果" expander rendered above.
                # Kept for parity with the original flow — confirm intent.
                st.rerun()
            except Exception as e:
                # Show the failure in the UI instead of printing to stdout,
                # where the user would never see it.
                st.error(f"回答发生错误:{e}")


if __name__ == "__main__":
    main()