import streamlit as st
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from main import EmbeddingKnowledgeBase
from collections import Counter
import re
from datetime import datetime
import json
import time

# Page configuration: title, icon, wide layout, sidebar expanded on load.
# Must run before any other Streamlit call in the script.
st.set_page_config(
    page_title="公共卫生知识库Pro",
    page_icon="🧪",
    layout="wide",
    initial_sidebar_state="expanded"
)

# Inject custom CSS: gradient page header, hoverable result cards,
# keyword highlight style, tag pills and stat widgets.
st.markdown("""
<style>
    .main-header {
        font-size: 2.8rem;
        background: linear-gradient(90deg, #1e3c72, #2a5298);
        -webkit-background-clip: text;
        -webkit-text-fill-color: transparent;
        text-align: center;
        padding: 0.5rem 0;
        margin-bottom: 1rem;
    }
    .subheader {
        font-size: 1.3rem;
        color: #6c757d;
        text-align: center;
        margin-bottom: 2rem;
    }
    .result-card {
        background-color: #f8f9fa;
        border-radius: 8px;
        padding: 20px;
        margin-bottom: 20px;
        border-left: 5px solid #3498db;
        box-shadow: 0 2px 5px rgba(0,0,0,0.1);
        transition: transform 0.2s;
    }
    .result-card:hover {
        transform: translateY(-3px);
        box-shadow: 0 4px 8px rgba(0,0,0,0.15);
    }
    .source-info {
        color: #6c757d;
        font-size: 0.9rem;
        margin-top: 12px;
    }
    .similarity-score {
        color: #2ecc71;
        font-weight: bold;
    }
    .result-content {
        margin: 12px 0;
        line-height: 1.6;
        color: #333;
    }
    .highlight {
        background-color: #fffacd;
        padding: 2px 0;
        border-radius: 3px;
    }
    .keyword-tag {
        display: inline-block;
        background-color: #e9ecef;
        color: #495057;
        padding: 3px 10px;
        margin: 3px;
        border-radius: 15px;
        font-size: 0.85rem;
    }
    .stat-card {
        background-color: white;
        border-radius: 8px;
        padding: 15px;
        box-shadow: 0 2px 5px rgba(0,0,0,0.05);
        text-align: center;
    }
    .stat-value {
        font-size: 1.8rem;
        font-weight: bold;
        color: #1e3c72;
    }
    .stat-label {
        font-size: 0.9rem;
        color: #6c757d;
    }
</style>
""", unsafe_allow_html=True)

# Seed session-state slots on first run; later reruns keep existing values.
for _key, _default in (
    ("search_history", []),
    ("last_query", ""),
    ("knowledge_base", None),
    ("embedding_status", None),
):
    if _key not in st.session_state:
        st.session_state[_key] = _default

# Highlight query keywords in retrieved text
def highlight_keywords(text, query_terms):
    """Wrap every occurrence of a query term in a highlight <span>.

    Single-character terms are ignored. All terms are merged into one
    case-insensitive alternation and substituted in a single pass, so a
    term can never match inside the HTML inserted for an earlier term
    (the old per-term loop corrupted output for queries like "span class").
    A replacement callback keeps the matched text itself, preserving the
    original casing and avoiding backslash-escape issues in the term.

    Args:
        text: snippet to decorate.
        query_terms: list of raw query tokens.

    Returns:
        The text with matches wrapped in <span class="highlight">.
    """
    terms = [t for t in query_terms if len(t) > 1]
    if not terms:
        return text
    # Longest-first so overlapping terms prefer the longer match.
    pattern = re.compile(
        "|".join(re.escape(t) for t in sorted(terms, key=len, reverse=True)),
        re.IGNORECASE,
    )
    return pattern.sub(lambda m: f'<span class="highlight">{m.group(0)}</span>', text)

# Record one search in the session history (capped at the 20 newest entries)
def save_search_history(query, results):
    """Append a history record for *query* and trim the list to 20 items."""
    entry = {
        "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        "query": query,
        "results_count": len(results),
        "top_similarity": results[0]["similarity"] if results else 0,
    }
    history = st.session_state.search_history
    history.append(entry)
    # Drop the oldest entries in place so at most 20 remain.
    del history[:-20]

# Load or build the knowledge base, reusing a cached embeddings file if present
def load_knowledge_base(docs_dir, embedding_model, force_rebuild=False):
    """Return a ready EmbeddingKnowledgeBase, or None on failure.

    Args:
        docs_dir: directory containing the source documents.
        embedding_model: embedding model name passed to the knowledge base.
        force_rebuild: when True, skip the cache and rebuild from scratch.

    Side effects:
        Writes/reads "embeddings.npz" in the working directory and publishes
        a status message via st.session_state.embedding_status.
    """
    kb = EmbeddingKnowledgeBase(documents_dir=docs_dir, model_name=embedding_model)

    # NOTE(review): the cache file name is not keyed on embedding_model, so
    # switching models in the sidebar and clicking "加载知识库" can load
    # embeddings produced by a different model — confirm and rebuild manually
    # after changing models.
    embedding_file = "embeddings.npz"

    # Build from scratch when forced or when no cache exists.
    if force_rebuild or not os.path.exists(embedding_file):
        with st.spinner("正在构建知识库，这可能需要几分钟..."):
            start_time = time.time()
            success = kb.build_knowledge_base()
            end_time = time.time()

            if success:
                # Persist embeddings so the next load can skip the build.
                kb.save_embeddings(embedding_file)
                st.session_state.embedding_status = f"知识库构建成功！用时 {end_time - start_time:.2f} 秒"
                return kb
            st.session_state.embedding_status = "知识库构建失败，请检查日志"
            return None

    # Fast path: load the cached embeddings.
    with st.spinner("正在加载知识库..."):
        start_time = time.time()
        success = kb.load_embeddings(embedding_file)
        end_time = time.time()

        if success:
            st.session_state.embedding_status = f"知识库加载成功！用时 {end_time - start_time:.2f} 秒"
            return kb

        # Cache unreadable: delete it and rebuild once (the recursive call
        # takes the force_rebuild branch, so recursion cannot loop).
        st.session_state.embedding_status = "知识库加载失败，尝试重新构建"
        if os.path.exists(embedding_file):
            os.remove(embedding_file)
        return load_knowledge_base(docs_dir, embedding_model, True)

# Sidebar: data source, embedding model and search configuration,
# plus knowledge-base load/rebuild controls and statistics.
with st.sidebar:
    st.title("公共卫生知识库")

    # Documents directory path (created on the main page if missing)
    docs_dir = st.text_input("文档目录路径", value="./public_health_docs")

    # Embedding model choice; presumably sentence-transformers model names —
    # verify against EmbeddingKnowledgeBase in main.py.
    embedding_model = st.selectbox(
        "嵌入模型",
        [
            "paraphrase-multilingual-MiniLM-L12-v2",
            "all-MiniLM-L6-v2",
            "distiluse-base-multilingual-cased-v2"
        ],
        index=0
    )

    # Search parameters used by the retrieval tab
    st.subheader("搜索参数")

    num_results = st.slider("最大结果数量", min_value=1, max_value=30, value=10)
    similarity_threshold = st.slider("相似度阈值", min_value=0.0, max_value=1.0, value=0.3, step=0.05)

    # Load (use cache if any) / rebuild (force) the knowledge base
    col1, col2 = st.columns(2)
    with col1:
        load_button = st.button("加载知识库", use_container_width=True)
    with col2:
        rebuild_button = st.button("重建知识库", use_container_width=True)

    if load_button:
        st.session_state.knowledge_base = load_knowledge_base(docs_dir, embedding_model)

    if rebuild_button:
        st.session_state.knowledge_base = load_knowledge_base(docs_dir, embedding_model, force_rebuild=True)

    # Latest load/build status message set by load_knowledge_base
    if st.session_state.embedding_status:
        st.info(st.session_state.embedding_status)

    # Knowledge-base statistics (document count, source count, total chars)
    if st.session_state.knowledge_base:
        stats = st.session_state.knowledge_base.get_document_stats()
        st.subheader("知识库统计")
        st.markdown(f"- 文档数: **{stats['document_count']}**")
        st.markdown(f"- 来源数: **{stats['source_count']}**")
        st.markdown(f"- 文本量: **{stats['total_chars']/1000:.1f}K** 字符")

    # About box
    st.markdown("---")
    st.markdown("### 关于系统")
    st.info("""
    公共卫生知识库 - 基于高质量文本嵌入
    
    本系统使用神经网络嵌入模型进行语义检索，
    能够理解查询意图，返回最相关的文档片段。
    """)

# Main page header
st.markdown('<h1 class="main-header">公共卫生知识库</h1>', unsafe_allow_html=True)
st.markdown('<p class="subheader">基于高质量语义嵌入的专业信息检索系统</p>', unsafe_allow_html=True)

# Guard: create the documents directory if missing, warn if it is empty,
# and only build the tabbed interface when documents are available.
if not os.path.exists(docs_dir):
    os.makedirs(docs_dir)
    st.warning(f"已创建文档目录：{docs_dir}，请在此目录中放置您的文档后刷新页面。")
else:
    files = os.listdir(docs_dir)
    if not files:
        st.warning(f"文档目录 {docs_dir} 为空，请添加一些公共卫生相关文档后刷新页面。")
    else:
        # Tabbed main interface: retrieval / analysis / history
        tabs = st.tabs(["📚 知识检索", "📊 知识分析", "📝 搜索历史"])
        
        # Tab 1: knowledge retrieval (semantic search over the document base)
        with tabs[0]:
            # Require the knowledge base to be loaded via the sidebar first
            if not st.session_state.knowledge_base:
                st.info("请先在侧边栏点击「加载知识库」按钮")
            else:
                query = st.text_input(
                    "请输入您的公共卫生相关问题", 
                    placeholder="例如：新冠肺炎的主要症状有哪些？",
                    value=st.session_state.last_query
                )
                
                col1, col2 = st.columns([1, 4])
                with col1:
                    search_button = st.button("搜索", type="primary", use_container_width=True)
                
                # Search flow: run the semantic query, record history, render results
                if query and search_button:
                    st.session_state.last_query = query  # keep the query across reruns
                    
                    with st.spinner("正在搜索相关文档..."):
                        start_time = time.time()
                        results = st.session_state.knowledge_base.search(
                            query, 
                            n_results=num_results,
                            threshold=similarity_threshold
                        )
                        search_time = time.time() - start_time
                        
                        # Only successful searches enter the history
                        if results:
                            save_search_history(query, results)
                    
                    if results:
                        st.success(f"找到 {len(results)} 个相关结果 (用时 {search_time:.2f} 秒)")
                        
                        # Keyword extraction by whitespace split.
                        # NOTE(review): whitespace split yields no terms for
                        # unsegmented Chinese queries, so highlighting may be
                        # a no-op for them — confirm whether a tokenizer is
                        # needed here.
                        query_terms = query.split()
                        
                        # Render each result as a styled card with highlights
                        for i, result in enumerate(results):
                            content = result["content"]
                            
                            # Wrap matched query terms in highlight spans
                            content = highlight_keywords(content, query_terms)
                            
                            with st.container():
                                st.markdown(f"""
                                <div class="result-card">
                                    <h4>结果 {i+1} <span class="similarity-score">(相似度: {result['similarity']:.2f})</span></h4>
                                    <div class="result-content">{content}</div>
                                    <div class="source-info">
                                        📄 来源: {result['source']} {f"第{result['page']}页" if result['page'] else ""}
                                    </div>
                                </div>
                                """, unsafe_allow_html=True)
                    else:
                        st.info("未找到与您问题相关的信息，请尝试：\n\n1. 使用其他关键词\n2. 降低相似度阈值\n3. 检查知识库是否包含相关文档")
        
        # Tab 2: knowledge analysis (document-source distribution chart)
        with tabs[1]:
            if not st.session_state.knowledge_base:
                st.info("请先在侧边栏点击「加载知识库」按钮")
            else:
                st.subheader("知识库文档分布")
                
                # Count documents per source from the loaded document list
                if hasattr(st.session_state.knowledge_base, 'documents'):
                    sources = [doc.metadata.get("source", "未知来源") for doc in st.session_state.knowledge_base.documents]
                    source_counts = Counter(sources)
                    
                    # Tabular view, most common source first
                    source_df = pd.DataFrame({
                        '文档来源': list(source_counts.keys()),
                        '文档数量': list(source_counts.values())
                    }).sort_values('文档数量', ascending=False)
                    
                    # Horizontal bar chart of the 15 most common sources
                    fig, ax = plt.subplots(figsize=(10, 6))
                    sns.barplot(x='文档数量', y='文档来源', data=source_df.head(15), ax=ax)
                    ax.set_title('文档来源分布（前15名）')
                    st.pyplot(fig)
                    # Close the figure so repeated Streamlit reruns don't
                    # accumulate open matplotlib figures in memory.
                    plt.close(fig)
                    
                    # Full distribution on demand
                    with st.expander("查看完整数据"):
                        st.dataframe(source_df)
                else:
                    st.info("暂无文档分布数据")
        
        # Tab 3: search history (view, export, clear)
        with tabs[2]:
            if not st.session_state.search_history:
                st.info("暂无搜索历史记录")
            else:
                st.subheader("搜索历史记录")
                
                # Render the history records as a table
                history_df = pd.DataFrame(st.session_state.search_history)
                st.dataframe(history_df, use_container_width=True)
                
                col1, col2 = st.columns(2)
                with col1:
                    # Render the download button directly: nesting it inside a
                    # regular st.button makes it disappear on the next rerun,
                    # because the outer button resets to False.
                    json_data = json.dumps(st.session_state.search_history, ensure_ascii=False, indent=2)
                    st.download_button(
                        label="导出搜索历史",
                        data=json_data,
                        file_name="search_history.json",
                        mime="application/json"
                    )
                
                with col2:
                    # Clear the history and refresh so the table disappears
                    if st.button("清除搜索历史"):
                        st.session_state.search_history = []
                        st.success("搜索历史已清除")
                        # st.experimental_rerun was removed in newer Streamlit
                        # releases; prefer st.rerun when available.
                        (st.rerun if hasattr(st, "rerun") else st.experimental_rerun)()

# Page footer
st.markdown("---")
st.markdown("© 2025 公共卫生知识库 | 基于语义嵌入的专业文档检索系统")