import streamlit as st
import tempfile
import os
from typing import List
# Optional RAG dependencies: import failures are caught so the app can show a
# friendly install hint (via the DEPENDENCIES_AVAILABLE gate below) instead of
# crashing at import time.
try:
    import bs4
    from langchain_community.document_loaders import WebBaseLoader, PyPDFLoader, TextLoader
    from langchain_text_splitters import RecursiveCharacterTextSplitter
    from langchain_ollama import OllamaEmbeddings
    from langchain_postgres import PGVector
    from langchain_core.documents import Document
    from langchain import hub
    from langchain.chat_models import init_chat_model
    from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
    from langgraph.graph import START, StateGraph
    from typing_extensions import TypedDict
    DEPENDENCIES_AVAILABLE = True
except ImportError as e:
    DEPENDENCIES_AVAILABLE = False
    # NOTE: MISSING_DEPS only exists on the failure path; it is read only when
    # DEPENDENCIES_AVAILABLE is False.
    MISSING_DEPS = str(e)

# Streamlit page configuration (must be the first st.* call in the script)
st.set_page_config(
    page_title="知识库管理及检索", 
    page_icon="📚", 
    layout="wide"
)

st.title("📚 知识库管理及检索系统")

# Dependency gate: stop rendering and show install instructions when the
# optional imports above failed.
if not DEPENDENCIES_AVAILABLE:
    st.error(f"缺少必要的依赖包: {MISSING_DEPS}")
    st.markdown("""
    请安装以下依赖包:
    ```bash
    pip install langchain langchain-community langchain-ollama langchain-postgres \
                langgraph langsmith beautifulsoup4 psycopg2-binary pgvector
    ```
    """)
    st.stop()

# Sidebar: runtime configuration (API keys, database, embedding model)
with st.sidebar:
    st.header("🔧 系统配置")
    
    # API credentials; LangSmith is optional (enables tracing when provided)
    st.subheader("API 配置")
    deepseek_api_key = st.text_input("DeepSeek API Key", type="password", key="deepseek_key")
    langsmith_api_key = st.text_input("LangSmith API Key (可选)", type="password", key="langsmith_key")
    
    # PostgreSQL/pgvector connection string and collection (table) name
    st.subheader("数据库配置")
    db_connection = st.text_input(
        "PostgreSQL 连接字符串", 
        value="postgresql+psycopg://docker:docker@localhost:5432/postgres",
        key="db_connection"
    )
    collection_name = st.text_input("集合名称", value="my_docs", key="collection_name")
    
    # Embedding model name, served by a local Ollama instance
    st.subheader("模型配置")
    embedding_model = st.selectbox(
        "嵌入模型",
        ["bge-m3:latest", "nomic-embed-text", "all-minilm"],
        key="embedding_model"
    )

# Pipeline state shared by the LangGraph nodes (retrieve -> generate)
class State(TypedDict):
    question: str  # the user's query
    context: List[Document]  # document chunks returned by the retriever
    answer: str  # LLM-generated answer

# Component initialization (cached across Streamlit reruns)
@st.cache_resource
def initialize_components(deepseek_api_key, langsmith_api_key, db_connection, collection_name, embedding_model):
    """Initialize and cache the LLM, embedding model, vector store and RAG prompt.

    All arguments are plain strings and are part of the ``st.cache_resource``
    cache key, so changing any setting in the sidebar rebuilds the components.
    (The previous leading-underscore parameter names told Streamlit to skip
    hashing them, which meant stale cached components were returned after a
    configuration change.)  Both call sites pass arguments positionally, so
    the rename is call-compatible.

    Args:
        deepseek_api_key: DeepSeek API key; exported to the environment.
        langsmith_api_key: optional LangSmith key; enables tracing when set.
        db_connection: SQLAlchemy-style PostgreSQL connection string.
        collection_name: pgvector collection to store documents in.
        embedding_model: Ollama embedding model name.

    Returns:
        Tuple ``(llm, embeddings, vector_store, prompt)`` on success, or
        ``(None, None, None, None)`` after showing a Streamlit error.
    """
    try:
        # The SDKs read their credentials from environment variables.
        if langsmith_api_key:
            os.environ["LANGSMITH_TRACING"] = "true"
            os.environ["LANGSMITH_API_KEY"] = langsmith_api_key
        
        if deepseek_api_key:
            os.environ["DEEPSEEK_API_KEY"] = deepseek_api_key
        
        # Chat LLM via LangChain's provider registry.
        llm = init_chat_model("deepseek-chat", model_provider="deepseek")
        
        # Embeddings served by a local Ollama instance.
        embeddings = OllamaEmbeddings(model=embedding_model)
        
        # PostgreSQL + pgvector vector store.
        vector_store = PGVector(
            embeddings=embeddings,
            collection_name=collection_name,
            connection=db_connection,
        )
        
        # Community RAG prompt template from the LangChain hub (network call).
        prompt = hub.pull("rlm/rag-prompt")
        
        return llm, embeddings, vector_store, prompt
        
    except Exception as e:
        # Surface the failure in the UI; callers check for the all-None tuple.
        st.error(f"初始化组件失败: {str(e)}")
        return None, None, None, None

# Document loading helpers
def load_documents(uploaded_files, web_urls):
    """Load documents from uploaded files and/or newline-separated web URLs.

    Args:
        uploaded_files: list of Streamlit ``UploadedFile`` objects (may be
            ``None`` or empty).
        web_urls: multi-line string with one URL per line (may be ``None``
            or empty); blank lines are ignored.

    Returns:
        list[Document]: every loaded document; empty list when no input.
    """
    docs = []
    
    # Uploaded files: write each to a temp dir so path-based loaders can read it.
    if uploaded_files:
        # Context manager guarantees cleanup even if a loader raises
        # (the original left the TemporaryDirectory to garbage collection).
        with tempfile.TemporaryDirectory() as temp_dir:
            for uploaded_file in uploaded_files:
                temp_filepath = os.path.join(temp_dir, uploaded_file.name)
                with open(temp_filepath, "wb") as f:
                    f.write(uploaded_file.getvalue())
                
                # Pick a loader by extension; case-insensitive so ".PDF" is
                # also treated as a PDF (the original only matched ".pdf").
                if uploaded_file.name.lower().endswith('.pdf'):
                    loader = PyPDFLoader(temp_filepath)
                else:
                    loader = TextLoader(temp_filepath)
                
                # load() is eager, so deleting the temp dir afterwards is safe.
                docs.extend(loader.load())
    
    # Web pages: parse only typical blog-post sections to cut boilerplate.
    if web_urls:
        urls = [url.strip() for url in web_urls.split('\n') if url.strip()]
        if urls:
            bs4_strainer = bs4.SoupStrainer(class_=("post-title", "post-header", "post-content"))
            loader = WebBaseLoader(
                web_paths=urls,
                bs_kwargs={"parse_only": bs4_strainer},
            )
            docs.extend(loader.load())
    
    return docs

def split_documents(docs, chunk_size=1000, chunk_overlap=200):
    """Split documents into overlapping chunks for embedding.

    Args:
        docs: documents to split.
        chunk_size: maximum characters per chunk.
        chunk_overlap: characters shared between adjacent chunks.

    Returns:
        The chunked documents, each annotated with its start index.
    """
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
        add_start_index=True,  # record each chunk's offset within its source
    )
    chunks = splitter.split_documents(docs)
    return chunks

# Retrieval and generation steps used as LangGraph nodes
def retrieve(state: State, vector_store):
    """Fetch documents relevant to the current question from the vector store."""
    question = state["question"]
    matches = vector_store.similarity_search(question)
    return {"context": matches}

def generate(state: State, llm, prompt):
    """Answer the question with the LLM, grounded in the retrieved context."""
    context_text = "\n\n".join(d.page_content for d in state["context"])
    prompt_messages = prompt.invoke({"question": state["question"], "context": context_text})
    reply = llm.invoke(prompt_messages)
    return {"answer": reply.content}

# Main layout: document management (left column) and retrieval chat (right)
col1, col2 = st.columns([1, 1])

with col1:
    st.header("📄 文档管理")
    
    # File upload widget (PDF / TXT / MD)
    st.subheader("上传文档")
    uploaded_files = st.file_uploader(
        "选择文件",
        type=['pdf', 'txt', 'md'],
        accept_multiple_files=True,
        key="uploaded_files"
    )
    
    # Web page URLs, one per line
    st.subheader("添加网页")
    web_urls = st.text_area(
        "网页URL（每行一个）",
        placeholder="https://example.com/article1\nhttps://example.com/article2",
        key="web_urls"
    )
    
    # Chunking parameters forwarded to split_documents
    st.subheader("分割参数")
    chunk_size = st.slider("块大小", 500, 2000, 1000, key="chunk_size")
    chunk_overlap = st.slider("重叠大小", 0, 500, 200, key="chunk_overlap")
    
    # Ingestion pipeline: load -> split -> embed & store
    if st.button("📥 处理并存储文档", type="primary"):
        if not deepseek_api_key:
            st.error("请先配置 DeepSeek API Key")
        elif not uploaded_files and not web_urls:
            st.error("请上传文件或输入网页URL")
        else:
            with st.spinner("正在处理文档..."):
                try:
                    # Build (or fetch cached) LLM / embeddings / vector store
                    llm, embeddings, vector_store, prompt = initialize_components(
                        deepseek_api_key, langsmith_api_key, db_connection, 
                        collection_name, embedding_model
                    )
                    
                    if vector_store is None:
                        st.error("向量存储初始化失败")
                    else:
                        # Load documents from uploads and URLs
                        docs = load_documents(uploaded_files, web_urls)
                        st.info(f"加载了 {len(docs)} 个文档")
                        
                        # Split into overlapping chunks
                        splits = split_documents(docs, chunk_size, chunk_overlap)
                        st.info(f"分割成 {len(splits)} 个块")
                        
                        # Embed and persist the chunks in pgvector
                        document_ids = vector_store.add_documents(documents=splits)
                        st.success(f"✅ 成功存储 {len(document_ids)} 个文档块到知识库")
                        
                        # Preview the first loaded document's opening text
                        if docs:
                            st.subheader("📖 文档预览")
                            with st.expander("查看第一个文档的前500字符"):
                                st.text(docs[0].page_content[:500])
                                
                except Exception as e:
                    st.error(f"处理文档时出错: {str(e)}")

with col2:
    st.header("🔍 知识检索")
    
    if not deepseek_api_key:
        st.warning("请先在侧边栏配置 DeepSeek API Key")
    else:
        # Build the retrieval pipeline; components are cached across reruns
        try:
            llm, embeddings, vector_store, prompt = initialize_components(
                deepseek_api_key, langsmith_api_key, db_connection, 
                collection_name, embedding_model
            )
            
            if vector_store and llm and prompt:
                # Wrappers close over the shared components. LangGraph derives
                # node names from __name__, hence the "retrieve_wrapper" string
                # in add_edge below.
                def retrieve_wrapper(state):
                    return retrieve(state, vector_store)
                
                def generate_wrapper(state):
                    return generate(state, llm, prompt)
                
                graph_builder = StateGraph(State).add_sequence([retrieve_wrapper, generate_wrapper])
                graph_builder.add_edge(START, "retrieve_wrapper")
                graph = graph_builder.compile()
                
                # Chat interface
                st.subheader("💬 智能问答")
                
                # Chat history persists in session state across reruns
                if "messages" not in st.session_state:
                    st.session_state.messages = []
                
                # Replay the conversation so far
                for message in st.session_state.messages:
                    with st.chat_message(message["role"]):
                        st.markdown(message["content"])
                
                # Handle a newly submitted question
                if question := st.chat_input("请输入您的问题..."):
                    # Echo the user's message and record it
                    with st.chat_message("user"):
                        st.markdown(question)
                    st.session_state.messages.append({"role": "user", "content": question})
                    
                    # Run retrieve -> generate through the compiled graph
                    with st.chat_message("assistant"):
                        with st.spinner("正在思考..."):
                            try:
                                result = graph.invoke({"question": question})
                                
                                # Show the retrieved context snippets
                                with st.expander("📚 相关文档片段"):
                                    for i, doc in enumerate(result['context']):
                                        st.markdown(f"**片段 {i+1}:**")
                                        st.text(doc.page_content[:300] + "...")
                                        st.markdown("---")
                                
                                # Show the answer and record it in history
                                answer = result['answer']
                                st.markdown(answer)
                                st.session_state.messages.append({"role": "assistant", "content": answer})
                                
                            except Exception as e:
                                # Record the error as the assistant turn so the
                                # transcript stays consistent on rerun
                                error_msg = f"生成回答时出错: {str(e)}"
                                st.error(error_msg)
                                st.session_state.messages.append({"role": "assistant", "content": error_msg})
                
                # Reset the conversation
                if st.button("🗑️ 清除聊天历史"):
                    st.session_state.messages = []
                    st.rerun()
                    
        except Exception as e:
            st.error(f"初始化检索系统失败: {str(e)}")

# Footer: usage instructions and tech-stack summary (static Markdown)
st.markdown("---")
st.markdown("""
### 📝 使用说明

1. **配置系统**: 在左侧边栏配置API密钥和数据库连接
2. **上传文档**: 支持PDF、TXT、MD格式文件，或输入网页URL
3. **处理文档**: 点击"处理并存储文档"将文档存入知识库
4. **智能问答**: 在右侧输入问题，系统会基于知识库内容回答

### 🔧 技术栈
- **LLM**: DeepSeek Chat
- **嵌入模型**: Ollama (BGE-M3)
- **向量数据库**: PostgreSQL + pgvector
- **检索框架**: LangChain + LangGraph
""")