import os
import tempfile

import streamlit as st
from pydantic import SecretStr

from langchain_community.document_loaders import TextLoader, Docx2txtLoader, PyPDFLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS

from langchain_deepseek import ChatDeepSeek
from langchain_core.tools import tool
from langgraph.prebuilt import create_react_agent
from langchain_core.messages import HumanMessage

# Load environment variables from the .env file
from dotenv import load_dotenv
load_dotenv()

# Model/chunking configuration, read from the environment (populated by load_dotenv above).
# NOTE(review): EMBEDDING_MODEL has no default — embedding_model is None when unset,
# which surfaces later inside init_faiss; confirm whether a default model name is intended.
embedding_model = os.getenv("EMBEDDING_MODEL")
chunk_size = int(os.getenv("CHUNK_SIZE", 512))      # characters per chunk
chunk_overlap = int(os.getenv("CHUNK_OVERLAP", 100))  # overlap between adjacent chunks

# System prompt handed to the ReAct agent (runtime string — kept in Chinese as authored).
system_prompt = """你是一个AI助手，通过用户提供的信息，回答用户的问题。回答要精简，不需要解释。"""

# Initialize FAISS vector store
def init_faiss():
    """Initialize the embedding model and a FAISS vector store.

    Returns:
        tuple: (vector_store, embeddings) on success, or (None, None) on
        failure — errors are reported through the Streamlit UI rather
        than raised to the caller.
    """
    try:
        # Fail fast with an actionable message instead of letting
        # HuggingFaceEmbeddings raise an opaque error when EMBEDDING_MODEL
        # is missing from the environment.
        if not embedding_model:
            raise ValueError("EMBEDDING_MODEL is not set (check your .env)")
        embeddings = HuggingFaceEmbeddings(model_name=embedding_model)
        # FAISS.from_texts requires at least one text to build the index.
        # NOTE(review): this "initial text" seed document remains searchable
        # and can appear in similarity results — consider filtering it out.
        vector_store = FAISS.from_texts(["initial text"], embeddings)
        return vector_store, embeddings
    except Exception as e:
        st.error(f"Failed to initialize FAISS: {str(e)}")
        return None, None

# Document processing functions
def load_document(file_path, file_type):
    """Load a document from *file_path* with the loader matching *file_type*.

    Supported types are 'pdf', 'docx' and 'txt'; any other value raises
    ValueError. Returns the list of Document objects produced by the loader.
    """
    if file_type == 'pdf':
        loader_cls = PyPDFLoader
    elif file_type == 'docx':
        loader_cls = Docx2txtLoader
    elif file_type == 'txt':
        loader_cls = TextLoader
    else:
        raise ValueError(f"Unsupported file type: {file_type}")

    return loader_cls(file_path).load()

def split_documents(documents):
    """Split loaded documents into text chunks.

    Chunk size and overlap are taken from the module-level CHUNK_SIZE /
    CHUNK_OVERLAP configuration.
    """
    splitter = CharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
    )
    return splitter.split_documents(documents)

def process_file(uploaded_file, vector_store, embeddings):
    """Process uploaded file and store embeddings in FAISS.

    Loads the uploaded file via a temporary copy on disk, splits it into
    chunks, renders the chunks in the UI, and adds their text to the
    vector store (mutated in place).

    Args:
        uploaded_file: Streamlit UploadedFile from st.file_uploader.
        vector_store: FAISS store that receives the chunk texts.
        embeddings: embedding model from init_faiss (not used directly
            here; the FAISS store already carries its embedding function).

    NOTE(review): Streamlit reruns the script on every interaction, so a
    caller that invokes this unconditionally will insert the same chunks
    repeatedly — guard against reprocessing the same file.
    """
    try:
        # Get file type
        # Taken from the last '.'-separated token; a name with no extension
        # yields the whole filename, which load_document then rejects.
        file_type = uploaded_file.name.split('.')[-1].lower()
        
        # Create temporary file
        # The loaders need a real path on disk; delete=False so the file
        # survives closing the context manager (removed in finally below).
        with tempfile.NamedTemporaryFile(delete=False, suffix=f".{file_type}") as tmp_file:
            tmp_file.write(uploaded_file.getbuffer())
            tmp_file_path = tmp_file.name
        
        try:
            # Load document
            documents = load_document(tmp_file_path, file_type)
            
            if not documents:
                st.error("无法加载文件内容")
                return
            
            # Split documents and show each chunk for user inspection.
            texts = split_documents(documents)
            st.subheader(f"分割后的文本块 ({file_type.upper()}):")
            for i, text in enumerate(texts):
                st.text(f"文本块 {i+1}:")
                st.write(text.page_content)
            
            # Add to FAISS vector store
            with st.spinner("Generating embeddings and saving to FAISS..."):
                text_contents = [text.page_content for text in texts]
                vector_store.add_texts(text_contents)
                st.success(f"已成功将 {len(text_contents)} 个文本块插入到FAISS中。")
            
        finally:
            # Clean up temporary file
            if os.path.exists(tmp_file_path):
                os.remove(tmp_file_path)
                
    except Exception as e:
        # Top-level UI boundary: surface any failure to the user.
        st.error(f"Error processing file: {str(e)}")

# RAG tool for querying documents
@tool
def query_documents(query: str) -> str:
    """Query the uploaded documents using RAG."""
    # The docstring above doubles as the tool description the LLM sees.
    # Reads the module-level store populated by main(); no write, so no
    # `global` declaration is needed.
    if vector_store_global is None:
        return "No documents have been uploaded yet."

    try:
        hits = vector_store_global.similarity_search(query, k=3)
        if not hits:
            return "No relevant documents found."
        # Join the retrieved chunk texts into one context string.
        return "\n\n".join(hit.page_content for hit in hits)
    except Exception as e:
        return f"Error querying documents: {str(e)}"

# Create the agent with RAG tool
def create_agent():
    """Build a ReAct agent wired to the document-query RAG tool.

    Returns the agent on success, or None (after showing a Streamlit
    error) when the API key is missing or construction fails.
    """
    try:
        api_key = os.getenv("DEEPSEEK_API_KEY")
        if api_key is None:
            # Raised here so the except below reports it uniformly.
            raise ValueError("DEEPSEEK_API_KEY not found in .env")

        model_name = os.getenv("MODEL", "deepseek-chat")
        llm = ChatDeepSeek(
            model=model_name,
            api_key=SecretStr(api_key),
        )
        return create_react_agent(llm, [query_documents], prompt=system_prompt)
    except Exception as e:
        st.error(f"Failed to create agent: {str(e)}")
        return None

# Module-level handle to the active FAISS store, set by main(); the
# query_documents @tool takes only the query string, so it reaches the
# store through this variable.
vector_store_global = None

# Main application
def main():
    """Streamlit entry point: upload a document, then chat with it via RAG.

    Streamlit re-executes this whole script on every user interaction, so
    the expensive objects (FAISS store, embeddings, agent) are cached in
    st.session_state instead of being rebuilt — and the file re-embedded —
    on every chat message, which also prevented chat from working against
    a freshly wiped index.
    """
    global vector_store_global

    st.title("智能文档问答系统")
    st.write("上传一个PDF、DOCX或TXT文件，然后与文档进行问答。")

    # Initialize FAISS once per session.
    if "vector_store" not in st.session_state:
        vector_store, embeddings = init_faiss()
        st.session_state.vector_store = vector_store
        st.session_state.embeddings = embeddings
    vector_store = st.session_state.vector_store
    embeddings = st.session_state.embeddings
    vector_store_global = vector_store
    if not vector_store:
        st.error("Failed to initialize vector store. Please check your setup.")
        return

    # File uploader
    uploaded_file = st.file_uploader("选择文件", type=["pdf", "docx", "txt"])

    if uploaded_file is not None:
        # Embed each uploaded file only once per session; without this
        # guard every rerun would insert duplicate chunks.
        if st.session_state.get("processed_file") != uploaded_file.name:
            process_file(uploaded_file, vector_store, embeddings)
            st.session_state.processed_file = uploaded_file.name

        # Create the agent once and reuse it across reruns.
        if st.session_state.get("agent") is None:
            st.session_state.agent = create_agent()
        agent = st.session_state.agent
        if not agent:
            st.error("Failed to create the agent.")
            return

        st.divider()
        st.subheader("与文档对话")
        st.write("现在你可以向上传的文档提问了！")

        # Initialize chat history
        if "messages" not in st.session_state:
            st.session_state.messages = []

        # Replay chat history on each rerun.
        for message in st.session_state.messages:
            with st.chat_message(message["role"]):
                st.markdown(message["content"])

        # User input
        if prompt := st.chat_input("请输入你的问题..."):
            # Record and echo the user's message.
            st.session_state.messages.append({"role": "user", "content": prompt})
            with st.chat_message("user"):
                st.markdown(prompt)

            # Generate the assistant's reply via the ReAct agent.
            with st.chat_message("assistant"):
                with st.spinner("思考中..."):
                    try:
                        response = agent.invoke({
                            "messages": [HumanMessage(content=prompt)]
                        })

                        # The final message in the agent trace is the answer.
                        answer = response["messages"][-1].content
                        st.markdown(answer)
                        st.session_state.messages.append(
                            {"role": "assistant", "content": answer}
                        )
                    except Exception as e:
                        st.error(f"Error generating response: {str(e)}")


# Script entry point (run with `streamlit run <this file>`).
if __name__ == "__main__":
    main()