import os
import uuid
import streamlit as st
from langchain_community.document_loaders import PyPDFLoader, TextLoader, Docx2txtLoader
from langchain_community.embeddings import JinaEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_elasticsearch import ElasticsearchStore
from elasticsearch import Elasticsearch, helpers

# ---- Configuration / module-level setup ----
# Elasticsearch endpoint; override with the ES_URL environment variable.
es_url = os.environ.get("ES_URL", "http://10.252.202.55:9200")
# Shared Elasticsearch client for metadata CRUD and raw queries.
es = Elasticsearch([es_url])

# NOTE(review): single hard-coded user id — multi-user support would need a
# real authentication layer supplying this per session.
user_id = 10086

# SECURITY: an API key should not live in source control. Prefer the
# JINA_API_KEY environment variable; the literal below is kept only as a
# backward-compatible fallback and should be rotated.
embeddings = JinaEmbeddings(
    jina_api_key=os.environ.get(
        "JINA_API_KEY",
        "jina_21eda124ed7e4ee895f7bb7197f3cc5ebT45ebFpQaWsIeieL6QYTY9Sojgu",
    ),
    model_name="jina-embeddings-v2-base-zh",
)

rag_index = "rag_documents"   # index holding embedded text chunks
file_index = "rag_file_info"  # index holding per-file metadata records

# Scratch directory where uploads are written before ingestion.
root_dir = '/home/rag'
os.makedirs(root_dir, exist_ok=True)


def process_file(file_path, filename):
    """Load an uploaded file, split it into chunks, and index it into ES.

    Args:
        file_path: Location of the file on local disk.
        filename: Original file name; its extension selects the loader
            (matched case-insensitively, so ``.PDF`` works too).

    Returns:
        Tuple ``(doc_id, chunk_count)`` where ``doc_id`` is a fresh UUID
        string identifying the ingested document.

    Raises:
        ValueError: If the extension is not .pdf, .txt, or .docx.
    """
    # Normalize the extension so the type check is case-insensitive.
    ext = os.path.splitext(filename)[1].lower()
    if ext == ".pdf":
        loader = PyPDFLoader(file_path)
    elif ext == ".txt":
        loader = TextLoader(file_path)
    elif ext == ".docx":
        loader = Docx2txtLoader(file_path)
    else:
        raise ValueError("Unsupported file type")

    documents = loader.load()
    # Overlapping chunks keep retrieval context coherent across boundaries.
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=200
    )
    splits = text_splitter.split_documents(documents)

    # Tag and persist the chunks under a new document id.
    doc_id = str(uuid.uuid4())
    add_chunks(doc_id, filename, splits)
    return doc_id, len(splits)


def add_chunks(doc_id, filename, splits):
    """Embed the chunks, store them in the vector index, and record the
    file's metadata so it shows up in the document list immediately."""
    chunk_ids = [str(uuid.uuid4()) for _ in splits]
    for position, chunk in enumerate(splits, start=1):
        chunk.metadata.update({
            "doc_id": doc_id,
            "chunk_id": position,
            "user_id": user_id,
            "filename": filename
        })

    # Persist embedded chunks into Elasticsearch.
    store = ElasticsearchStore(
        es_url=es_url,
        index_name=rag_index,
        embedding=embeddings,
        es_user="",
        es_password=""
    )
    store.add_documents(documents=splits, ids=chunk_ids)

    # File-level metadata record keyed by the document id.
    es.index(
        index=file_index,
        id=doc_id,
        body={
            "title": filename,
            "chunk_count": len(splits),
            "user_id": user_id,
            "file_id": doc_id
        },
    )
    # Refresh so the new record is searchable right away.
    es.indices.refresh(index=file_index)


def query_doc():
    """Fetch the current user's documents from the metadata index.

    Returns a list of dicts with keys ``filename``, ``file_id``, ``chunks``.
    """
    response = es.search(
        index=file_index,
        body={"query": {"term": {"user_id": user_id}}},
    )
    documents = []
    for hit in response["hits"]["hits"]:
        source = hit["_source"]
        documents.append({
            "filename": source["title"],
            "file_id": source["file_id"],
            "chunks": source["chunk_count"],
        })
    return documents


def delete_doc(file_id):
    """Delete a document: its metadata record plus every associated chunk."""
    # Drop the file-level metadata record first.
    es.delete(index=file_index, id=file_id)
    # Then remove all vector chunks that reference this document.
    es.delete_by_query(
        index=rag_index,
        body={"query": {"term": {"metadata.doc_id.keyword": file_id}}},
    )
    # Refresh both indices so the deletion is visible to the next search.
    for index_name in (file_index, rag_index):
        es.indices.refresh(index=index_name)


def query_chunks(file_id):
    """Return up to 500 raw ES hits for a document, ordered by chunk_id."""
    search_body = {
        "query": {"term": {"metadata.doc_id.keyword": file_id}},
        "sort": ["metadata.chunk_id"],
        "size": 500,
    }
    response = es.search(index=rag_index, body=search_body)
    return response["hits"]["hits"]


def main():
    """Streamlit entry point: upload sidebar, document list, chunk viewer."""
    st.title("智能检索系统")

    # Key used to force-recreate the uploader widget after a batch finishes;
    # otherwise Streamlit would re-submit the same files on every rerun.
    if 'upload_key' not in st.session_state:
        st.session_state.upload_key = 0

    # ==================== File upload ====================
    with st.sidebar:
        st.header("文档上传")
        uploaded_files = st.file_uploader(
            "选择需要上传的文档（支持PDF/TXT/DOCX）",
            type=["pdf", "txt", "docx"],
            accept_multiple_files=True,
            key=f'uploader_{st.session_state.upload_key}'
        )

        if uploaded_files:
            processing_progress = st.progress(0)
            total_files = len(uploaded_files)

            for index, uploaded_file in enumerate(uploaded_files):
                file_path = os.path.join(root_dir, uploaded_file.name)
                try:
                    with open(file_path, "wb") as f:
                        f.write(uploaded_file.getbuffer())
                    # Ingest into ES; the returned doc id is not needed here.
                    _, chunk_count = process_file(file_path, uploaded_file.name)
                    st.success(f"✅ 成功上传 {uploaded_file.name}（{chunk_count}个文本块）")
                except Exception as e:
                    st.error(f"❌ 处理文件 {uploaded_file.name} 失败：{str(e)}")
                finally:
                    # Remove the temp copy whether or not ingestion succeeded.
                    if os.path.exists(file_path):
                        os.remove(file_path)

                processing_progress.progress((index + 1) / total_files)

            # Bump the key to reset the uploader, then rerun for a fresh view.
            st.session_state.upload_key += 1
            st.rerun()

    # ==================== Document list ====================
    st.header("我的知识库文档")
    user_files = query_doc()  # always fetch the latest list from ES

    if not user_files:
        st.info("🔄 当前还没有上传任何文档，请通过左侧边栏上传")
        return

    # One row per document, with view/delete action buttons.
    for file in user_files:
        cols = st.columns([4, 1.5, 1.5])
        with cols[0]:
            st.markdown(f"**📄 {file['filename']}**  ")
            st.caption(f"文档ID：{file['file_id']} | 包含 {file['chunks']}个文本块")

        # View button: select this document for the chunk viewer below.
        with cols[1]:
            if st.button("查看内容", key=f"view_{file['file_id']}"):
                st.session_state["selected_file"] = file

        # Delete button: remove the document and its chunks from ES.
        with cols[2]:
            if st.button("删除文档", key=f"del_{file['file_id']}"):
                delete_doc(file['file_id'])
                # Clear the selection if the selected document was just deleted.
                if "selected_file" in st.session_state and st.session_state.selected_file["file_id"] == file['file_id']:
                    del st.session_state["selected_file"]
                st.rerun()

    # ==================== Chunk viewer ====================
    if "selected_file" in st.session_state:
        selected_file = st.session_state.selected_file
        st.divider()
        st.subheader(f"文档内容浏览：{selected_file['filename']}")

        chunks = query_chunks(selected_file['file_id'])
        for chunk in chunks:
            meta = chunk["_source"]["metadata"]
            with st.expander(f"📝 文本块 #{meta['chunk_id']}（约{len(chunk['_source']['text'])}字符）"):
                st.markdown(chunk["_source"]["text"])
                st.caption(f"元数据：{meta}")

# Script entry point: only launch the UI when run directly, not on import.
if __name__ == "__main__":
    main()