# 安装必要库
# !pip install langchain langchain-openai langchain-community pypdf unstructured python-docx pdfminer.six

import os
import re
from typing import List, Dict, Any, Tuple
from langchain_community.document_loaders import (
    PyPDFLoader,
    Docx2txtLoader,
    TextLoader,
    UnstructuredFileLoader
)
from langchain_core.documents import Document
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langchain_community.vectorstores import FAISS
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser
from langchain_deepseek import ChatDeepSeek
from langchain_community.embeddings import QianfanEmbeddingsEndpoint
from langchain_community.vectorstores import Chroma

# API-key setup for the model providers used below (DashScope, Qianfan).
# os.environ["OPENAI_API_KEY"] = "YOUR_OPENAI_API_KEY"  # replace with your API key
# SECURITY NOTE(review): credentials are hard-coded below. Move them to
# environment variables / a secrets manager and rotate these keys — anything
# committed to source control must be considered leaked.
os.environ["DASHSCOPE_API_KEY"]="sk-fd6c79bf32274aae845c9d3372b6331b"
os.environ["QIANFAN_AK"]="SGbbQdjFjlKurTfUIjYM0Q4P"
os.environ["QIANFAN_SK"]="lb1tKvDGRhqLZYH4ZYpke6Vco9n9X8Xv"

class StructuredTextSplitter:
    """Split documents into chunks following the document's own structure.

    Splitting happens in three passes:
      1. top-level sections (第X章/条/节, 一、, 1.2, 1. style headings)
      2. subsections ((一), (1), circled digits ①-⑩)
      3. size-based fallback for pieces still longer than ``max_chunk_size``

    Each chunk carries structural metadata (section / subsection / chunk
    indices) so downstream retrieval can reason about document position.
    """

    def __init__(self, max_chunk_size: int = 384, min_chunk_size: int = 384):
        # max_chunk_size: upper character bound for a single chunk. The
        # original default of 1 was a leftover bug — the size splitter
        # hard-coded 384, which this class now honors consistently.
        self.max_chunk_size = max_chunk_size
        # min_chunk_size: fragments shorter than this get merged together.
        self.min_chunk_size = min_chunk_size
        # Top-level section headings: 第一章 / 第1条 / 一、 / 1.2 / 1.
        # followed by whitespace and a title line.
        self.section_pattern = re.compile(
            r'(\n\s*(?:第[一二三四五六七八九十]+[章条节]|第\d+[章条节]|[一二三四五六七八九十]+、|\d+\.\d+|\d+\.)\s+.*?\n)'
        )
        # Subsection markers: (一) / (1) / circled digits followed by a title.
        self.subsection_pattern = re.compile(
            r'(\n\s*(?:\([一二三四五六七八九十]+\)|\(\d+\)|[①②③④⑤⑥⑦⑧⑨⑩])\s+.*?\n)'
        )

    def split_documents(self, documents: List[Document]) -> List[Document]:
        """Split documents into structured chunks with positional metadata.

        Args:
            documents: loaded documents (one per page for PDF/DOCX loaders).

        Returns:
            New Document objects whose metadata gains ``section_index``,
            ``subsection_index``, ``chunk_index`` and ``structure_level``.
        """
        chunks = []
        for doc in documents:
            text = doc.page_content
            metadata = doc.metadata

            # Pass 1: split on major section headings.
            sections = self._split_by_pattern(text, self.section_pattern)

            for section_idx, section in enumerate(sections):
                # Pass 2: split each section on subsection markers.
                subsections = self._split_by_pattern(section, self.subsection_pattern)

                for sub_idx, sub in enumerate(subsections):
                    # Pass 3: enforce the size limit on what remains.
                    sub_chunks = self._split_by_size(sub)

                    for chunk_idx, chunk in enumerate(sub_chunks):
                        # Attach structural metadata to every chunk.
                        chunk_metadata = metadata.copy()
                        chunk_metadata.update({
                            "section_index": section_idx,
                            "subsection_index": sub_idx,
                            "chunk_index": chunk_idx,
                            "structure_level": f"{section_idx}.{sub_idx}.{chunk_idx}"
                        })
                        chunks.append(Document(page_content=chunk, metadata=chunk_metadata))
        return chunks

    def _split_by_pattern(self, text: str, pattern: re.Pattern) -> List[str]:
        """Split ``text`` at every occurrence of ``pattern``.

        Matched headings become their own (stripped) parts; fragments
        shorter than ``min_chunk_size`` are merged with their neighbors.
        Returns ``[text]`` unchanged when the pattern never matches.
        """
        parts = []
        last_end = 0
        matches = list(pattern.finditer(text))

        if not matches:
            return [text]

        for match in matches:
            start = match.start()
            if start > last_end:
                parts.append(text[last_end:start])
            parts.append(match.group().strip())
            last_end = match.end()

        if last_end < len(text):
            parts.append(text[last_end:])

        # Merge fragments that fall below the minimum size.
        merged_parts = []
        current_part = ""
        for part in parts:
            if len(current_part) + len(part) < self.min_chunk_size:
                current_part += part
            else:
                if current_part:
                    merged_parts.append(current_part)
                current_part = part

        if current_part:
            merged_parts.append(current_part)

        return merged_parts

    def _split_by_size(self, text: str) -> List[str]:
        """Split ``text`` by character count once structure is exhausted.

        Fix: previously hard-coded ``chunk_size=384`` (with the library's
        default overlap), silently ignoring ``self.max_chunk_size``.
        """
        if len(text) <= self.max_chunk_size:
            return [text]

        # Delegate the final cut to LangChain's recursive splitter,
        # preferring paragraph, line, then CJK sentence boundaries.
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=self.max_chunk_size,
            chunk_overlap=int(self.max_chunk_size * 0.1),
            separators=["\n\n", "\n", "。", "！", "？", "；", " ", ""]
        )
        return splitter.split_text(text)


def load_document(file_path: str) -> List[Document]:
    """Load a document from disk, choosing a loader by file extension.

    PDF, DOCX and TXT get dedicated loaders; any other extension falls
    back to the generic UnstructuredFileLoader.
    """
    suffix = os.path.splitext(file_path)[1].lower()
    if suffix == '.pdf':
        return PyPDFLoader(file_path).load()
    if suffix == '.docx':
        return Docx2txtLoader(file_path).load()
    if suffix == '.txt':
        return TextLoader(file_path).load()
    return UnstructuredFileLoader(file_path).load()


def create_vector_store(chunks: List[Document],
                        persist_directory: str = "D:\\hbyt\\project\\aibid\\db\\ddd") -> Chroma:
    """Embed document chunks and persist them in a Chroma vector store.

    Args:
        chunks: structured Document chunks to embed.
        persist_directory: on-disk location of the Chroma collection
            (default keeps the original hard-coded path for compatibility).

    Returns:
        The populated Chroma store.
    """
    # Fix: the original printed the entire chunk list under a "length" label.
    print("创建向量存储长度", len(chunks))
    # Qianfan embedding endpoint; credentials come from the QIANFAN_AK /
    # QIANFAN_SK environment variables set at module import.
    embeddings = QianfanEmbeddingsEndpoint()
    return Chroma.from_documents(
        documents=chunks,
        embedding=embeddings,
        persist_directory=persist_directory,
    )


def create_rag_chain(vector_store: Chroma) -> Any:
    """Build a retrieval-augmented QA chain over the given vector store.

    Args:
        vector_store: vector store holding the embedded chunks. (Annotation
            fixed: main() passes a Chroma store, not FAISS.)

    Returns:
        A runnable chain mapping a question string to an answer string.
    """
    # SECURITY NOTE(review): prefer supplying the key via the DEEPSEEK_API_KEY
    # environment variable; the literal fallback should be rotated and removed.
    llm = ChatDeepSeek(
        model="deepseek-chat",
        api_key=os.environ.get("DEEPSEEK_API_KEY", "sk-bfdc307c3def4f9da9a06775a127e7a1")
    )

    # MMR retrieval balances relevance with diversity across document sections.
    retriever = vector_store.as_retriever(
        search_type="mmr",
        search_kwargs={"k": 6, "lambda_mult": 0.5}
    )

    # Prompt template (kept in Chinese — it is runtime model input).
    template = """
    你是一个专业的问答助手，请基于以下上下文回答问题。上下文来自文档的不同章节：

    相关上下文：
    {context}

    问题：{question}

    请根据上下文提供准确、详细的回答。如果上下文不包含相关信息，请回答"根据提供的文档，我无法回答这个问题"。
    """
    prompt = ChatPromptTemplate.from_template(template)

    # LCEL pipeline: retrieve context, fill the prompt, call the LLM,
    # then parse the model output down to a plain string.
    return (
            {"context": retriever, "question": RunnablePassthrough()}
            | prompt
            | llm
            | StrOutputParser()
    )


def print_chunk_structure(chunks: List[Document], max_chunks: int = 5):
    """Print a short structural summary of the first ``max_chunks`` chunks.

    Args:
        chunks: chunk list produced by StructuredTextSplitter.split_documents.
        max_chunks: how many leading chunks to describe.
    """
    # Fix: the header previously hard-coded "5" regardless of max_chunks.
    shown = min(max_chunks, len(chunks))
    print(f"\n文档分割为 {len(chunks)} 个结构化块")
    print(f"前{shown}个块的结构信息：")

    for i, chunk in enumerate(chunks[:max_chunks]):
        # Truncate long chunk content to a 70-character preview.
        content_preview = chunk.page_content[:70] + "..." if len(chunk.page_content) > 70 else chunk.page_content
        print(f"块 {i + 1}:")
        print(f"  结构层级: {chunk.metadata.get('structure_level', 'N/A')}")
        print(f"  页码: {chunk.metadata.get('page', 'N/A')}")
        print(f"  内容预览: {content_preview}")
        print("-" * 50)


def main():
    """End-to-end demo: load a document, chunk it, embed it, run a sample query."""
    # Document path (replace with your own; PDF/DOCX/TXT are supported).
    file_path = "D:\\hbyt\\AI智能投标\\激励及绩效管理_v1.0_2201.docx"

    print(f"加载文档: {file_path}")
    documents = load_document(file_path)
    print(f"文档加载完成，共 {len(documents)} 页")

    # Structure-aware chunking (sections -> subsections -> size limit).
    splitter = StructuredTextSplitter(max_chunk_size=100, min_chunk_size=200)
    chunks = splitter.split_documents(documents)

    # Show the structure of the first few chunks for inspection.
    print_chunk_structure(chunks)

    # Embed the chunks into a persistent vector store.
    print("\n创建向量数据库...")
    vector_db = create_vector_store(chunks)
    print("向量数据库创建完成")

    # Sample retrieval: fetch the single most relevant chunk.
    retriever = vector_db.as_retriever(search_kwargs={"k": 1})
    top_chunks = retriever.get_relevant_documents("绩效考核结果管理")
    # Fix: guard against an empty result set before indexing [0].
    if top_chunks:
        print(top_chunks[0].page_content)

    # TODO(review): wire up the interactive QA loop via
    # create_rag_chain(vector_db) once the retrieval path is validated.


# Run the demo pipeline only when this file is executed as a script.
if __name__ == "__main__":
    main()