from langchain_community.vectorstores import Chroma
from langchain_community.embeddings import QianfanEmbeddingsEndpoint
from docx import Document
from docx.shared import Pt
import re
import os
# SECURITY NOTE(review): real Baidu Qianfan credentials are hard-coded and
# committed to source here; they should be rotated and loaded from the
# environment or a secret store instead of being embedded in code.
os.environ["QIANFAN_AK"]="SGbbQdjFjlKurTfUIjYM0Q4P"
os.environ["QIANFAN_SK"]="lb1tKvDGRhqLZYH4ZYpke6Vco9n9X8Xv"
def is_leaf_title(idx, lines, current_level):
    """Return True when the outline entry at *idx* is a leaf heading.

    A heading is a leaf when the entry immediately after it (if any) is not
    indented more deeply.  Depth is encoded as two spaces per level.
    """
    if idx + 1 >= len(lines):
        return True
    follower = lines[idx + 1]
    follower_level = (len(follower) - len(follower.lstrip(" "))) // 2
    # A deeper immediate follower means this heading has children.
    return follower_level <= current_level
def search_doc_content(query):
    """Return the page content of the single closest chunk for *query*.

    Searches the persisted Chroma store (embedded with Qianfan) with k=1.

    Args:
        query: natural-language query text.
    Returns:
        The best-matching chunk's text, or "" when the store returns no hit.
    """
    embedding = QianfanEmbeddingsEndpoint()
    db = Chroma(persist_directory="D:\\hbyt\\project\\aibid\\db\\d", embedding_function=embedding)
    results = db.similarity_search(query, k=1)
    # BUG FIX: an empty result list previously raised IndexError; fall back
    # to an empty string so outline generation can continue.
    return results[0].page_content if results else ""
def write_outline_to_docx(md_path, output_path):
    """Render a markdown bullet outline as a Word document.

    Every "-" bullet becomes a heading whose level follows its indentation
    (two spaces per level).  Leaf headings (no deeper bullet directly after
    them) additionally get a body paragraph retrieved from the vector store.

    Args:
        md_path: path to the UTF-8 markdown outline ("-" bullets).
        output_path: destination .docx path.
    """
    doc = Document()
    with open(md_path, 'r', encoding='utf-8') as f:
        lines = [line for line in f if line.strip().startswith("-")]
    for idx, line in enumerate(lines):
        # Indentation encodes outline depth: 2 spaces per level.
        level = (len(line) - len(line.lstrip(" "))) // 2
        title = line.strip("- ").strip()
        # Word only supports heading levels 1..6 reliably.
        doc.add_heading(title, level=min(level + 1, 6))
        if is_leaf_title(idx, lines, level):
            # Strip the numeric prefix ("1.2.3 ") before querying the store.
            body = search_doc_content(re.sub(r'^\d+(\.\d+)*\s*', '', title))
            para = doc.add_paragraph(body)
            # BUG FIX: the original set para.style.font.size, which mutates
            # the shared "Normal" style and resizes EVERY paragraph in the
            # document; size only this paragraph's runs instead.
            for run in para.runs:
                run.font.size = Pt(10)
    doc.save(output_path)
# Usage example.
# NOTE(review): this call executes at import time, using the first set of
# definitions above (the later duplicate definitions do not exist yet at
# this point); consider guarding it behind `if __name__ == "__main__":`.
write_outline_to_docx("D:\\hbyt\\AI智能投标\\典型招标要求和投标文件\\目录.md", "目录结构输出.docx")


# 安装必要库
# !pip install langchain langchain-openai langchain-community pypdf unstructured python-docx pdfminer.six

import os
import re
from typing import List, Dict, Any, Tuple
from langchain_community.document_loaders import (
    PyPDFLoader,
    Docx2txtLoader,
    TextLoader,
    UnstructuredFileLoader
)
from langchain_core.documents import Document
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langchain_community.vectorstores import FAISS
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser
from langchain_deepseek import ChatDeepSeek
from langchain_community.embeddings import QianfanEmbeddingsEndpoint
from langchain_community.vectorstores import Chroma
from pathlib import Path
# Configure API keys.
# SECURITY NOTE(review): the DashScope/Qianfan keys below are hard-coded and
# committed to source (the OpenAI key is a placeholder); all of these should
# come from the environment or a secret manager, and the committed keys
# should be rotated.
os.environ["OPENAI_API_KEY"] = "YOUR_OPENAI_API_KEY"  # placeholder — replace with your real API key

os.environ["DASHSCOPE_API_KEY"] = "sk-fd6c79bf32274aae845c9d3372b6331b"
os.environ["QIANFAN_AK"] = "SGbbQdjFjlKurTfUIjYM0Q4P"
os.environ["QIANFAN_SK"] = "lb1tKvDGRhqLZYH4ZYpke6Vco9n9X8Xv"


class StructuredTextSplitter:
    """Chunker that splits text along document structure.

    Splitting happens in three passes: major sections first, then
    subsections inside each section, then a plain size-based split for
    anything still larger than ``max_chunk_size``.
    """

    def __init__(self, max_chunk_size: int = 1000, min_chunk_size: int = 200):
        # Upper bound on a chunk; oversized pieces are re-split by size.
        self.max_chunk_size = max_chunk_size
        # Fragments shorter than this are merged with their neighbours.
        self.min_chunk_size = min_chunk_size
        # Top-level section headings: 第X章/条/节, 一、, 1.2, 1. ...
        self.section_pattern = re.compile(
            r'(\n\s*(?:第[一二三四五六七八九十]+[章条节]|第\d+[章条节]|[一二三四五六七八九十]+、|\d+\.\d+|\d+\.)\s+.*?\n)'
        )
        # Subsection headings: (一), (1), circled digits ①...⑩
        self.subsection_pattern = re.compile(
            r'(\n\s*(?:\([一二三四五六七八九十]+\)|\(\d+\)|[①②③④⑤⑥⑦⑧⑨⑩])\s+.*?\n)'
        )

    def split_documents(self, documents: "List[Document]") -> "List[Document]":
        """Split each document into structured chunks with positional metadata."""
        out = []
        for source in documents:
            sections = self._split_by_pattern(source.page_content, self.section_pattern)
            for sec_idx, section in enumerate(sections):
                subsections = self._split_by_pattern(section, self.subsection_pattern)
                for sub_idx, subsection in enumerate(subsections):
                    for piece_idx, piece in enumerate(self._split_by_size(subsection)):
                        # Record where the piece sits in the hierarchy.
                        meta = dict(source.metadata)
                        meta["section_index"] = sec_idx
                        meta["subsection_index"] = sub_idx
                        meta["chunk_index"] = piece_idx
                        meta["structure_level"] = f"{sec_idx}.{sub_idx}.{piece_idx}"
                        out.append(Document(page_content=piece, metadata=meta))
        return out

    def _split_by_pattern(self, text: str, pattern: re.Pattern) -> "List[str]":
        """Cut *text* at every heading match.

        Headings become their own (stripped) parts; afterwards adjacent
        fragments are merged while shorter than ``min_chunk_size``.
        """
        found = list(pattern.finditer(text))
        if not found:
            return [text]

        pieces = []
        cursor = 0
        for m in found:
            if m.start() > cursor:
                pieces.append(text[cursor:m.start()])
            pieces.append(m.group().strip())
            cursor = m.end()
        if cursor < len(text):
            pieces.append(text[cursor:])

        # Greedily glue fragments together while the running buffer stays
        # below min_chunk_size.
        merged = []
        buffer = ""
        for piece in pieces:
            if len(buffer) + len(piece) < self.min_chunk_size:
                buffer += piece
            else:
                if buffer:
                    merged.append(buffer)
                buffer = piece
        if buffer:
            merged.append(buffer)
        return merged

    def _split_by_size(self, text: str) -> "List[str]":
        """Size fallback: return *text* whole if it fits, else delegate to
        RecursiveCharacterTextSplitter with 10% overlap."""
        if len(text) <= self.max_chunk_size:
            return [text]
        return RecursiveCharacterTextSplitter(
            chunk_size=self.max_chunk_size,
            chunk_overlap=int(self.max_chunk_size * 0.1),
            separators=["\n\n", "\n", "。", "！", "？", "；", " ", ""],
        ).split_text(text)


def load_document(file_path: str) -> "List[Document]":
    """Load a document with a loader picked by file extension.

    .pdf -> PyPDFLoader, .docx -> Docx2txtLoader, .txt -> TextLoader;
    any other extension falls back to UnstructuredFileLoader.
    """
    lowered = file_path.lower()
    by_extension = (
        ('.pdf', PyPDFLoader),
        ('.docx', Docx2txtLoader),
        ('.txt', TextLoader),
    )
    for extension, loader_cls in by_extension:
        if lowered.endswith(extension):
            return loader_cls(file_path).load()
    return UnstructuredFileLoader(file_path).load()
# Directory where the Chroma collection is persisted between runs.
_CHROMA_PERSIST_DIR = "D:\\hbyt\\project\\aibid\\db\\d4"

def create_vector_store(chunks):
    """Embed *chunks* with Qianfan and persist them in a Chroma store.

    Args:
        chunks: langchain Document chunks to index.
    Returns:
        The persisted Chroma vector store.  (The previous annotation claimed
        FAISS, but this function has always returned a Chroma store.)
    """
    embeddings = QianfanEmbeddingsEndpoint()
    return Chroma.from_documents(documents=chunks, embedding=embeddings,
                                 persist_directory=_CHROMA_PERSIST_DIR)

def create_rag_chain(vector_store) -> Any:
    """Build the retrieval-augmented QA chain over *vector_store*.

    Args:
        vector_store: any store exposing ``.as_retriever()`` (a Chroma store
            in this file, despite the old FAISS annotation).
    Returns:
        A runnable chain: question -> MMR-retrieved context -> LLM -> str.
    """
    # SECURITY FIX(review): the DeepSeek key was hard-coded; prefer the
    # environment variable, falling back to the legacy literal so existing
    # deployments keep working (the committed key should be rotated).
    llm = ChatDeepSeek(
        model="deepseek-chat",
        api_key=os.getenv("DEEPSEEK_API_KEY", "sk-bfdc307c3def4f9da9a06775a127e7a1")
    )

    # MMR retrieval keeps the k=6 retrieved chunks diverse rather than
    # near-duplicates of each other.
    retriever = vector_store.as_retriever(
        search_type="mmr",
        search_kwargs={"k": 6, "lambda_mult": 0.5}
    )

    # Prompt template (Chinese): instructs the model to answer with the
    # single best-matching passage only, following the worked example.
    template = """
    你是一个专业的问答助手，请基于以下上下文回答问题。
    上下文来自文档的不同章节,你需要将根据内容和问题，
    根据问题和检索到的内容进行相似度匹配，如果是和query最相似的第一段，那么返回第一段：

    案列像下面的输出，如果问题是“客户表扬奖励办法”，那么你需要给我回答的是“如果技术人员在具体项目获得客户的书面表扬，慧博云通将在内部进行通报表扬，并给予对应技术人员以一定奖励，以表彰技术人员在项目实施过程中的出色表现和辛勤付出。
”，其他的不要回答了，这是一个案列，其他的你也需要严格按照这个来，：

    相关上下文：
    {context}

    问题：{question}

    请根据上下文提供准确、详细的回答。如果上下文不包含相关信息，请回答"根据提供的文档，我无法回答这个问题"。
    """
    prompt = ChatPromptTemplate.from_template(template)

    # LCEL pipeline: fan the question into retriever + passthrough, fill the
    # prompt, call the LLM, and reduce the message to a plain string.
    return (
            {"context": retriever, "question": RunnablePassthrough()}
            | prompt
            | llm
            | StrOutputParser()
    )


def print_chunk_structure(chunks: "List[Document]", max_chunks: int = 5):
    """Print a one-line preview of each of the first *max_chunks* chunks.

    BUG FIX: the original computed ``content_preview`` but never printed it,
    making the function a silent no-op.

    Args:
        chunks: objects exposing ``.page_content`` (langchain Documents).
        max_chunks: how many leading chunks to preview.
    """
    for i, chunk in enumerate(chunks[:max_chunks]):
        text = chunk.page_content
        # Truncate long chunks so each preview stays on one line.
        preview = text[:70] + "..." if len(text) > 70 else text
        print(f"[chunk {i}] {preview}")


def is_leaf_title(idx, lines, current_level):
    """True iff the outline entry at *idx* is a leaf heading.

    Only the entry directly after *idx* matters: if it is indented more
    deeply (two spaces per level), the current heading has children.
    """
    for nxt in lines[idx + 1:idx + 2]:
        depth = (len(nxt) - len(nxt.lstrip(" "))) // 2
        if depth > current_level:
            return False
    return True
# Lazily-built RAG chain.  PERF FIX: the original rebuilt the whole pipeline
# (load -> structured split -> embed -> Chroma -> chain) on EVERY call, and
# this function is called once per outline title.
_RAG_CHAIN = None

def search_doc_content(query):
    """Answer *query* with the RAG chain over the incentive-policy docx.

    The expensive pipeline is built once on first use and cached at module
    level; subsequent calls only invoke the chain.

    Args:
        query: natural-language question (typically an outline title).
    Returns:
        The LLM's string answer.
    """
    global _RAG_CHAIN
    if _RAG_CHAIN is None:
        # Source document (PDF/DOCX/TXT all supported by load_document).
        file_path = "D:\\hbyt\\AI智能投标\\激励及绩效管理_v1.0_2201.docx"
        documents = load_document(file_path)
        splitter = StructuredTextSplitter(max_chunk_size=500, min_chunk_size=200)
        chunks = splitter.split_documents(documents)
        print_chunk_structure(chunks)
        vector_db = create_vector_store(chunks)
        _RAG_CHAIN = create_rag_chain(vector_db)
    return _RAG_CHAIN.invoke(query)
def write_outline_to_docx(md_path, output_path):
    """Render a markdown bullet outline as a Word document.

    Every "-" bullet becomes a heading whose level follows its indentation
    (two spaces per level).  Leaf headings additionally get a body paragraph
    answered by the RAG chain.  Debug prints ("ok"/"ok1") were removed; the
    retrieved content is still printed for inspection.

    Args:
        md_path: path to the UTF-8 markdown outline ("-" bullets).
        output_path: destination .docx path.
    """
    # BUG FIX: by this point in the file, `Document` has been shadowed by
    # langchain's Document (imported above), so `Document()` would raise a
    # TypeError; re-import python-docx's Document under an explicit name.
    from docx import Document as DocxDocument
    doc = DocxDocument()
    with open(md_path, 'r', encoding='utf-8') as f:
        lines = [line for line in f if line.strip().startswith("-")]
    for idx, line in enumerate(lines):
        # Indentation encodes outline depth: 2 spaces per level.
        level = (len(line) - len(line.lstrip(" "))) // 2
        title = line.strip("- ").strip()
        # PERF FIX: query the RAG chain once per title — the original ran
        # the expensive pipeline twice for every leaf (print + paragraph).
        content = search_doc_content(re.sub(r'^\d+(\.\d+)*\s*', '', title))
        print(content)
        doc.add_heading(title, level=min(level + 1, 6))
        if is_leaf_title(idx, lines, level):
            para = doc.add_paragraph(content)
            # BUG FIX: setting para.style.font.size mutated the shared style
            # for the whole document; size only this paragraph's runs.
            for run in para.runs:
                run.font.size = Pt(10)
    doc.save(output_path)

def main():
    """Entry point: build the outline .docx from the master 目录.md."""
    print("ok")
    write_outline_to_docx(
        "D:\\hbyt\\AI智能投标\\典型招标要求和投标文件\\目录.md",
        "目录结构输出.docx",
    )
if __name__ == "__main__":
    # NOTE(review): main() is never invoked here — running the script only
    # prints "ok".  The commented-out `main("ok")` would also fail because
    # main() takes no arguments; presumably `main()` was intended.
    # write_outline_to_docx("D:\\hbyt\\AI智能投标\\典型招标要求和投标文件\\目录.md", "目录结构输出.docx")
    print("ok")
    # main("ok")
