# 安装必要库
# !pip install langchain langchain-community python-docx qianfan chromadb

import os
import re
import shutil
import time
from typing import List, Tuple

import PyPDF2
from docx import Document as DocxDocument
from langchain_core.documents import Document as LangDocument
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.embeddings import QianfanEmbeddingsEndpoint
from langchain_community.vectorstores import Chroma
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser
from langchain_deepseek import ChatDeepSeek
from typing import List, Dict, Any, Tuple
from pathlib import Path
from langchain_community.embeddings import DashScopeEmbeddings


# 设置API密钥
os.environ["DASHSCOPE_API_KEY"] = "sk-5c6689dccd074a739c78ef7d1d780148"
# os.environ["QIANFAN_AK"] = "SGbbQdjFjlKurTfUIjYM0Q4P"
# os.environ["QIANFAN_SK"] = "lb1tKvDGRhqLZYH4ZYpke6Vco9n9X8Xv"

class MinimalSectionSplitter:
    """Chunker that extracts the body text of the smallest (deepest) sections.

    Recognized heading formats:
      1. Chinese numeral + dotted Arabic numbers, e.g. ``一.1.1 标题``
      2. Pure dotted Arabic numbers, e.g. ``1.1.1 标题``
      3. ``EHS保障体系-`` prefixed headings, e.g. ``EHS保障体系-职业健康管理体系``
    """

    # Prefix marking format-3 headings; used by depth and number extraction.
    EHS_PREFIX = "EHS保障体系-"

    def __init__(self, max_chunk_size: int = 1000, min_chunk_size: int = 200):
        """
        :param max_chunk_size: maximum characters per chunk; longer section
            bodies are re-split with a 10% overlap.
        :param min_chunk_size: retained for interface compatibility (unused).
        """
        self.max_chunk_size = max_chunk_size
        self.min_chunk_size = min_chunk_size
        # Format 1: Chinese numeral + "." + Arabic numbers (e.g. 一.1.1)
        self.pattern1 = re.compile(
            r'^(\s*[一二三四五六七八九十]+\.\d+(?:\.\d+)*\s+[^\n]+)$',
            re.MULTILINE
        )
        # Format 2: pure dotted Arabic numbers (e.g. 1.1.1)
        self.pattern2 = re.compile(
            r'^(\s*\d+(?:\.\d+)*\s+[^\n]+)$',
            re.MULTILINE
        )
        # Format 3: "EHS保障体系-<title>"
        self.pattern3 = re.compile(
            r'^(\s*EHS保障体系-[^\n]+)$',
            re.MULTILINE
        )
        # Fallback splitter for section bodies longer than max_chunk_size.
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=self.max_chunk_size,
            chunk_overlap=int(self.max_chunk_size * 0.1),
            separators=["\n\n", "\n", "。", "！", "？", ";", " ", ""]
        )

    def split_documents(self, documents: "List[LangDocument]") -> "List[LangDocument]":
        """Split documents into chunks holding the body text of minimal sections.

        :param documents: page-level documents; their text is joined before
            heading detection, and the first document's metadata is inherited.
        :return: at least one chunk; falls back to plain recursive splitting
            (section "无章节内容") when no headings are found.
        """
        chunks = []
        # Join all pages so headings can be matched across page boundaries.
        full_text = "\n".join(doc.page_content for doc in documents)
        metadata = documents[0].metadata.copy() if documents else {}
        min_sections = self._find_minimal_sections(full_text)
        # No recognizable headings: chunk the whole text as "no sections".
        if not min_sections:
            return self._split_content(
                full_text, {"section": "无章节内容", "section_number": "0"}, metadata
            )
        for i, (section_title, start_pos, end_pos) in enumerate(min_sections):
            # Body runs from the end of this heading to the start of the next
            # minimal heading (or the end of the text for the last section).
            content_start = end_pos
            content_end = min_sections[i + 1][1] if i + 1 < len(min_sections) else len(full_text)
            content_text = full_text[content_start:content_end].strip()
            # Skip headings with an empty body.
            if not content_text:
                continue
            section_number = self._extract_section_number(section_title)
            chunks.extend(self._split_content(
                content_text,
                {"section": section_title, "section_number": section_number},
                metadata
            ))
        # Guarantee a non-empty result even when every section body was blank.
        if not chunks:
            return self._split_content(
                full_text, {"section": "无章节内容", "section_number": "0"}, metadata
            )
        return chunks

    def _find_minimal_sections(self, text: str) -> List[Tuple[str, int, int]]:
        """Locate all headings and keep only those at the deepest level.

        :return: ``(title, start, end)`` tuples sorted by position in ``text``.
        """
        all_matches = []
        for match in self.pattern1.finditer(text):
            all_matches.append((match.group(0).strip(), match.start(), match.end()))
        # Formats 2 and 3 skip positions already claimed (within 5 chars) by an
        # earlier pattern so the same heading line is not counted twice.
        for pattern in (self.pattern2, self.pattern3):
            for match in pattern.finditer(text):
                start = match.start()
                if not any(abs(start - m[1]) < 5 for m in all_matches):
                    all_matches.append((match.group(0).strip(), start, match.end()))
        # Order headings by their position in the document.
        all_matches.sort(key=lambda x: x[1])
        min_sections = []
        for i, (title, start, end) in enumerate(all_matches):
            depth = self._calculate_depth(title)
            # A heading is "minimal" unless a deeper heading follows it later.
            is_minimal = True
            for j in range(i + 1, len(all_matches)):
                next_title, next_start, next_end = all_matches[j]
                next_depth = self._calculate_depth(next_title)
                if next_depth > depth and next_start < (
                        all_matches[j + 1][1] if j + 1 < len(all_matches) else len(text)):
                    is_minimal = False
                    break
            if is_minimal:
                min_sections.append((title, start, end))
        return min_sections

    def _calculate_depth(self, title: str) -> int:
        """Return the nesting depth of a heading title.

        BUG FIX: the prefix test previously used ``startswith("")``, which is
        true for every string, so all headings collapsed to depth 1 and the
        minimal-section detection could never distinguish levels.
        """
        # EHS-prefixed headings carry no numbering: fixed depth 1.
        if title.startswith(self.EHS_PREFIX):
            return 1
        # Numbered headings: one level per dot-separated component.
        return title.count('.') + 1

    def _extract_section_number(self, section_title: str) -> str:
        """Extract the section number from a heading title.

        For EHS-prefixed headings the whole title serves as the identifier.
        BUG FIX: same always-true ``startswith("")`` defect as in
        ``_calculate_depth``; now checks the real ``EHS保障体系-`` prefix.
        """
        if section_title.startswith(self.EHS_PREFIX):
            return section_title
        match = re.search(r'([一二三四五六七八九十]+\.\d+(?:\.\d+)*|\d+(?:\.\d+)*)', section_title)
        if match:
            return match.group(1)
        return "未知章节"

    def _split_content(self, content: str, section_info: dict, base_metadata: dict) -> "List[LangDocument]":
        """Wrap section content into LangDocument chunks with merged metadata.

        :param content: section body text; an empty string yields ``[]``.
        :param section_info: ``section``/``section_number`` fields merged into
            each chunk's metadata.
        :param base_metadata: per-file metadata copied into every chunk.
        """
        if not content:
            return []
        # Short content fits in one chunk; no chunk_index is recorded.
        if len(content) <= self.max_chunk_size:
            metadata = base_metadata.copy()
            metadata.update(section_info)
            return [LangDocument(page_content=content, metadata=metadata)]
        # Long content: recursive split with a 1-based chunk_index per piece.
        pieces = self.text_splitter.split_text(content)
        chunks = []
        for i, piece in enumerate(pieces):
            metadata = base_metadata.copy()
            metadata.update(section_info)
            metadata["chunk_index"] = i + 1
            chunks.append(LangDocument(page_content=piece, metadata=metadata))
        return chunks


def load_document(file_path: str) -> "List[LangDocument]":
    """Load a PDF file and return its full text as one LangChain document.

    :param file_path: path to the PDF file to read.
    :return: a one-element list containing the concatenated page text, or an
        empty list when the PDF has no extractable text.
    """
    pages = []
    with open(file_path, 'rb') as file:
        reader = PyPDF2.PdfReader(file)
        for page in reader.pages:
            pages.append(page.extract_text())

    # Join page texts so section headings can be found across pages.
    full_text = "\n".join(pages)
    if not full_text.strip():
        print(f"警告: 文档没有可提取的文本内容 - {file_path}")
        return []
    metadata = {
        "source": file_path,
        "file_name": os.path.basename(file_path),
        # BUG FIX: was "docx", but this loader reads PDF files via PyPDF2.
        "file_type": "pdf",
    }
    return [LangDocument(page_content=full_text, metadata=metadata)]

def create_vector_store(chunks: List[LangDocument], persist_dir: str) -> Chroma:
    """Embed the content chunks and persist them into a Chroma vector store.

    :param chunks: non-empty list of content chunks to index.
    :param persist_dir: directory the Chroma database is persisted to.
    :return: the populated Chroma vector store.
    :raises ValueError: when ``chunks`` is empty.
    """
    if not chunks:
        raise ValueError("无法创建向量存储：内容块列表为空")
    print(f"创建向量数据库 ({len(chunks)}个内容块)...")
    # Ensure the persistence directory exists (was duplicated before).
    os.makedirs(persist_dir, exist_ok=True)
    # SECURITY NOTE: the DashScope key was hard-coded here (duplicating the
    # module-level env assignment); read it from the environment instead and
    # move it out of source control entirely.
    embeddings = DashScopeEmbeddings(
        model="text-embedding-v2",
        dashscope_api_key=os.environ.get("DASHSCOPE_API_KEY"),
    )
    try:
        # BUG FIX: the old code called Chroma.from_documents inside a loop over
        # a growing prefix of `chunks`, embedding each chunk O(n^2) times and
        # writing duplicate entries into the store. One call indexes every
        # chunk exactly once.
        vector_store = Chroma.from_documents(
            documents=chunks,
            embedding=embeddings,
            persist_directory=persist_dir
        )
        print(f"向量数据库创建完成，持久化到: {persist_dir}")
        return vector_store
    except Exception as e:
        # Surface common failure causes, show a preview of the data, re-raise.
        print(f"创建向量数据库失败: {str(e)}")
        print("可能的原因：")
        print("1. 内容块文本为空")
        print("2. 嵌入服务API调用失败")
        print("3. 网络连接问题")
        print("4. 文件权限问题")

        print("\n内容块预览:")
        for i, chunk in enumerate(chunks[:3]):
            content = chunk.page_content
            preview = content[:100] + "..." if len(content) > 100 else content
            print(f"块 {i + 1}: {preview}")
        raise

def create_rag_chain(vector_store: Chroma) -> Any:
    """Assemble a content-focused retrieval-augmented generation chain.

    Pipeline: similarity retriever -> prompt filled with retrieved context ->
    DeepSeek chat model -> plain-string output parser.
    """
    # NOTE(review): API key is hard-coded; it should come from configuration.
    chat_model = ChatDeepSeek(
        model="deepseek-chat",
        api_key="sk-bfdc307c3def4f9da9a06775a127e7a1"
    )
    # Retrieve the 4 most similar content chunks for each query.
    content_retriever = vector_store.as_retriever(
        search_type="similarity",
        search_kwargs={"k": 4}
    )
    # Content-oriented prompt: answer strictly from the retrieved body text.
    template = """
    你是一个专业的问答助手，请基于文档的纯文本内容回答问题。
    以下是检索到的相关内容片段（只包含文档正文，不包含标题）:

    {context}

    问题: {question}

    请根据文档正文内容提供准确回答，忽略标题和结构信息,你只需要回答检索到的部分,不要做任何变动和总结。
    如果内容不包含相关信息，请回答"根据文档内容，我无法回答这个问题"。
    """
    rag_prompt = ChatPromptTemplate.from_template(template)

    def join_page_contents(docs):
        # Concatenate retrieved chunk bodies, separated by blank lines.
        parts = [doc.page_content for doc in docs]
        return "\n\n".join(parts)

    chain_inputs = {
        "context": content_retriever | join_page_contents,
        "question": RunnablePassthrough(),
    }
    return chain_inputs | rag_prompt | chat_model | StrOutputParser()

def print_content_chunks(chunks: "List[LangDocument]", max_chunks: int = 5):
    """Print a short preview of the first ``max_chunks`` content chunks.

    :param chunks: chunks to preview; each provides ``page_content`` and
        ``metadata``.
    :param max_chunks: how many chunks to show (default 5).
    """
    print(f"\n文档分割为 {len(chunks)} 个内容块")
    # BUG FIX: the header hard-coded "前5个" even when max_chunks differed.
    print(f"前{max_chunks}个块的内容预览：")
    for i, chunk in enumerate(chunks[:max_chunks]):
        section = chunk.metadata.get("section_number", "N/A")
        content = chunk.page_content
        # Truncate long bodies to a 100-character preview.
        content_preview = content[:100] + "..." if len(content) > 100 else content
        print(f"块 {i + 1} [章节 {section}]:")
        print(f"  内容: {content_preview}")
        print("-" * 60)

from pathlib import Path


def convert_docx_to_pdf(input_docx_path, output_pdf_path=None, method='auto'):
    """
    Convert a Word (.docx) document to PDF.

    :param input_docx_path: path of the source .docx file
    :param output_pdf_path: path of the target .pdf file (optional; defaults
        to the source path with its suffix replaced by .pdf)
    :param method: conversion backend ('auto', 'comtypes', 'docx2pdf',
        'unoserver'); 'auto' picks the best available one
    :return: path of the generated PDF file
    :raises FileNotFoundError: when the source file does not exist
    :raises ValueError: when the source is not a .docx or the method is unknown
    """
    # Validate the input before touching any conversion backend.
    source = Path(input_docx_path)
    if not source.exists():
        raise FileNotFoundError(f"输入文件不存在: {source}")
    if source.suffix.lower() != '.docx':
        raise ValueError(f"文件不是有效的DOCX格式: {source}")

    # Derive the output path when the caller did not supply one.
    target = source.with_suffix('.pdf') if output_pdf_path is None else Path(output_pdf_path)
    target.parent.mkdir(parents=True, exist_ok=True)

    if method == 'auto':
        method = _select_best_conversion_method()

    # Dispatch to the selected backend.
    backends = {
        'comtypes': _convert_with_comtypes,
        'docx2pdf': _convert_with_docx2pdf,
        'unoserver': _convert_with_unoserver,
    }
    backend = backends.get(method)
    if backend is None:
        raise ValueError(f"未知的转换方法: {method}")
    return backend(source, target)


def _select_best_conversion_method():
    """自动选择最佳转换方法"""
    try:
        # 尝试使用docx2pdf作为首选
        import docx2pdf
        return 'docx2pdf'
    except ImportError:
        pass

    try:
        # 如果docx2pdf不可用，尝试comtypes（仅Windows）
        import comtypes
        import sys
        if sys.platform == 'win32':
            return 'comtypes'
    except ImportError:
        pass

    try:
        # 最后尝试unoserver
        import unoserver
        return 'unoserver'
    except ImportError:
        pass

    # 如果没有可用的库，提示安装
    raise RuntimeError("没有可用的转换库，请安装: pip install docx2pdf 或 pip install comtypes 或 pip install unoserver")


def _convert_with_comtypes(docx_path, pdf_path):
    """使用comtypes库转换(仅Windows, 需要安装Microsoft Word)"""
    import comtypes.client
    import sys

    if sys.platform != 'win32':
        raise OSError("comtypes方法仅支持Windows系统")

    word = comtypes.client.CreateObject('Word.Application')
    word.Visible = False

    try:
        doc = word.Documents.Open(str(docx_path))
        doc.SaveAs(str(pdf_path), FileFormat=17)  # 17 = PDF format
        doc.Close()
        return str(pdf_path)
    except Exception as e:
        raise RuntimeError(f"转换失败: {str(e)}")
    finally:
        word.Quit()


def _convert_with_docx2pdf(docx_path, pdf_path):
    """使用docx2pdf库转换(跨平台, 需要Microsoft Word或LibreOffice)"""
    from docx2pdf import convert

    try:
        convert(str(docx_path), str(pdf_path))
        return str(pdf_path)
    except Exception as e:
        raise RuntimeError(f"转换失败: {str(e)}")


def _convert_with_unoserver(docx_path, pdf_path):
    """使用unoserver库转换(跨平台, 需要LibreOffice)"""
    from unoserver import converter

    try:
        # 启动转换器服务
        conv = converter.UnoConverter()
        conv.convert(str(docx_path), str(pdf_path))
        return str(pdf_path)
    except Exception as e:
        raise RuntimeError(f"转换失败: {str(e)}")
    finally:
        # 确保关闭转换器
        if 'conv' in locals():
            conv.__exit__(None, None, None)


def docx_to_pdf(path):
    """Convert one DOCX file to a PDF placed next to it.

    :param path: path of the .docx file to convert.
    :return: path of the generated PDF file.
    """
    source_docx = path
    # No explicit output path: the PDF lands beside the source file.
    target_pdf = None
    # 'auto' lets convert_docx_to_pdf pick the best installed backend
    # ('comtypes', 'docx2pdf' or 'unoserver').
    backend = 'auto'
    print(f"开始转换: {source_docx}")

    # 5-second pause before converting (original comment claimed 1 second;
    # presumably this waits for Office/filesystem readiness — TODO confirm).
    time.sleep(5)
    result_path = convert_docx_to_pdf(
        input_docx_path=source_docx,
        output_pdf_path=target_pdf,
        method=backend,
    )
    print(f"转换成功! PDF文件已保存到: {result_path}")
    return result_path


def main():
    """Walk the Word folder, convert each .docx to PDF, split it into section
    chunks, and index the chunks into a persistent Chroma store. The
    intermediate PDF is deleted after indexing."""
    persist_dir = "D:\\hbyt\\project\\aibid\\db\\d2"
    os.makedirs(persist_dir, exist_ok=True)
    source_folder = Path("D:\\hbyt\\AI智能投标\\2025_04_23_Word\\2025_04_23_Word\\Word")
    ok_count = 0
    skip_count = 0
    for entry in source_folder.rglob("*"):
        # Only real .docx files; skip Word's "~$" lock/temp files.
        if not entry.is_file():
            continue
        if entry.suffix.lower() != ".docx" or re.match(r'^~\$.*', entry.name):
            continue
        docx_path = str(entry)
        print("docx文件", docx_path)
        pdf_path = docx_to_pdf(docx_path)
        print(f"\n{'=' * 60}")
        print(f"处理文件: {entry.name}")
        print(f"完整路径: {pdf_path}")
        # 1. Load the converted PDF as a single document.
        documents = load_document(pdf_path)
        print("documents", documents)
        print("长度：", len(documents))

        # 2. Split into minimal-section content chunks.
        splitter = MinimalSectionSplitter(max_chunk_size=1000, min_chunk_size=200)
        content_chunks = splitter.split_documents(documents)
        print("content_chunks:", content_chunks)
        if not content_chunks:
            print(f"警告: 文档分割后没有内容块 - {entry.name}")
            skip_count += 1
            continue
        # 3. Index the chunks, then drop the intermediate PDF.
        create_vector_store(content_chunks, persist_dir)
        os.remove(Path(pdf_path))
        ok_count += 1
    print(f"\n处理完成! 成功处理: {ok_count} 个文件, 跳过: {skip_count} 个文件")
if __name__ == "__main__":
    main()