import csv
import os
import re
import subprocess
from pathlib import Path

from docx import Document
from docx.oxml.ns import qn
from docx.oxml import parse_xml
from PIL import Image
import io
import xml.etree.ElementTree as ET
from langchain_text_splitters import RecursiveCharacterTextSplitter


def docx_to_markdown(docx_path, output_md_path, media_dir="media", tables_dir="tables"):
    """Convert a Word document to Markdown, preserving structure, tables and images.

    Tables are exported as CSV files (linked in place) and images are
    extracted into *media_dir*.

    Args:
        docx_path: Path of the source .docx file.
        output_md_path: Destination path of the generated Markdown file.
        media_dir: Directory receiving extracted images.
        tables_dir: Directory receiving exported CSV tables.
    """
    # Create the media and tables folders up front.
    os.makedirs(media_dir, exist_ok=True)
    os.makedirs(tables_dir, exist_ok=True)

    doc = Document(docx_path)

    # Map underlying XML elements to their python-docx wrappers ONCE,
    # instead of re-scanning doc.paragraphs / doc.tables for every body
    # element (the original lookup was O(n*m)).
    para_by_element = {p._element: p for p in doc.paragraphs}
    table_by_element = {t._element: t for t in doc.tables}

    md_content = []
    image_count = 0
    table_count = 0  # number of tables exported so far

    # Walk the document body in original order so paragraphs and tables
    # interleave exactly as they do in the source document.
    for element in doc.element.body:
        if element.tag.endswith('p'):
            para = para_by_element.get(element)
            if para is None:
                continue
            if para.text.strip():
                md_content.append(process_paragraph(para))

            # Extract any inline images from this paragraph.
            images_md = process_images_in_paragraph(para, media_dir, image_count)
            if images_md:
                md_content.append(images_md)
                # One "![图片描述]" marker is emitted per saved image.
                image_count += images_md.count("![图片描述]")

        elif element.tag.endswith('tbl'):
            table = table_by_element.get(element)
            if table is None:
                continue
            table_count += 1
            csv_filename = f"table_{table_count}.csv"
            csv_path = os.path.join(tables_dir, csv_filename)

            save_table_as_csv(table, csv_path)

            # Link the CSV relative to the Markdown file's directory.
            relative_csv_path = os.path.relpath(csv_path, start=os.path.dirname(output_md_path))
            md_content.append(f"[表格已导出为CSV文件]({relative_csv_path})")

    # Write the joined Markdown, dropping empty fragments.
    with open(output_md_path, 'w', encoding='utf-8') as md_file:
        md_file.write("\n\n".join([c for c in md_content if c]))

    print(f"Markdown文件已保存至: {output_md_path}")
    print(f"图片已保存至: {media_dir}")
    print(f"表格已保存为CSV文件至: {tables_dir}")
    print(f"共转换 {table_count} 个表格")


def save_table_as_csv(table, csv_path):
    """Export a Word table to the CSV file at *csv_path*.

    Cell text is stripped and embedded newlines are collapsed to single
    spaces, so each table row maps to exactly one CSV row.
    """
    with open(csv_path, 'w', encoding='utf-8', newline='') as csvfile:
        writer = csv.writer(csvfile)
        for table_row in table.rows:
            # Normalize every cell in the row, then emit it in one call.
            writer.writerow(
                [cell.text.strip().replace('\n', ' ') for cell in table_row.cells]
            )


def process_paragraph(para):
    """Convert one docx paragraph to a Markdown line.

    Heading styles become ``#`` prefixes, list-styled paragraphs become
    bullet items, and internal newlines in plain text become Markdown hard
    line breaks.  Returns "" for empty paragraphs.
    """
    text = para.text.strip()
    if not text:
        return ""

    style_name = para.style.name

    # Headings: "Heading N" -> N leading '#'.  Guard the parse so style
    # names without a trailing integer (e.g. a bare "Heading") don't raise
    # ValueError; default those to level 1.
    if style_name.startswith('Heading'):
        try:
            level = int(style_name.split()[-1])
        except ValueError:
            level = 1
        return f"{'#' * level} {text}"

    # NOTE(review): the original re.sub(r'\*\*(.*?)\*\*', r'**\1**', text)
    # replaced bold markers with themselves — a no-op — and was removed.

    # List-styled paragraphs become bullet items.
    if style_name == 'List Paragraph' or style_name.startswith('List'):
        return f"- {text}"

    # Two trailing spaces before the newline mark a hard break in Markdown.
    return text.replace('\n', '  \n')


def process_images_in_paragraph(para, media_dir, img_index):
    """Extract inline images from a paragraph's runs and save them to disk.

    Images are written to *media_dir* as ``image{N}.{ext}`` starting at
    *img_index*, and one Markdown image reference is emitted per image.

    Returns:
        A newline-joined string of Markdown image links ("" when the
        paragraph contains no images).
    """
    md_images = []

    for run in para.runs:
        # A <w:drawing> element inside the run marks an inline image.
        drawing = run._element.find('.//' + qn('w:drawing'))
        if drawing is None:
            continue
        # The <a:blip> element carries the relationship id of the image part.
        blip = drawing.find('.//' + qn('a:blip'))
        if blip is None:
            continue
        try:
            image_id = blip.get(qn('r:embed'))
            if not image_id:
                continue

            # Resolve the relationship id to the binary image part.
            image_part = run.part.related_parts[image_id]
            image_bytes = image_part.blob

            # Derive the extension from the actual image bytes; fall back
            # to PNG when Pillow cannot identify the format.
            try:
                img = Image.open(io.BytesIO(image_bytes))
                image_ext = img.format.lower()
            except Exception:
                # BUG FIX: this was a bare `except:`, which also swallowed
                # KeyboardInterrupt / SystemExit.
                image_ext = "png"

            image_name = f"image{img_index}.{image_ext}"
            image_path = os.path.join(media_dir, image_name)

            with open(image_path, 'wb') as img_file:
                img_file.write(image_bytes)

            # NOTE(review): the link embeds the OS-native path, so on
            # Windows it contains backslashes — confirm downstream Markdown
            # consumers accept that before normalizing to forward slashes.
            md_images.append(f"![图片描述]({image_path})")
            img_index += 1  # next image gets the next index
        except Exception as e:
            print(f"图片处理错误: {str(e)}")
            md_images.append("[图片处理错误]")

    return "\n".join(md_images)


def enhance_markdown_with_pandoc(input_md, output_md):
    """Optionally post-process *input_md* with pandoc into *output_md*.

    Falls back gracefully (printing a note) when pandoc is not installed
    or the conversion fails; the basic conversion result is kept either way.
    """
    command = [
        'pandoc', '-s', input_md,
        '-t', 'markdown_strict+pipe_tables',
        '-o', output_md,
    ]
    try:
        subprocess.run(command, check=True)
    except (subprocess.CalledProcessError, FileNotFoundError):
        print("未找到pandoc，使用基本转换结果")
    else:
        print(f"使用pandoc优化后的Markdown已保存至: {output_md}")

def docx_to_md(path):
    """Convert the .docx at *path* to Markdown and return the .md path.

    Output layout (rooted at D:\\hbyt\\AI智能投标\\markdown_output):
        <name>/<name>.md       main markdown file
        <name>_enhanced.md     optional pandoc-enhanced copy
        media/<name>/          extracted images
        tables/<name>/         exported CSV tables
    """
    output_root = r"D:\hbyt\AI智能投标\markdown_output"

    # Base name of the document, without extension.
    filename = os.path.splitext(os.path.basename(path))[0]

    # BUG FIX: the output names previously used the literal "(unknown)"
    # inside placeholder-free f-strings, so every document's enhanced file
    # collided on the same path.  Build the names from the document's own
    # base name instead.
    output_md = os.path.join(output_root, filename, f"{filename}.md")
    enhanced_md = os.path.join(output_root, f"{filename}_enhanced.md")
    media_folder = os.path.join(output_root, "media", filename)
    tables_folder = os.path.join(output_root, "tables", filename)

    # Ensure all output directories exist before conversion.
    os.makedirs(os.path.dirname(output_md), exist_ok=True)
    os.makedirs(media_folder, exist_ok=True)
    os.makedirs(tables_folder, exist_ok=True)

    # Run the conversion, then optionally enhance the result with pandoc.
    docx_to_markdown(path, output_md, media_folder, tables_folder)
    enhance_markdown_with_pandoc(output_md, enhanced_md)
    return output_md

import re
import os
from collections import defaultdict
from typing import Dict, List, Tuple


class MarkdownSectionExtractor:
    """Parse a Markdown file into a tree of heading-delimited sections.

    Sections are keyed by their full heading path: a tuple of
    (level, title) pairs from the root heading down to the section itself,
    e.g. ((1, 'Intro'), (2, 'Background')).

    NOTE(review): sections are re-matched to their source lines by
    (level, title) alone, so two headings with identical level and text
    collide — confirm source documents have unique headings.
    """

    def __init__(self, file_path: str):
        self.file_path = file_path
        # Raw file lines (newlines preserved); [] if the file was unreadable.
        self.content = self._read_file()
        # {heading_path: {'content': str, 'children': [child paths]}}
        self.sections = self._extract_sections()

    def _read_file(self) -> List[str]:
        """Read the Markdown file; return its lines, or [] on any error."""
        try:
            with open(self.file_path, 'r', encoding='utf-8') as f:
                return f.readlines()
        except FileNotFoundError:
            print(f"错误: 文件不存在 - {self.file_path}")
            return []
        except Exception as e:
            print(f"读取文件时出错: {str(e)}")
            return []

    def _extract_sections(self) -> Dict[Tuple, Dict]:
        """Extract every section and its body text.

        Returns:
            {path tuple of (level, title): {'content': str, 'children': []}}
        """
        if not self.content:
            return {}

        # ATX headings: 1-6 '#' characters, whitespace, then the title.
        heading_pattern = re.compile(r'^(#{1,6})\s+(.*)$')

        # Pass 1: collect every heading with its level, title and 1-based
        # line number.
        headings = []
        for line_num, line in enumerate(self.content, 1):
            match = heading_pattern.match(line.strip())
            if match:
                level = len(match.group(1))
                title = match.group(2).strip()
                headings.append({
                    'level': level,
                    'title': title,
                    'line': line_num
                })

        if not headings:
            return {}

        # Build the section tree keyed by heading paths.
        section_tree = {}
        current_path = []

        for heading in headings:
            # Pop back up to the nearest ancestor with a strictly smaller
            # level before appending this heading.
            while current_path and current_path[-1][0] >= heading['level']:
                current_path.pop()

            current_path.append((heading['level'], heading['title']))
            section_tree[tuple(current_path)] = {
                'content': '',
                'children': []
            }

        # Pass 2: assign each body line to the section it follows.
        current_section = None
        content_lines = []

        # Line numbers that hold headings.
        heading_lines = [h['line'] for h in headings]

        for line_num, line in enumerate(self.content, 1):
            if line_num in heading_lines:
                # A new heading starts: flush the previous section's body.
                if current_section:
                    section_tree[current_section]['content'] = ''.join(content_lines).strip()
                    content_lines = []

                # Find the tree path whose leaf matches this heading.
                # NOTE(review): matches by (level, title) only — duplicate
                # headings all resolve to the first matching path.
                for path in section_tree.keys():
                    if path[-1][0] == headings[heading_lines.index(line_num)]['level'] and \
                            path[-1][1] == headings[heading_lines.index(line_num)]['title']:
                        current_section = path
                        break
            elif current_section:
                # Accumulate body text for the current section.
                content_lines.append(line)

        # Flush the final section's body.
        if current_section and content_lines:
            section_tree[current_section]['content'] = ''.join(content_lines).strip()

        # Link each section into its parent's children list.
        for path in list(section_tree.keys()):
            if len(path) > 1:
                parent_path = path[:-1]
                if parent_path in section_tree:
                    section_tree[parent_path]['children'].append(path)

        return section_tree

    def get_all_sections(self) -> Dict[Tuple, Dict]:
        """Return the full {heading path: section} mapping."""
        return self.sections

    def get_sections_by_level(self, level: int) -> List[Dict]:
        """Return every section whose own heading sits at *level*."""
        sections = []
        for path, section in self.sections.items():
            if path[-1][0] == level:
                sections.append({
                    'level': level,
                    'title': path[-1][1],
                    'content': section['content']
                })
        return sections

    def get_min_level_sections(self) -> List[Dict]:
        """Return the sections at the deepest (numerically largest) level."""
        if not self.sections:
            return []

        # Deepest heading level present in the document.
        max_level = max(path[-1][0] for path in self.sections.keys())
        return self.get_sections_by_level(max_level)

    def get_full_content_by_level(self, level: int) -> Dict[str, str]:
        """Return {title: heading line + body} for all sections at *level*."""
        result = {}
        for section in self.get_sections_by_level(level):
            full_content = f"{'#' * section['level']} {section['title']}\n\n{section['content']}"
            result[section['title']] = full_content
        return result

    def print_section_structure(self):
        """Print the section hierarchy with short content previews."""
        if not self.sections:
            print("没有找到章节")
            return

        print("Markdown文档结构:")
        print("=" * 60)

        # Start from the top-level sections (path length 1).
        root_sections = [p for p in self.sections.keys() if len(p) == 1]
        for path in root_sections:
            self._print_section(path, 0)

    def _print_section(self, path: Tuple, indent: int):
        """Print one section, then recurse into its children."""
        section = self.sections.get(path)
        if not section:
            return

        level, title = path[-1]
        print(f"{'  ' * indent}层级 {level}: {title}")

        # Show at most the first 100 characters of the body.
        if section['content']:
            content_preview = section['content'][:100].replace('\n', ' ')
            if len(section['content']) > 100:
                content_preview += "..."
            print(f"{'  ' * (indent + 1)}内容预览: {content_preview}")
        else:
            print(f"{'  ' * (indent + 1)}无内容")

        # Recurse into child sections.
        for child_path in section['children']:
            self._print_section(child_path, indent + 1)

    def save_all_sections(self, output_dir: str):
        """Write each section (heading + body) to its own .md file."""
        if not self.sections:
            print("没有章节数据可保存")
            return

        os.makedirs(output_dir, exist_ok=True)

        for path, section in self.sections.items():
            level, title = path[-1]

            # Strip characters that are illegal in file names.
            # NOTE(review): safe_title is computed but never used — the file
            # name below is built from the raw titles in *path*; verify
            # whether titles can contain illegal filename characters.
            safe_title = re.sub(r'[\\/*?:"<>|]', '', title)
            path_str = "_".join([f"{lvl}_{t}" for lvl, t in path])
            file_name = f"{path_str}.md"
            file_path = os.path.join(output_dir, file_name)

            # Full section text: heading line, blank line, body.
            full_content = f"{'#' * level} {title}\n\n{section['content']}"

            with open(file_path, 'w', encoding='utf-8') as f:
                f.write(full_content)

            print(f"已保存: {file_path}")

from langchain_core.documents import Document as LangDocument



def load_md(path) -> List[LangDocument]:
    """Load a Markdown file and return its sections as LangDocument chunks.

    Collects the body of every level-2 and level-3 section, then wraps each
    non-empty section in one LangDocument, truncating sections of 2048+
    characters to their first 2046 characters.

    Returns:
        A list of LangDocument chunks; [] when the file is unreadable or
        contains no extractable text.
    """
    md_file = path
    extractor = MarkdownSectionExtractor(md_file)
    if not extractor.content:
        # BUG FIX: previously returned None, contradicting the declared
        # List[LangDocument] return type and crashing len() in the caller.
        return []

    content: List[str] = []
    for level in (2, 3):
        sections = extractor.get_sections_by_level(level)
        print(f"层级{level}的章节 (共{len(sections)}个):")
        for i, section in enumerate(sections, 1):
            text = section['content'][:100000]
            print(f"{i}. {section['title']}")
            print(f"   内容摘要: {text}...")
            # BUG FIX: the original used `is not ""` (identity comparison,
            # a SyntaxWarning) for level 2 and an always-true `is not None`
            # for level 3; skip empty section bodies uniformly instead so
            # no empty chunks reach the embedder.
            if text:
                content.append(text)
        if level == 2:
            print("\n" + "=" * 60)
    print(content)

    # Bail out when no section produced any text.
    full_text = "\n".join(content)
    if not full_text.strip():
        print(f"警告: 文档没有可提取的文本内容 - {md_file}")
        return []

    # NOTE(review): the original built a RecursiveCharacterTextSplitter here
    # but never used it; removed.  Shared metadata for every chunk:
    base_metadata = {
        "source": md_file,
        "file_name": os.path.basename(md_file),
        "file_type": "docx",
    }

    chunks = []
    for text in content:
        # Hard cap each chunk at just under 2048 characters.
        page = text if len(text) < 2048 else text[:2046]
        chunks.append(LangDocument(page_content=page, metadata=base_metadata.copy()))
    return chunks

class MinimalSectionSplitter:
    """Chunker that targets the smallest (deepest) sections of a document.

    Recognizes three heading formats:
      1. Chinese numeral + dotted Arabic numbers, e.g. "一.1.1 标题"
      2. Plain dotted Arabic numbering, e.g. "1.1.1 标题"
      3. "EHS保障体系-" prefixed titles, e.g. "EHS保障体系-职业健康管理体系"
    """

    def __init__(self, max_chunk_size: int = 1000, min_chunk_size: int = 200):
        self.max_chunk_size = max_chunk_size
        self.min_chunk_size = min_chunk_size
        # Format 1: Chinese numeral, dot, Arabic numbers (e.g. 一.1.1).
        self.pattern1 = re.compile(
            r'^(\s*[一二三四五六七八九十]+\.\d+(?:\.\d+)*\s+[^\n]+)$',
            re.MULTILINE
        )
        # Format 2: pure Arabic numbering (e.g. 1.1.1).
        self.pattern2 = re.compile(
            r'^(\s*\d+(?:\.\d+)*\s+[^\n]+)$',
            re.MULTILINE
        )
        # Format 3: "EHS保障体系-" prefixed titles.
        self.pattern3 = re.compile(
            r'^(\s*EHS保障体系-[^\n]+)$',
            re.MULTILINE
        )
        # Fallback splitter for section bodies longer than max_chunk_size.
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=self.max_chunk_size,
            chunk_overlap=int(self.max_chunk_size * 0.1),
            separators=["\n\n", "\n", "。", "！", "？", ";", " ", ""]
        )

    def split_documents(self, documents: List[LangDocument]) -> List[LangDocument]:
        """Split *documents* into chunks keyed to their minimal sections.

        Falls back to plain text splitting when no section headings are
        found; always returns at least one chunk for non-empty input.
        """
        chunks = []
        # Concatenate all page contents into one text for heading search.
        full_text = "\n".join([doc.page_content for doc in documents])
        # print("full_text",full_text)
        metadata = documents[0].metadata.copy() if documents else {}
        print("metadata", metadata)
        # Locate every minimal (deepest-level) section heading.
        min_sections = self._find_minimal_sections(full_text)
        print("min_sections", min_sections)
        # No headings found: fall back to plain chunking of the whole text.
        if not min_sections:
            return self._split_content(full_text, {"section": "无章节内容", "section_number": "0"}, metadata)
        # Chunk the body of each minimal section.
        for i, (section_title, start_pos, end_pos) in enumerate(min_sections):
            # Body runs from the end of this heading to the start of the next.
            content_start = end_pos
            content_end = min_sections[i + 1][1] if i + 1 < len(min_sections) else len(full_text)
            # Body text excludes the heading itself.
            content_text = full_text[content_start:content_end].strip()
            print("提取纯内容（不含标题）", content_text)
            # Skip sections with empty bodies.
            if not content_text:
                continue
            section_number = self._extract_section_number(section_title)
            chunks.extend(self._split_content(
                content_text,
                {
                    "section": section_title,
                    "section_number": section_number
                },
                metadata
            ))
        # Guarantee at least one chunk even if every section body was empty.
        if not chunks:
            return self._split_content(full_text, {"section": "无章节内容", "section_number": "0"}, metadata)
        return chunks

    def _find_minimal_sections(self, text: str) -> List[Tuple[str, int, int]]:
        """Return (title, start, end) for each deepest-level heading in *text*."""
        all_matches = []
        # Format-1 headings.
        for match in self.pattern1.finditer(text):
            title = match.group(0).strip()
            start = match.start()
            end = match.end()
            all_matches.append((title, start, end))
        # Format-2 headings, skipping positions already claimed above.
        for match in self.pattern2.finditer(text):
            title = match.group(0).strip()
            start = match.start()
            end = match.end()
            if not any(abs(start - m[1]) < 5 for m in all_matches):
                all_matches.append((title, start, end))
        # Format-3 headings, likewise deduplicated by position.
        for match in self.pattern3.finditer(text):
            title = match.group(0).strip()
            start = match.start()
            end = match.end()
            if not any(abs(start - m[1]) < 5 for m in all_matches):
                all_matches.append((title, start, end))
        # Document order.
        all_matches.sort(key=lambda x: x[1])
        # Keep only the deepest-level headings: a heading is "minimal" when
        # no later, deeper heading appears within the relevant span.
        # NOTE(review): the window check below uses all_matches[j + 1][1] as
        # the bound — verify this matches the intended nesting rule.
        min_sections = []
        for i, (title, start, end) in enumerate(all_matches):
            depth = self._calculate_depth(title)
            is_minimal = True
            for j in range(i + 1, len(all_matches)):
                next_title, next_start, next_end = all_matches[j]
                next_depth = self._calculate_depth(next_title)
                if next_depth > depth and next_start < (
                        all_matches[j + 1][1] if j + 1 < len(all_matches) else len(text)):
                    is_minimal = False
                    break
            if is_minimal:
                min_sections.append((title, start, end))
        return min_sections

    def _calculate_depth(self, title: str) -> int:
        """Return the nesting depth of a heading title."""
        # BUG FIX: this was `title.startswith("")`, which is always True and
        # made every heading depth 1 (defeating minimal-section detection).
        # Only the EHS prefix gets a fixed depth of 1.
        if title.startswith("EHS保障体系-"):
            return 1
        # Other headings: depth equals the number of dots plus one.
        return title.count('.') + 1

    def _extract_section_number(self, section_title: str) -> str:
        """Extract the numbering part of a heading (full title for EHS headings)."""
        # BUG FIX: same always-true `startswith("")` as in _calculate_depth;
        # it short-circuited every call and the regex below never ran.
        if section_title.startswith("EHS保障体系-"):
            return section_title
        match = re.search(r'([一二三四五六七八九十]+\.\d+(?:\.\d+)*|\d+(?:\.\d+)*)', section_title)
        if match:
            return match.group(1)
        return "未知章节"

    def _split_content(self, content: str, section_info: dict, base_metadata: dict) -> List[LangDocument]:
        """Split *content* into LangDocuments carrying section metadata."""
        if not content:
            return []
        # Short content fits in a single chunk.
        if len(content) <= self.max_chunk_size:
            metadata = base_metadata.copy()
            metadata.update(section_info)
            return [LangDocument(page_content=content, metadata=metadata)]
        # Long content: delegate to the recursive splitter, numbering chunks.
        content_chunks = self.text_splitter.split_text(content)
        chunks = []
        for i, chunk in enumerate(content_chunks):
            metadata = base_metadata.copy()
            metadata.update(section_info)
            metadata["chunk_index"] = i + 1
            chunks.append(LangDocument(page_content=chunk, metadata=metadata))
        return chunks

from langchain_community.vectorstores import Chroma
from langchain_community.embeddings import DashScopeEmbeddings

def create_vector_store(chunks: List[LangDocument], persist_dir: str) -> Chroma:
    """Embed *chunks* with DashScope and persist them in a Chroma store.

    Args:
        chunks: Non-empty list of documents to embed.
        persist_dir: Directory where the Chroma database is persisted.

    Raises:
        ValueError: if *chunks* is empty.
        Exception: re-raised from Chroma after printing diagnostics.
    """
    if not chunks:
        raise ValueError("无法创建向量存储：内容块列表为空")
    print(f"创建向量数据库 ({len(chunks)}个内容块)...")

    print("有多少个chunks", chunks)
    os.makedirs(persist_dir, exist_ok=True)

    # SECURITY: the DashScope API key was hard-coded here.  Prefer the
    # environment variable; the literal fallback keeps existing deployments
    # working but the leaked key should be rotated and the fallback removed.
    embeddings = DashScopeEmbeddings(
        model="text-embedding-v2",
        dashscope_api_key=os.getenv(
            "DASHSCOPE_API_KEY", "sk-5c6689dccd074a739c78ef7d1d780148"
        ),
    )
    try:
        vector_store = Chroma.from_documents(
            documents=chunks,
            embedding=embeddings,
            persist_directory=persist_dir
        )
        return vector_store
    except Exception as e:
        # Print actionable diagnostics before re-raising to the caller.
        print(f"创建向量数据库失败: {str(e)}")
        print("可能的原因：")
        print("1. 内容块文本为空")
        print("2. 嵌入服务API调用失败")
        print("3. 网络连接问题")
        print("4. 文件权限问题")

        # Preview the first few chunks to aid debugging.
        print("\n内容块预览:")
        for i, chunk in enumerate(chunks[:3]):
            content = chunk.page_content
            preview = content[:100] + "..." if len(content) > 100 else content
            print(f"块 {i + 1}: {preview}")
        raise

def main():
    """Batch-convert every .docx under the source folder into one Chroma
    vector database, removing each intermediate Markdown file afterwards."""
    base_persist_dir = "D:\\hbyt\\project\\aibid\\db\\d1"
    os.makedirs(base_persist_dir, exist_ok=True)
    folder = Path("D:\\hbyt\\AI智能投标\\2025_04_23_Word\\2025_04_23_Word\\Word")
    processed_files = 0
    skipped_files = 0
    # BUG FIX: the skipped-file list was re-created inside the loop body on
    # every skip, so it never held more than the current file.  Accumulate
    # across the whole run instead.
    skipped = []
    for file in folder.rglob("*"):
        # Only real .docx files; "~$..." are Word lock files, not documents.
        if not (file.is_file() and file.suffix.lower() == ".docx"):
            continue
        if file.name.startswith("~$"):
            continue
        docx_path = str(file)
        print("docx文件", docx_path)
        md_path = docx_to_md(docx_path)
        print(f"\n{'=' * 60}")
        print(f"处理文件: {file.name}")
        print(f"完整路径: {md_path}")
        documents = load_md(md_path)
        print("documents", documents)

        # ROBUSTNESS: check emptiness before calling len(), so a loader
        # returning None no longer crashes the batch.
        if not documents:
            print(f"警告: 文档分割后没有内容块 - {file.name}")
            skipped_files += 1
            skipped.append(file)
            print(skipped)
            continue
        print("长度：", len(documents))

        create_vector_store(documents, base_persist_dir)
        # The markdown file is only an intermediate artifact; remove it.
        os.remove(md_path)
        processed_files += 1
        print(skipped)
    print(f"\n处理完成! 成功处理: {processed_files} 个文件, 跳过: {skipped_files} 个文件")


if __name__ == "__main__":
    main()
