from docx.shared import Pt, RGBColor
from pathlib import Path
from docx.shared import Inches
import csv
from langchain_community.vectorstores import Chroma
from langchain_community.embeddings import DashScopeEmbeddings
import os
from docx import Document
from docx.oxml.ns import qn
from docx.shared import Pt

# SECURITY(review): hard-coded Qianfan credentials committed to source — these
# should be rotated and loaded from the environment or a secrets store instead.
os.environ["QIANFAN_AK"] = "SGbbQdjFjlKurTfUIjYM0Q4P"
os.environ["QIANFAN_SK"] = "lb1tKvDGRhqLZYH4ZYpke6Vco9n9X8Xv"
def convert_markdown_headings(md_content, base_prefix):
    """Number Markdown headings sequentially under *base_prefix*.

    With base_prefix="3.1", the first "#" heading becomes "3.1.1", a
    following "##" becomes "3.1.1.1", and so on.  Non-heading lines pass
    through unchanged.
    """
    # Seed the counters with the base prefix plus a trailing 0 so the first
    # heading increments to ".1"; pad generously for deep nesting.
    seed = [int(part) for part in base_prefix.strip().split('.')] + [0]
    depth = len(seed)
    counters = seed + [0] * (100 - depth)

    heading_re = re.compile(r'^(#{1,100})\s+(.*)')
    numbered = []
    for raw in md_content.splitlines():
        m = heading_re.match(raw.strip())
        if not m:
            numbered.append(raw)
            continue
        # Counter slot for this heading level, offset under the base prefix.
        pos = depth + len(m.group(1)) - 2
        counters[pos] += 1
        counters[pos + 1:] = [0] * (len(counters) - pos - 1)  # reset deeper levels
        prefix = '.'.join(map(str, counters[:pos + 1]))
        numbered.append(f"{prefix} {m.group(2).strip()}")

    return '\n'.join(numbered)


def is_leaf_title(idx, lines, current_level):
    """Return True when the outline title at *idx* has no children.

    A title is a leaf unless the line immediately after it is indented
    deeper (two spaces per outline level).
    """
    following = lines[idx + 1:]
    if not following:
        return True  # last line of the outline
    nxt = following[0]
    next_depth = (len(nxt) - len(nxt.lstrip(" "))) // 2
    return next_depth <= current_level

def doc_llm_generation(con):
    """Draft bid-document body text for the outline entry *con* using the
    DashScope-hosted deepseek-v3 model; returns the generated text."""
    import os
    from openai import OpenAI
    # SECURITY(review): API key is hard-coded — should come from configuration.
    os.environ["DASHSCOPE_API_KEY"] = "sk-5c6689dccd074a739c78ef7d1d780148"
    system_prompt = '''
            你是一个专业写投标标书人员的小助手，
            投标公司是"慧博运通"，
            你需要根据user的content，传入的是投标标书的目录，根据这个目录去写内容，内容在500字内，纯文本输出，不要表格内容，正文第一行写"DeepSeek写"。
            输出风格为docx，禁止markdown输出。
            '''
    llm_client = OpenAI(
        api_key=os.getenv("DASHSCOPE_API_KEY"),
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    )
    response = llm_client.chat.completions.create(
        model="deepseek-v3",
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": con},
        ],
    )
    return response.choices[0].message.content

def doc_llm_generation2(con):
    """Extract the chapter matching a query from reference markdown via the
    DashScope-hosted deepseek-v3 model; returns the model's output verbatim."""
    import os
    from openai import OpenAI
    # SECURITY(review): API key is hard-coded — should come from configuration.
    os.environ["DASHSCOPE_API_KEY"] = "sk-5c6689dccd074a739c78ef7d1d780148"
    system_prompt = '''
            第一步：根据user的值，一个是query，一个是source_content.
            第二步：根据query去source_content找相同的章节标题或者是相似的章节标题，如果找到一级标题，那就返回一级标题的所有内容，包括二级标题的所有内容，三级标题的所有内容等；如果找到是二级标题也是按照这种逻辑；如果是三级标题也是这种思路，等等以此类推。
            第三步：你所返回的所有内容格式不得变动，输入是怎么样的，就返回什么样的内容，不得遗漏内容，不得缺失内容，禁止自由发挥，不要  做你的总结。
            第四步：输出格式是docx文件的风格,禁止用markdown风格输出，除了上述要求，其他的不要修改，也不要做总结,
            '''
    llm_client = OpenAI(
        api_key=os.getenv("DASHSCOPE_API_KEY"),
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    )
    response = llm_client.chat.completions.create(
        model="deepseek-v3",
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": con},
        ],
    )
    return response.choices[0].message.content


def search_doc_content(query):
    """Look up *query* in the local Chroma vector store.

    Returns the top-10 matching documents when the single best match is
    similar enough (approximate similarity > 0.5), otherwise None.
    """
    # SECURITY(review): embedding API key is hard-coded — move to configuration.
    embeddings = DashScopeEmbeddings(
        model="text-embedding-v2",
        dashscope_api_key="sk-5c6689dccd074a739c78ef7d1d780148",
    )
    store = Chroma(persist_directory="D:\\hbyt\\project\\aibid\\db\\d1", embedding_function=embeddings)
    candidates = store.similarity_search(query, k=10)
    best = store.similarity_search_with_score(query, k=1)
    for _doc, distance in best:
        # Rough conversion of a distance score into a 0-1 similarity figure.
        similarity = 1 / (1 + distance)
        if similarity > 0.5:
            print(f"相似度（近似值）：{similarity:.4f}")
            return candidates
        return None
    # No hits at all: fall through (implicitly returns None, like the else path).

def set_run_font(run, font_name, font_size_pt):
    """Apply *font_name* (including the East-Asian variant) and a point size
    to a python-docx run."""
    run.font.size = Pt(font_size_pt)
    run.font.name = font_name
    # python-docx's ``font.name`` only sets the Latin font; the East-Asian
    # font name must be written into the rPr/rFonts element directly.
    run._element.rPr.rFonts.set(qn('w:eastAsia'), font_name)

def add_title(doc, text, style):
    """Append *text* as a heading paragraph in the given built-in style,
    rendered as bold, non-italic, black, 14pt SimSun."""
    paragraph = doc.add_paragraph(style=style)
    heading_run = paragraph.add_run(text)
    heading_run.bold = True
    heading_run.italic = False  # some heading styles default to italic
    heading_run.font.name = '宋体'
    # Set the East-Asian font explicitly so Chinese text renders in SimSun.
    heading_run._element.rPr.rFonts.set(qn('w:eastAsia'), '宋体')
    heading_run.font.size = Pt(14)  # 四号 (Chinese "size 4") = 14pt
    heading_run.font.color.rgb = RGBColor(0, 0, 0)  # force black, not theme color



def write_csv_to_docx(doc, csv_path):
    """Render the CSV file at *csv_path* as a grid table appended to *doc*.

    Every cell is written in 10.5pt SimSun.  Empty files are skipped with a
    console message.
    """
    def set_cell_font(cell, text, font_size=10.5):
        # Write text with an explicit East-Asian font so Chinese renders in
        # SimSun rather than the document default.
        run = cell.paragraphs[0].add_run(text)
        run.font.name = '宋体'
        run._element.rPr.rFonts.set(qn('w:eastAsia'), '宋体')
        run.font.size = Pt(font_size)

    with open(csv_path, 'r', encoding='utf-8-sig') as f:
        reader = list(csv.reader(f))
    if not reader:
        print("CSV 文件为空")
        return

    # BUG FIX: size the table to the widest row.  Previously only the first
    # row's width was used, so a longer later row raised IndexError.
    n_cols = max(len(row) for row in reader)
    table = doc.add_table(rows=len(reader), cols=n_cols)
    table.style = 'Table Grid'

    for row_idx, row in enumerate(reader):
        for col_idx, cell_value in enumerate(row):
            set_cell_font(table.cell(row_idx, col_idx), cell_value, font_size=10.5)

def markdown_to_docx(md_text, doc, title):
    """Append the markdown-ish text *md_text* to *doc*.

    Line handling, in order of precedence:
    - image references (markdown links or bare Windows paths to
      png/jpeg/wmf) are inserted as 5-inch-wide pictures;
    - markdown links to ``.csv`` files become grid tables;
    - lines starting with a dotted section number ("1.2.3 ...") become
      headings whose level equals the number of dotted components;
    - everything else is body text in 10.5pt SimSun with a first-line indent.

    *title* is accepted for interface compatibility but is not used.
    (The original extracted its section number into an unused local and
    crashed on un-numbered titles; that dead code has been removed.)
    """
    # Ordered image patterns — the first match on a line wins, mirroring the
    # original cascade: md-link png, md-link jpeg, lazy md-link png, bare
    # Windows .wmf path, bare Windows .jpeg path.
    image_patterns = (
        re.compile(r'!\[.*?\]\(([^)]+\.png)\)'),
        re.compile(r'!\[.*?\]\(([^)]+\.jpeg)\)'),
        re.compile(r'!\[.*?\]\((.*?\.png)\)'),
        re.compile(r'([A-Za-z]:\\(?:[^\\\n]+\\)*[^\\\n]+\.wmf)'),
        re.compile(r'([A-Za-z]:\\(?:[^\\\n]+\\)*[^\\\n]+\.jpeg)'),
    )
    csv_pattern = re.compile(r'\[.*?\]\(([^)]+\.csv)\)')
    section_pattern = re.compile(r'^\d+(\.\d+)*\s+')
    heading_map = {str(i): f"Heading {i}" for i in range(1, 10)}

    for line in md_text.splitlines():
        if not line.strip():
            continue  # skip blank lines

        img_path = None
        for pat in image_patterns:
            m = pat.search(line)
            if m:
                img_path = m.group(1)
                break
        if img_path is not None:
            try:
                doc.add_picture(img_path, width=Inches(5))
            except Exception as e:
                doc.add_paragraph(f"[图片插入失败: {img_path}] 错误: {e}")
            continue

        csv_match = csv_pattern.search(line)
        if csv_match:
            write_csv_to_docx(doc, Path(csv_match.group(1)))
            continue

        if section_pattern.match(line.strip()):
            # Heading depth = number of dotted components in the leading number.
            number_match = re.match(r'^(\d+(\.\d+)*)', line)
            if number_match:
                depth = number_match.group(1).count('.') + 1
                # BUG FIX: add_title was previously called even when the number
                # match failed, referencing an unbound ``count`` variable.
                add_title(doc, line, heading_map.get(str(depth)))
            else:
                print("未找到编号")
        else:
            body = doc.add_paragraph(style='Normal')
            body_run = body.add_run(line)
            set_run_font(body_run, '宋体', 10.5)
            body.paragraph_format.first_line_indent = Pt(21)  # ~2-char indent

def count_heading_level(line):
    """Return the number of leading '#' characters (the Markdown heading level)."""
    text = line.lstrip()
    level = 0
    for ch in text:
        if ch != '#':
            break
        level += 1
    return level

def reduce_one_heading_hash(line):
    # NOTE(review): dead code — this definition is shadowed by a later,
    # behaviorally identical redefinition of reduce_one_heading_hash in this
    # file; only the later one is ever called.
    """Demote a Markdown heading by one '#' level (never below level 1)."""
    stripped = line.lstrip()
    level = count_heading_level(stripped)
    new_level = max(1, level - 1)
    content = stripped[level:].strip()
    return f"{'#' * new_level} {content}\n"
def get_first_heading_hash_count(md_lines):
    """Return the '#' count of the first heading line, or 0 when there is none."""
    for raw in md_lines:
        candidate = raw.lstrip()
        if not candidate.startswith('#'):
            continue
        return len(candidate) - len(candidate.lstrip('#'))
    return 0

def reduce_one_heading_hash(line):
    """Demote a Markdown heading by one '#', but never below a single '#'."""
    text = line.lstrip()
    hashes = len(text) - len(text.lstrip('#'))
    body = text[hashes:].strip()
    demoted = hashes - 1 if hashes > 1 else 1
    return f"{'#' * demoted} {body}\n"

def reduce_headings_once(md_lines):
    """Demote every heading line by one level; other lines pass through unchanged."""
    return [
        reduce_one_heading_hash(raw) if raw.lstrip().startswith('#') else raw
        for raw in md_lines
    ]

def process_until_level_one(md_path):
    """Repeatedly demote every heading in *md_path* (rewriting it in place)
    until the shallowest heading is a single '#'.

    Files with no headings, or whose first heading is already level 1, are
    left untouched.  Rewritten iteratively instead of recursively — each
    pass re-reads the file, so a loop is equivalent and avoids needless
    stack growth.
    """
    while True:
        with open(md_path, 'r', encoding='utf-8') as f:
            lines = f.readlines()
        # Stop once the first heading is level 1 (or there are no headings).
        if get_first_heading_hash_count(lines) <= 1:
            return
        with open(md_path, 'w', encoding='utf-8') as f:
            f.writelines(reduce_headings_once(lines))

def extract_and_remove_first_heading(text):
    """Drop the first non-blank line of *text* if it is a '#' heading.

    Leading blank lines are skipped.  If the first substantive line is not a
    heading (or there are no lines), *text* is returned unchanged.
    (The unused ``heading`` local from the original has been removed.)
    """
    lines = text.splitlines()
    for i, line in enumerate(lines):
        if line.strip() == "":
            continue  # ignore leading blank lines
        if re.match(r'^\s*#+\s+.*', line):
            # Rejoin everything except the heading line itself.
            return '\n'.join(lines[:i] + lines[i + 1:])
        break  # first substantive line is not a heading — leave text alone
    return text

import re

def process_title_recursive(title, idx, a, titles, j):
    """Fetch reference content for outline entry *title* from the vector store.

    The leading section number is stripped from *title* and the remainder is
    used as the query.  The j-th search hit's source markdown is handed to
    the LLM; if the headings it returns overlap with *titles* (sections
    already used elsewhere), the next hit is tried recursively.  On success
    the new headings are appended to *titles* and the content (minus its
    first heading line) is returned; returns None when nothing usable exists.
    (*idx* and *a* are unused but kept for interface compatibility.)
    """
    query_title = re.sub(r'^\d+(\.\d+)*\s*', '', title)
    result = search_doc_content(query_title)

    if result is None:
        return None
    # BUG FIX: stop instead of raising IndexError once every hit is exhausted.
    if j >= len(result):
        return None

    with open(result[j].metadata["source_md"], "r", encoding="utf-8") as f:
        md_content = f.read()

    template = f"query:{query_title}\nsource_content:\n{md_content}"
    text = doc_llm_generation2(template)

    returned_titles = [
        m.group(1).strip()
        for m in re.finditer(r'^\s*#+\s+(.+)', text, re.MULTILINE)
    ]
    if set(titles) & set(returned_titles):
        # Overlap with already-written sections: try the next search hit.
        # BUG FIX: the recursive result was previously discarded (missing
        # ``return``), so retries always produced None.
        return process_title_recursive(title, idx, a, titles, j + 1)

    titles.extend(returned_titles)
    return extract_and_remove_first_heading(text)




def write_outline_to_docx(md_path, template_path, output_path):
    """Build the bid document from an outline.

    Reads the "-" bullet lines of *md_path* as an outline, writes each entry
    as a numbered heading into a copy of *template_path*, generates body
    content for every leaf entry, and saves the document to *output_path*.

    Leaf content strategy:
    - sections numbered 1 or 2 get a fixed placeholder ("投标人员书写部分");
    - other sections are looked up in the vector store and retrieved via
      ``process_title_recursive``;
    - when no similar reference exists, the LLM drafts the content.
    The content is round-tripped through ``temp.md`` to normalize heading
    depth and renumber headings under the entry's section prefix.
    """
    doc = Document(template_path)
    doc.add_page_break()  # content starts on a fresh page after the template

    with open(md_path, 'r', encoding='utf-8') as f:
        lines = [line for line in f if line.strip().startswith("-")]

    titles = []  # headings already emitted by the LLM, used to avoid duplicates
    # Hoisted out of the loop — it was rebuilt on every iteration.
    heading_map = {str(i): f"Heading {i}" for i in range(1, 10)}

    for idx, line in enumerate(lines):
        level = (len(line) - len(line.lstrip(" "))) // 2  # 2 spaces per outline level
        title = line.strip("- ").strip()

        match = re.match(r'^(\d+(\.\d+)*)', title)
        if match:
            depth = match.group(1).count('.') + 1
            add_title(doc, title, heading_map.get(str(depth)))
        else:
            print("未找到编号")

        if is_leaf_title(idx, lines, level):
            content = ''
            match2 = re.match(r'^(\d+)', title)
            # BUG FIX: match2 was previously dereferenced without a None check
            # and crashed on un-numbered titles; those now fall through to the
            # retrieval/LLM branch instead.
            if match2 and int(match2.group(1)) <= 2:
                # Chapters 1-2 are written manually by the bid staff.
                content = "投标人员书写部分"
            else:
                query = re.sub(r'^\d+(\.\d+)*\s*', '', title)
                if search_doc_content(query) is not None:
                    content = process_title_recursive(title, idx, [idx], titles, 0)
                else:
                    match3 = re.search(r'^\s*[\d.]+\s+(.*)', title)
                    content = doc_llm_generation(match3.group(1))

            match1 = re.search(r"\b(\d+(?:\.\d+)*)", title)
            converted_content = ""
            temp_written = False
            if match1:
                # Round-trip through temp.md: demote headings to start at one
                # '#', then renumber them under this entry's section prefix.
                with open("temp.md", 'w', encoding='utf-8') as f:
                    f.write(str(content))  # content may be None; str() mirrors original behavior
                temp_written = True
                process_until_level_one("temp.md")
                with open("temp.md", 'r', encoding='utf-8') as f:
                    converted_content = convert_markdown_headings(f.read(), match1.group(1))
            else:
                print("未找到编号")
            markdown_to_docx(converted_content, doc, title)
            # BUG FIX: the file was previously removed unconditionally, which
            # raised FileNotFoundError whenever it had not been written.
            if temp_written:
                os.remove("temp.md")
        # Save after every entry so a crash mid-run keeps partial progress.
        doc.save(output_path)


# Example usage — guarded so importing this module no longer triggers a full
# document-generation run as a side effect.
if __name__ == "__main__":
    write_outline_to_docx(
        "D:\\hbyt\\AI智能投标\\生成标书文件\\标书目录\\投标文件-技术部分-天翼视联6.12终版.md",
        "D:\\hbyt\\AI智能投标\\生成标书文件\\标书模板\\template1.docx",
        "D:\\hbyt\\AI智能投标\\生成标书文件\\生成标书\\投标文件4.docx")
