import time
from idlelib.colorizer import prog_group_name_to_tag
from pathlib import Path
import re
from pathlib import Path
from docx.shared import Cm
from docx.shared import Inches
import csv
from langchain_community.vectorstores import Chroma
import re
from langchain_community.embeddings import DashScopeEmbeddings
import os
from openai import OpenAI
from docx import Document
from docx.oxml.ns import qn
from docx.shared import Pt

# WARNING(review): hardcoded Qianfan credentials committed to source —
# presumably a Baidu Qianfan access-key/secret-key pair. These should be
# loaded from the environment or a secrets store, and the keys rotated.
os.environ["QIANFAN_AK"] = "SGbbQdjFjlKurTfUIjYM0Q4P"
os.environ["QIANFAN_SK"] = "lb1tKvDGRhqLZYH4ZYpke6Vco9n9X8Xv"


def convert_markdown_headings(md_content, base_prefix="3.1.1"):
    """
    Number markdown headings relative to a base section prefix.

    ``# `` headings are numbered starting from the last segment of
    ``base_prefix`` (e.g. with ``"3.1.1"`` the first ``# `` heading becomes
    ``3.1.1 Title``, the next ``3.1.2 ...``); ``## ``/``### ``/``#### ``
    headings extend the number of their *current* parent heading
    (``3.1.1.1``, ``3.1.1.1.1``, ...). The leading ``#`` markers are dropped
    from the output; non-heading lines pass through unchanged.

    :param md_content: markdown document text
    :param base_prefix: dotted base number, e.g. ``"3.1.1"``
    :return: converted document text
    """
    prefix_parts = base_prefix.split('.')

    # "Next number to hand out" at each heading level.
    next1 = int(prefix_parts[-1]) if prefix_parts else 1
    next2 = next3 = next4 = 1
    # Numbers currently in effect for the parent levels. Previously the
    # already-incremented counters were used directly, so a "## " heading
    # under "3.1.1" was numbered "3.1.2.1" instead of "3.1.1.1" (the inline
    # comments always described the -1 semantics).
    cur1, cur2, cur3 = next1, next2, next3

    # Everything of the prefix except its last segment, e.g. "3.1.".
    base_str = '.'.join(prefix_parts[:-1]) + '.' if len(prefix_parts) > 1 else ""

    output_lines = []
    for line in md_content.split('\n'):
        if line.startswith('# '):
            cur1 = next1
            output_lines.append(f"{base_str}{cur1} {line[2:].strip()}")
            next1 += 1
            next2 = 1  # reset level-2 numbering
        elif line.startswith('## '):
            cur2 = next2
            output_lines.append(f"{base_str}{cur1}.{cur2} {line[3:].strip()}")
            next2 += 1
            next3 = 1  # reset level-3 numbering
        elif line.startswith('### '):
            cur3 = next3
            output_lines.append(f"{base_str}{cur1}.{cur2}.{cur3} {line[4:].strip()}")
            next3 += 1
            next4 = 1  # reset level-4 numbering
        elif line.startswith('#### '):
            output_lines.append(
                f"{base_str}{cur1}.{cur2}.{cur3}.{next4} {line[5:].strip()}")
            next4 += 1
        else:
            # Non-heading lines are kept verbatim. (The previous version also
            # printed the whole accumulated output on every iteration — an
            # O(n^2) debug leftover, now removed.)
            output_lines.append(line)
    return '\n'.join(output_lines)


def is_leaf_title(idx, lines, current_level):
    """Return True when the title at *idx* has no deeper title right below it.

    Indentation depth is two spaces per level; only the immediately
    following line can make a title a non-leaf.
    """
    if idx + 1 >= len(lines):
        return True  # last line of the outline — trivially a leaf
    following = lines[idx + 1]
    following_level = (len(following) - len(following.lstrip(" "))) // 2
    return following_level <= current_level

def doc_llm_generation2(con):
    """Send *con* to the deepseek-v3 model via DashScope and return its reply text.

    The system prompt (in Chinese, part of runtime behavior — left untouched)
    instructs the model to extract, verbatim and unsummarized, the section of
    ``source_content`` whose heading matches ``query``, in docx (non-markdown)
    style.

    :param con: user message of the form "query:...\\nsource_content:\\n..."
    :return: the model's reply (str)
    """
    # NOTE(review): redundant local imports — `os` and `OpenAI` are already
    # imported at module level.
    import os
    from openai import OpenAI
    # WARNING(review): hardcoded API key committed to source; should come
    # from the environment / a secrets store, and the key rotated.
    os.environ["DASHSCOPE_API_KEY"] = "sk-5c6689dccd074a739c78ef7d1d780148"
    client = OpenAI(
        api_key=os.getenv("DASHSCOPE_API_KEY"),
        # DashScope's OpenAI-compatible endpoint.
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1"
    )
    completion = client.chat.completions.create(
        model="deepseek-v3",
        messages=[
            {"role": "system", "content": '''
            第一步：根据user的值，一个是query，一个是source_content.
            第二步：根据query去source_content找相同的章节标题或者是相似的章节标题，如果找到一级标题，那就返回一级标题的所有内容，包括二级标题的所有内容，三级标题的所有内容等；如果找到是二级标题也是按照这种逻辑；如果是三级标题也是这种思路，等等以此类推。
            第三步：你所返回的所有内容格式不得变动，输入是怎么样的，就返回什么样的内容，不得遗漏内容，不得缺失内容。
            第四步：输出格式是docx文件的风格,禁止用markdown风格输出，除了上述要求，其他的不要修改，也不要做总结,
            '''},
            {"role": "user", "content": con},
        ],
    )
    # Return only the text of the first (and only) choice.
    return completion.choices[0].message.content

def search_doc_content(query):
    """Vector-search the local Chroma store for *query*.

    Returns the top-10 matching Documents (ranked) when the best hit clears
    a rough similarity threshold of 0.5, otherwise ``None`` (also when the
    store returns no hits at all).
    """
    # WARNING(review): hardcoded API key committed to source; should come
    # from the environment / a secrets store, and the key rotated.
    embeddings = DashScopeEmbeddings(
        model="text-embedding-v2",
        dashscope_api_key="sk-5c6689dccd074a739c78ef7d1d780148",
    )
    db = Chroma(persist_directory="D:\\hbyt\\project\\aibid\\db\\d1", embedding_function=embeddings)
    # One scored search instead of the previous two independent searches
    # (plain k=10 plus scored k=1) — halves the embed+search round-trips.
    scored = db.similarity_search_with_score(query, k=10)
    if not scored:
        return None
    _, best_score = scored[0]
    # Convert distance to a rough similarity in (0, 1].
    similarity = 1 / (1 + best_score)
    if similarity > 0.5:
        print(f"相似度（近似值）：{similarity:.4f}")
        return [doc for doc, _ in scored]
    return None
def set_run_font(run, font_name, font_size_pt, bold=True):
    """Apply font family (incl. the east-Asian slot), size and weight to *run*."""
    font = run.font
    font.name = font_name  # also materialises the rPr/rFonts XML element
    font.size = Pt(font_size_pt)
    font.bold = bold
    # python-docx does not copy `name` into the CJK slot; set it explicitly.
    run._element.rPr.rFonts.set(qn('w:eastAsia'), font_name)


def add_title(doc, text):
    """Append *text* to *doc* as a bold 14 pt (四号) SimSun title paragraph."""
    heading_run = doc.add_paragraph().add_run(text)
    heading_run.font.name = '宋体'
    # Mirror the font into the east-Asian slot so CJK glyphs render in SimSun.
    heading_run._element.rPr.rFonts.set(qn('w:eastAsia'), '宋体')
    heading_run.font.size = Pt(14)  # "四号" size in Chinese typography
    heading_run.bold = True



def write_csv_to_docx(doc, csv_path):
    """Append the contents of the CSV file at *csv_path* to *doc* as a bordered table."""

    def _fill_cell(cell, text, font_size=10.5):
        # Table text in SimSun, mirrored into the east-Asian font slot.
        run = cell.paragraphs[0].add_run(text)
        run.font.name = '宋体'
        run._element.rPr.rFonts.set(qn('w:eastAsia'), '宋体')
        run.font.size = Pt(font_size)

    # BOM-tolerant read of the whole CSV.
    with open(csv_path, 'r', encoding='utf-8-sig') as f:
        rows = list(csv.reader(f))
    if not rows:
        print("CSV 文件为空")
        return
    # Column count follows the first row.
    table = doc.add_table(rows=len(rows), cols=len(rows[0]))
    table.style = 'Table Grid'
    for row_index, row in enumerate(rows):
        for col_index, value in enumerate(row):
            _fill_cell(table.cell(row_index, col_index), value, font_size=10.5)
def _has_section_number(text):
    """True when *text* starts with a dotted section number like '1.2.3 '."""
    return bool(re.match(r'^\d+(\.\d+)*\s+', text.strip()))


def _replace_section_number(line, new_number):
    """Replace a leading section number (e.g. '1.1.1' or '1.1.1.') with *new_number*."""
    m = re.match(r'^(\s*)(\d+(?:\.\d+)*)([.\s]+)(.*)', line)
    if m:
        indent, _, sep, rest = m.groups()
        return f"{indent}{new_number}{sep}{rest}"
    return line


def _increment_section_number(number_str: str) -> str:
    """Increment the last segment of a dotted number ('3.2.1' -> '3.2.2').

    :raises ValueError: if any segment is not an integer.
    """
    if not number_str.strip():
        return "1"
    parts = number_str.strip().split(".")
    try:
        parts = [int(p) for p in parts]
    except ValueError:
        raise ValueError("编号中必须全部是数字组成，如 '1.2.3'")
    parts[-1] += 1
    return ".".join(str(p) for p in parts)


def markdown_to_docx(md_text, doc, title):
    """Append *md_text* to *doc*, converting LLM output to docx content.

    Lines carrying a leading section number become bold titles, renumbered
    sequentially starting from the number embedded in *title*; markdown
    image links (.png/.jpeg) are inserted as pictures, .csv links as tables,
    and all other lines become indented body paragraphs.

    Fixes vs. previous version: the three helpers are no longer re-defined
    on every loop iteration (hoisted to module level above), and the dead
    ``match.group(1)`` expression statement is gone.

    :param md_text: markdown-ish text produced by the LLM
    :param doc: python-docx Document being built
    :param title: outline title whose leading number seeds the renumbering
    """
    match = re.match(r'^(\d+(?:\.\d+)*)(?:\.|\s)', title.strip())
    # As before, an un-numbered title raises AttributeError here; callers
    # are expected to pass numbered outline titles.
    numbers = [match.group(1)]
    for line in md_text.splitlines():
        if not line.strip():
            continue  # skip blank lines
        # Image links — .png takes priority over .jpeg, as before.
        img = (re.search(r'!\[.*?\]\(([^)]+\.png)\)', line)
               or re.search(r'!\[.*?\]\(([^)]+\.jpeg)\)', line))
        if img:
            img_path = img.group(1)
            try:
                doc.add_picture(img_path, width=Inches(5))
            except Exception as e:
                doc.add_paragraph(f"[图片插入失败: {img_path}] 错误: {e}")
            continue
        # CSV links become tables.
        csv_match = re.search(r'\[.*?\]\(([^)]+\.csv)\)', line)
        if csv_match:
            write_csv_to_docx(doc, Path(csv_match.group(1)))
            continue
        if _has_section_number(line):
            current = numbers[-1]
            # Overwrite the LLM's numbering with our running number.
            add_title(doc, _replace_section_number(line, current))
            numbers.append(_increment_section_number(str(current)))
        else:
            para = doc.add_paragraph(line)
            # First-line indent of roughly two CJK characters.
            para.paragraph_format.first_line_indent = Pt(21)

def count_heading_level(line):
    """Return the number of leading '#' characters after stripping indentation."""
    stripped = line.lstrip()
    # Length delta after removing the leading run of '#'.
    return len(stripped) - len(stripped.lstrip('#'))

def reduce_one_heading_hash(line):
    """Demote a markdown heading by one level, keeping at least one '#'.

    Leading indentation is dropped and exactly one space separates the
    hashes from the heading text, mirroring the original normalisation.
    """
    stripped = line.lstrip()
    body = stripped.lstrip('#')
    hash_count = len(stripped) - len(body)
    demoted = max(1, hash_count - 1)  # never drop below level 1
    return f"{'#' * demoted} {body.lstrip()}"

def read_first_line(md_path):
    """Normalise the heading depth of the markdown file at *md_path*.

    Returns:
      * ``"全部返回"`` when the file's content starts with at most one ``#``
        (top heading already level 1, or the text does not begin with a
        heading) — caller should use the content as-is;
      * the rewritten content (every heading demoted one level, file
        rewritten in place) when the content starts with a deeper heading;
      * ``None`` when the file contains no ``#`` at all.

    Fixes vs. previous version: the old code read the whole file and then
    iterated over its *characters* as if they were lines (``for l in
    f.read()``) — all that loop established was whether a ``#`` occurs
    anywhere — and it re-opened the same file three extra times.
    """
    with open(md_path, 'r', encoding='utf-8') as f:
        content = f.read()
    if '#' not in content:
        return None  # mirrors the old implicit None when no '#' was found
    # Heading level of the very start of the content (0 if it isn't a heading).
    if count_heading_level(content) <= 1:
        return "全部返回"
    reduced = '\n'.join(
        reduce_one_heading_hash(line) if line.lstrip().startswith('#') else line
        for line in content.split('\n')
    )
    with open(md_path, 'w', encoding='utf-8') as f:
        f.write(reduced)
    return reduced

def write_outline_to_docx(md_path, output_path):
    """Fill the docx at *output_path* with LLM-generated content for each leaf title.

    Reads the outline at *md_path* (two-space indentation per level); for
    every leaf title it vector-searches the knowledge base, asks the LLM to
    extract the matching section, renumbers its headings to fit the outline
    number, and renders the result into the document.

    Fixes vs. previous version: ``search_doc_content`` is called once per
    title instead of twice (each call is a full embed+search round-trip),
    ``temp.md`` is only removed when it was actually created (previously a
    title without a number raised FileNotFoundError), and ``first is None``
    replaces ``first == None``.
    """
    doc = Document(output_path)
    doc.add_page_break()
    with open(md_path, 'r', encoding='utf-8') as f:
        lines = list(f)
    for idx, line in enumerate(lines):
        level = (len(line) - len(line.lstrip(" "))) // 2
        title = line.strip()
        if not is_leaf_title(idx, lines, level):
            continue
        # Title text with its leading section number stripped.
        query = re.sub(r'^\d+(\.\d+)*\s*', '', title)
        hits = search_doc_content(query)
        if hits is not None:
            with open(hits[0].metadata["source_md"], "r", encoding="utf-8") as f:
                source = f.read()
            template = "query:" + query + "\n" + "source_content:" + "\n" + source
            print("字数：", len(template))
            # Guard against the model's context limit; truncate oversized prompts.
            if len(template) > 57344:
                content = doc_llm_generation2(template[:20000])
            else:
                content = doc_llm_generation2(template)
        else:
            content = "无"
        match = re.search(r"\b(\d+(?:\.\d+)*)", title)
        converted_content = ""
        if match:
            # read_first_line works on a file, so round-trip through temp.md.
            with open("temp.md", 'w', encoding='utf-8') as f:
                f.write(content)
            first = read_first_line("temp.md")
            if first == "全部返回" or first is None:
                converted_content = convert_markdown_headings(content, match.group(1))
            else:
                converted_content = convert_markdown_headings(first, match.group(1))
            os.remove("temp.md")  # only created inside this branch
        else:
            print("未找到编号")
        body = doc.add_paragraph()
        # markdown_to_docx writes into doc directly and returns None.
        body_run = body.add_run(markdown_to_docx(converted_content, doc, title))
        set_run_font(body_run, '宋体', 10.5, bold=True)
    doc.save(output_path)
# Usage example — guarded so importing this module no longer triggers the
# full document-generation pipeline (network calls, file writes).
if __name__ == "__main__":
    write_outline_to_docx(
        "D:\\hbyt\\AI智能投标\\典型投标标书目录\\word\\常规目录\\标准目录\\投标文件_技术服务方案部分_邮储银行.md",
        "D:\\hbyt\\AI智能投标\\典型投标标书目录\\word\\常规目录\\标准目录\\AI生成标书\\投标文件_技术服务方案部分_邮储银行.docx")

