from langchain_community.vectorstores import Chroma
from langchain_community.embeddings import QianfanEmbeddingsEndpoint
from langchain_community.document_loaders import Docx2txtLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
import os
os.environ["QIANFAN_AK"]="SGbbQdjFjlKurTfUIjYM0Q4P"
os.environ["QIANFAN_SK"]="lb1tKvDGRhqLZYH4ZYpke6Vco9n9X8Xv"
def parse_markdown_toc(lines):
    """Parse a Markdown bullet-list table of contents into a tree.

    Every line whose stripped form starts with "-" becomes a node
    ``{"title": str, "children": list}``; nesting depth is inferred from
    leading spaces, two spaces per level.

    Args:
        lines: iterable of text lines (trailing newlines are tolerated).

    Returns:
        List of top-level nodes.
    """
    toc = []
    stack = []  # stack[i] == most recently seen node at depth i
    for line in lines:
        if not line.strip().startswith("-"):
            continue  # skip blank lines, headings, and other non-bullets
        # Indentation level: two leading spaces per nesting depth.
        level = (len(line) - len(line.lstrip(" "))) // 2
        # Remove only the LEADING bullet marker. The original
        # line.strip("- ") also stripped '-' and spaces from the right,
        # mangling titles that end with a dash or spaces.
        title = line.strip().lstrip("-").strip()
        node = {"title": title, "children": []}
        if level == 0 or not stack:
            # Top-level entry, or an indented entry before any parent was
            # seen (malformed input) -- treat as top-level instead of
            # raising IndexError on an empty stack.
            toc.append(node)
            stack = [node]
        else:
            # Drop deeper-or-equal siblings so stack[-1] is the parent.
            while len(stack) > level:
                stack.pop()
            stack[-1]["children"].append(node)
            stack.append(node)
    return toc
def traverse_toc(nodes, depth=0):
    """Walk a TOC tree depth-first, printing each entry and running a
    similarity search for its indented "- title" label.

    Args:
        nodes: list of ``{"title": ..., "children": [...]}`` dicts.
        depth: current nesting depth; controls the two-space indent.
    """
    pad = "  " * depth
    for entry in nodes:
        label = f"{pad}- {entry['title']}"
        print(label)
        doc_search(label)
        children = entry["children"]
        if children:
            traverse_toc(children, depth + 1)
def doc_search(query):
    """Return the page content of the document most similar to *query*.

    The embedding endpoint and Chroma store are built once and cached on
    the function object: this is called once per TOC entry, and the
    original code rebuilt both clients on every call.

    Args:
        query: free-text query string.

    Returns:
        The best match's ``page_content``, or ``None`` when the store
        returns no results (the original raised IndexError in that case).
    """
    db = getattr(doc_search, "_db", None)
    if db is None:
        embedding = QianfanEmbeddingsEndpoint()
        db = Chroma(persist_directory="D:\\hbyt\\project\\aibid\\db\\d4",
                    embedding_function=embedding)
        doc_search._db = db
    results = db.similarity_search(query, k=1)
    if not results:
        return None
    print(results[0].page_content)
    return results[0].page_content
if __name__ == "__main__":
    # 替换为你的 Markdown 文件路径
    md_file = "D:\\hbyt\\AI智能投标\\典型招标要求和投标文件\\目录.md"
    with open(md_file, encoding="utf-8") as f:
        lines = f.readlines()
    toc_tree = parse_markdown_toc(lines)
    traverse_toc(toc_tree)