from langchain_community.document_loaders import Docx2txtLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import Chroma
from langchain_community.embeddings import QianfanEmbeddingsEndpoint
import re
from langchain_core.documents import Document
from pathlib import Path
import os
# NOTE(review): hard-coded API credentials committed to source — these keys
# should be rotated and supplied via the environment or a secrets store
# rather than embedded here.
os.environ["QIANFAN_AK"]="SGbbQdjFjlKurTfUIjYM0Q4P"
os.environ["QIANFAN_SK"]="lb1tKvDGRhqLZYH4ZYpke6Vco9n9X8Xv"
# Root directory of the Word (.docx) documents to index.
folder=Path("D:\\hbyt\\AI智能投标\\2025_04_23_Word\\2025_04_23_Word\\Word")
# folder=Path("D:\\hbyt\\AI智能投标\\典型招标要求和投标文件\\北研2023年\\招标文件\\必选")

# Recursively walk every file under `folder` and index each Word document
# into the Chroma store. The splitter and the embedding endpoint are
# loop-invariant, so build them once instead of once per document chunk.
text_spliter = RecursiveCharacterTextSplitter(
    chunk_size=384,
    chunk_overlap=100,
    length_function=len,
    add_start_index=True,
    separators=["\n\n"]
)
embeddings = QianfanEmbeddingsEndpoint()

for file in folder.rglob("*"):
    # Skip directories, Word lock/temp files ("~$..."), and anything that is
    # not a .docx — Docx2txtLoader raises on other formats.
    if not file.is_file():
        continue
    if file.name.startswith("~$") or file.suffix.lower() != ".docx":
        continue

    path = str(file)
    filename = file.stem  # base name without extension

    # Synthetic Document carrying just the file's title, so the filename
    # itself is searchable alongside the body text.
    # Bug fix: the original used "\n".join(filename), which interleaved a
    # newline between every single character of the name.
    title_docs = [Document(
        page_content=filename,
        metadata={
            "source": path,
            "slide": "0",
            "layout": "slide.slide_layout.name"
        }
    )]
    pages = Docx2txtLoader(path).load()

    # Index the title document and the page documents separately,
    # mirroring the original two-pass behavior.
    for document_group in (title_docs, pages):
        print("document_group", document_group)
        text_documents = text_spliter.split_documents(document_group)
        print("text_documents:", text_documents)
        vectordb = Chroma.from_documents(
            documents=text_documents,
            embedding=embeddings,
            persist_directory="D:\\hbyt\\project\\aibid\\db\\dddd",
        )