import os
from model.my_chat_model import ChatModel
from langchain_community.document_loaders import TextLoader,PyPDFLoader,CSVLoader,Docx2txtLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_chroma import Chroma

def store_file(file_path, store_path, collection_name):
    """Load a document, split it into chunks, and persist them into a Chroma DB.

    Args:
        file_path: Path to the source document (.txt, .csv, .pdf, or .docx).
        store_path: Directory where the Chroma database is persisted.
        collection_name: Name of the Chroma collection to write into.

    Raises:
        ValueError: If the file extension is not one of the supported types.
    """
    os.makedirs(store_path, exist_ok=True)

    # Pick the loader by file extension (case-insensitive, so .TXT etc. work too).
    ext = os.path.splitext(file_path)[1].lower()
    if ext == ".txt":
        loader = TextLoader(file_path, encoding="utf-8")
    elif ext == ".csv":
        loader = CSVLoader(file_path, encoding="utf-8")
    elif ext == ".pdf":
        loader = PyPDFLoader(file_path)
    elif ext == ".docx":
        loader = Docx2txtLoader(file_path)
    else:
        # Previously any unknown extension fell through to the docx loader and
        # failed obscurely inside it; fail fast with a clear message instead.
        raise ValueError(f"Unsupported file type: {ext}")

    # Splitter that prefers structural boundaries before falling back to
    # raw characters, tuned for Chinese text.
    text_splitter = RecursiveCharacterTextSplitter(
        separators=[
            "\n\n",  # between paragraphs
            "\n",  # between lines/sentences
            "。", "！", "？",  # Chinese full stop, exclamation, question mark
            "；", "……", "…",  # Chinese semicolon and ellipses
            " ",  # between words
            ""  # last resort: split on characters
        ],
        chunk_size=300,  # roughly 150-200 Chinese characters per chunk
        chunk_overlap=60,  # keep some surrounding context between chunks
    )
    splitted_docs = loader.load_and_split(text_splitter=text_splitter)
    total = len(splitted_docs)
    print(f"文档分割完毕，一共有{total}条")

    # Embedding model comes from the project's chat-model wrapper.
    chat = ChatModel()
    embedding_model = chat.get_embedding_model()

    # Build the store once, then add documents batch by batch (avoids
    # re-constructing the Chroma client on every iteration).
    chroma = Chroma(
        collection_name=collection_name,
        embedding_function=embedding_model,
        persist_directory=store_path,
        collection_metadata={"hnsw:space": "cosine"},
    )
    batch_size = 10
    for i in range(0, total, batch_size):
        chroma.add_documents(splitted_docs[i:i + batch_size])
        # Report the number of documents stored so far (the old message
        # printed the zero-based batch start index, e.g. "0/N").
        done = min(i + batch_size, total)
        print(f"成功存入第{done}/{total}文档")
    print("所有文档存入成功")

if __name__ == '__main__':
    # Ingest the sample company-context CSV into the local Chroma store.
    store_file(
        file_path="../static/file_csv/companies_context.csv",
        store_path="../chroma_db",
        collection_name="companies_context",
    )
