# from langchain.document_loaders import DirectoryLoader, TextLoader  -- deprecated; use langchain_community instead
from langchain_community.document_loaders import DirectoryLoader,TextLoader
from langchain.text_splitter import CharacterTextSplitter

# 加载文档
# Load documents from the knowledge base and split them into chunks.
def load_documents(path='./knowledge_base', glob="**/*.txt", encoding='utf8',
                   chunk_size=500, chunk_overlap=50):
    """Load all matching text files under *path* and split them into chunks.

    Args:
        path: Directory scanned for documents (default ``./knowledge_base``).
        glob: Glob pattern selecting which files to load.
        encoding: Text encoding passed through to ``TextLoader``.
        chunk_size: Maximum number of characters per chunk.
        chunk_overlap: Characters shared between consecutive chunks.

    Returns:
        A list of langchain ``Document`` chunks ready for embedding.
    """
    loader = DirectoryLoader(
        path,
        glob=glob,
        loader_cls=TextLoader,
        loader_kwargs={'encoding': encoding}
    )
    documents = loader.load()

    # Split on newlines so chunk boundaries tend to align with paragraphs.
    text_splitter = CharacterTextSplitter(
        separator="\n",
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap
    )
    return text_splitter.split_documents(documents)

# 创建向量数据库
# Build a FAISS vector store from pre-split document chunks.
def create_vector_store(texts, device='cpu', batch_size=8,
                        model_name="shibing624/text2vec-base-chinese",
                        cache_folder="./models"):
    """Embed *texts* with a HuggingFace model and index them in FAISS.

    Args:
        texts: Document chunks (e.g. the output of ``load_documents``).
        device: Torch device for the embedding model — ``'cpu'`` (default)
            or ``'cuda'`` / ``'cuda:0'`` to run on GPU.
        batch_size: Encoding batch size; bounds (V)RAM use during embedding.
        model_name: HuggingFace model id used for sentence embeddings.
        cache_folder: Local directory where the model is cached.

    Returns:
        A ``FAISS`` vector store built from the embedded documents.
    """
    # langchain.embeddings / langchain.vectorstores are deprecated;
    # the langchain_community equivalents are the supported import paths.
    from langchain_community.embeddings import HuggingFaceEmbeddings
    from langchain_community.vectorstores import FAISS

    embeddings = HuggingFaceEmbeddings(
        model_name=model_name,
        model_kwargs={'device': device},
        encode_kwargs={'device': device, 'batch_size': batch_size},
        cache_folder=cache_folder
    )

    vector_store = FAISS.from_documents(
        documents=texts,
        embedding=embeddings
    )
    return vector_store