# from langchain.document_loaders import DirectoryLoader, TextLoader  (deprecated import path, replaced below)
from langchain_community.document_loaders import DirectoryLoader,TextLoader
from langchain.text_splitter import CharacterTextSplitter
from config.config import ChatConfig

# Load and split the local knowledge base.
def load_documents():
    """Load every .txt file under ./knowledge_base and split it into chunks.

    Files are read as UTF-8 via TextLoader, then split on newlines into
    ~500-character chunks with a 50-character overlap so adjacent chunks
    share context for retrieval.

    Returns:
        list: langchain Document chunks ready for embedding.
    """
    loader = DirectoryLoader(
        './knowledge_base',
        glob="**/*.txt",
        loader_cls=TextLoader,
        loader_kwargs={'encoding': 'utf8'}
    )
    documents = loader.load()

    # Log only the document count — dumping the full corpus to stdout is noisy.
    print(f"documents loaded: {len(documents)}")

    # NOTE: do NOT lowercase/strip whitespace wholesale here — removing all
    # spaces glues words together and corrupts non-CJK text.

    # Split documents into overlapping chunks for embedding.
    text_splitter = CharacterTextSplitter(
        separator="\n",
        chunk_size=500,
        chunk_overlap=50
    )
    texts = text_splitter.split_documents(documents)
    return texts

# Build a FAISS vector store from pre-split document chunks.
def create_vector_store(texts, device='cuda'):
    """Embed *texts* with a Chinese text2vec model and index them in FAISS.

    Args:
        texts: list of langchain Document chunks (e.g. from load_documents()).
        device: torch device string for the embedding model — 'cuda',
            'cuda:0', or 'cpu'. Defaults to 'cuda', matching the previous
            hard-coded behavior.

    Returns:
        FAISS: an in-memory FAISS vector store over *texts*.
    """
    # Imported lazily so the heavy embedding/FAISS dependencies are only
    # loaded when a store is actually built.
    # (langchain.embeddings / langchain.vectorstores paths are deprecated.)
    from langchain_community.embeddings import HuggingFaceEmbeddings
    from langchain_community.vectorstores import FAISS

    embeddings = HuggingFaceEmbeddings(
        model_name="shibing624/text2vec-base-chinese",
        model_kwargs={'device': device},
        # batch_size bounds GPU memory use during encoding; the same device
        # is used for encoding as for the model.
        encode_kwargs={'device': device, 'batch_size': 8},
        cache_folder=ChatConfig.cache_folder
    )

    vector_store = FAISS.from_documents(
        documents=texts,
        embedding=embeddings
    )
    return vector_store

def get_vector_store():
    """Build the knowledge-base vector store end to end.

    Loads and splits the knowledge-base documents, then embeds and
    indexes them, returning the resulting FAISS store.
    """
    return create_vector_store(load_documents())

if __name__ == "main":
    v = get_vector_store()
    # 异步
    v.asimilarity_search()
    # 相似性搜索
    v.similarity_search()
    # 需要提供向量
    v.similarity_search_by_vector()
    # 相似性搜索，提供分数
    v.similarity_search_with_relevance_scores()
    v.similarity_search_with_score()
    v.similarity_search_with_score_by_vector()