from sentence_transformers import SentenceTransformer, util
import os
import torch

# Initialize the sentence-transformer embedding model.
# Alternative (Windows) model paths kept for reference:
# model_path = r'D:\work\LLM\sentence-transformers\all-MiniLM-L6-v2'  # smallest model
# model_path = r'D:\work\LLM\sentence-transformers\msmarco-distilbert-base-v4'  # larger model
model_path = r'/home/fangning/work/LLM/models/sentence-transformers/msmarco-distilbert-base-v4'  # larger model
print(f"加载模型路径: {model_path}")
model = SentenceTransformer(model_path)

# Directory where the document embedding tensor is persisted (see save_vectors).
vectors_dir = r'./vector_data'


# 从文件中读取文本并生成向量
def load_knowledge(directory):
    """Read every .txt file under *directory* and return its non-blank lines.

    Each non-blank line becomes one searchable "document" for the vector
    index. Blank lines are skipped: splitting on '\\n' would otherwise yield
    empty strings (e.g. from a trailing newline) that get embedded and
    pollute similarity search with meaningless entries.

    Args:
        directory: Folder containing UTF-8 encoded .txt knowledge files.

    Returns:
        list[str]: Non-blank lines from all .txt files, in os.listdir order.
    """
    documents = []
    print(f"加载知识目录: {directory}")
    for filename in os.listdir(directory):
        if filename.endswith('.txt'):
            file_path = os.path.join(directory, filename)
            print(f"读取文件: {file_path}")
            with open(file_path, 'r', encoding='utf-8') as file:
                # Drop blank/whitespace-only lines so they never reach encode().
                content = [line for line in file.read().split('\n') if line.strip()]
                documents.extend(content)
                # Bug fix: the message previously printed the literal
                # "(unknown)" instead of the actual file path.
                print(f"Loaded content from {file_path}: {content}")
    return documents


def generate_vectors(documents):
    """Encode *documents* (list of strings) into a tensor of embeddings."""
    print("生成向量...")
    return model.encode(documents, convert_to_tensor=True)


def save_vectors(vectors, directory):
    """Persist *vectors* to '<directory>/vectors.pt', creating the directory.

    Args:
        vectors: Tensor (or any torch-serializable object) to save.
        directory: Target folder; created (with parents) if missing.
    """
    # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
    os.makedirs(directory, exist_ok=True)
    file_path = os.path.join(directory, 'vectors.pt')
    torch.save(vectors, file_path)
    print(f"向量已保存到: {file_path}")


def load_vectors(directory):
    """Load the tensor previously written by save_vectors.

    Args:
        directory: Folder that should contain 'vectors.pt'.

    Returns:
        The deserialized tensor.

    Raises:
        FileNotFoundError: If '<directory>/vectors.pt' does not exist.
    """
    file_path = os.path.join(directory, 'vectors.pt')
    # Guard clause: fail fast with an explicit message when the file is absent.
    if not os.path.exists(file_path):
        raise FileNotFoundError(f"没有找到向量文件: {file_path}")
    loaded = torch.load(file_path)
    print(f"向量已从: {file_path} 加载")
    return loaded


# 基于用户查询进行相似度搜索，返回多个相关结果
def search(query, documents, vectors, threshold=0.4):
    print(f"查询: {query}")
    query_vector = model.encode(query, convert_to_tensor=True)
    scores = util.pytorch_cos_sim(query_vector, vectors)[0]
    results = [(documents[i], score.item()) for i, score in enumerate(scores) if score > threshold]

    # 输出所有相似度分数
    print("相似度分数：", [(documents[i], score.item()) for i, score in enumerate(scores)])

    results.sort(key=lambda x: x[1], reverse=True)  # 按相似度排序
    return results


def main():
    """Interactive loop: build/load the vector index, then answer queries.

    Reads .txt files from ./knowledge, embeds them, persists the embeddings,
    and repeatedly prompts the user until 'quit' is entered.
    """
    documents = load_knowledge(r'./knowledge')

    # Generate embeddings, persist them, then reload from disk.
    save_vectors(generate_vectors(documents), vectors_dir)
    vectors = load_vectors(vectors_dir)

    print("知识库加载完成，输入'quit'退出")
    while True:
        user_input = input("请输入您的问题: ")
        if user_input.lower() == 'quit':
            print("程序结束")
            break

        hits = search(user_input, documents, vectors)
        if not hits:
            print("没有找到相关文档")
            continue

        for doc_text, sim in hits:
            print(f"相关文档: {doc_text}（相似度: {sim:.4f}）")

        # If the query appears verbatim in a hit, treat the hit's first
        # whitespace-separated token as a name and dump every document
        # mentioning that name.
        for doc_text, _ in hits:
            if user_input in doc_text:
                name = doc_text.split()[0]
                print(f"匹配信息 ({name}):")
                for entry in documents:
                    if name in entry:
                        print(entry)


if __name__ == "__main__":
    main()
