# coding   : utf-8
# @FileName : test04.py
# @Author   : 刘汕汕
# @Time     : 2025/3/24 09:10
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core import Settings,SimpleDirectoryReader,VectorStoreIndex
from llama_index.llms.huggingface import HuggingFaceLLM

# --- Embedding model -------------------------------------------------------
# Sentence-transformers model loaded from a local path; local_files_only
# prevents any attempt to download from the Hugging Face hub.
embedding_model = HuggingFaceEmbedding(
    model_name="/mnt/workspace/llm/sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2",
    local_files_only=True,
)
Settings.embed_model = embedding_model

# --- Local LLM -------------------------------------------------------------
# Qwen-1.8B-Chat served from disk; trust_remote_code is required because the
# Qwen repo ships custom model/tokenizer code.
local_llm = HuggingFaceLLM(
    model_name="/mnt/workspace/llm/Qwen/Qwen-1_8B-Chat",
    tokenizer_name="/mnt/workspace/llm/Qwen/Qwen-1_8B-Chat",
    model_kwargs={"trust_remote_code": True},
    tokenizer_kwargs={"trust_remote_code": True},
)
Settings.llm = local_llm

# --- Ingest documents and build the vector index ---------------------------
# Read every file under the data directory into memory, then embed the
# chunks (via Settings.embed_model) into a vector store index.
reader = SimpleDirectoryReader("/mnt/workspace/llama-index/data-sources/data")
vector_index = VectorStoreIndex.from_documents(reader.load_data())

# Persist the index (embeddings + metadata) to the default local storage dir.
vector_index.storage_context.persist()

# --- Query -----------------------------------------------------------------
engine = vector_index.as_query_engine()
response = engine.query("南通-工行消费分期什么情况下选这个")
print(f"得到的结果：{response}")