import os

from langchain_community.chat_models import ChatTongyi
from llama_index.core import (
    Settings,
    SimpleDirectoryReader,
    VectorStoreIndex,
    load_index_from_storage,
)
from llama_index.core.storage.storage_context import StorageContext
from llama_index.embeddings.dashscope import (
    DashScopeEmbedding,
    DashScopeTextEmbeddingModels,
    DashScopeTextEmbeddingType,
)
from llama_index.llms.langchain import LangChainLLM

#词嵌入模型
# Settings.embed_model = DashScopeEmbedding(
#     model_name=DashScopeTextEmbeddingModels.TEXT_EMBEDDING_V3,
#     text_type=DashScopeTextEmbeddingType.TEXT_TYPE_DOCUMENT,
#     api_key="sk-f97e3654139742a4b01a99631628d36d"
# )

embed_model = DashScopeEmbedding(
    model_name=DashScopeTextEmbeddingModels.TEXT_EMBEDDING_V3,
    text_type=DashScopeTextEmbeddingType.TEXT_TYPE_DOCUMENT,
    api_key="sk-f97e3654139742a4b01a99631628d36d"
)

Settings.llm = LangChainLLM(
    ChatTongyi(model="qwen-plus", api_key="sk-f97e3654139742a4b01a99631628d36d")
)

#加载文档
documents = SimpleDirectoryReader(r"D:\Code\sshcode\llamaindex").load_data()
#创建索引
index = VectorStoreIndex.from_documents(documents,embed_model=embed_model)
#创建检索器
# retriever = index.as_retriever()
# res = retriever.retrieve("怎么退款")
# for r in res:
#     print(r.node.text)

from llama_index.core.storage.storage_context import StorageContext
from llama_index.core import load_index_from_storage

index.storage_context.persist(persist_dir="./index") #构建索引持久化目录
storage_context = StorageContext.from_defaults(persist_dir="./index") #加载索引持久化文件
new_index = load_index_from_storage(storage_context,embed_model=embed_model)
new_retriever = new_index.as_retriever()
new_res = new_retriever.retrieve("怎么退款")
for r in new_res:
    print(r.node.text)