import os

from llama_index.core import Settings, SimpleDirectoryReader, VectorStoreIndex
from llama_index.core.node_parser import TokenTextSplitter, SentenceSplitter
from llama_index.embeddings.dashscope import DashScopeEmbedding, DashScopeTextEmbeddingModels
from llama_index.llms.dashscope import DashScope, DashScopeGenerationModels

# Configure the LLM used for answer generation (Qwen-Plus via DashScope).
# The API key is read from the DASHSCOPE_API_KEY environment variable.
Settings.llm = DashScope(
    model_name=DashScopeGenerationModels.QWEN_PLUS,
    api_key=os.getenv("DASHSCOPE_API_KEY"),
)
# Configure the embedding model used to vectorize documents and queries.
Settings.embed_model = DashScopeEmbedding(
    model_name=DashScopeTextEmbeddingModels.TEXT_EMBEDDING_V1,
    api_key=os.getenv("DASHSCOPE_API_KEY"),
)

# Load local files: every .pdf in the sibling ./data directory.
documents = SimpleDirectoryReader("./data", required_exts=['.pdf']).load_data()

# NOTE(review): the original code also built a TokenTextSplitter
# (chunk_size=512, chunk_overlap=200) and computed `nodes` from the
# documents, but those nodes were never used anywhere in this file —
# the index below is built directly from `documents` with its own
# SentenceSplitter — so that dead work (a full tokenization pass over
# every document) has been removed.



# Build the vector index (held in memory by default), splitting each
# document into 512-token chunks at sentence boundaries before embedding.
index = VectorStoreIndex.from_documents(
    documents=documents,
    transformations=[SentenceSplitter(chunk_size=512)],
)

# Persist the index (docstore, index store, vector store) to a local
# directory so it can be reloaded later without re-embedding.
index.storage_context.persist(persist_dir="./doc_emb")

# Retriever returning the 2 chunks most similar to the query.
retriever = index.as_retriever(similarity_top_k=2)

results = retriever.retrieve("deepseek-V3 数学能力怎样?")
# Guard against an empty result set so we don't crash with IndexError.
if results:
    print(results[0].text)
else:
    print("No matching chunks retrieved.")
