import os

from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings
from llama_index.core.node_parser import TokenTextSplitter, SentenceSplitter
from llama_index.embeddings.dashscope import DashScopeEmbedding, DashScopeTextEmbeddingModels
from llama_index.llms.dashscope import DashScope, DashScopeGenerationModels

# Replace LlamaIndex's default LLM with DashScope (Alibaba Cloud Bailian) Qwen-Max.
Settings.llm = DashScope(model_name=DashScopeGenerationModels.QWEN_MAX, api_key=os.getenv("DASHSCOPE_API_KEY"))

# Replace LlamaIndex's default embedding model with the DashScope embedding model.
Settings.embed_model = DashScopeEmbedding(
    model_name=DashScopeTextEmbeddingModels.TEXT_EMBEDDING_V1,
    # Pass the key explicitly, consistent with the LLM config above
    # (presumably the client would also fall back to the DASHSCOPE_API_KEY
    # env var on its own — passing it makes the dependency explicit).
    api_key=os.getenv("DASHSCOPE_API_KEY"),
)

# Load PDF documents from the local ./data directory.
documents = SimpleDirectoryReader(
    "./data",
    required_exts=[".pdf"],
).load_data()

# Define the node parser: token-based splitting with overlap between chunks
# so context is not cut off at chunk boundaries.
node_parser = TokenTextSplitter(chunk_size=512, chunk_overlap=200)

# Split the documents into nodes.
nodes = node_parser.get_nodes_from_documents(documents)

# Build the index (kept in memory by default).
index = VectorStoreIndex(nodes)

# Alternative one-step construction:
# index = VectorStoreIndex.from_documents(documents=documents, transformations=[SentenceSplitter(chunk_size=512)])

# Persist the index to local files:
# index.storage_context.persist(persist_dir="./doc_emb")

# Get a retriever over the index.
vector_retriever = index.as_retriever(
    similarity_top_k=2  # return the top-2 results
)

# Retrieve ("How good is deepseek v3 at math?").
results = vector_retriever.retrieve("deepseek v3数学能力怎么样？")

# Guard against an empty result set so we don't raise IndexError
# (e.g. when ./data contains no matching PDFs).
if results:
    print(results[0].text)
else:
    print("No results retrieved.")
