
from itertools import islice

from langchain_ollama import OllamaEmbeddings, ChatOllama
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, Settings

# Ollama server endpoint (local default port).
ollama_url = "http://127.0.0.1:11434"

# Configure llama_index's global models.
# NOTE(review): these are langchain_ollama objects, not llama_index's own
# Ollama classes. llama_index's Settings setters wrap langchain models only
# when the langchain integration packages are installed — confirm this works
# in the target environment, or switch to
# llama_index.embeddings.ollama.OllamaEmbedding / llama_index.llms.ollama.Ollama.
Settings.embed_model = OllamaEmbeddings(base_url=ollama_url, model="nomic-embed-text:latest")
Settings.llm = ChatOllama(base_url=ollama_url, model="deepseek-r1:1.5b", temperature=0)

# Load every document found under ./data and build an in-memory vector
# index from them (embeddings computed with Settings.embed_model).
reader = SimpleDirectoryReader("./data")
documents = reader.load_data()
index = VectorStoreIndex.from_documents(documents)

# Grab the underlying vector store from the index's storage context.
vector_store = index.storage_context.vector_store

# NOTE(review): `_data.embedding_dict` is a private attribute of the default
# in-memory SimpleVectorStore — there is no public API to enumerate stored
# embeddings, but this will break with other vector-store backends. Hoisted
# here so the private access happens in exactly one place.
embedding_dict = vector_store._data.embedding_dict

# Number of embedded nodes currently held by the store.
print(f"向量存储中的文档数量: {len(embedding_dict)}")

# Inspect the first three embeddings. islice stops after 3 items instead of
# materializing the whole dict into a list just to slice it.
for i, (node_id, embedding) in enumerate(islice(embedding_dict.items(), 3)):
    print(f"节点 {i + 1} ID: {node_id}")
    print(f"嵌入向量维度: {len(embedding)}")
    print(f"嵌入向量前10维: {embedding[:10]}...")

    # Resolve the node's original text through the docstore so the
    # embedding can be matched back to its source chunk.
    node = index.storage_context.docstore.get_node(node_id)
    print(f"对应文本: {node.text[:100]}...\n")
