from langchain_ollama import OllamaEmbeddings, ChatOllama
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings
from llama_index.core.node_parser import SentenceSplitter
from llama_index.embeddings.huggingface import HuggingFaceEmbedding

# Configure the Ollama service endpoint
ollama_url = "http://127.0.0.1:11434"

def detailed_index_demo(data_dir: str = "./data", query: str = "总结主要内容"):
    """Build a vector index over the documents in *data_dir* and run one query.

    Parameters
    ----------
    data_dir : str
        Directory of documents for ``SimpleDirectoryReader`` to load.
        Defaults to ``"./data"`` (the original hard-coded path).
    query : str
        Question sent to the query engine as an end-to-end sanity check.

    Returns
    -------
    VectorStoreIndex
        The index built from the loaded documents.
    """
    # Split documents into overlapping chunks: small chunks keep retrieval
    # granular, while the overlap preserves context across chunk boundaries.
    node_parser = SentenceSplitter(
        chunk_size=512,
        chunk_overlap=20,
    )

    # Configure the global models used by llama_index.
    # NOTE(review): these are LangChain classes assigned to llama_index
    # Settings — this only resolves correctly when the optional
    # llama-index-*-langchain bridge packages are installed. Consider the
    # native llama_index.embeddings.ollama / llama_index.llms.ollama
    # classes instead — confirm with the project's dependency set.
    Settings.embed_model = OllamaEmbeddings(base_url=ollama_url, model="nomic-embed-text:latest")
    Settings.llm = ChatOllama(base_url=ollama_url, model="deepseek-r1:1.5b", temperature=0)

    # Load all documents from the target directory.
    documents = SimpleDirectoryReader(data_dir).load_data()
    print(f"加载了 {len(documents)} 个文档")

    # 1. Split the documents into text nodes.
    nodes = node_parser.get_nodes_from_documents(documents)
    print(f"分割成 {len(nodes)} 个文本块")

    # 2. Build the index (embedding and vector storage happen automatically).
    index = VectorStoreIndex(nodes)

    # 3. Run one query to verify the pipeline works end to end.
    query_engine = index.as_query_engine()
    response = query_engine.query(query)
    print(f"回答: {response}")

    return index


# Run the demo only when executed as a script; importing this module must
# not trigger document loading or network calls to the Ollama service.
if __name__ == "__main__":
    index = detailed_index_demo()