#########
# Uncomment the below two lines if running in a jupyter notebook to handle the async nature of rag.insert()
# import nest_asyncio
# nest_asyncio.apply()
#########



import os
import asyncio
from lightrag import LightRAG, QueryParam
from lightrag.utils import EmbeddingFunc
from lightrag.llm.ollama import ollama_model_complete,ollama_embedding
from lightrag.kg.shared_storage import initialize_pipeline_status
from lightrag.utils import setup_logger

# Directory where LightRAG persists its KV stores, vector data and graph files.
WORKING_DIR = "./dickens"
# Base URL of the Ollama server hosting both the LLM and the embedding model.
ollama_url = "http://192.168.0.188:11434"

# makedirs(..., exist_ok=True) avoids the check-then-create race of
# `if not exists: mkdir`, and also creates missing parent directories.
os.makedirs(WORKING_DIR, exist_ok=True)


setup_logger("lightrag", level="INFO")

async def initialize_rag():
    """Build and initialize a LightRAG instance backed by a local Ollama server.

    Uses ``qwen2.5:14b`` for completions and ``bge-m3`` (dim 1024) for
    embeddings, then brings up the storage backends and the shared
    pipeline status before returning the ready-to-use instance.
    """

    # Wrap the Ollama embedding call so LightRAG sees a plain callable.
    def embed_texts(texts):
        return ollama_embedding(
            texts,
            embed_model="bge-m3:latest",
            host=ollama_url,
        )

    instance = LightRAG(
        working_dir=WORKING_DIR,
        llm_model_func=ollama_model_complete,
        llm_model_name='qwen2.5:14b',
        llm_model_max_async=1,
        llm_model_max_token_size=8192,
        llm_model_kwargs={"options": {"num_ctx": 8192}, "host": ollama_url},
        embedding_func=EmbeddingFunc(
            embedding_dim=1024,
            max_token_size=8192,
            func=embed_texts,
        ),
    )

    await instance.initialize_storages()
    await initialize_pipeline_status()

    return instance

def main():
    """Run one query against the RAG store and print the answer."""
    rag = asyncio.run(initialize_rag())

    # Document ingestion (uncomment to index new content):
    # rag.insert("Your text")
    # with open("./book.txt", "r", encoding="utf-8") as f:
    #     rag.insert(f.read())

    # Retrieval mode. Alternatives: "local", "global", "hybrid", and
    # "mix" (integrates knowledge-graph and vector retrieval).
    mode = "naive"

    query_text = "三体里,叶文洁和杨卫宁的关系"
    answer = rag.query(query_text, param=QueryParam(mode=mode))
    print(answer)


if __name__ == "__main__":
    main()
