# pip install lightrag-hku -i https://pypi.tuna.tsinghua.edu.cn/simple/

# https://github.com/HKUDS/LightRAG/blob/main/lightrag/api/README.md

import os
import asyncio

from lightrag import LightRAG
from lightrag import QueryParam
from lightrag.llm.openai import gpt_4o_mini_complete, openai_embed
from lightrag.llm.ollama import ollama_model_complete, ollama_embed
from lightrag.kg.shared_storage import initialize_pipeline_status
from lightrag.utils import EmbeddingFunc

# Directory where LightRAG persists its KV store, vector data, and graph data.
WORKING_DIR = "./dickens"

async def initialize_rag():
    """Build a LightRAG instance backed by local Ollama models and prepare it.

    Uses ``qwen3:8b`` for text generation and ``bge-m3:latest`` (configured as
    1024-dimensional) for embeddings. Storages and the pipeline status are
    initialized before the instance is returned.

    Returns:
        The fully-initialized LightRAG object, ready for insert/query calls.
    """
    # Embedding function wrapping the local Ollama embedding model.
    embedder = EmbeddingFunc(
        embedding_dim=1024,
        func=lambda texts: ollama_embed(texts, embed_model="bge-m3:latest"),
    )

    rag = LightRAG(
        # Working directory for KV data, vector data, graph data, etc.
        working_dir=WORKING_DIR,
        # Ollama-backed LLM for text generation.
        llm_model_func=ollama_model_complete,
        llm_model_name="qwen3:8b",
        embedding_func=embedder,
    )

    await rag.initialize_storages()
    await initialize_pipeline_status()
    return rag


async def main():
    """Run an interactive query loop against the RAG until the user exits.

    Initializes the RAG, then repeatedly reads a question from stdin and
    prints the hybrid-retrieval answer. Typing "exit" (case-insensitive),
    Ctrl-C, or Ctrl-D ends the session; storages are finalized on the way out.
    """
    rag = None
    try:
        rag = await initialize_rag()

        # To ingest a new document into the RAG store:
        # with open("./stu.txt", "r", encoding="utf-8") as f:
        #     await rag.ainsert(f.read())

        print('请在下方“>>>”处输入查询问题，输入“exit”则退出系统')

        while True:
            # Run the blocking input() in a worker thread so the asyncio
            # event loop is not frozen while waiting for the user.
            query = await asyncio.to_thread(input, ">>> ")
            # Bug fix: the prompt advertises "exit" to quit, but the
            # original loop never checked for it.
            if query.strip().lower() == "exit":
                break
            if not query.strip():
                continue  # ignore empty input instead of querying with it

            # Retrieve an answer from the RAG (hybrid graph+vector mode).
            response = await rag.aquery(
                query,
                param=QueryParam(mode="hybrid", top_k=5, response_type="single line"),
            )
            print(response)

    except (EOFError, KeyboardInterrupt):
        # Ctrl-D / Ctrl-C: treat as a normal exit, not an error.
        pass
    except Exception as e:
        print(f"An error occurred: {e}")
    finally:
        print('Bye~')
        if rag:
            await rag.finalize_storages()


# Script entry point: drive the async main() on a fresh event loop.
if __name__ == "__main__":
    asyncio.run(main())
