import os
import inspect
from lightrag import LightRAG
from lightrag.llm import openai_complete, openai_embed
from lightrag.utils import EmbeddingFunc
from lightrag.lightrag import always_get_an_event_loop
from lightrag import QueryParam

# Working directory where LightRAG stores its caches, graph, and vector data
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
WORKING_DIR = os.path.join(ROOT_DIR, "dickens")
if not os.path.exists(WORKING_DIR):
    os.mkdir(WORKING_DIR)
print(f"WorkingDir: {WORKING_DIR}")
api_key = "empty"
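
# LightRAG wired to a local OpenAI-compatible endpoint
# (127.0.0.1:1234 is LM Studio's default server address).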
rag = LightRAG(
    working_dir=WORKING_DIR,
    llm_model_func=openai_complete,
    llm_model_name="qwen2.5-14b-instruct@4bit",
    llm_model_max_async=4,
    llm_model_max_token_size=32768,
    llm_model_kwargs={"base_url": "http://127.0.0.1:1234/v1", "api_key": api_key},
    embedding_func=EmbeddingFunc(
        embedding_dim=1024,
        max_token_size=8192,
        func=lambda texts: openai_embed(
            texts=texts,
            model="text-embedding-bge-m3",
            base_url="http://127.0.0.1:1234/v1",
            api_key=api_key,
        ),
    ),
)
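
# Ingest the document: insert() chunks the text, extracts entities and
# relations with the LLM, and builds the knowledge graph and vector index.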
with open("./book.txt", "r", encoding="utf-8") as f:
    rag.insert(f.read())
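
# "hybrid" mode combines LightRAG's local (entity-level) and global
# (topic-level) retrieval; stream=True requests an incremental response.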
resp = rag.query(
    "What are the top themes in this story?",
    param=QueryParam(mode="hybrid", stream=True),
)
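
# With stream=True the query may return an async generator; drain it chunk by chunk.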
async def print_stream(stream):
    async for chunk in stream:
        if chunk:
            print(chunk, end="", flush=True)
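
# Run the consumer on an event loop if we actually got a stream; otherwise
# the result is a plain string and can be printed directly.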
loop = always_get_an_event_loop()
if inspect.isasyncgen(resp):
    loop.run_until_complete(print_stream(resp))
else:
    print(resp)
