import asyncio
import os

import nest_asyncio
from transformers import AutoModel, AutoTokenizer

from lightrag import LightRAG, QueryParam
from lightrag.kg.shared_storage import initialize_pipeline_status
from lightrag.llm.hf import hf_embed
from lightrag.llm.lmdeploy import lmdeploy_model_if_cache
from lightrag.utils import EmbeddingFunc

# Patch the event loop so asyncio.run() also works in environments that
# already have a running loop (e.g. Jupyter notebooks).
nest_asyncio.apply()

WORKING_DIR = "./dickens"

if not os.path.exists(WORKING_DIR):
    os.mkdir(WORKING_DIR)
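# Note: if this script may be launched more than once concurrently,
# os.makedirs(WORKING_DIR, exist_ok=True) is the race-free equivalent of the
# check-then-create above.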


async def lmdeploy_model_complete(
    prompt=None,
    system_prompt=None,
    history_messages=[],
    keyword_extraction=False,  # part of LightRAG's llm_model_func contract; unused here
    **kwargs,
) -> str:
    # LightRAG injects its key-value cache as kwargs["hashing_kv"]; the model
    # name configured on the LightRAG instance is read back from it here.
    model_name = kwargs["hashing_kv"].global_config["llm_model_name"]
    return await lmdeploy_model_if_cache(
        model_name,
        prompt,
        system_prompt=system_prompt,
        history_messages=history_messages,
        # chat_template selects lmdeploy's prompt format; "llama3" matches the
        # Llama 3.x instruct model configured in initialize_rag() below, so
        # change it if you swap in a model with a different chat format.
        chat_template="llama3",
        **kwargs,
    )
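

# Optional smoke test for the wrapper above (hypothetical sketch). LightRAG
# normally supplies the "hashing_kv" kwarg itself; _FakeKV mimics only the
# attribute the wrapper reads. Before uncommenting, verify that your installed
# lmdeploy_model_if_cache tolerates the extra hashing_kv kwarg being forwarded.
#
# class _FakeKV:
#     global_config = {"llm_model_name": "meta-llama/Llama-3.1-8B-Instruct"}
#
# print(asyncio.run(lmdeploy_model_complete("Say hello.", hashing_kv=_FakeKV())))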


async def initialize_rag():
    rag = LightRAG(
        working_dir=WORKING_DIR,
        llm_model_func=lmdeploy_model_complete,
        # Served through lmdeploy; use a local checkpoint path here instead if
        # the model is not fetched from the Hugging Face hub.
        llm_model_name="meta-llama/Llama-3.1-8B-Instruct",
        embedding_func=EmbeddingFunc(
            embedding_dim=384,  # all-MiniLM-L6-v2 outputs 384-dimensional vectors
            max_token_size=5000,
            func=lambda texts: hf_embed(
                texts,
                tokenizer=AutoTokenizer.from_pretrained(
                    "sentence-transformers/all-MiniLM-L6-v2"
                ),
                embed_model=AutoModel.from_pretrained(
                    "sentence-transformers/all-MiniLM-L6-v2"
                ),
            ),
        ),
    )

    # Storage backends and the shared pipeline status must be initialized
    # before any insert or query call.
    await rag.initialize_storages()
    await initialize_pipeline_status()

    return rag


def main():
    # Build the RAG instance (storages and pipeline status included).
    rag = asyncio.run(initialize_rag())

    # Ingest the source text; LightRAG chunks it and extracts the
    # entity/relation graph during insert.
    with open("./book.txt", "r", encoding="utf-8") as f:
        rag.insert(f.read())
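
    # rag.insert also accepts a list of strings for batch ingestion (per the
    # LightRAG README), e.g. rag.insert([text_a, text_b]).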

    # Ask the same question under each retrieval mode to compare answers.
    print("\nNaive Search:")
    print(
        rag.query(
            "What are the top themes in this story?", param=QueryParam(mode="naive")
        )
    )

    print("\nLocal Search:")
    print(
        rag.query(
            "What are the top themes in this story?", param=QueryParam(mode="local")
        )
    )

    print("\nGlobal Search:")
    print(
        rag.query(
            "What are the top themes in this story?", param=QueryParam(mode="global")
        )
    )

    print("\nHybrid Search:")
    print(
        rag.query(
            "What are the top themes in this story?", param=QueryParam(mode="hybrid")
        )
    )
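
    # LightRAG also exposes a "mix" mode combining knowledge-graph and vector
    # retrieval; a sketch, assuming the mode name matches your installed
    # version:
    # print(
    #     rag.query(
    #         "What are the top themes in this story?", param=QueryParam(mode="mix")
    #     )
    # )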


if __name__ == "__main__":
    main()