Pankaj Kaushal committed
Commit 531302d · 1 Parent(s): f1449cf
Moved back to llm dir as per
https://github.com/HKUDS/LightRAG/pull/864#issuecomment-2669705946
- Created two new example scripts demonstrating LightRAG integration with LlamaIndex:
  - `lightrag_llamaindex_direct_demo.py`: direct OpenAI integration
  - `lightrag_llamaindex_litellm_demo.py`: LiteLLM proxy integration
- Both examples showcase the different search modes (naive, local, global, hybrid); see the sketch after this list
- Both include configuration for the working directory, models, and API settings
- Both demonstrate text insertion and querying using LightRAG with LlamaIndex
- Removed the wrapper directory and all references to it
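For orientation, both demos share roughly the following shape. This is a minimal sketch assembled from the diffs below, not a verbatim copy of either script; the exact helper signatures and the `EmbeddingFunc` wiring are assumptions about LightRAG's API at this commit.

import os

from lightrag import LightRAG, QueryParam
from lightrag.llm.llama_index_impl import (
    llama_index_complete_if_cache,
    llama_index_embed,
)
from lightrag.utils import EmbeddingFunc
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding

WORKING_DIR = "./index_default"
if not os.path.exists(WORKING_DIR):
    print(f"Creating working directory: {WORKING_DIR}")
    os.mkdir(WORKING_DIR)


async def llm_model_func(prompt, system_prompt=None, history_messages=[], **kwargs):
    # Route completions through LlamaIndex's OpenAI LLM via the LightRAG helper.
    # Signature assumed from the imports shown in the diff below.
    return await llama_index_complete_if_cache(
        OpenAI(model=os.environ.get("LLM_MODEL", "gpt-4")),
        prompt,
        system_prompt=system_prompt,
        history_messages=history_messages,
        **kwargs,
    )


async def embedding_func(texts):
    # Embed a batch of strings with LlamaIndex's OpenAIEmbedding.
    return await llama_index_embed(
        texts,
        embed_model=OpenAIEmbedding(
            model=os.environ.get("EMBEDDING_MODEL", "text-embedding-3-large")
        ),
    )


rag = LightRAG(
    working_dir=WORKING_DIR,
    llm_model_func=llm_model_func,
    embedding_func=EmbeddingFunc(
        embedding_dim=3072,  # dimension of text-embedding-3-large
        max_token_size=int(os.environ.get("EMBEDDING_MAX_TOKEN_SIZE", 8192)),
        func=embedding_func,
    ),
)

rag.insert("LightRAG combines a knowledge graph with vector retrieval.")
for mode in ["naive", "local", "global", "hybrid"]:
    print(mode, rag.query("What is LightRAG?", param=QueryParam(mode=mode)))

The LiteLLM variant swaps the OpenAI and OpenAIEmbedding objects for their LiteLLM counterparts, pointing at a proxy instead of calling OpenAI directly.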
- examples/{lightrag_api_llamaindex_direct_demo_simplified.py → lightrag_llamaindex_direct_demo.py} +4 -4
- examples/{lightrag_api_llamaindex_litellm_demo_simplified.py → lightrag_llamaindex_litellm_demo.py} +4 -5
- lightrag/{wrapper → llm}/Readme.md +0 -0
- lightrag/{wrapper → llm}/llama_index_impl.py +0 -0
- lightrag/wrapper/__init__.py +0 -0
examples/{lightrag_api_llamaindex_direct_demo_simplified.py → lightrag_llamaindex_direct_demo.py}
RENAMED
    
@@ -1,6 +1,6 @@
 import os
 from lightrag import LightRAG, QueryParam
-from lightrag.wrapper.llama_index_impl import (
+from lightrag.llm.llama_index_impl import (
     llama_index_complete_if_cache,
     llama_index_embed,
 )
@@ -10,14 +10,13 @@ from llama_index.embeddings.openai import OpenAIEmbedding
 import asyncio

 # Configure working directory
-DEFAULT_RAG_DIR = "index_default"
-WORKING_DIR = os.environ.get("RAG_DIR", f"{DEFAULT_RAG_DIR}")
+WORKING_DIR = "./index_default"
 print(f"WORKING_DIR: {WORKING_DIR}")

 # Model configuration
 LLM_MODEL = os.environ.get("LLM_MODEL", "gpt-4")
 print(f"LLM_MODEL: {LLM_MODEL}")
-EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL", "text-embedding-3-…
+EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL", "text-embedding-3-large")
 print(f"EMBEDDING_MODEL: {EMBEDDING_MODEL}")
 EMBEDDING_MAX_TOKEN_SIZE = int(os.environ.get("EMBEDDING_MAX_TOKEN_SIZE", 8192))
 print(f"EMBEDDING_MAX_TOKEN_SIZE: {EMBEDDING_MAX_TOKEN_SIZE}")
@@ -26,6 +25,7 @@ print(f"EMBEDDING_MAX_TOKEN_SIZE: {EMBEDDING_MAX_TOKEN_SIZE}")
 OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "your-api-key-here")

 if not os.path.exists(WORKING_DIR):
+    print(f"Creating working directory: {WORKING_DIR}")
     os.mkdir(WORKING_DIR)
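Everything configurable in the renamed direct demo is read from environment variables, with the defaults visible in the diff above. A hedged sketch of running it with overrides; the variable names come from the diff, while the concrete values are only illustrative placeholders:

import os
import subprocess

# Override the demo's env-var defaults. Names are taken from the diff
# above; the values here are placeholders, not recommendations.
env = dict(
    os.environ,
    LLM_MODEL="gpt-4",
    EMBEDDING_MODEL="text-embedding-3-large",
    EMBEDDING_MAX_TOKEN_SIZE="8192",
    OPENAI_API_KEY="your-api-key-here",
)
subprocess.run(
    ["python", "examples/lightrag_llamaindex_direct_demo.py"],
    env=env,
    check=True,
)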
    	
examples/{lightrag_api_llamaindex_litellm_demo_simplified.py → lightrag_llamaindex_litellm_demo.py}
RENAMED
    
@@ -1,6 +1,6 @@
 import os
 from lightrag import LightRAG, QueryParam
-from lightrag.wrapper.llama_index_impl import (
+from lightrag.llm.llama_index_impl import (
     llama_index_complete_if_cache,
     llama_index_embed,
 )
@@ -10,14 +10,13 @@ from llama_index.embeddings.litellm import LiteLLMEmbedding
 import asyncio

 # Configure working directory
-DEFAULT_RAG_DIR = "index_default"
-WORKING_DIR = os.environ.get("RAG_DIR", f"{DEFAULT_RAG_DIR}")
+WORKING_DIR = "./index_default"
 print(f"WORKING_DIR: {WORKING_DIR}")

 # Model configuration
-LLM_MODEL = os.environ.get("LLM_MODEL", "gpt-…
+LLM_MODEL = os.environ.get("LLM_MODEL", "gpt-4")
 print(f"LLM_MODEL: {LLM_MODEL}")
-EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL", "embedding-…
+EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL", "text-embedding-3-large")
 print(f"EMBEDDING_MODEL: {EMBEDDING_MODEL}")
 EMBEDDING_MAX_TOKEN_SIZE = int(os.environ.get("EMBEDDING_MAX_TOKEN_SIZE", 8192))
 print(f"EMBEDDING_MAX_TOKEN_SIZE: {EMBEDDING_MAX_TOKEN_SIZE}")
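The LiteLLM demo differs mainly in how the LlamaIndex model objects are constructed: both completion and embedding calls go through a LiteLLM proxy, as the LiteLLMEmbedding import in the hunk header above suggests. A minimal sketch, assuming the llama-index-llms-litellm and llama-index-embeddings-litellm packages are installed; the proxy URL, key, and model names are placeholders, not values from the script:

import os

from llama_index.llms.litellm import LiteLLM
from llama_index.embeddings.litellm import LiteLLMEmbedding

# Placeholder proxy endpoint and credentials; adjust to your deployment.
LITELLM_URL = os.environ.get("LITELLM_URL", "http://localhost:4000")
LITELLM_KEY = os.environ.get("LITELLM_KEY", "sk-placeholder")

# Completions routed through the proxy. Forwarding api_base/api_key
# through these wrappers is an assumption about the LiteLLM integrations.
llm = LiteLLM(
    model=os.environ.get("LLM_MODEL", "gpt-4"),
    api_base=LITELLM_URL,
    api_key=LITELLM_KEY,
)

# Embeddings routed through the same proxy.
embed_model = LiteLLMEmbedding(
    model_name=os.environ.get("EMBEDDING_MODEL", "text-embedding-3-large"),
    api_base=LITELLM_URL,
    api_key=LITELLM_KEY,
)

These objects drop into the same llm_model_func / embedding_func wrappers shown in the first sketch.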
    	
lightrag/{wrapper → llm}/Readme.md
RENAMED
File without changes

lightrag/{wrapper → llm}/llama_index_impl.py
RENAMED
File without changes

lightrag/wrapper/__init__.py
DELETED
File without changes