# Server Configuration
HOST=0.0.0.0
PORT=9621
# Directory Configuration
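# WORKING_DIR: where LightRAG persists its RAG storage (KV stores, vector data, graph data)
# INPUT_DIR: where documents to be indexed are picked up from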
WORKING_DIR=/app/data/rag_storage
INPUT_DIR=/app/data/inputs
# LLM Configuration (use a valid host; for local services you can use host.docker.internal)
# Ollama example
LLM_BINDING=ollama
LLM_BINDING_HOST=http://host.docker.internal:11434
LLM_MODEL=mistral-nemo:latest
# OpenAI-compatible example
# LLM_BINDING=openai
# LLM_MODEL=deepseek-chat
# LLM_BINDING_HOST=https://api.deepseek.com
# LLM_BINDING_API_KEY=your_api_key
# For OpenAI LLMs (LLM_BINDING_API_KEY takes priority)
# OPENAI_API_KEY=your_api_key
# Lollms example
# LLM_BINDING=lollms
# LLM_BINDING_HOST=http://host.docker.internal:9600
# LLM_MODEL=mistral-nemo:latest
# Embedding Configuration (use a valid host; for local services you can use host.docker.internal)
# Ollama example
EMBEDDING_BINDING=ollama
EMBEDDING_BINDING_HOST=http://host.docker.internal:11434
EMBEDDING_MODEL=bge-m3:latest
# Lollms example
# EMBEDDING_BINDING=lollms
# EMBEDDING_BINDING_HOST=http://host.docker.internal:9600
# EMBEDDING_MODEL=bge-m3:latest
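# OpenAI-compatible example (a sketch: assumes the embedding side accepts the same
# binding names and an EMBEDDING_BINDING_API_KEY variable mirroring the LLM section;
# model, host, and key values are placeholders)
# EMBEDDING_BINDING=openai
# EMBEDDING_MODEL=text-embedding-3-large
# EMBEDDING_BINDING_HOST=https://api.openai.com
# EMBEDDING_BINDING_API_KEY=your_api_key
# Remember to set EMBEDDING_DIM below to match the model (e.g. 3072 for text-embedding-3-large)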
# RAG Configuration
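# MAX_ASYNC: maximum number of concurrent LLM requests
# MAX_TOKENS: maximum number of tokens sent to the LLM (keep below the model's context size)
# EMBEDDING_DIM: output dimension of the embedding model (1024 matches bge-m3)
# MAX_EMBED_TOKENS: maximum number of tokens per embedding request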
MAX_ASYNC=4
MAX_TOKENS=32768
EMBEDDING_DIM=1024
MAX_EMBED_TOKENS=8192
# Security (leave empty to disable API key authentication)
LIGHTRAG_API_KEY=your-secure-api-key-here
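# When a key is set, clients must include it with every API request
# (see the project's API docs for the exact header name)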
# Logging
LOG_LEVEL=INFO
# Optional SSL Configuration
#SSL=true
#SSL_CERTFILE=/path/to/cert.pem
#SSL_KEYFILE=/path/to/key.pem
# Optional timeout in seconds for LLM requests
#TIMEOUT=30
# Optional Azure OpenAI settings (LLM_BINDING_HOST and LLM_BINDING_API_KEY take priority)
# AZURE_OPENAI_API_VERSION=2024-08-01-preview
# AZURE_OPENAI_DEPLOYMENT=gpt-4o
# AZURE_OPENAI_API_KEY=myapikey
# AZURE_OPENAI_ENDPOINT=https://myendpoint.openai.azure.com
# AZURE_EMBEDDING_DEPLOYMENT=text-embedding-3-large
# AZURE_EMBEDDING_API_VERSION=2023-05-15
# Ollama emulation model tag (the tag reported by the server's Ollama-compatible API)
# OLLAMA_EMULATING_MODEL_TAG=latest
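# Usage sketch (the image name and volume layout are illustrative, not authoritative):
#   docker run --env-file .env -p 9621:9621 -v "$(pwd)/data:/app/data" <lightrag-image>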