##########################
# Server configuration:
##########################

APP_TITLE=OpenGPT

# The server will listen to localhost:3080 by default. You can change the target IP as you want.
# If you want to make this server available externally, for example to share the server with others
# or expose this from a Docker container, set host to 0.0.0.0 or your external IP interface.
# Tip: Setting host to 0.0.0.0 means listening on all interfaces; it is not a real IP.
# Use localhost:port rather than 0.0.0.0:port to access the server.
# Set Node env to development if running in dev mode.
HOST=localhost
PORT=3080

# Note: the following enables user balances, which you can add manually,
# or you will need to build out a balance accruing system for users.
# For more info, see https://docs.librechat.ai/features/token_usage.html
# To manually add balances, run the following command:
# `npm run add-balance`
# You can also specify the email and token credit amount to add, e.g.:
# `npm run add-balance example@example.com 1000`
# This works well to track your own usage for personal use; 1000 credits = $0.001 (1 mill USD)
# Set to true to enable token credit balances for the OpenAI/Plugins endpoints
CHECK_BALANCE=false

# Automated Moderation System
# The Automated Moderation System uses a scoring mechanism to track user violations. As users commit actions
# like excessive logins, registrations, or messaging, they accumulate violation scores. Upon reaching
# a set threshold, the user and their IP are temporarily banned. This system ensures platform security
# by monitoring and penalizing rapid or suspicious activity.
BAN_VIOLATIONS=true # Whether or not to enable banning users for violations (they will still be logged)
BAN_DURATION=1000 * 60 * 60 * 2 # how long the user and associated IP are banned for, in milliseconds (1000 * 60 * 60 * 2 = 2 hours)
BAN_INTERVAL=20 # a user will be banned every time their score reaches/crosses over the interval threshold

# The score for each violation
LOGIN_VIOLATION_SCORE=1
REGISTRATION_VIOLATION_SCORE=1
CONCURRENT_VIOLATION_SCORE=1
MESSAGE_VIOLATION_SCORE=1
NON_BROWSER_VIOLATION_SCORE=20

# Login and registration rate limiting.
LOGIN_MAX=7 # The max number of logins allowed per IP per LOGIN_WINDOW
LOGIN_WINDOW=5 # in minutes, determines the window of time for LOGIN_MAX logins
REGISTER_MAX=5 # The max number of registrations allowed per IP per REGISTER_WINDOW
REGISTER_WINDOW=60 # in minutes, determines the window of time for REGISTER_MAX registrations

# Message rate limiting (per user & IP)
LIMIT_CONCURRENT_MESSAGES=true # Whether to limit the number of messages a user can send per request
CONCURRENT_MESSAGE_MAX=2 # The max number of messages a user can send per request

LIMIT_MESSAGE_IP=true # Whether to limit the number of messages an IP can send per MESSAGE_IP_WINDOW
MESSAGE_IP_MAX=40 # The max number of messages an IP can send per MESSAGE_IP_WINDOW
MESSAGE_IP_WINDOW=1 # in minutes, determines the window of time for MESSAGE_IP_MAX messages

# Note: You can utilize both limiters, but the default is to limit by IP only.
LIMIT_MESSAGE_USER=false # Whether to limit the number of messages a user can send per MESSAGE_USER_WINDOW
MESSAGE_USER_MAX=40 # The max number of messages a user can send per MESSAGE_USER_WINDOW
MESSAGE_USER_WINDOW=1 # in minutes, determines the window of time for MESSAGE_USER_MAX messages

# If you have permission problems, set here the UID and GID of the user running
# the docker compose command. The applications in the container will run with these uid/gid.
UID=1000
GID=1000
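# If you're not sure what your UID/GID are, one way to check them (a sketch assuming a
# standard Linux/macOS shell with the `id` utility) is:
#   id -u   # prints your user ID (UID)
#   id -g   # prints your primary group ID (GID)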
# Change this to proxy any API request.
# It's useful if your machine has difficulty calling the original API server.
# PROXY=

# Change this to your MongoDB URI if different. I recommend appending `LibreChat` as the database name.
MONGO_URI=mongodb://127.0.0.1:27018/LibreChat

##########################
# OpenAI Endpoint:
##########################

# Access key from OpenAI platform.
# Leave it blank to disable this feature.
# Set to "user_provided" to allow the user to provide their API key from the UI.
OPENAI_API_KEY=user_provided
DEBUG_OPENAI=false # Set to true to enable debug mode for the OpenAI endpoint

# Identify the available models, separated by commas *without spaces*.
# The first will be the default.
# Leave it blank to use internal settings.
# OPENAI_MODELS=gpt-3.5-turbo,gpt-3.5-turbo-16k,gpt-3.5-turbo-0301,text-davinci-003,gpt-4,gpt-4-0314,gpt-4-0613

# Titling is enabled by default when initiating a conversation.
# Uncomment the following variable to disable this feature.
# TITLE_CONVO=false

# (Optional) The default model used for titling is gpt-3.5-turbo-0613.
# You can change it by uncommenting the following and setting the desired model.
# Must be compatible with the OpenAI Endpoint.
# OPENAI_TITLE_MODEL=gpt-3.5-turbo

# (Optional/Experimental) Enable message summarization by uncommenting the following:
# Note: this may affect response time when a summary is being generated.
# OPENAI_SUMMARIZE=true

# Not yet implemented: this will be a conversation option enabled by default to save users on tokens.
# We are using the ConversationSummaryBufferMemory method to summarize messages.
# To learn more about this, see this article:
# https://www.pinecone.io/learn/series/langchain/langchain-conversational-memory/

# (Optional) The default model used for summarizing is gpt-3.5-turbo.
# You can change it by uncommenting the following and setting the desired model.
# Must be compatible with the OpenAI Endpoint.
# OPENAI_SUMMARY_MODEL=gpt-3.5-turbo

# Reverse proxy settings for OpenAI:
# https://github.com/waylaidwanderer/node-chatgpt-api#using-a-reverse-proxy
# OPENAI_REVERSE_PROXY=

# (Advanced) Sometimes when using Local LLM APIs, you may need to force the API
# to be called with a `prompt` payload instead of a `messages` payload, to mimic
# a `/v1/completions` request instead of `/v1/chat/completions`.
# This may be the case for LocalAI with some models. To do so, uncomment the following:
# OPENAI_FORCE_PROMPT=true

##########################
# OpenRouter (overrides OpenAI and Plugins Endpoints):
##########################

# OpenRouter is a legitimate proxy service to a multitude of LLMs, both closed and open source, including:
# OpenAI models, Anthropic models, Meta's Llama models, pygmalionai/mythalion-13b,
# and many more open source models. Newer integrations are usually discounted, too!
# Note: this overrides the OpenAI and Plugins Endpoints.
# See ./docs/install/free_ai_apis.md for more info.
# OPENROUTER_API_KEY=

##########################
# AZURE Endpoint:
##########################

# To use Azure with this project, set the following variables. These will be used to build the API URL.
# Chat completion:
# `https://{AZURE_OPENAI_API_INSTANCE_NAME}.openai.azure.com/openai/deployments/{AZURE_OPENAI_API_DEPLOYMENT_NAME}/chat/completions?api-version={AZURE_OPENAI_API_VERSION}`
# You should also consider changing the `OPENAI_MODELS` variable above to the models available in your instance/deployment.
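# For illustration only, with hypothetical values AZURE_OPENAI_API_INSTANCE_NAME=my-instance,
# AZURE_OPENAI_API_DEPLOYMENT_NAME=gpt-35-turbo, and AZURE_OPENAI_API_VERSION=2023-05-15,
# the chat completion URL built from the template above would be:
# https://my-instance.openai.azure.com/openai/deployments/gpt-35-turbo/chat/completions?api-version=2023-05-15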
# Note: I've noticed that the Azure API is much faster than the OpenAI API, so the streaming looks almost instantaneous.
# Note: "AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME" and "AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME" are optional but might be used in the future.

# AZURE_API_KEY=
# AZURE_OPENAI_API_INSTANCE_NAME=
# AZURE_OPENAI_API_DEPLOYMENT_NAME=
# AZURE_OPENAI_API_VERSION=
# AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME=
# AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME=

# Identify the available models, separated by commas *without spaces*.
# The first will be the default.
# Leave it blank to use internal settings.
AZURE_OPENAI_MODELS=gpt-3.5-turbo,gpt-4

# To use Azure with the Plugins endpoint, you need the variables above, and uncomment the following variable:
# NOTE: This may not work as expected and Azure OpenAI may not support OpenAI Functions yet.
# Omit/leave it commented to use the default OpenAI API.
# PLUGINS_USE_AZURE="true"

##########################
# ChatGPT Endpoint:
##########################

# ChatGPT Browser Client (free but use at your own risk)
# Access token from https://chat.openai.com/api/auth/session
# Exposes your access token to `CHATGPT_REVERSE_PROXY`
# Set to "user_provided" to allow the user to provide their token from the UI.
# Leave it blank to disable this endpoint.
CHATGPT_TOKEN=user_provided

# Identify the available models, separated by commas. The first will be the default.
# Leave it blank to use internal settings.
CHATGPT_MODELS=text-davinci-002-render-sha,gpt-4

# NOTE: you can add gpt-4-plugins, gpt-4-code-interpreter, and gpt-4-browsing to the list above and use the models for these features;
# however, the view/display portion of these features is not supported, but you can use the underlying models, which have a higher token context.
# Also: text-davinci-002-render-paid is deprecated as of May 2023.

# Reverse proxy setting for the ChatGPT endpoint:
# https://github.com/waylaidwanderer/node-chatgpt-api#using-a-reverse-proxy
# By default it will use the proxy recommended by node-chatgpt-api (a third-party server).
# CHATGPT_REVERSE_PROXY=

##########################
# BingAI Endpoint:
##########################

# Also used for Sydney and jailbreak
# To get your Access token for Bing, log in to https://www.bing.com
# Use dev tools or an extension while logged into the site to copy the content of the _U cookie.
# If this fails, follow these instructions https://github.com/danny-avila/LibreChat/issues/370#issuecomment-1560382302 to provide the full cookie strings,
# or check out our discord https://discord.com/channels/1086345563026489514/1143941308684177429
# Set to "user_provided" to allow the user to provide their token from the UI.
# Leave it blank to disable this endpoint.
BINGAI_TOKEN=user_provided

# BingAI Host:
# Necessary for some people in different countries, e.g. China (https://cn.bing.com)
# Leave it blank to use the default server.
# BINGAI_HOST=https://cn.bing.com

#############################
# Plugins:
#############################

# Identify the available models, separated by commas *without spaces*.
# The first will be the default.
# Leave it blank to use internal settings.
# PLUGIN_MODELS=gpt-3.5-turbo,gpt-3.5-turbo-16k,gpt-3.5-turbo-0301,gpt-4,gpt-4-0314,gpt-4-0613

DEBUG_PLUGINS=true # Set to false or comment out to disable debug mode for plugins

# For securely storing credentials, you need a fixed key and IV. You can set them here for prod and dev environments.
# If you don't set them, the app will crash on startup.
# You need a 32-byte key (64 characters in hex) and a 16-byte IV (32 characters in hex).
# Use this replit to generate some quickly: https://replit.com/@daavila/crypto#index.js
# Here are some examples (THESE ARE NOT SECURE!)
CREDS_KEY=f34be427ebb29de8d88c107a71546019685ed8b241d8f2ed00c3df97ad2566f0
CREDS_IV=e2341419ec3dd3d19b13a1a87fafcbfb
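# If you would rather generate these values locally than use the replit above, one option
# (a sketch assuming the openssl CLI is installed) is:
#   openssl rand -hex 32   # 64 hex characters -> use as CREDS_KEY
#   openssl rand -hex 16   # 32 hex characters -> use as CREDS_IV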
# AI-Assisted Google Search
# This bot supports searching Google for answers to your questions with assistance from GPT!
# See detailed instructions here: https://github.com/danny-avila/LibreChat/blob/main/docs/features/plugins/google_search.md
GOOGLE_API_KEY=
GOOGLE_CSE_ID=

# StableDiffusion WebUI
# This bot supports StableDiffusion WebUI, using its API to generate requested images.
# See detailed instructions here: https://github.com/danny-avila/LibreChat/blob/main/docs/features/plugins/stable_diffusion.md
# Use "http://127.0.0.1:7860" with a local install and "http://host.docker.internal:7860" for Docker
SD_WEBUI_URL=http://host.docker.internal:7860

# Azure Cognitive Search
# This plugin supports searching Azure Cognitive Search for answers to your questions.
# See detailed instructions here: https://github.com/danny-avila/LibreChat/blob/main/docs/features/plugins/azure_cognitive_search.md
AZURE_COGNITIVE_SEARCH_SERVICE_ENDPOINT=
AZURE_COGNITIVE_SEARCH_INDEX_NAME=
AZURE_COGNITIVE_SEARCH_API_KEY=
AZURE_COGNITIVE_SEARCH_API_VERSION=
AZURE_COGNITIVE_SEARCH_SEARCH_OPTION_QUERY_TYPE=
AZURE_COGNITIVE_SEARCH_SEARCH_OPTION_TOP=
AZURE_COGNITIVE_SEARCH_SEARCH_OPTION_SELECT=

##########################
# PaLM (Google) Endpoint:
##########################

# Follow the instructions here to set it up:
# https://github.com/danny-avila/LibreChat/blob/main/docs/install/apis_and_tokens.md
PALM_KEY=user_provided

# In case you need a reverse proxy for this endpoint:
# GOOGLE_REVERSE_PROXY=

##########################
# Anthropic Endpoint:
##########################

# Access key from https://console.anthropic.com/
# Leave it blank to disable this feature.
# Set to "user_provided" to allow the user to provide their API key from the UI.
# Note that access to claude-1 may potentially become unavailable with the release of claude-2.
ANTHROPIC_API_KEY=user_provided
ANTHROPIC_MODELS=claude-1,claude-instant-1,claude-2

##########################
# Proxy: to be used by all endpoints
##########################

PROXY=

##########################
# Search:
##########################

# ENABLING SEARCH MESSAGES/CONVOS
# Requires the installation of the free self-hosted MeiliSearch or a paid Remote Plan (Remote not tested).
# The easiest setup for this is through docker-compose, which takes care of it for you.
SEARCH=true

# HIGHLY RECOMMENDED: Disable anonymized telemetry analytics for MeiliSearch for absolute privacy.
MEILI_NO_ANALYTICS=true

# REQUIRED FOR SEARCH: MeiliSearch Host, mainly for the API server to connect to the search server.
# Replace '0.0.0.0' with 'meilisearch' if serving MeiliSearch with docker-compose.
MEILI_HOST=http://0.0.0.0:7700

# REQUIRED FOR SEARCH: MeiliSearch HTTP Address, mainly for docker-compose to expose the search server.
# Replace '0.0.0.0' with 'meilisearch' if serving MeiliSearch with docker-compose.
MEILI_HTTP_ADDR=0.0.0.0:7700

# REQUIRED FOR SEARCH: In a production environment, a secure key is needed; you can generate your own.
# This master key must be at least 16 bytes, composed of valid UTF-8 characters.
# MeiliSearch will throw an error and refuse to launch if no master key is provided,
# or if it is under 16 bytes; MeiliSearch will suggest a secure autogenerated master key.
# When using Docker, the environment is treated as production, so use a secure key.
# This is a ready-made secure key for docker-compose; you can replace it with your own.
MEILI_MASTER_KEY=DrhYf7zENyR6AlUCKmnz0eYASOQdl6zxH7s7MKFSfFCt
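# To generate your own master key, one option (again, a sketch assuming the openssl CLI is available) is:
#   openssl rand -hex 32   # 64 UTF-8 characters, well above the 16-byte minimum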
##########################
# User System:
##########################

# Allow Public Registration
ALLOW_REGISTRATION=true

# Allow Social Login
ALLOW_SOCIAL_LOGIN=false

# Allow Social Registration (WORKS ONLY for Google, GitHub, Discord)
ALLOW_SOCIAL_REGISTRATION=false

# JWT Secrets
# You should use secure values. The examples given are 32-byte keys (64 characters in hex).
# Use this replit to generate some quickly: https://replit.com/@daavila/crypto#index.js
JWT_SECRET=16f8c0ef4a5d391b26034086c628469d3f9f497f08163ab9b40137092f2909ef
JWT_REFRESH_SECRET=eaa5191f2914e30b9387fd84e254e4ba6fc51b4654968a9b0803b456a54b8418

# Google:
# Add your Google Client ID and Secret here; you must register an app with Google Cloud to get these values.
# https://cloud.google.com/
GOOGLE_CLIENT_ID=
GOOGLE_CLIENT_SECRET=
GOOGLE_CALLBACK_URL=/oauth/google/callback

# Facebook:
# Add your Facebook Client ID and Secret here; you must register an app with Facebook to get these values.
# https://developers.facebook.com/
FACEBOOK_CLIENT_ID=
FACEBOOK_CLIENT_SECRET=
FACEBOOK_CALLBACK_URL=/oauth/facebook/callback

# OpenID:
# See your OpenID provider to get the values below.
# Create a random string for OPENID_SESSION_SECRET.
# For Azure AD:
# ISSUER: https://login.microsoftonline.com/(tenant id)/v2.0/
# SCOPE: openid profile email
OPENID_CLIENT_ID=
OPENID_CLIENT_SECRET=
OPENID_ISSUER=
OPENID_SESSION_SECRET=
OPENID_SCOPE="openid profile email"
OPENID_CALLBACK_URL=/oauth/openid/callback
# If LABEL and URL are left empty, the default OpenID label and logo are used.
OPENID_BUTTON_LABEL=
OPENID_IMAGE_URL=

# Set the expiration delay for the secure cookie with the JWT token.
# Recommended session expiry is 15 minutes.
# Delay is in milliseconds, e.g. 7 days is 1000 * 60 * 60 * 24 * 7
SESSION_EXPIRY=1000 * 60 * 15
REFRESH_TOKEN_EXPIRY=(1000 * 60 * 60 * 24) * 7

# GitHub:
# Get the Client ID and Secret from your GitHub OAuth App.
# Add your GitHub Client ID and Client Secret here:
GITHUB_CLIENT_ID=your_client_id
GITHUB_CLIENT_SECRET=your_client_secret
GITHUB_CALLBACK_URL=/oauth/github/callback # this should be the same for everyone

# Discord:
# Get the Client ID and Secret from your Discord Application.
# Add your Discord Client ID and Client Secret here:
DISCORD_CLIENT_ID=your_client_id
DISCORD_CLIENT_SECRET=your_client_secret
DISCORD_CALLBACK_URL=/oauth/discord/callback # this should be the same for everyone

###########################
# Application Domains
###########################

# Note:
# Server = Backend
# Client = Public (the client is the URL you visit)
# For the Google login to work in dev mode, you will need to change DOMAIN_SERVER to localhost:3090 or place it in .env.development
DOMAIN_CLIENT=http://localhost:3080
DOMAIN_SERVER=http://localhost:3080

###########################
# Email
###########################

# Email is used for password reset. Note that all 4 values must be set for email to work.
EMAIL_SERVICE=   # e.g. gmail
EMAIL_USERNAME=  # e.g. your email address if using Gmail
EMAIL_PASSWORD=  # e.g. the "app password" if using Gmail
EMAIL_FROM=      # e.g. the address for the "from" field, like noreply@librechat.ai
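# A hypothetical filled-in example for Gmail (placeholder values only):
#   EMAIL_SERVICE=gmail
#   EMAIL_USERNAME=your.account@gmail.com
#   EMAIL_PASSWORD=your-16-character-app-password
#   EMAIL_FROM=noreply@librechat.ai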