# Use .env.local to change these variables, or change your environment directly
# DO NOT EDIT THIS FILE WITH SENSITIVE DATA
MONGODB_URL=#your mongodb URL here
MONGODB_DB_NAME=chat-ui
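# Illustration only: a locally running MongoDB instance is usually reachable with a
# connection string like the one below (host and port are assumptions, not values set by this app).
# MONGODB_URL=mongodb://localhost:27017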
COOKIE_NAME=hf-chat
# Increase depending on the model
PUBLIC_MAX_INPUT_TOKENS=1000
PUBLIC_ORIGIN=#https://hf.co
PUBLIC_MODEL_NAME=OpenAssistant/oasst-sft-6-llama-30b # public facing link
PUBLIC_MODEL_ID=OpenAssistant/oasst-sft-6-llama-30b-xor # used to link to model page
PUBLIC_DISABLE_INTRO_TILES=false
PUBLIC_USER_MESSAGE_TOKEN=<|prompter|>
PUBLIC_ASSISTANT_MESSAGE_TOKEN=<|assistant|>
PUBLIC_SEP_TOKEN=</s>
PUBLIC_PREPROMPT="Below are a series of dialogues between various people and an AI assistant. The AI tries to be helpful, polite, honest, sophisticated, emotionally aware, and humble-but-knowledgeable. The assistant is happy to help with almost anything, and will do its best to understand exactly what is needed. It also tries to avoid giving false or misleading information, and it caveats when it isn't entirely sure about the right answer. That said, the assistant is practical and really does its best, and doesn't let caution get too much in the way of being useful."
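# Illustration only (the prompt is assembled by the app at runtime, not by this file):
# with the tokens above, a single exchange is framed roughly as
#   <PUBLIC_PREPROMPT><|prompter|>user message</s><|assistant|>
# i.e. the user/assistant message tokens mark the speakers and the sep token closes each turn.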
PUBLIC_VERSION=0
PUBLIC_GOOGLE_ANALYTICS_ID=#G-XXXXXXXX / Leave empty to disable
# Copy this into .env.local and replace "hf_<token>" with your HF token from https://huggingface.co/settings/token
# You can also change the model from OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5 to your own model
MODEL_ENDPOINTS=`[{
  "endpoint": "https://api-inference.huggingface.co/models/OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
  "authorization": "Bearer hf_<token>",
  "weight": 1
}]`
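# Illustration only: MODEL_ENDPOINTS can hold several entries, with "weight" used to
# weight how often each endpoint is picked relative to the others. The second entry
# below is a hypothetical self-hosted endpoint, not something provided by this template.
# MODEL_ENDPOINTS=`[{
#   "endpoint": "https://api-inference.huggingface.co/models/OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
#   "authorization": "Bearer hf_<token>",
#   "weight": 1
# },{
#   "endpoint": "https://<your-self-hosted-endpoint>",
#   "authorization": "Bearer hf_<token>",
#   "weight": 2
# }]`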