# smu-ai / .env.example — example environment configuration
# (last change by dh-mc: "update default env vars", commit 3cecdfd)
# Select the LLM backend. Exactly one LLM_MODEL_TYPE line should be active;
# the rest stay commented out as a menu of supported values.
# LLM_MODEL_TYPE=openai
# LLM_MODEL_TYPE=gpt4all-j
# LLM_MODEL_TYPE=gpt4all
# LLM_MODEL_TYPE=llamacpp
# LLM_MODEL_TYPE=ctransformers
# LLM_MODEL_TYPE=huggingface
# LLM_MODEL_TYPE=mosaicml
# LLM_MODEL_TYPE=stablelm
# LLM_MODEL_TYPE=openllm
# Default backend: Hugging Face Text Generation Inference (uses HFTGI_SERVER_URL below).
LLM_MODEL_TYPE=hftgi
# Endpoint consulted when LLM_MODEL_TYPE=openllm
OPENLLM_SERVER_URL=http://localhost:64300
# Endpoint consulted when LLM_MODEL_TYPE=hftgi — here an ngrok tunnel to a remote TGI server
HFTGI_SERVER_URL=https://enabled-factually-cougar.ngrok-free.app
# OpenAI credentials — only needed when LLM_MODEL_TYPE=openai
OPENAI_API_KEY=
# if unset, default to "gpt-3.5-turbo"
OPENAI_MODEL_NAME=
# cpu, mps or cuda:0 - if unset, use whatever detected
HF_EMBEDDINGS_DEVICE_TYPE=
HF_PIPELINE_DEVICE_TYPE=
# uncomment one of the below to load corresponding quantized model
# LOAD_QUANTIZED_MODEL=4bit
# LOAD_QUANTIZED_MODEL=8bit
# USE_LLAMA_2_PROMPT_TEMPLATE=true
# Application toggles (true/false strings read by the app)
DISABLE_MODEL_PRELOADING=true
CHAT_HISTORY_ENABLED=false
SHOW_PARAM_SETTINGS=false
SHARE_GRADIO_APP=false
# if unset, default to "hkunlp/instructor-xl"
HF_EMBEDDINGS_MODEL_NAME="hkunlp/instructor-large"
# number of cpu cores - used to set n_threads for GPT4ALL & LlamaCpp models
NUMBER_OF_CPU_CORES=
# Token for gated Hugging Face models (e.g. the meta-llama repos below)
HUGGINGFACE_AUTH_TOKEN=
USING_TORCH_BFLOAT16=true
# Alternative Hugging Face model choices — uncomment exactly one
# HUGGINGFACE_MODEL_NAME_OR_PATH="databricks/dolly-v2-3b"
# HUGGINGFACE_MODEL_NAME_OR_PATH="databricks/dolly-v2-7b"
# HUGGINGFACE_MODEL_NAME_OR_PATH="databricks/dolly-v2-12b"
# LLM_MODEL_TYPE must be set to huggingface
# HUGGINGFACE_MODEL_NAME_OR_PATH="TheBloke/wizardLM-7B-HF"
# HUGGINGFACE_MODEL_NAME_OR_PATH="TheBloke/vicuna-7B-1.1-HF"
# HUGGINGFACE_MODEL_NAME_OR_PATH="nomic-ai/gpt4all-j"
# HUGGINGFACE_MODEL_NAME_OR_PATH="nomic-ai/gpt4all-falcon"
# HUGGINGFACE_MODEL_NAME_OR_PATH="lmsys/fastchat-t5-3b-v1.0"
HUGGINGFACE_MODEL_NAME_OR_PATH="meta-llama/Llama-2-7b-chat-hf"
# HUGGINGFACE_MODEL_NAME_OR_PATH="meta-llama/Llama-2-13b-chat-hf"
# HUGGINGFACE_MODEL_NAME_OR_PATH="meta-llama/Llama-2-70b-chat-hf"
# HUGGINGFACE_MODEL_NAME_OR_PATH="Qwen/Qwen-7B-Chat"
# Per-backend default models (used when the matching LLM_MODEL_TYPE is selected)
STABLELM_MODEL_NAME_OR_PATH="OpenAssistant/stablelm-7b-sft-v7-epoch-3"
MOSAICML_MODEL_NAME_OR_PATH="mosaicml/mpt-7b-instruct"
FALCON_MODEL_NAME_OR_PATH="tiiuae/falcon-7b-instruct"
# Local GGML model files for the offline backends; each *_DOWNLOAD_LINK is
# fetched when the corresponding *_MODEL_PATH is missing.
# NOTE(review): GPT4ALL_J_MODEL_PATH points at a q4_K_M file while its
# download link fetches the q4_0 variant — confirm which quantization is intended.
GPT4ALL_J_MODEL_PATH="../models/llama-2-7b-chat.ggmlv3.q4_K_M.bin"
GPT4ALL_J_DOWNLOAD_LINK=https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/resolve/main/llama-2-7b-chat.ggmlv3.q4_0.bin
GPT4ALL_MODEL_PATH="../models/ggml-nous-gpt4-vicuna-13b.bin"
GPT4ALL_DOWNLOAD_LINK=https://gpt4all.io/models/ggml-nous-gpt4-vicuna-13b.bin
LLAMACPP_MODEL_PATH="../models/llama-2-7b-chat.ggmlv3.q4_K_M.bin"
LLAMACPP_DOWNLOAD_LINK=https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/resolve/main/llama-2-7b-chat.ggmlv3.q4_K_M.bin
CTRANSFORMERS_MODEL_PATH="../models/llama-2-7b-chat.ggmlv3.q4_K_M.bin"
CTRANSFORMERS_DOWNLOAD_LINK=https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/resolve/main/llama-2-7b-chat.ggmlv3.q4_K_M.bin
# Sample questions for the chat and QA modes.
# Fix: removed the stray leading "\" that made this an invalid assignment —
# bash treated the line as a command and dotenv parsers rejected it, so
# CHAT_QUESTION was never set.
CHAT_QUESTION="What's the capital city of Malaysia?"
QA_QUESTION="What's deep learning?"
# File of questions for batch runs.
QUESTIONS_FILE_PATH="./data/questions.txt"
# Silences the Hugging Face tokenizers fork-parallelism warning.
TOKENIZERS_PARALLELISM=true
# env variables for ingesting source PDF files
# NOTE(review): "CHUNCK_SIZE" is misspelled ("CHUNK") — confirm which spelling
# the ingestion code actually reads before renaming it here.
CHUNCK_SIZE=1024
CHUNK_OVERLAP=64
# NOTE(review): path says "smu_lib_html" under data/pdfs — verify the directory name.
SOURCE_PATH="data/pdfs/smu_lib_html/"
# Base URL prepended to source links in citations; leave empty to disable.
PDF_FILE_BASE_URL=
# Index for SMU Library Chatbot PDF files - chunk_size=1024 chunk_overlap=512
FAISS_INDEX_PATH="data/smu_lib_index/"
# telegram bot
RUN_TELEGRAM_BOT=false
TELEGRAM_API_TOKEN=
# template for env/tgi.conf
# These are exported (unlike the vars above) so the TGI/ngrok launcher
# processes inherit them from the shell environment.
export PORT=64300
export NGROK_AUTHTOKEN=
export NGROK_EDGE=
# Shared Hugging Face model cache so downloads are reused across runs.
export HUGGINGFACE_HUB_CACHE=$HOME/.cache/huggingface/hub/
export HUGGING_FACE_HUB_TOKEN=