|
|
|
|
|
|
|
|
|
# ============================================================
# Environment configuration (dotenv-style KEY=value pairs).
# NOTE(review): despite the .ini hint there are no [sections];
# these keys are read as environment variables by the app —
# do not rename a key without updating the code that reads it.
# Blank values must be filled in before running.
# ============================================================

# Selects the LLM backend. NOTE(review): the backend-specific keys below
# suggest accepted values include openai, huggingface, stablelm, mosaicml,
# falcon, gpt4all-j, gpt4all, llamacpp — confirm against the loader code.
LLM_MODEL_TYPE=huggingface







# --- OpenAI backend (used when LLM_MODEL_TYPE selects OpenAI) ---
# Secret credential — keep this file out of version control when populated.
OPENAI_API_KEY=




# OpenAI model identifier (e.g. a GPT chat model) — left blank by default.
OPENAI_MODEL_NAME=




# --- HuggingFace device placement ---
# Device for the embeddings model; presumably cpu / cuda / mps — TODO confirm
# the accepted strings against the code that reads it.
HF_EMBEDDINGS_DEVICE_TYPE=

# Device for the text-generation pipeline; same accepted values as above.
HF_PIPELINE_DEVICE_TYPE=










# When true, models are loaded lazily instead of at startup.
DISABLE_MODEL_PRELOADING=true

# Keep and resend prior turns of the conversation.
CHAT_HISTORY_ENABLED=true

# Expose generation parameter controls in the UI.
SHOW_PARAM_SETTINGS=false

# Gradio share flag: when true, presumably creates a public share link
# via gradio's launch(share=True) — NOTE(review): confirm; exposes the app publicly.
SHARE_GRADIO_APP=false

# Base URL prefixed to PDF filenames when rendering source citations.
PDF_FILE_BASE_URL=https://ai-engd.netlify.app/pdfs/books/




# Sentence-embeddings model used for retrieval.
# NOTE(review): quoting is inconsistent across this file (some values quoted,
# some not); python-dotenv strips the quotes, but confirm the actual loader.
HF_EMBEDDINGS_MODEL_NAME="hkunlp/instructor-large"




# CPU cores to use; blank presumably means auto-detect — TODO confirm.
NUMBER_OF_CPU_CORES=


# HuggingFace access token — required for gated repos such as
# meta-llama/Llama-2-7b-chat-hf (referenced below). Secret; do not commit.
HUGGINGFACE_AUTH_TOKEN=


# Load torch weights in bfloat16 (halves memory vs float32 where supported).
USING_TORCH_BFLOAT16=true





















# --- Per-backend model identifiers / paths ---
# HuggingFace Hub repo id for the default backend (gated; needs the token above).
HUGGINGFACE_MODEL_NAME_OR_PATH="meta-llama/Llama-2-7b-chat-hf"







# StableLM backend model.
STABLELM_MODEL_NAME_OR_PATH="OpenAssistant/stablelm-7b-sft-v7-epoch-3"


# MosaicML MPT backend model.
MOSAICML_MODEL_NAME_OR_PATH="mosaicml/mpt-7b-instruct"


# Falcon backend model.
FALCON_MODEL_NAME_OR_PATH="tiiuae/falcon-7b-instruct"


# GPT4All-J backend: local GGML weights, downloaded from the link below if absent.
# NOTE(review): both values actually point at a Llama-2 GGML artifact, not a
# GPT4All-J model — looks intentional (reusing the loader) but worth confirming.
GPT4ALL_J_MODEL_PATH="../models/llama-2-7b-chat.ggmlv3.q4_0.bin"

GPT4ALL_J_DOWNLOAD_LINK=https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/resolve/main/llama-2-7b-chat.ggmlv3.q4_0.bin


# GPT4All backend: local weights plus their download source.
GPT4ALL_MODEL_PATH="./models/ggml-nous-gpt4-vicuna-13b.bin"

GPT4ALL_DOWNLOAD_LINK=https://gpt4all.io/models/ggml-nous-gpt4-vicuna-13b.bin


# llama.cpp backend: local GGML weights plus their download source.
LLAMACPP_MODEL_PATH="./models/llama-2-7b-chat.ggmlv3.q4_K_M.bin"

LLAMACPP_DOWNLOAD_LINK=https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/resolve/main/llama-2-7b-chat.ggmlv3.q4_K_M.bin






# --- Retrieval / data paths ---
# FAISS vector index directory; the "1024_512" suffix presumably encodes the
# chunk size / overlap configured at the bottom of this file — keep them in sync.
FAISS_INDEX_PATH="./data/faiss_1024_512/"


# Plain-text file of sample/eval questions, one per line (presumed format).
QUESTIONS_FILE_PATH="./data/questions.txt"


# Passed through to the HF tokenizers library to control fork parallelism.
TOKENIZERS_PARALLELISM=true




# Directory of source PDFs to ingest.
SOURCE_PDFS_PATH="./data/pdfs/"

# Text file listing source URLs for ingestion.
SOURCE_URLS="./data/pci_dss_urls.txt"

# Characters (or tokens — confirm) per chunk at ingestion time.
# NOTE(review): key name is misspelled ("CHUNCK"); the application presumably
# reads this exact spelling, so renaming it here alone would break config —
# fix it together with the consuming code.
CHUNCK_SIZE=1024

# Overlap between consecutive chunks; must match the FAISS index above.
CHUNK_OVERLAP=512
|
|