File size: 649 Bytes
17c1e65
 
 
 
ddc3bcb
17c1e65
 
 
 
 
 
 
19fb14c
17c1e65
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
# Configuration parameters for LLaMA-2 model
import torch
import os

# Base checkpoint and tokenizer. To fine-tune the 13b variant instead,
# point both at "meta-llama/Llama-2-13b-chat-hf".
MODEL_NAME = "meta-llama/Llama-2-7b-chat-hf"
TOKENIZER_NAME = "meta-llama/Llama-2-7b-chat-hf"

# Weight quantization mode. Options: '4bit', '8bit', or None.
QUANTIZATION = '4bit'

# Loading flags (semantics follow the names — confirm against the loader code).
FROM_SAVED = False    # presumably: load from MODEL_PATH instead of the hub
MODEL_PATH = None     # local checkpoint path used when FROM_SAVED is True
TRUST_REMOTE = False  # passed as trust_remote_code — keep False for safety
USE_FAST = True       # use the fast (Rust-backed) tokenizer
ADD_EOS_TOKEN = True  # have the tokenizer append an EOS token

# ACCESS_TOKEN = "xx"  # HF Read-only Token, to be added here if this code is run outside HF.
huggingface_token = os.getenv('HUGGINGFACE_TOKEN')  # for use as a secret on hf space

# Prefer the GPU when one is visible; otherwise fall back to CPU.
if torch.cuda.is_available():
    DEVICE = 'cuda'
else:
    DEVICE = 'cpu'