Update my_model/LLAMA2/LLAMA2_model.py
my_model/LLAMA2/LLAMA2_model.py
CHANGED
@@ -3,7 +3,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
 from typing import Optional
 import bitsandbytes  # only for using on GPU
 import accelerate  # only for using on GPU
-from my_model.
+from my_model.config import LLAMA2_config as config  # Importing LLAMA2 configuration file
 import warnings

 # Suppress only FutureWarning from transformers
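For context, a minimal sketch of how the restored config import might be used alongside BitsAndBytesConfig, bitsandbytes, and accelerate to load the model quantized on GPU. This is an assumption about usage, not code from this commit; attribute names such as config.MODEL_NAME are hypothetical placeholders.

# Hedged sketch, not part of the commit. config.MODEL_NAME is a placeholder
# attribute assumed to exist in LLAMA2_config; adjust to the real config fields.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from my_model.config import LLAMA2_config as config

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                     # quantize weights to 4-bit (needs bitsandbytes + GPU)
    bnb_4bit_compute_dtype=torch.float16,  # run compute in fp16
)

tokenizer = AutoTokenizer.from_pretrained(config.MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(
    config.MODEL_NAME,
    quantization_config=bnb_config,  # uses bitsandbytes under the hood
    device_map="auto",               # uses accelerate to place layers on available devices
)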