add peft install back since it doesn't get installed by setup.py (#331)
- docker/Dockerfile +1 -0
- src/axolotl/utils/models.py +11 -7
docker/Dockerfile
@@ -11,6 +11,7 @@ RUN apt-get update && \
 
 WORKDIR /workspace
 
+RUN pip3 install --force-reinstall "peft @ git+https://github.com/huggingface/peft.git@main"
 RUN git clone --depth=1 https://github.com/OpenAccess-AI-Collective/axolotl.git
 # If AXOLOTL_EXTRAS is set, append it in brackets
 RUN cd axolotl && \
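Since peft is now pulled from the huggingface/peft main branch in the image build rather than via setup.py, a quick check inside the built container can confirm the helper that models.py expects is actually importable. This is a minimal standalone sketch, not part of the commit; it simply mirrors the import guard added in models.py below.

# Sanity-check sketch (not part of this commit): run inside the built image to
# confirm the git install of peft exposes the k-bit training helper.
import importlib.metadata

import peft

print("peft version:", importlib.metadata.version("peft"))
print(
    "prepare_model_for_kbit_training available:",
    hasattr(peft, "prepare_model_for_kbit_training"),
)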
src/axolotl/utils/models.py
@@ -147,13 +147,17 @@ def load_model(
         LOG.exception(err)
         raise err
 
-
-
-
-
-
-
-
+    if not cfg.gptq and (
+        (cfg.adapter == "lora" and load_in_8bit)
+        or (cfg.adapter == "qlora" and cfg.load_in_4bit)
+    ):
+        try:
+            from peft import prepare_model_for_kbit_training
+        except ImportError:
+            # For backward compatibility
+            from peft import (
+                prepare_model_for_int8_training as prepare_model_for_kbit_training,
+            )
 
     model_kwargs = {}
     if cfg.model_revision:
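For context, a minimal sketch of how the guarded import above is typically used downstream: the quantized base model is passed through prepare_model_for_kbit_training before a LoRA/QLoRA adapter is attached. The model id and LoRA hyperparameters below are placeholders, not values from the axolotl config, and this is not the repo's actual load_model code.

# Usage sketch under stated assumptions: an 8-bit base model and placeholder
# LoRA hyperparameters; not axolotl's actual code path.
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM

try:
    from peft import prepare_model_for_kbit_training
except ImportError:
    # Older peft releases only ship the helper under its previous int8 name.
    from peft import (
        prepare_model_for_int8_training as prepare_model_for_kbit_training,
    )

# "facebook/opt-350m" is a placeholder model id, not one used by the project.
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", load_in_8bit=True)

# Freezes and casts the quantized weights and enables input gradients so the
# small LoRA matrices added next are the only trainable parameters.
model = prepare_model_for_kbit_training(model)
model = get_peft_model(model, LoraConfig(r=8, lora_alpha=16, task_type="CAUSAL_LM"))
model.print_trainable_parameters()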