Update README.md
README.md CHANGED
@@ -11,7 +11,7 @@ model = FastLanguageModel.get_peft_model(
     # [NEW] "unsloth" uses 30% less VRAM, fits 2x larger batch sizes!
     use_gradient_checkpointing = "unsloth", # True or "unsloth" for very long context
     random_state = 3407,
-    use_rslora =
+    use_rslora = False, # We support rank stabilized LoRA
     loftq_config = None, # And LoftQ
 )
 
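For context, a minimal sketch of how the patched snippet sits inside a full Unsloth setup. The surrounding `get_peft_model` arguments follow the style of Unsloth's README; the model name and LoRA hyperparameters below are illustrative, not prescribed by this commit.

```python
from unsloth import FastLanguageModel

# Load a 4-bit quantized base model (example model name; any Unsloth-supported model works).
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "unsloth/llama-3-8b-bnb-4bit",
    max_seq_length = 2048,
    load_in_4bit = True,
)

# Attach LoRA adapters. use_rslora = False keeps the standard LoRA scaling (alpha / r);
# setting it to True switches to rank-stabilized scaling (alpha / sqrt(r)).
model = FastLanguageModel.get_peft_model(
    model,
    r = 16,
    target_modules = ["q_proj", "k_proj", "v_proj", "o_proj",
                      "gate_proj", "up_proj", "down_proj"],
    lora_alpha = 16,
    lora_dropout = 0,
    bias = "none",
    # [NEW] "unsloth" uses 30% less VRAM, fits 2x larger batch sizes!
    use_gradient_checkpointing = "unsloth",  # True or "unsloth" for very long context
    random_state = 3407,
    use_rslora = False,   # We support rank stabilized LoRA
    loftq_config = None,  # And LoftQ
)
```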