Update model.py
model.py
CHANGED
@@ -3,11 +3,12 @@ str_cmd1 = 'pip install "unsloth[colab-new] @ git+https://github.com/unslothai/u
 str_cmd2 = 'pip install --no-deps xformers "trl<0.9.0" peft accelerate bitsandbytes'
 os.system(str_cmd1)
 os.system(str_cmd2)
-os.environ["CUDA_VISIBLE_DEVICES"] = "
+#os.environ["CUDA_VISIBLE_DEVICES"] = "0" # or "0,1" for multiple GPUs


 from unsloth import FastLanguageModel
 import torch
+device = torch.device("cpu")
 max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally!
 dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
 load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.
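For context, a minimal sketch of how the variables defined above are typically consumed further down in model.py. The load call itself is not part of this diff, and the model name below is a hypothetical placeholder; only max_seq_length, dtype, and load_in_4bit come from the code shown.

# Sketch only: the from_pretrained call is not shown in this diff,
# and the model name is an assumption for illustration.
from unsloth import FastLanguageModel

max_seq_length = 2048  # as set in the diff above
dtype = None           # auto-detect: float16 on T4/V100, bfloat16 on Ampere+
load_in_4bit = True    # as set in the diff above

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="unsloth/llama-3-8b-bnb-4bit",  # hypothetical; not in the diff
    max_seq_length=max_seq_length,
    dtype=dtype,
    load_in_4bit=load_in_4bit,
)

Note that Unsloth's 4-bit path relies on bitsandbytes, which requires a CUDA GPU; with CUDA_VISIBLE_DEVICES commented out and device pinned to torch.device("cpu"), a load like this would still be expected to fail on a CPU-only Space unless load_in_4bit is disabled and a CPU-capable loader is used instead.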