Update utils.py
utils.py CHANGED
@@ -66,12 +66,6 @@ def generate_prompt_with_history(text, history, tokenizer, max_length=2048):
 
 
 
-#tokenizer = AutoTokenizer.from_pretrained("project-baize/baize-v2-7b")
-#model = AutoModelForCausalLM.from_pretrained("project-baize/baize-v2-7b")
-#tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")
-#model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B")
-
-
 def load_tokenizer_and_model(base_model,load_8bit=False):
     if torch.cuda.is_available():
         device = "cuda"
@@ -82,8 +76,8 @@ def load_tokenizer_and_model(base_model,load_8bit=False):
     if device == "cuda":
         model = AutoModelForCausalLM.from_pretrained(
             base_model,
-
-
+            load_in_8bit=load_8bit,
+            torch_dtype=torch.float16,
             device_map="auto",
         )
     else:
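For context, here is a minimal sketch of load_tokenizer_and_model as it reads after this commit. Only the device check and the CUDA branch are visible in the diff; the imports, the tokenizer loading, the CPU fallback, and the return statement below are assumptions filled in for illustration, not taken from the visible hunks.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

def load_tokenizer_and_model(base_model, load_8bit=False):
    # Pick a device; the diff shows this check at the top of the function.
    if torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"

    # Assumed: the tokenizer is loaded from the same checkpoint as the model.
    tokenizer = AutoTokenizer.from_pretrained(base_model)

    if device == "cuda":
        model = AutoModelForCausalLM.from_pretrained(
            base_model,
            load_in_8bit=load_8bit,     # added by this commit: optional bitsandbytes 8-bit quantization
            torch_dtype=torch.float16,  # added by this commit: half-precision weights on GPU
            device_map="auto",          # let accelerate place layers across available devices
        )
    else:
        # Assumed CPU fallback; the else branch is truncated in the diff.
        model = AutoModelForCausalLM.from_pretrained(
            base_model, low_cpu_mem_usage=True
        )
    return tokenizer, model

With this change, a caller can opt into quantized loading, e.g. tokenizer, model = load_tokenizer_and_model("project-baize/baize-v2-7b", load_8bit=True); note that load_in_8bit requires the bitsandbytes package to be installed.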