Updated Path #7
by zxxatxxz - opened

README.md CHANGED
@@ -263,7 +263,7 @@ import bitsandbytes, flash_attn
 
 tokenizer = AutoTokenizer.from_pretrained('NousResearch/Hermes-2-Pro-Llama-3-8B', trust_remote_code=True)
 model = LlamaForCausalLM.from_pretrained(
-    "Hermes-2-Pro-Llama-3-8B",
+    "NousResearch/Hermes-2-Pro-Llama-3-8B",
     torch_dtype=torch.float16,
     device_map="auto",
     load_in_8bit=False,
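
The change points `from_pretrained` at the full Hub repo id, so the weights resolve to `NousResearch/Hermes-2-Pro-Llama-3-8B` on the Hub instead of a relative local `Hermes-2-Pro-Llama-3-8B` directory that typically does not exist. Below is a minimal sketch of the updated loading code from the README; the keyword arguments that follow `load_in_8bit` in the original example fall outside the diff context and are omitted here.

```python
# Minimal sketch of the updated loading snippet after this change.
# Assumes transformers and torch are installed; the README also imports
# bitsandbytes and flash_attn, which are only needed for quantized or
# flash-attention loading and are not used in this trimmed example.
import torch
from transformers import AutoTokenizer, LlamaForCausalLM

# Both calls now use the full Hub repo id, so files are fetched from the
# Hub (or the local cache) rather than looked up in a local folder.
tokenizer = AutoTokenizer.from_pretrained(
    "NousResearch/Hermes-2-Pro-Llama-3-8B", trust_remote_code=True
)
model = LlamaForCausalLM.from_pretrained(
    "NousResearch/Hermes-2-Pro-Llama-3-8B",
    torch_dtype=torch.float16,
    device_map="auto",
    load_in_8bit=False,  # kept from the README; requires bitsandbytes if set to True
)
```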