Update README.md
Browse files
README.md
CHANGED
@@ -36,10 +36,14 @@ import torch
|
|
36 |
from transformers import AutoTokenizer, AutoModelForCausalLM, MistralForCausalLM
|
37 |
|
38 |
model = MistralForCausalLM.from_pretrained("ai-agi/neural-zephyr", use_cache=False, torch_dtype=torch.bfloat16, device_map="auto")
|
|
|
39 |
state_dict = torch.load('model_weights.pth')
|
|
|
40 |
model.load_state_dict(state_dict)
|
41 |
|
42 |
tokenizer = AutoTokenizer.from_pretrained("ai-agi/neural-zephyr", use_fast=True)
|
|
|
43 |
if tokenizer.pad_token is None:
|
|
|
44 |
tokenizer.pad_token = tokenizer.eos_token
|
45 |
|
|
|
36 |
from transformers import AutoTokenizer, AutoModelForCausalLM, MistralForCausalLM
|
37 |
|
38 |
model = MistralForCausalLM.from_pretrained("ai-agi/neural-zephyr", use_cache=False, torch_dtype=torch.bfloat16, device_map="auto")
|
39 |
+
|
40 |
state_dict = torch.load('model_weights.pth')
|
41 |
+
|
42 |
model.load_state_dict(state_dict)
|
43 |
|
44 |
tokenizer = AutoTokenizer.from_pretrained("ai-agi/neural-zephyr", use_fast=True)
|
45 |
+
|
46 |
if tokenizer.pad_token is None:
|
47 |
+
|
48 |
tokenizer.pad_token = tokenizer.eos_token
|
49 |
|