ai-agi committed
Commit a8f92fd
1 Parent(s): f1ca299

Update README.md

Files changed (1)
  1. README.md +5 -7
README.md CHANGED
@@ -32,16 +32,14 @@ You can find more details in the [technical report](https://arxiv.org/abs/2310.1
 
  ## Use in Transformers
  ## Load model directly
- import torch
+ import torch \
  from transformers import AutoTokenizer, AutoModelForCausalLM, MistralForCausalLM
 
  model = MistralForCausalLM.from_pretrained("ai-agi/neural-zephyr", use_cache=False, torch_dtype=torch.bfloat16, device_map="auto") \
  state_dict = torch.load('model_weights.pth') \
- model.load_state_dict(state_dict) \
+ model.load_state_dict(state_dict)
 
- tokenizer = AutoTokenizer.from_pretrained("ai-agi/neural-zephyr", use_fast=True)
-
- if tokenizer.pad_token is None:
-
- tokenizer.pad_token = tokenizer.eos_token)
+ tokenizer = AutoTokenizer.from_pretrained("ai-agi/neural-zephyr", use_fast=True) \
+ if tokenizer.pad_token is None: \
+     tokenizer.pad_token = tokenizer.eos_token
 
 
 
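For reference, the snippet from the updated README can be written as a self-contained, runnable Python block. This is a minimal sketch following the README's own recipe: it assumes the checkpoint file `model_weights.pth` mentioned in the README is present in the working directory and contains a state dict compatible with the model.

```python
import torch
from transformers import AutoTokenizer, MistralForCausalLM

# Load the base model in bfloat16 with the KV cache disabled, as in the README.
model = MistralForCausalLM.from_pretrained(
    "ai-agi/neural-zephyr",
    use_cache=False,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

# Overlay the locally stored weights (path taken from the README; assumed to
# hold a state dict matching the model's architecture).
state_dict = torch.load("model_weights.pth")
model.load_state_dict(state_dict)

# Load the tokenizer and fall back to the EOS token when no pad token is defined.
tokenizer = AutoTokenizer.from_pretrained("ai-agi/neural-zephyr", use_fast=True)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
```

Reusing the EOS token as the pad token is a common workaround for Mistral-style tokenizers that ship without a dedicated padding token; it only matters when batching padded inputs.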