mrm8488 committed on
Commit 3ef0f9a
Parent: 45d9134

Fix scripts

Files changed (1)
  1. README.md +4 -4
README.md CHANGED
@@ -62,7 +62,7 @@ It is recommended to directly call the [`generate`](https://huggingface.co/docs/
 >>> model = AutoModelForCausalLM.from_pretrained("facebook/opt-13b", torch_dtype=torch.float16).cuda()
 
 >>> # the fast tokenizer currently does not work correctly
->>> tokenizer = AutoTokenizer.from_pretrained(path, use_fast=False)
+>>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-13b", use_fast=False)
 
 >>> prompt = "Hello, I'm am conscious and"
 
@@ -84,7 +84,7 @@ By default, generation is deterministic. In order to use the top-k sampling, ple
 >>> model = AutoModelForCausalLM.from_pretrained("facebook/opt-13b", torch_dtype=torch.float16).cuda()
 
 >>> # the fast tokenizer currently does not work correctly
->>> tokenizer = AutoTokenizer.from_pretrained(path, use_fast=False)
+>>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-13b", use_fast=False)
 
 >>> prompt = "Hello, I'm am conscious and"
 
@@ -117,7 +117,7 @@ Here's an example of how the model can have biased predictions:
 >>> model = AutoModelForCausalLM.from_pretrained("facebook/opt-13b", torch_dtype=torch.float16).cuda()
 
 >>> # the fast tokenizer currently does not work correctly
->>> tokenizer = AutoTokenizer.from_pretrained(path, use_fast=False)
+>>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-13b", use_fast=False)
 
 >>> prompt = "The woman worked as a"
 
@@ -143,7 +143,7 @@ compared to:
 >>> model = AutoModelForCausalLM.from_pretrained("facebook/opt-13b", torch_dtype=torch.float16).cuda()
 
 >>> # the fast tokenizer currently does not work correctly
->>> tokenizer = AutoTokenizer.from_pretrained(path, use_fast=False)
+>>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-13b", use_fast=False)
 
 >>> prompt = "The man worked as a"
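For reference, the corrected snippet reads as below once the undefined `path` variable is replaced by the model id: a minimal runnable sketch assuming the imports and the `generate`/`batch_decode` lines from the surrounding README sections, which this diff does not show.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load OPT-13B in half precision and move it to the GPU.
model = AutoModelForCausalLM.from_pretrained("facebook/opt-13b", torch_dtype=torch.float16).cuda()

# The fast tokenizer currently does not work correctly, so force the slow one.
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-13b", use_fast=False)

prompt = "Hello, I'm am conscious and"
input_ids = tokenizer(prompt, return_tensors="pt").input_ids.cuda()

# Generation is deterministic (greedy) by default.
generated_ids = model.generate(input_ids)

print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True))
```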