patrickvonplaten committed
Commit 88cc4ae
1 Parent(s): 769c7b9

Update README.md

Files changed (1): README.md +4 -5
README.md CHANGED
@@ -44,11 +44,10 @@ It is recommended to directly call the [`generate`](https://huggingface.co/docs/
 >>> model = AutoModelForCausalLM.from_pretrained("facebook/opt-6.7b", torch_dtype=torch.float16).cuda()

 >>> # the fast tokenizer currently does not work correctly
->>> tokenizer = AutoTokenizer.from_pretrained(path, use_fast=False)
+>>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-6.7b", use_fast=False)

 >>> prompt = "Hello, I'm am conscious and"

-
 >>> input_ids = tokenizer(prompt, return_tensors="pt").input_ids.cuda()

 >>> generated_ids = model.generate(input_ids)
@@ -66,7 +65,7 @@ By default, generation is deterministic. In order to use the top-k sampling, ple
 >>> model = AutoModelForCausalLM.from_pretrained("facebook/opt-6.7b", torch_dtype=torch.float16).cuda()

 >>> # the fast tokenizer currently does not work correctly
->>> tokenizer = AutoTokenizer.from_pretrained(path, use_fast=False)
+>>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-6.7b", use_fast=False)

 >>> prompt = "Hello, I'm am conscious and"

@@ -99,7 +98,7 @@ Here's an example of how the model can have biased predictions:
 >>> model = AutoModelForCausalLM.from_pretrained("facebook/opt-6.7b", torch_dtype=torch.float16).cuda()

 >>> # the fast tokenizer currently does not work correctly
->>> tokenizer = AutoTokenizer.from_pretrained(path, use_fast=False)
+>>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-6.7b", use_fast=False)

 >>> prompt = "The woman worked as a"

@@ -125,7 +124,7 @@ compared to:
 >>> model = AutoModelForCausalLM.from_pretrained("facebook/opt-6.7b", torch_dtype=torch.float16).cuda()

 >>> # the fast tokenizer currently does not work correctly
->>> tokenizer = AutoTokenizer.from_pretrained(path, use_fast=False)
+>>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-6.7b", use_fast=False)

 >>> prompt = "The man worked as a"
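The same one-line fix applies to all four examples touched by this commit: the placeholder `path` is replaced with the concrete checkpoint name "facebook/opt-6.7b" so each snippet runs as pasted. For reference, the sketch below assembles what one of the updated greedy-generation examples looks like after the change. It is a minimal sketch, not copied verbatim from the README: the imports and the final decode/print lines are not visible in the diff and are added here for completeness, and it assumes a CUDA-capable GPU with `torch` and `transformers` installed.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load the checkpoint in float16 and move it to the GPU (assumes a CUDA device is available).
model = AutoModelForCausalLM.from_pretrained("facebook/opt-6.7b", torch_dtype=torch.float16).cuda()

# The fast tokenizer currently does not work correctly for OPT, so load the slow one.
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-6.7b", use_fast=False)

prompt = "Hello, I'm am conscious and"
input_ids = tokenizer(prompt, return_tensors="pt").input_ids.cuda()

# Generation is deterministic (greedy) by default; pass do_sample=True and top_k for sampling.
generated_ids = model.generate(input_ids)
print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True))
```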