ybelkada HF staff osanseviero HF staff committed on
Commit
dd2ad0d
1 Parent(s): 46ce8d4

Fix code snippets from model card (#1)

Browse files

- Fix code snippets from model card (ef70ffe96e1d9202e6e09748d7541e035ca06a24)


Co-authored-by: Omar Sanseviero <osanseviero@users.noreply.huggingface.co>

Files changed (1) hide show
  1. README.md +4 -4
README.md CHANGED
@@ -159,7 +159,7 @@ tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-large")
159
  model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-large")
160
 
161
  input_text = "translate English to German: How old are you?"
162
- input_ids = tokenizer.encode(input_text, return_tensors="pt").input_ids
163
 
164
  outputs = model.generate(input_ids)
165
  print(tokenizer.decode(outputs[0]))
@@ -180,7 +180,7 @@ tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-large")
180
  model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-large", device_map="auto")
181
 
182
  input_text = "translate English to German: How old are you?"
183
- input_ids = tokenizer.encode(input_text, return_tensors="pt").input_ids.to("cuda")
184
 
185
  outputs = model.generate(input_ids)
186
  print(tokenizer.decode(outputs[0]))
@@ -204,7 +204,7 @@ tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-large")
204
  model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-large", device_map="auto", torch_dtype=torch.float16)
205
 
206
  input_text = "translate English to German: How old are you?"
207
- input_ids = tokenizer.encode(input_text, return_tensors="pt").input_ids.to("cuda")
208
 
209
  outputs = model.generate(input_ids)
210
  print(tokenizer.decode(outputs[0]))
@@ -225,7 +225,7 @@ tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-large")
225
  model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-large", device_map="auto", load_in_8bit=True)
226
 
227
  input_text = "translate English to German: How old are you?"
228
- input_ids = tokenizer.encode(input_text, return_tensors="pt").input_ids.to("cuda")
229
 
230
  outputs = model.generate(input_ids)
231
  print(tokenizer.decode(outputs[0]))
159
  model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-large")
160
 
161
  input_text = "translate English to German: How old are you?"
162
+ input_ids = tokenizer(input_text, return_tensors="pt").input_ids
163
 
164
  outputs = model.generate(input_ids)
165
  print(tokenizer.decode(outputs[0]))
180
  model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-large", device_map="auto")
181
 
182
  input_text = "translate English to German: How old are you?"
183
+ input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda")
184
 
185
  outputs = model.generate(input_ids)
186
  print(tokenizer.decode(outputs[0]))
204
  model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-large", device_map="auto", torch_dtype=torch.float16)
205
 
206
  input_text = "translate English to German: How old are you?"
207
+ input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda")
208
 
209
  outputs = model.generate(input_ids)
210
  print(tokenizer.decode(outputs[0]))
225
  model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-large", device_map="auto", load_in_8bit=True)
226
 
227
  input_text = "translate English to German: How old are you?"
228
+ input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda")
229
 
230
  outputs = model.generate(input_ids)
231
  print(tokenizer.decode(outputs[0]))