Spaces:
Sleeping
Sleeping
File size: 353 Bytes
fb4a3c6 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 |
import functools

import torch
from transformers import AutoTokenizer
# Hugging Face model identifier; used to resolve the tokenizer below.
model_nm = 't5-base'
# Prefer GPU when one is present, but fall back to CPU so inference
# still works on machines without CUDA (the original hard-coded 'cuda'
# and crashed on CPU-only hosts).
device = 'cuda' if torch.cuda.is_available() else 'cpu'
@functools.lru_cache(maxsize=1)
def _load_tokenizer(model_name):
    """Load and memoize the tokenizer.

    ``AutoTokenizer.from_pretrained`` hits the disk (and possibly the
    network) every time it is called, so cache the result instead of
    reloading it on every tokenize call as the original code did.
    """
    return AutoTokenizer.from_pretrained(model_name)


def tokenize_for_inference(text):
    """Encode *text* into token ids ready for model inference.

    Args:
        text: Input string (or batch of strings) to tokenize.

    Returns:
        A ``torch.Tensor`` of token ids (``return_tensors='pt'``),
        truncated to at most 512 tokens and moved to ``device``.
    """
    tokenizer = _load_tokenizer(model_nm)
    # NOTE(review): ``encode`` returns only input_ids — no attention
    # mask. If the model needs one, switch to ``tokenizer(...)``;
    # confirm against the caller before changing the return type.
    model_inputs = tokenizer.encode(
        text,
        max_length=512,
        # padding=True pads to the longest sequence in the batch, so it
        # is a no-op for a single string; kept for batch inputs.
        padding=True,
        truncation=True,
        return_tensors='pt',
    )
    return model_inputs.to(device)
|