
Example usage of LogiT5 with the Hugging Face `transformers` library:

```python
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from datasets import load_dataset  # optional: only needed if loading a dataset

model_name = "logicreasoning/LogiT5"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# Run on GPU if one is available, otherwise fall back to CPU
device = "cuda:0" if torch.cuda.is_available() else "cpu"
model = model.to(device)

input_text = ""  # your input text here
inputs = tokenizer(input_text, return_tensors="pt", padding=True).to(device)
output = model.generate(**inputs, max_length=1024)
prediction = tokenizer.decode(output[0], skip_special_tokens=True)
```
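
The snippet above imports `datasets` but only uses a single input string. If you want to run LogiT5 over a whole dataset, the sketch below shows one way to do that with `load_dataset`, reusing `tokenizer`, `model`, and `device` from above. The dataset name and column name are hypothetical placeholders, not part of this repository.

```python
# Sketch: batch inference over a dataset loaded with datasets.load_dataset.
# "your_dataset" and the "question" column are hypothetical placeholders;
# replace them with the dataset and field you actually want to evaluate on.
dataset = load_dataset("your_dataset", split="test")

predictions = []
for example in dataset:
    inputs = tokenizer(example["question"], return_tensors="pt", padding=True).to(device)
    output = model.generate(**inputs, max_length=1024)
    predictions.append(tokenizer.decode(output[0], skip_special_tokens=True))
```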