# LogiT5

Example usage with the Hugging Face `transformers` library:

```python
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from datasets import load_dataset  # only needed if loading a dataset

model_name = 'logicreasoning/LogiT5'
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# Move the model to GPU if one is available
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
model = model.to(device)

input_text = ''  # your input text here; must be a string
inputs = tokenizer(input_text, return_tensors='pt', padding=True).to(device)

output = model.generate(**inputs, max_length=1024)
prediction = tokenizer.decode(output[0], skip_special_tokens=True)
```
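
Because the tokenizer is called with `padding=True`, the same pattern extends to batched inference. A minimal sketch follows; the example prompts are hypothetical, since the exact input format LogiT5 expects is not documented here:

```python
# Hypothetical prompts; the exact input format LogiT5 expects is not
# documented in this README.
batch = [
    "premise: All birds can fly. Penguins are birds. hypothesis: Penguins can fly.",
    "premise: If it rains, the ground gets wet. It rained. hypothesis: The ground is wet.",
]

# Tokenize the whole batch at once; padding=True aligns sequence lengths
inputs = tokenizer(batch, return_tensors='pt', padding=True).to(device)
outputs = model.generate(**inputs, max_length=1024)

# Decode every generated sequence in the batch
predictions = tokenizer.batch_decode(outputs, skip_special_tokens=True)
print(predictions)
```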