File size: 1,149 Bytes
17a0e49 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 |
"""Score QA answer pairs with the QuIP-512-MOCHA answer-overlap model.

Demo script: formats (question, reference answer, predicted answer, context)
tuples into the "<q> ... <r> ... <c> ..." template the model expects, runs a
single forward pass, and prints one overlap score per pair.
"""
import torch

# Local checkpoint directory for both the classifier and its tokenizer.
MODEL_DIR = "/home/afabbri/evaluation/qafacteval/models/quip-512-mocha"


def format_qa_inputs(questions, answers, predictions, context):
    """Build the model's input strings, one per (question, answer, prediction).

    Each string follows the template the checkpoint was trained on:
    "{question} <q> {answer} <r> {prediction} <c> {context}".
    Returns a list the same length as the shortest input sequence (zip semantics).
    """
    return [
        f"{question} <q> {answer} <r> {prediction} <c> {context}"
        for question, answer, prediction in zip(questions, answers, predictions)
    ]


def main():
    # Imported lazily so the formatting helper above can be used without
    # pulling in the heavyweight transformers dependency.
    from transformers import AutoModelForSequenceClassification, AutoTokenizer

    # Fall back to CPU so the script still runs on machines without CUDA.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    predictor = AutoModelForSequenceClassification.from_pretrained(MODEL_DIR).to(device)
    predictor.eval()
    tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR)

    # Toy example: both predictions exactly match the reference answer.
    context = "The Yankees beat the Braves in the World Series."
    questions = ["Who won the World Series?", "Who beat the Braves?"]
    answers = ["The Yankees", "The Yankees"]
    predictions = ["The Yankees", "The Yankees"]

    batch_sentences = format_qa_inputs(questions, answers, predictions, context)
    inputs = tokenizer(
        batch_sentences,
        max_length=512,
        truncation=True,
        padding="max_length",
        return_tensors="pt",
    )
    # Inference only — disable autograd bookkeeping to save memory.
    with torch.no_grad():
        outputs = predictor(
            input_ids=inputs["input_ids"].to(device),
            attention_mask=inputs["attention_mask"].to(device),
        )
    # First logit per row is the overlap score — presumably a regression head
    # (single relevant output); TODO confirm against the checkpoint config.
    scores = [row[0] for row in outputs[0].cpu().tolist()]
    print(scores)


if __name__ == "__main__":
    main()
|