import numpy as np
import torch
from transformers.models.deberta import DebertaForSequenceClassification, DebertaTokenizer
# Load the MNLI-finetuned DeBERTa cross-encoder and its matching tokenizer.
# nn.Module.eval() returns the module itself, so loading and switching to
# inference mode can be chained into one expression.
t_model = DebertaForSequenceClassification.from_pretrained(
    'microsoft/deberta-xlarge-mnli'
).eval()

tokenizer = DebertaTokenizer.from_pretrained('microsoft/deberta-xlarge-mnli')

# Premise / hypothesis pairs to be scored by the NLI cross-encoder.
sentences_pairs = [
    [
        "The Old One always comforted Ca'daan, except today.",
        "Ca'daan knew the Old One very well.",
    ],
    [
        "Your gift is appreciated by each and every student who will benefit from your generosity.",
        "Hundreds of students will benefit from your generosity.",
    ],
    [
        "At the other end of Pennsylvania Avenue, people began to line up for a White House tour.",
        "People formed a line at the end of Pennsylvania Avenue.",
    ],
]


# Score each premise/hypothesis pair with the NLI model and print the raw output.
for pair in sentences_pairs:
    # Tokenize the two sentences as one cross-encoder input.
    # return_tensors='pt' already yields batched (1, seq_len) tensors, which
    # replaces the manual torch.tensor(v).unsqueeze(0) conversion;
    # truncation=True guards against pairs longer than max_length tokens
    # (without it, over-long inputs would not be cut to 256).
    t_emb = tokenizer(
        *pair,
        max_length=256,
        padding='max_length',
        truncation=True,
        return_tensors='pt',
    )
    # Pure inference: disable autograd so no computation graph is built
    # and activation memory is not retained.
    with torch.no_grad():
        t_out = t_model(**t_emb)
    print(pair, t_out)




