# paddle model
import numpy as np
# from deberta.deberta_model import DebertaModel
# from deberta.config import Deberta_xlarge_mnli_Config
# import paddle as pp

# pp_model_config = Deberta_xlarge_mnli_Config()
# pp_model = DebertaModel(pp_model_config)

# sd = pp.load('/home/gana/deberta-xlarge-mnli.pdparams')
# sd2 = {k.replace('deberta.', '') : v for k, v in sd.items()}
# pp_model.load_dict(sd2)
# pp_model.eval()


# huggingface model
import torch
from transformers.models.deberta import DebertaModel, DebertaTokenizer
# Hugging Face reference model: load the DeBERTa-xlarge-MNLI checkpoint and
# switch it to inference mode (disables dropout / batch-norm updates).
_MODEL_NAME = 'microsoft/deberta-xlarge-mnli'
t_model = DebertaModel.from_pretrained(_MODEL_NAME)
t_model.eval()


# Matching tokenizer for the same checkpoint.
tokenizer = DebertaTokenizer.from_pretrained(_MODEL_NAME)

# Premise/hypothesis sentence pairs used as probe inputs for the
# paddle-vs-huggingface output comparison.
sentences_pairs = [
    ["The Old One always comforted Ca'daan, except today.",
     "Ca'daan knew the Old One very well."],
    ["Your gift is appreciated by each and every student who will benefit from your generosity.",
     "Hundreds of students will benefit from your generosity."],
    ["At the other end of Pennsylvania Avenue, people began to line up for a White House tour.",
     "People formed a line at the end of Pennsylvania Avenue."],
]


# Hugging Face outputs keyed by pair index, so the paddle outputs can later be
# compared against them (see the commented-out np.allclose check below).
t_dict = {}


for idx, pair in enumerate(sentences_pairs):
    # Tokenize the premise/hypothesis pair; truncation=True guards against
    # inputs longer than max_length overflowing the model's position range.
    emb = tokenizer(*pair, max_length=256, padding='max_length', truncation=True)
    # Add a batch dimension: (seq,) -> (1, seq) for each tokenizer field.
    t_emb = {k: torch.tensor(v).unsqueeze(0) for k, v in emb.items()}
    # Inference only — no_grad skips autograd graph construction and its memory.
    with torch.no_grad():
        t_out = t_model(**t_emb)['last_hidden_state']
    # Fix: t_dict was declared but never filled; store each output so the
    # dictionary is actually usable downstream.
    t_dict[idx] = t_out
    # p_emb = {k: pp.to_tensor(v).unsqueeze(0) for k, v in emb.items()}
    # p_out = pp_model(**p_emb)['last_hidden_state']
    # print(pair)
    # print(t_out)
    # print(p_out)
    # print(np.allclose(t_out.detach().numpy(), p_out.numpy()))


