import torch
import torch.nn as nn
import numpy as np
import pickle
import numpy as np
import torch.utils.data as Data
from transformers import (
    AutoModel,
    AutoModelForCausalLM,
    AutoTokenizer,
    AutoConfig,
    BertForSequenceClassification,
)

if __name__ == "__main__":

    # Demo: encode a sentence with BERT, feed the hidden states to a
    # BERT-based causal decoder via cross-attention, then greedily decode
    # the decoder logits back to text.
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    encoder = AutoModel.from_pretrained("bert-base-uncased")
    # encoder=torch.load('model.pkl')

    # 'inputs' rather than 'input' — avoid shadowing the builtin.
    inputs = tokenizer("my dog is cute", add_special_tokens=True, return_tensors='pt')
    print(inputs)

    encode_out = encoder(
        input_ids=inputs['input_ids'],
        token_type_ids=inputs['token_type_ids'],
        attention_mask=inputs['attention_mask'],
        return_dict=True,
    )
    # With return_dict=True, use the named field instead of positional indexing.
    hidden_states = encode_out.last_hidden_state
    print(hidden_states, hidden_states.shape)

    decoder_input = tokenizer("I like it so much", add_special_tokens=True, return_tensors='pt')
    decoder = AutoModelForCausalLM.from_pretrained(
        "bert-base-uncased", add_cross_attention=True, is_decoder=True
    )
    decoder_output = decoder(
        input_ids=decoder_input['input_ids'],
        encoder_hidden_states=hidden_states,
        # Mask encoder padding positions in cross-attention; the original
        # omitted this, letting the decoder attend to encoder padding.
        encoder_attention_mask=inputs['attention_mask'],
        attention_mask=decoder_input['attention_mask'],
        return_dict=True,
    )
    print(decoder_output.logits.shape)  # (batch, seq_len, vocab_size); past_key_values also available

    # Greedy decode: argmax over the vocabulary axis. Derive the vocab size
    # from the logits themselves instead of hard-coding BERT's 30522, and use
    # torch.argmax directly instead of a torch -> numpy -> torch round-trip.
    logits = decoder_output.logits
    pred_flat = torch.argmax(logits.view(-1, logits.size(-1)), dim=-1)
    print(pred_flat.numpy())
    print(pred_flat)
    out = tokenizer.decode(pred_flat)
    print(out)












































