import torch
from torch.nn.utils.rnn import pad_sequence
from transformers import BertModel, BertTokenizer

from config import Config
from model import softMaskedBert, biGruDetector


def TestSingle(text):
    """Correct a single sentence with the Soft-Masked BERT pipeline.

    Args:
        text: raw input sentence (str); each character becomes one token,
            wrapped in [CLS]/[SEP].

    Returns:
        list[str]: the predicted token sequence (including [CLS]/[SEP]).
    """
    dev = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Pretrained BERT supplies the embedding layer and the helper that
    # expands a (batch, seq) mask into the additive attention mask.
    bert = BertModel.from_pretrained('bert')
    embedding = bert.embeddings.to(dev)
    tokenizer = BertTokenizer.from_pretrained("bert/vocab.txt")

    # Fully-serialized fine-tuned models (architecture + weights).
    # NOTE(review): torch.load unpickles arbitrary code — only load trusted
    # checkpoint files.  The previous version also rebuilt biGruDetector /
    # softMaskedBert from a state-dict checkpoint but never used them; that
    # dead path has been removed.
    corrector = torch.load('./checkpoints/model').to(dev)
    detector = torch.load('./checkpoints/detector_model').to(dev)
    # Inference mode: freeze dropout / batch-norm statistics.
    corrector.eval()
    detector.eval()

    token_ids = tokenizer.convert_tokens_to_ids(['[CLS]'] + list(text) + ['[SEP]'])
    input_ids = torch.tensor(token_ids, dtype=torch.long).unsqueeze(0).to(dev)
    # All positions are real tokens, so the attention mask is all ones.
    mask = torch.ones_like(input_ids, dtype=torch.float)

    # Pure inference: no autograd graph needed.
    with torch.no_grad():
        text_embedding = embedding(input_ids)
        # Per-position error probability from the detector GRU.
        prob = detector(text_embedding)
        out = corrector(
            text_embedding, prob,
            bert.get_extended_attention_mask(mask, input_ids.shape,
                                             input_ids.device))

    # argmax over the vocab dimension -> predicted token ids.  Zero out
    # masked positions, and cast to plain Python ints before the vocab
    # lookup — the original multiplied a long tensor by a float mask and
    # handed a float tensor to convert_ids_to_tokens.
    ids = torch.argmax(out, dim=-1).reshape(-1)
    ids = (ids * mask.reshape(-1).long()).tolist()
    return tokenizer.convert_ids_to_tokens(ids)


if __name__ == '__main__':
    # Read one sentence from stdin and print its corrected token sequence.
    print(TestSingle(input()))
